+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
+
+import com.qcloud.cos.auth.COSCredentialsProvider;
+import org.apache.hadoop.conf.Configuration;
+
+import javax.annotation.Nullable;
+import java.net.URI;
+
+/**
+ * The base class for COS credential providers which take a URI or
+ * configuration in their constructor.
+ */
+public abstract class AbstractCOSCredentialsProvider
+ implements COSCredentialsProvider {
+ private final URI uri;
+ private final Configuration conf;
+
+ public AbstractCOSCredentialsProvider(@Nullable URI uri,
+ Configuration conf) {
+ this.uri = uri;
+ this.conf = conf;
+ }
+
+ public URI getUri() {
+ return uri;
+ }
+
+ public Configuration getConf() {
+ return conf;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java
deleted file mode 100644
index e900b997e4858..0000000000000
--- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.cosn.auth;
-
-import java.io.Closeable;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import com.google.common.base.Preconditions;
-import com.qcloud.cos.auth.AnonymousCOSCredentials;
-import com.qcloud.cos.auth.COSCredentials;
-import com.qcloud.cos.auth.COSCredentialsProvider;
-import com.qcloud.cos.exception.CosClientException;
-import com.qcloud.cos.utils.StringUtils;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * a list of cos credentials provider.
- */
-public class COSCredentialProviderList implements
- COSCredentialsProvider, AutoCloseable {
- private static final Logger LOG =
- LoggerFactory.getLogger(COSCredentialProviderList.class);
-
- private static final String NO_COS_CREDENTIAL_PROVIDERS =
- "No COS Credential Providers";
- private static final String CREDENTIALS_REQUESTED_WHEN_CLOSED =
- "Credentials requested after provider list was closed";
-
- private final List<COSCredentialsProvider> providers =
- new ArrayList<>(1);
- private boolean reuseLastProvider = true;
- private COSCredentialsProvider lastProvider;
-
- private final AtomicInteger refCount = new AtomicInteger(1);
- private final AtomicBoolean isClosed = new AtomicBoolean(false);
-
- public COSCredentialProviderList() {
- }
-
- public COSCredentialProviderList(
- Collection<COSCredentialsProvider> providers) {
- this.providers.addAll(providers);
- }
-
- public void add(COSCredentialsProvider provider) {
- this.providers.add(provider);
- }
-
- public int getRefCount() {
- return this.refCount.get();
- }
-
- public void checkNotEmpty() {
- if (this.providers.isEmpty()) {
- throw new NoAuthWithCOSException(NO_COS_CREDENTIAL_PROVIDERS);
- }
- }
-
- public COSCredentialProviderList share() {
- Preconditions.checkState(!this.closed(), "Provider list is closed");
- this.refCount.incrementAndGet();
- return this;
- }
-
- public boolean closed() {
- return this.isClosed.get();
- }
-
- @Override
- public COSCredentials getCredentials() {
- if (this.closed()) {
- throw new NoAuthWithCOSException(CREDENTIALS_REQUESTED_WHEN_CLOSED);
- }
-
- this.checkNotEmpty();
-
- if (this.reuseLastProvider && this.lastProvider != null) {
- return this.lastProvider.getCredentials();
- }
-
- for (COSCredentialsProvider provider : this.providers) {
- try {
- COSCredentials credentials = provider.getCredentials();
- if (!StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId())
- && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey())
- || credentials instanceof AnonymousCOSCredentials) {
- this.lastProvider = provider;
- return credentials;
- }
- } catch (CosClientException e) {
- LOG.warn("No credentials provided by {}: {}", provider, e.toString());
- }
- }
-
- throw new NoAuthWithCOSException(
- "No COS Credentials provided by " + this.providers.toString());
- }
-
- @Override
- public void close() throws Exception {
- if (this.closed()) {
- return;
- }
-
- int remainder = this.refCount.decrementAndGet();
- if (remainder != 0) {
- return;
- }
- this.isClosed.set(true);
-
- for (COSCredentialsProvider provider : this.providers) {
- if (provider instanceof Closeable) {
- ((Closeable) provider).close();
- }
- }
- }
-
-}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java
new file mode 100644
index 0000000000000..e4c59a5a27611
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
+
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.base.Preconditions;
+import com.qcloud.cos.auth.AnonymousCOSCredentials;
+import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.auth.COSCredentialsProvider;
+import com.qcloud.cos.utils.StringUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A list of COS credentials providers.
+ */
+public class COSCredentialsProviderList implements
+ COSCredentialsProvider, AutoCloseable {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(COSCredentialsProviderList.class);
+
+ private static final String NO_COS_CREDENTIAL_PROVIDERS =
+ "No COS Credential Providers";
+ private static final String CREDENTIALS_REQUESTED_WHEN_CLOSED =
+ "Credentials requested after provider list was closed";
+
+ private final List<COSCredentialsProvider> providers =
+ new ArrayList<COSCredentialsProvider>(1);
+ private boolean reuseLastProvider = true;
+ private COSCredentialsProvider lastProvider;
+
+ private final AtomicInteger refCount = new AtomicInteger(1);
+ private final AtomicBoolean isClosed = new AtomicBoolean(false);
+
+ public COSCredentialsProviderList() {
+ }
+
+ public COSCredentialsProviderList(
+ Collection<COSCredentialsProvider> providers) {
+ this.providers.addAll(providers);
+ }
+
+ public void add(COSCredentialsProvider provider) {
+ this.providers.add(provider);
+ }
+
+ public int getRefCount() {
+ return this.refCount.get();
+ }
+
+ public void checkNotEmpty() {
+ if (this.providers.isEmpty()) {
+ throw new NoAuthWithCOSException(NO_COS_CREDENTIAL_PROVIDERS);
+ }
+ }
+
+ public COSCredentialsProviderList share() {
+ Preconditions.checkState(!this.closed(), "Provider list is closed");
+ this.refCount.incrementAndGet();
+ return this;
+ }
+
+ public boolean closed() {
+ return this.isClosed.get();
+ }
+
+ @Override
+ public COSCredentials getCredentials() {
+ if (this.closed()) {
+ throw new NoAuthWithCOSException(CREDENTIALS_REQUESTED_WHEN_CLOSED);
+ }
+
+ this.checkNotEmpty();
+
+ if (this.reuseLastProvider && this.lastProvider != null) {
+ return this.lastProvider.getCredentials();
+ }
+
+ for (COSCredentialsProvider provider : this.providers) {
+ COSCredentials credentials = provider.getCredentials();
+ if (null != credentials
+ && !StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId())
+ && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey())
+ || credentials instanceof AnonymousCOSCredentials) {
+ this.lastProvider = provider;
+ return credentials;
+ }
+ }
+
+ throw new NoAuthWithCOSException(
+ "No COS Credentials provided by " + this.providers.toString());
+ }
+
+ @Override
+ public void refresh() {
+ if (this.closed()) {
+ return;
+ }
+
+ for (COSCredentialsProvider cosCredentialsProvider : this.providers) {
+ cosCredentialsProvider.refresh();
+ }
+ }
+
+ @Override
+ public void close() throws Exception {
+ if (this.closed()) {
+ return;
+ }
+
+ int remainder = this.refCount.decrementAndGet();
+ if (remainder != 0) {
+ return;
+ }
+ this.isClosed.set(true);
+
+ for (COSCredentialsProvider provider : this.providers) {
+ if (provider instanceof Closeable) {
+ ((Closeable) provider).close();
+ }
+ }
+ }
+}
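For context, and not part of the patch itself, the sketch below shows how the renamed providers are meant to chain inside `COSCredentialsProviderList`: each provider now returns `null` (instead of throwing) when it cannot supply credentials, and the list falls through to the next entry. The class name `CosCredentialsChainSketch` and the bucket URI are illustrative assumptions.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.cosn.auth.COSCredentialsProviderList;
import org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialsProvider;
import org.apache.hadoop.fs.cosn.auth.SimpleCredentialsProvider;

import com.qcloud.cos.auth.COSCredentials;

public final class CosCredentialsChainSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI uri = URI.create("cosn://example-bucket-1250000000");

    // Same default order the documentation describes: the core-site.xml
    // provider first, then the environment variable provider.
    try (COSCredentialsProviderList providers = new COSCredentialsProviderList()) {
      providers.add(new SimpleCredentialsProvider(uri, conf));
      providers.add(new EnvironmentVariableCredentialsProvider(uri, conf));

      // Throws NoAuthWithCOSException if no provider yields usable credentials.
      COSCredentials credentials = providers.getCredentials();
      System.out.println("Resolved secret id: " + credentials.getCOSAccessKeyId());
    }
  }
}
```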
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java
deleted file mode 100644
index 0a7786b882f8b..0000000000000
--- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.cosn.auth;
-
-import com.qcloud.cos.auth.BasicCOSCredentials;
-import com.qcloud.cos.auth.COSCredentials;
-import com.qcloud.cos.auth.COSCredentialsProvider;
-import com.qcloud.cos.exception.CosClientException;
-import com.qcloud.cos.utils.StringUtils;
-
-import org.apache.hadoop.fs.cosn.Constants;
-
-/**
- * the provider obtaining the cos credentials from the environment variables.
- */
-public class EnvironmentVariableCredentialProvider
- implements COSCredentialsProvider {
- @Override
- public COSCredentials getCredentials() {
- String secretId = System.getenv(Constants.COSN_SECRET_ID_ENV);
- String secretKey = System.getenv(Constants.COSN_SECRET_KEY_ENV);
-
- secretId = StringUtils.trim(secretId);
- secretKey = StringUtils.trim(secretKey);
-
- if (!StringUtils.isNullOrEmpty(secretId)
- && !StringUtils.isNullOrEmpty(secretKey)) {
- return new BasicCOSCredentials(secretId, secretKey);
- } else {
- throw new CosClientException(
- "Unable to load COS credentials from environment variables" +
- "(COS_SECRET_ID or COS_SECRET_KEY)");
- }
- }
-
- @Override
- public String toString() {
- return "EnvironmentVariableCredentialProvider{}";
- }
-}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java
new file mode 100644
index 0000000000000..baa76908b6147
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
+
+import com.qcloud.cos.auth.BasicCOSCredentials;
+import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.auth.COSCredentialsProvider;
+import com.qcloud.cos.utils.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.cosn.Constants;
+
+import javax.annotation.Nullable;
+import java.net.URI;
+
+/**
+ * The provider that obtains the COS credentials from environment variables.
+ */
+public class EnvironmentVariableCredentialsProvider
+ extends AbstractCOSCredentialsProvider implements COSCredentialsProvider {
+
+ public EnvironmentVariableCredentialsProvider(@Nullable URI uri,
+ Configuration conf) {
+ super(uri, conf);
+ }
+
+ @Override
+ public COSCredentials getCredentials() {
+ String secretId = System.getenv(Constants.COSN_SECRET_ID_ENV);
+ String secretKey = System.getenv(Constants.COSN_SECRET_KEY_ENV);
+
+ secretId = StringUtils.trim(secretId);
+ secretKey = StringUtils.trim(secretKey);
+
+ if (!StringUtils.isNullOrEmpty(secretId)
+ && !StringUtils.isNullOrEmpty(secretKey)) {
+ return new BasicCOSCredentials(secretId, secretKey);
+ }
+
+ return null;
+ }
+
+ @Override
+ public void refresh() {
+ }
+
+ @Override
+ public String toString() {
+ return String.format("EnvironmentVariableCredentialsProvider{%s, %s}",
+ Constants.COSN_SECRET_ID_ENV,
+ Constants.COSN_SECRET_KEY_ENV);
+ }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java
deleted file mode 100644
index f0635fc0d00cf..0000000000000
--- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.cosn.auth;
-
-import com.qcloud.cos.auth.BasicCOSCredentials;
-import com.qcloud.cos.auth.COSCredentials;
-import com.qcloud.cos.auth.COSCredentialsProvider;
-import com.qcloud.cos.exception.CosClientException;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.cosn.CosNConfigKeys;
-
-/**
- * Get the credentials from the hadoop configuration.
- */
-public class SimpleCredentialProvider implements COSCredentialsProvider {
- private String secretId;
- private String secretKey;
-
- public SimpleCredentialProvider(Configuration conf) {
- this.secretId = conf.get(
- CosNConfigKeys.COSN_SECRET_ID_KEY
- );
- this.secretKey = conf.get(
- CosNConfigKeys.COSN_SECRET_KEY_KEY
- );
- }
-
- @Override
- public COSCredentials getCredentials() {
- if (!StringUtils.isEmpty(this.secretId)
- && !StringUtils.isEmpty(this.secretKey)) {
- return new BasicCOSCredentials(this.secretId, this.secretKey);
- }
- throw new CosClientException("secret id or secret key is unset");
- }
-
-}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java
new file mode 100644
index 0000000000000..107574a87c3aa
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
+
+import com.qcloud.cos.auth.BasicCOSCredentials;
+import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.auth.COSCredentialsProvider;
+import com.qcloud.cos.utils.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.cosn.CosNConfigKeys;
+
+import javax.annotation.Nullable;
+import java.net.URI;
+
+/**
+ * Get the credentials from the Hadoop configuration.
+ */
+public class SimpleCredentialsProvider
+ extends AbstractCOSCredentialsProvider implements COSCredentialsProvider {
+ private String secretId;
+ private String secretKey;
+
+ public SimpleCredentialsProvider(@Nullable URI uri, Configuration conf) {
+ super(uri, conf);
+ if (null != conf) {
+ this.secretId = conf.get(
+ CosNConfigKeys.COSN_SECRET_ID_KEY);
+ this.secretKey = conf.get(
+ CosNConfigKeys.COSN_SECRET_KEY_KEY);
+ }
+ }
+
+ @Override
+ public COSCredentials getCredentials() {
+ if (!StringUtils.isNullOrEmpty(this.secretId)
+ && !StringUtils.isNullOrEmpty(this.secretKey)) {
+ return new BasicCOSCredentials(this.secretId, this.secretKey);
+ }
+ return null;
+ }
+
+ @Override
+ public void refresh() {
+ }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md b/hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md
similarity index 93%
rename from hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md
rename to hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md
index 7049b3f0f013f..9c96ac3659815 100644
--- a/hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md
@@ -130,20 +130,19 @@ Each user needs to properly configure the credentials ( User's secreteId and sec
```xml
<property>
<name>fs.cosn.credentials.provider</name>
- <value>org.apache.hadoop.fs.auth.SimpleCredentialProvider</value>
+ <value>org.apache.hadoop.fs.auth.SimpleCredentialsProvider</value>
<description>
This option allows the user to specify how to get the credentials.
Comma-separated class names of credential provider classes which implement
com.qcloud.cos.auth.COSCredentialsProvider:
- 1.org.apache.hadoop.fs.auth.SimpleCredentialProvider: Obtain the secret id and secret key
- from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml
- 2.org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from system environment variables named COS_SECRET_ID and COS_SECRET_KEY
+ 1.org.apache.hadoop.fs.auth.SimpleCredentialsProvider: Obtain the secret id and secret key from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml
+ 2.org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider: Obtain the secret id and secret key from system environment variables named COS_SECRET_ID and COS_SECRET_KEY
If unspecified, the default order of credential providers is:
- 1. org.apache.hadoop.fs.auth.SimpleCredentialProvider
- 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider
+ 1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider
+ 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider
</description>
</property>
@@ -237,7 +236,7 @@ Hadoop-COS provides rich runtime properties to set, and most of these do not req
| properties | description | default value | required |
|:----------:|:-----------|:-------------:|:--------:|
| fs.defaultFS | Configure the default file system used by Hadoop.| None | NO |
-| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider: 1. org.apache.hadoop.fs.cos.auth.SimpleCredentialProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml; 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`. If unspecified, the default order of credential providers is: 1. org.apache.hadoop.fs.auth.SimpleCredentialProvider; 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider. | None | NO |
+| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider: 1. org.apache.hadoop.fs.cos.auth.SimpleCredentialsProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml; 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider: Obtain the secret id and secret key from system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`. If unspecified, the default order of credential providers is: 1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider; 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider. | None | NO |
| fs.cosn.userinfo.secretId/secretKey | The API key information of your account | None | YES |
| fs.cosn.bucket.region | The region where the bucket is located. | None | YES |
| fs.cosn.impl | The implementation class of the CosN filesystem. | None | YES |
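Purely as an illustration of the keys documented above (not part of the patch), here is a minimal sketch that sets them programmatically; every value is a placeholder, and the provider's package follows the `org.apache.hadoop.fs.cosn.auth` classes added by this change rather than the `org.apache.hadoop.fs.auth` names quoted in the table.

```java
import org.apache.hadoop.conf.Configuration;

public final class CosNConfSketch {
  // Returns a Configuration carrying the documented CosN keys; values are placeholders.
  public static Configuration cosnConf() {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "cosn://example-bucket-1250000000");
    conf.set("fs.cosn.userinfo.secretId", "exampleSecretId");
    conf.set("fs.cosn.userinfo.secretKey", "exampleSecretKey");
    conf.set("fs.cosn.bucket.region", "example-region");
    conf.set("fs.cosn.credentials.provider",
        "org.apache.hadoop.fs.cosn.auth.SimpleCredentialsProvider");
    return conf;
  }
}
```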
diff --git a/hadoop-cloud-storage-project/hadoop-cos/site/resources/css/site.css b/hadoop-cloud-storage-project/hadoop-cos/src/site/resources/css/site.css
similarity index 100%
rename from hadoop-cloud-storage-project/hadoop-cos/site/resources/css/site.css
rename to hadoop-cloud-storage-project/hadoop-cos/src/site/resources/css/site.css
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java
new file mode 100644
index 0000000000000..8b74f3639ddbd
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.auth.COSCredentialsProvider;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+public class TestCosCredentials {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestCosCredentials.class);
+
+ private final URI fsUri;
+
+ private final String testCosNSecretId = "secretId";
+ private final String testCosNSecretKey = "secretKey";
+ private final String testCosNEnvSecretId = "env_secretId";
+ private final String testCosNEnvSecretKey = "env_secretKey";
+
+ public TestCosCredentials() throws URISyntaxException {
+ // A fake uri for tests.
+ this.fsUri = new URI("cosn://test-bucket-1250000000");
+ }
+
+ @Test
+ public void testSimpleCredentialsProvider() throws Throwable {
+ Configuration configuration = new Configuration();
+ configuration.set(CosNConfigKeys.COSN_SECRET_ID_KEY,
+ testCosNSecretId);
+ configuration.set(CosNConfigKeys.COSN_SECRET_KEY_KEY,
+ testCosNSecretKey);
+ validateCredentials(this.fsUri, configuration);
+ }
+
+ @Test
+ public void testEnvironmentCredentialsProvider() throws Throwable {
+ Configuration configuration = new Configuration();
+ // Set EnvironmentVariableCredentialsProvider as the CosCredentials
+ // Provider.
+ configuration.set(CosNConfigKeys.COSN_CREDENTIALS_PROVIDER,
+ "org.apache.hadoop.fs.cosn.EnvironmentVariableCredentialsProvider");
+ // Set the environment variables storing the secret id and secret key.
+ System.setProperty(Constants.COSN_SECRET_ID_ENV, testCosNEnvSecretId);
+ System.setProperty(Constants.COSN_SECRET_KEY_ENV, testCosNEnvSecretKey);
+ validateCredentials(this.fsUri, configuration);
+ }
+
+ private void validateCredentials(URI uri, Configuration configuration)
+ throws IOException {
+ if (null != configuration) {
+ COSCredentialsProvider credentialsProvider =
+ CosNUtils.createCosCredentialsProviderSet(uri, configuration);
+ COSCredentials cosCredentials = credentialsProvider.getCredentials();
+ assertNotNull("The cos credentials obtained is null.", cosCredentials);
+ if (configuration.get(
+ CosNConfigKeys.COSN_CREDENTIALS_PROVIDER).compareToIgnoreCase(
+ "org.apache.hadoop.fs.cosn.EnvironmentVariableCredentialsProvider")
+ == 0) {
+ if (null == cosCredentials.getCOSAccessKeyId()
+ || cosCredentials.getCOSAccessKeyId().isEmpty()
+ || null == cosCredentials.getCOSSecretKey()
+ || cosCredentials.getCOSSecretKey().isEmpty()) {
+ String failMessage = String.format(
+ "Test EnvironmentVariableCredentialsProvider failed. The " +
+ "expected is [secretId: %s, secretKey: %s], but got null or" +
+ " empty.", testCosNEnvSecretId, testCosNEnvSecretKey);
+ fail(failMessage);
+ }
+
+ if (cosCredentials.getCOSAccessKeyId()
+ .compareTo(testCosNEnvSecretId) != 0
+ || cosCredentials.getCOSSecretKey()
+ .compareTo(testCosNEnvSecretKey) != 0) {
+ String failMessage = String.format("Test " +
+ "EnvironmentVariableCredentialsProvider failed. " +
+ "The expected is [secretId: %s, secretKey: %s], but got is " +
+ "[secretId:%s, secretKey:%s].", testCosNEnvSecretId,
+ testCosNEnvSecretKey, cosCredentials.getCOSAccessKeyId(),
+ cosCredentials.getCOSSecretKey());
+ fail(failMessage);
+ }
+ // expected
+ } else {
+ if (null == cosCredentials.getCOSAccessKeyId()
+ || cosCredentials.getCOSAccessKeyId().isEmpty()
+ || null == cosCredentials.getCOSSecretKey()
+ || cosCredentials.getCOSSecretKey().isEmpty()) {
+ String failMessage = String.format(
+ "Test COSCredentials failed. The " +
+ "expected is [secretId: %s, secretKey: %s], but got null or" +
+ " empty.", testCosNSecretId, testCosNSecretKey);
+ fail(failMessage);
+ }
+ if (cosCredentials.getCOSAccessKeyId()
+ .compareTo(testCosNSecretId) != 0
+ || cosCredentials.getCOSSecretKey()
+ .compareTo(testCosNSecretKey) != 0) {
+ String failMessage = String.format("Test " +
+ "EnvironmentVariableCredentialsProvider failed. " +
+ "The expected is [secretId: %s, secretKey: %s], but got is " +
+ "[secretId:%s, secretKey:%s].", testCosNSecretId,
+ testCosNSecretKey, cosCredentials.getCOSAccessKeyId(),
+ cosCredentials.getCOSSecretKey());
+ fail(failMessage);
+ }
+ // expected
+ }
+ }
+ }
+}
diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml
index f39e8c3aaf9f8..da0d88a8117b8 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -20,11 +20,11 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-cloud-storage-project</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<name>Apache Hadoop Cloud Storage Project</name>
<description>Apache Hadoop Cloud Storage Project</description>
<packaging>pom</packaging>
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index 738f0ada8f1e9..a262d55b0426c 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -20,11 +20,11 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-annotations</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<name>Apache Hadoop Annotations</name>
<description>Apache Hadoop Annotations</description>
<packaging>jar</packaging>
diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index fb904912999b8..4deda432797e0 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -20,11 +20,11 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-auth-examples</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<packaging>war</packaging>
<name>Apache Hadoop Auth Examples</name>
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 20a3e7059b154..4ff3bc14927fe 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -20,11 +20,11 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-auth</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<packaging>jar</packaging>
<name>Apache Hadoop Auth</name>
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index 1093d8a2539eb..488400647cf06 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -99,7 +99,10 @@ public void put(URI uri, Map<String, List<String>> responseHeaders) {
cookies = HttpCookie.parse(header);
} catch (IllegalArgumentException iae) {
// don't care. just skip malformed cookie headers.
- LOG.debug("Cannot parse cookie header: " + header, iae);
+ // When header is empty - "Cannot parse cookie header, header = ,
+ // reason = Empty cookie header string"
+ LOG.debug("Cannot parse cookie header, header = {}, reason = {} ",
+ header, iae.getMessage());
continue;
}
for (HttpCookie cookie : cookies) {
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index 802197e33cbcd..cf5c3874d1063 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -460,4 +460,10 @@
+
+
+
+
+
+
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 84d3ae5b5addc..dd058812fc774 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -20,11 +20,11 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath>
</parent>
<artifactId>hadoop-common</artifactId>
- <version>3.3.0-SNAPSHOT</version>
+ <version>3.4.0-SNAPSHOT</version>
<name>Apache Hadoop Common</name>
<description>Apache Hadoop Common</description>
<packaging>jar</packaging>
@@ -39,6 +39,10 @@
+
+ org.apache.hadoop.thirdparty
+ hadoop-shaded-protobuf_3_7
+ org.apache.hadoophadoop-annotations
@@ -90,8 +94,8 @@
compile
- javax.activation
- javax.activation-api
+ jakarta.activation
+ jakarta.activation-apiruntime
@@ -272,6 +276,11 @@
sshd-coretest
+
+ org.apache.ftpserver
+ ftpserver-core
+ test
+ org.apache.htrace
@@ -346,12 +355,12 @@
org.wildfly.opensslwildfly-openssl
- provided
+ test
- org.assertj
- assertj-core
- test
+ org.wildfly.openssl
+ wildfly-openssl-java
+ provided
@@ -394,6 +403,36 @@
+
+ com.google.code.maven-replacer-plugin
+ replacer
+
+
+ replace-generated-sources
+
+ false
+
+
+
+ replace-generated-test-sources
+
+ false
+
+
+
+ replace-sources
+
+ false
+
+
+
+ replace-test-sources
+
+ false
+
+
+
+ org.apache.hadoophadoop-maven-plugins
@@ -842,9 +881,13 @@
parallel-tests-createdir
+ process-test-resourcesparallel-tests-createdir
+
+ ${test.build.data}
+
@@ -852,6 +895,7 @@
org.apache.maven.pluginsmaven-surefire-plugin
+ ${ignoreTestFailure}${testsThreadCount}false${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 484fe2302f9ba..4be554aef6c25 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -213,7 +213,7 @@ function hadoop_privilege_check
[[ "${EUID}" = 0 ]]
}
-## @description Execute a command via su when running as root
+## @description Execute a command via sudo when running as root
## @description if the given user is found or exit with
## @description failure if not.
## @description otherwise just run it. (This is intended to
@@ -224,14 +224,14 @@ function hadoop_privilege_check
## @param user
## @param commandstring
## @return exitstatus
-function hadoop_su
+function hadoop_sudo
{
declare user=$1
shift
if hadoop_privilege_check; then
if hadoop_verify_user_resolves user; then
- su -l "${user}" -- "$@"
+ sudo -u "${user}" -- "$@"
else
hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
return 1
@@ -241,7 +241,7 @@ function hadoop_su
fi
}
-## @description Execute a command via su when running as root
+## @description Execute a command via sudo when running as root
## @description with extra support for commands that might
## @description legitimately start as root (e.g., datanode)
## @description (This is intended to
@@ -259,7 +259,7 @@ function hadoop_uservar_su
#
# if $EUID != 0, then exec
# if $EUID =0 then
- # if hdfs_subcmd_user is defined, call hadoop_su to exec
+ # if hdfs_subcmd_user is defined, call hadoop_sudo to exec
# if hdfs_subcmd_user is not defined, error
#
# For secure daemons, this means both the secure and insecure env vars need to be
@@ -283,7 +283,7 @@ function hadoop_uservar_su
svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)
if [[ -n "${!uvar}" ]]; then
- hadoop_su "${!uvar}" "$@"
+ hadoop_sudo "${!uvar}" "$@"
elif [[ -n "${!svar}" ]]; then
## if we are here, then SECURE_USER with no USER defined
## we are already privileged, so just run the command and hope
@@ -596,11 +596,6 @@ function hadoop_bootstrap
YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
- HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"}
- HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
- OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
- OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
- OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"}
HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
@@ -1342,7 +1337,7 @@ function hadoop_add_to_classpath_tools
# shellcheck disable=SC1090
. "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh"
else
- hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found."
+ hadoop_debug "Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found."
fi
if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then
@@ -1921,6 +1916,22 @@ function hadoop_start_secure_daemon
exit 1
fi
+ if [[ -z "${HADOOP_DAEMON_JSVC_EXTRA_OPTS}" ]]; then
+ # If HADOOP_DAEMON_JSVC_EXTRA_OPTS is not set
+ if ${jsvc} -help | grep -q "\-cwd"; then
+ # Check if jsvc -help has entry for option -cwd
+ hadoop_debug "Your jsvc supports -cwd option." \
+ "Adding option '-cwd .'. See HADOOP-16276 for details."
+ HADOOP_DAEMON_JSVC_EXTRA_OPTS="-cwd ."
+ else
+ hadoop_debug "Your jsvc doesn't support -cwd option." \
+ "No need to add option '-cwd .'. See HADOOP-16276 for details."
+ fi
+ else
+ hadoop_debug "HADOOP_DAEMON_JSVC_EXTRA_OPTS is set." \
+ "Ignoring jsvc -cwd option detection and addition."
+ fi
+
# note that shellcheck will throw a
# bogus for-our-use-case 2086 here.
# it doesn't properly support multi-line situations
@@ -2035,7 +2046,8 @@ function hadoop_start_secure_daemon_wrapper
hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
fi
# capture the ulimit output
- su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1
+ #shellcheck disable=SC2024
+ sudo -u "${HADOOP_SECURE_USER}" bash -c "ulimit -a" >> "${jsvcoutfile}" 2>&1
#shellcheck disable=SC2086
if ! ps -p $! >/dev/null 2>&1; then
return 1
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index e43cd95b047ee..f4625f5999b1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -390,15 +390,6 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
#
# export HDFS_DFSROUTER_OPTS=""
-###
-# Ozone Manager specific parameters
-###
-# Specify the JVM options to be used when starting the Ozone Manager.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_OM_OPTS=""
-
###
# HDFS StorageContainerManager specific parameters
###
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 7f9ea462679b3..52d2c1ff038e6 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -282,13 +282,6 @@ log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex}
#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
#log4j.appender.nodemanagerrequestlog.RetainDays=3
-#Http Server request logs for Ozone S3Gateway
-log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog
-log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-log4j.appender.s3gatewayrequestlog.Filename=${hadoop.log.dir}/jetty-s3gateway-yyyy_mm_dd.log
-log4j.appender.s3gatewayrequestlog.RetainDays=3
-
-
# WebHdfs request log on datanodes
# Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to
# direct the log to a separate file.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 180bde26574ca..9751a9b66945c 100755
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -51,7 +51,6 @@
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
-import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
@@ -1691,7 +1690,11 @@ public boolean getBoolean(String name, boolean defaultValue) {
return true;
else if (StringUtils.equalsIgnoreCase("false", valueString))
return false;
- else return defaultValue;
+ else {
+ LOG.warn("Invalid value for boolean: " + valueString +
+ ", choose default value: " + defaultValue + " for " + name);
+ return defaultValue;
+ }
}
/**
@@ -3350,6 +3353,7 @@ void parseNext() throws IOException, XMLStreamException {
handleStartElement();
break;
case XMLStreamConstants.CHARACTERS:
+ case XMLStreamConstants.CDATA:
if (parseToken) {
char[] text = reader.getTextCharacters();
token.append(text, reader.getTextStart(), reader.getTextLength());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 0453ca14537c3..1df68b647c99a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -31,7 +31,6 @@
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
-import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
@@ -45,6 +44,7 @@
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -1355,22 +1355,20 @@ public boolean equals(Object other) {
* setting up the expectation that the {@code get()} call
* is needed to evaluate the result.
* @param path path to the file
- * @param mandatoryKeys set of options declared as mandatory.
- * @param options options set during the build sequence.
- * @param bufferSize buffer size
+ * @param parameters open file parameters from the builder.
* @return a future which will evaluate to the opened file.
* @throws IOException failure to resolve the link.
* @throws IllegalArgumentException unknown mandatory key
*/
public CompletableFuture<FSDataInputStream> openFileWithOptions(Path path,
- Set<String> mandatoryKeys,
- Configuration options,
- int bufferSize) throws IOException {
- AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys,
+ final OpenFileParameters parameters) throws IOException {
+ AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(
+ parameters.getMandatoryKeys(),
Collections.emptySet(),
"for " + path);
return LambdaUtils.eval(
- new CompletableFuture<>(), () -> open(path, bufferSize));
+ new CompletableFuture<>(), () ->
+ open(path, parameters.getBufferSize()));
}
public boolean hasPathCapability(final Path path,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchListingOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchListingOperations.java
new file mode 100644
index 0000000000000..f72b1e288eb49
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchListingOperations.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Interface filesystems MAY implement to offer a batched list.
+ * If implemented, filesystems SHOULD declare
+ * {@link CommonPathCapabilities#FS_EXPERIMENTAL_BATCH_LISTING} to be a supported
+ * path capability.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface BatchListingOperations {
+
+ /**
+ * Batched listing API that returns {@link PartialListing}s for the
+ * passed Paths.
+ *
+ * @param paths List of paths to list.
+ * @return RemoteIterator that returns corresponding PartialListings.
+ * @throws IOException failure
+ */
+ RemoteIterator<PartialListing<FileStatus>> batchedListStatusIterator(
+ List<Path> paths) throws IOException;
+
+ /**
+ * Batched listing API that returns {@link PartialListing}s for the passed
+ * Paths. The PartialListing will contain {@link LocatedFileStatus} entries
+ * with locations.
+ *
+ * @param paths List of paths to list.
+ * @return RemoteIterator that returns corresponding PartialListings.
+ * @throws IOException failure
+ */
+ RemoteIterator<PartialListing<LocatedFileStatus>>
+ batchedListLocatedStatusIterator(
+ List<Path> paths) throws IOException;
+
+}
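For orientation (not part of the patch), here is a sketch of how a caller might use the new interface; it assumes a `FileSystem` instance that also implements `BatchListingOperations` and advertises the capability, and it deliberately avoids `PartialListing` accessors that this hunk does not show.

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.BatchListingOperations;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.PartialListing;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public final class BatchListingSketch {
  // Lists several directories in one batched call when the filesystem
  // declares the experimental capability; otherwise does nothing.
  static void listBatched(FileSystem fs, List<Path> dirs) throws IOException {
    if (fs instanceof BatchListingOperations
        && fs.hasPathCapability(dirs.get(0),
            CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING)) {
      RemoteIterator<PartialListing<FileStatus>> listings =
          ((BatchListingOperations) fs).batchedListStatusIterator(dirs);
      while (listings.hasNext()) {
        PartialListing<FileStatus> partial = listings.next();
        // Each PartialListing corresponds to one of the requested paths;
        // its accessors are not shown in this hunk, so none are called here.
      }
    }
  }
}
```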
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
index 92476d77ddb44..58dc82d2efb2d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
@@ -47,6 +47,7 @@ public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed {
private final long jitter;
private final String dirPath;
private Thread refreshUsed;
+ private boolean shouldFirstRefresh;
/**
* This is the constructor used by the builder.
@@ -79,16 +80,30 @@ public CachingGetSpaceUsed(CachingGetSpaceUsed.Builder builder)
this.refreshInterval = interval;
this.jitter = jitter;
this.used.set(initialUsed);
+ this.shouldFirstRefresh = true;
}
void init() {
if (used.get() < 0) {
used.set(0);
+ if (!shouldFirstRefresh) {
+ // The initial refresh was skipped, so let the refresh thread perform
+ // the first refresh immediately.
+ initRefreshThread(true);
+ return;
+ }
refresh();
}
+ initRefreshThread(false);
+ }
+ /**
+ * Start the background refresh thread.
+ * @param runImmediately If true, the refresh thread performs the first
+ * refresh immediately (used when the initial refresh was skipped).
+ */
+ private void initRefreshThread(boolean runImmediately) {
if (refreshInterval > 0) {
- refreshUsed = new Thread(new RefreshThread(this),
+ refreshUsed = new Thread(new RefreshThread(this, runImmediately),
"refreshUsed-" + dirPath);
refreshUsed.setDaemon(true);
refreshUsed.start();
@@ -100,6 +115,14 @@ void init() {
protected abstract void refresh();
+ /**
+ * Set whether the initial refresh should be performed.
+ * @param shouldFirstRefresh The flag value to set.
+ */
+ protected void setShouldFirstRefresh(boolean shouldFirstRefresh) {
+ this.shouldFirstRefresh = shouldFirstRefresh;
+ }
+
/**
* @return an estimate of space used in the directory path.
*/
@@ -156,9 +179,11 @@ public void close() throws IOException {
private static final class RefreshThread implements Runnable {
final CachingGetSpaceUsed spaceUsed;
+ private boolean runImmediately;
- RefreshThread(CachingGetSpaceUsed spaceUsed) {
+ RefreshThread(CachingGetSpaceUsed spaceUsed, boolean runImmediately) {
this.spaceUsed = spaceUsed;
+ this.runImmediately = runImmediately;
}
@Override
@@ -176,7 +201,10 @@ public void run() {
}
// Make sure that after the jitter we didn't end up at 0.
refreshInterval = Math.max(refreshInterval, 1);
- Thread.sleep(refreshInterval);
+ if (!runImmediately) {
+ Thread.sleep(refreshInterval);
+ }
+ runImmediately = false;
// update the used variable
spaceUsed.refresh();
} catch (InterruptedException e) {
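A sketch (not part of the patch) of how a subclass could use the new hook: calling `setShouldFirstRefresh(false)` makes `init()` skip the blocking first scan and hands it to the refresh thread via `runImmediately`. The subclass name and its empty accounting are hypothetical.

```java
import java.io.IOException;

import org.apache.hadoop.fs.CachingGetSpaceUsed;

public class LazyFirstRefreshSpaceUsed extends CachingGetSpaceUsed {

  public LazyFirstRefreshSpaceUsed(CachingGetSpaceUsed.Builder builder)
      throws IOException {
    super(builder);
    // Defer the potentially slow first refresh to the background thread.
    setShouldFirstRefresh(false);
  }

  @Override
  protected void refresh() {
    // A real implementation would recompute the space used under the
    // directory here; the accounting details are omitted from this sketch.
  }
}
```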
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 5e5d29a28bfce..cc9c284c9fa55 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -27,8 +27,6 @@
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
-import java.util.Locale;
-import java.util.Set;
import java.util.concurrent.CompletableFuture;
import com.google.common.base.Preconditions;
@@ -37,6 +35,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl;
import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
@@ -845,14 +844,14 @@ public FutureDataInputStreamBuilder openFile(final Path path)
@Override
protected CompletableFuture<FSDataInputStream> openFileWithOptions(
final Path path,
- final Set<String> mandatoryKeys,
- final Configuration options,
- final int bufferSize) throws IOException {
- AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys,
+ final OpenFileParameters parameters) throws IOException {
+ AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(
+ parameters.getMandatoryKeys(),
Collections.emptySet(),
"for " + path);
return LambdaUtils.eval(
- new CompletableFuture<>(), () -> open(path, bufferSize));
+ new CompletableFuture<>(),
+ () -> open(path, parameters.getBufferSize()));
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
index bbbf073cc241c..908d67723e7be 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ClusterStorageCapacityExceededException.java
@@ -27,7 +27,7 @@
* cluster filesystem is exceeded. See also
* https://issues.apache.org/jira/browse/MAPREDUCE-7148.
*/
-@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
+@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce", "Tez" })
@InterfaceStability.Evolving
public class ClusterStorageCapacityExceededException extends IOException {
private static final long serialVersionUID = 1L;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 8c09db1284cff..c08af395ad2f9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -114,6 +114,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
"callqueue.overflow.trigger.failover";
public static final boolean IPC_CALLQUEUE_SERVER_FAILOVER_ENABLE_DEFAULT =
false;
+ /** Callqueue subqueue capacity weights. */
+ public static final String IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY =
+ "callqueue.capacity.weights";
/**
* IPC scheduler priority levels.
@@ -426,4 +429,13 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
"dfs.client.ignore.namenode.default.kms.uri";
public static final boolean
DFS_CLIENT_IGNORE_NAMENODE_DEFAULT_KMS_URI_DEFAULT = false;
+
+ /**
+ * Whether or not ThreadMXBean is used for getting thread info in JvmMetrics.
+ * The ThreadGroup approach is preferred for better performance.
+ */
+ public static final String HADOOP_METRICS_JVM_USE_THREAD_MXBEAN =
+ "hadoop.metrics.jvm.use-thread-mxbean";
+ public static final boolean HADOOP_METRICS_JVM_USE_THREAD_MXBEAN_DEFAULT =
+ false;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index a68012b06d2bc..ce132f9a37891 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -988,5 +988,14 @@ public class CommonConfigurationKeysPublic {
public static final String HADOOP_PROMETHEUS_ENABLED =
"hadoop.prometheus.endpoint.enabled";
public static final boolean HADOOP_PROMETHEUS_ENABLED_DEFAULT = false;
+
+ /**
+ * @see
+ * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ * core-default.xml</a>
+ */
+ public static final String HADOOP_HTTP_IDLE_TIMEOUT_MS_KEY =
+ "hadoop.http.idle_timeout.ms";
+ public static final int HADOOP_HTTP_IDLE_TIMEOUT_MS_DEFAULT = 1000;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
index 31e6bac0ccee5..fb46ef81e36fa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.fs;
+import org.apache.hadoop.classification.InterfaceStability;
+
/**
* Common path capabilities.
*/
@@ -123,4 +125,10 @@ private CommonPathCapabilities() {
*/
public static final String FS_XATTRS = "fs.capability.paths.xattrs";
+ /**
+ * Probe for support for {@link BatchListingOperations}.
+ */
+ @InterfaceStability.Unstable
+ public static final String FS_EXPERIMENTAL_BATCH_LISTING =
+ "fs.capability.batch.listing";
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index cdbd10f636dd3..20e205a8b32cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -281,6 +281,21 @@ public int hashCode() {
private static final String ALL_HEADER = QUOTA_HEADER + SUMMARY_HEADER;
+ /**
+ * Output format:<-------18-------> <----------24---------->
+ * <----------24---------->. <-------------28------------> SNAPSHOT_LENGTH
+ * SNAPSHOT_FILE_COUNT SNAPSHOT_DIR_COUNT SNAPSHOT_SPACE_CONSUMED
+ */
+ private static final String SNAPSHOT_FORMAT = "%18s %24s %24s %28s ";
+
+ private static final String[] SNAPSHOT_HEADER_FIELDS =
+ new String[] {"SNAPSHOT_LENGTH", "SNAPSHOT_FILE_COUNT",
+ "SNAPSHOT_DIR_COUNT", "SNAPSHOT_SPACE_CONSUMED"};
+
+ /** The header string. */
+ private static final String SNAPSHOT_HEADER =
+ String.format(SNAPSHOT_FORMAT, (Object[]) SNAPSHOT_HEADER_FIELDS);
+
/** Return the header of the output.
* if qOption is false, output directory count, file count, and content size;
@@ -293,7 +308,9 @@ public static String getHeader(boolean qOption) {
return qOption ? ALL_HEADER : SUMMARY_HEADER;
}
-
+ public static String getSnapshotHeader() {
+ return SNAPSHOT_HEADER;
+ }
/**
* Returns the names of the fields from the summary header.
@@ -416,7 +433,7 @@ public String toString(boolean qOption, boolean hOption, boolean tOption,
}
/**
- * Formats a size to be human readable or in bytes
+ * Formats a size to be human readable or in bytes.
* @param size value to be formatted
* @param humanReadable flag indicating human readable or not
* @return String representation of the size
@@ -426,4 +443,17 @@ private String formatSize(long size, boolean humanReadable) {
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}
+
+ /**
+ * Return the string representation of the snapshot counts in the output
+ * format.
+ * @param hOption flag indicating human readable or not
+ * @return String representation of the snapshot counts
+ */
+ public String toSnapshot(boolean hOption) {
+ return String.format(SNAPSHOT_FORMAT, formatSize(snapshotLength, hOption),
+ formatSize(snapshotFileCount, hOption),
+ formatSize(snapshotDirectoryCount, hOption),
+ formatSize(snapshotSpaceConsumed, hOption));
+ }
}
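A short usage sketch of the new snapshot accessors, assuming a filesystem whose getContentSummary() populates the snapshot counters (other filesystems will simply report zeros):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotCountSketch {
  public static void main(String[] args) throws Exception {
    Path path = new Path(args[0]);
    FileSystem fs = path.getFileSystem(new Configuration());
    ContentSummary summary = fs.getContentSummary(path);
    // Header row followed by one data row; the column widths come from
    // SNAPSHOT_FORMAT (18, 24, 24 and 28 characters).
    System.out.println(ContentSummary.getSnapshotHeader() + "PATHNAME");
    System.out.println(summary.toSnapshot(false) + path);
  }
}
```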
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 58b5f704bb831..71993713ad2eb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -123,7 +123,13 @@ public enum CreateFlag {
* locality. The first block replica should be placed randomly within the
* cluster. Subsequent block replicas should follow DataNode locality rules.
*/
- IGNORE_CLIENT_LOCALITY((short) 0x100);
+ IGNORE_CLIENT_LOCALITY((short) 0x100),
+
+ /**
+ * Advise that a block replica NOT be written to the local rack DataNode where
+ * 'local' means the same rack as the client is being run on.
+ */
+ NO_LOCAL_RACK((short) 0x120);
private final short mode;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index a8f294f379158..3a139781e0372 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
@@ -24,13 +24,13 @@
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
-import java.util.Set;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
@@ -266,20 +266,17 @@ public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
/**
* Open a file by delegating to
- * {@link FileSystem#openFileWithOptions(Path, Set, Configuration, int)}.
+ * {@link FileSystem#openFileWithOptions(Path, org.apache.hadoop.fs.impl.OpenFileParameters)}.
* @param path path to the file
- * @param mandatoryKeys set of options declared as mandatory.
- * @param options options set during the build sequence.
- * @param bufferSize buffer size
- * @return a future which will evaluate to the opened file.
+ * @param parameters open file parameters from the builder.
+ *
+ * @return a future which will evaluate to the opened file.
* @throws IOException failure to resolve the link.
* @throws IllegalArgumentException unknown mandatory key
*/
public CompletableFuture<FSDataInputStream> openFileWithOptions(Path path,
- Set mandatoryKeys,
- Configuration options,
- int bufferSize) throws IOException {
- return fsImpl.openFileWithOptions(path, mandatoryKeys, options, bufferSize);
+ final OpenFileParameters parameters) throws IOException {
+ return fsImpl.openFileWithOptions(path, parameters);
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index b2c1369a9c1fe..ba0064f0813d3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -47,7 +47,7 @@
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl;
import org.apache.hadoop.fs.impl.FsLinkResolution;
-import org.apache.hadoop.fs.impl.PathCapabilitiesSupport;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -483,7 +483,7 @@ public static FileContext getFileContext(final URI defaultFsUri,
*/
public static FileContext getFileContext(final Configuration aConf)
throws UnsupportedFileSystemException {
- final URI defaultFsUri = URI.create(aConf.get(FS_DEFAULT_NAME_KEY,
+ final URI defaultFsUri = URI.create(aConf.getTrimmed(FS_DEFAULT_NAME_KEY,
FS_DEFAULT_NAME_DEFAULT));
if ( defaultFsUri.getScheme() != null
&& !defaultFsUri.getScheme().trim().isEmpty()) {
@@ -2924,16 +2924,18 @@ protected FSDataInputStreamBuilder(
@Override
public CompletableFuture<FSDataInputStream> build() throws IOException {
final Path absF = fixRelativePart(getPath());
+ OpenFileParameters parameters = new OpenFileParameters()
+ .withMandatoryKeys(getMandatoryKeys())
+ .withOptions(getOptions())
+ .withBufferSize(getBufferSize())
+ .withStatus(getStatus());
return new FSLinkResolver<CompletableFuture<FSDataInputStream>>() {
@Override
public CompletableFuture<FSDataInputStream> next(
final AbstractFileSystem fs,
final Path p)
throws IOException {
- return fs.openFileWithOptions(p,
- getMandatoryKeys(),
- getOptions(),
- getBufferSize());
+ return fs.openFileWithOptions(p, parameters);
}
}.resolve(FileContext.this, absF);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 2376c051c99f9..abb31ed869591 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -58,11 +58,13 @@
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl;
import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsCreateModes;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
@@ -130,6 +132,25 @@
* New methods may be marked as Unstable or Evolving for their initial release,
* as a warning that they are new and may change based on the
* experience of use in applications.
+ * Important note for developers
+ *
+ * If you're making changes here to the public API or protected methods,
+ * you must review the following subclasses and make sure that
+ * they are filtering/passing through new methods as appropriate.
+ *
+ * {@link FilterFileSystem}: methods are passed through.
+ * {@link ChecksumFileSystem}: checksums are created and
+ * verified.
+ * {@code TestHarFileSystem} will need its {@code MustNotImplement}
+ * interface updated.
+ *
+ * There are some external places your changes will break things.
+ * Do co-ordinate changes here.
+ *
+ * HBase: HBoss
+ * Hive: HiveShim23
+ * {@code shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java}
+ *
*****************************************************************/
@SuppressWarnings("DeprecatedIsStillUsed")
@InterfaceAudience.Public
@@ -257,7 +278,8 @@ public static FileSystem get(Configuration conf) throws IOException {
* @return the uri of the default filesystem
*/
public static URI getDefaultUri(Configuration conf) {
- URI uri = URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, DEFAULT_FS)));
+ URI uri =
+ URI.create(fixName(conf.getTrimmed(FS_DEFAULT_NAME_KEY, DEFAULT_FS)));
if (uri.getScheme() == null) {
throw new IllegalArgumentException("No scheme in default FS: " + uri);
}
@@ -2159,24 +2181,19 @@ protected class DirListingIterator<T extends FileStatus> implements
private DirectoryEntries entries;
private int i = 0;
- DirListingIterator(Path path) {
+ DirListingIterator(Path path) throws IOException {
this.path = path;
+ this.entries = listStatusBatch(path, null);
}
@Override
public boolean hasNext() throws IOException {
- if (entries == null) {
- fetchMore();
- }
return i < entries.getEntries().length ||
entries.hasMore();
}
private void fetchMore() throws IOException {
- byte[] token = null;
- if (entries != null) {
- token = entries.getToken();
- }
+ byte[] token = entries.getToken();
entries = listStatusBatch(path, token);
i = 0;
}
@@ -3391,9 +3408,22 @@ private static FileSystem createFileSystem(URI uri, Configuration conf)
Tracer tracer = FsTracer.get(conf);
try(TraceScope scope = tracer.newScope("FileSystem#createFileSystem")) {
scope.addKVAnnotation("scheme", uri.getScheme());
- Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
- FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
- fs.initialize(uri, conf);
+ Class<? extends FileSystem> clazz =
+ getFileSystemClass(uri.getScheme(), conf);
+ FileSystem fs = ReflectionUtils.newInstance(clazz, conf);
+ try {
+ fs.initialize(uri, conf);
+ } catch (IOException | RuntimeException e) {
+ // exception raised during initialization.
+ // log summary at warn and full stack at debug
+ LOGGER.warn("Failed to initialize fileystem {}: {}",
+ uri, e.toString());
+ LOGGER.debug("Failed to initialize fileystem", e);
+ // then (robustly) close the FS, so as to invoke any
+ // cleanup code.
+ IOUtils.cleanupWithLogger(LOGGER, fs);
+ throw e;
+ }
return fs;
}
}
@@ -4449,43 +4479,39 @@ public FutureDataInputStreamBuilder openFile(PathHandle pathHandle)
* the action of opening the file should begin.
*
* The base implementation performs a blocking
- * call to {@link #open(Path, int)}in this call;
+ * call to {@link #open(Path, int)} in this call;
* the actual outcome is in the returned {@code CompletableFuture}.
* This avoids having to create some thread pool, while still
* setting up the expectation that the {@code get()} call
* is needed to evaluate the result.
* @param path path to the file
- * @param mandatoryKeys set of options declared as mandatory.
- * @param options options set during the build sequence.
- * @param bufferSize buffer size
+ * @param parameters open file parameters from the builder.
* @return a future which will evaluate to the opened file.
* @throws IOException failure to resolve the link.
* @throws IllegalArgumentException unknown mandatory key
*/
protected CompletableFuture<FSDataInputStream> openFileWithOptions(
final Path path,
- final Set<String> mandatoryKeys,
- final Configuration options,
- final int bufferSize) throws IOException {
- AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys,
+ final OpenFileParameters parameters) throws IOException {
+ AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(
+ parameters.getMandatoryKeys(),
Collections.emptySet(),
"for " + path);
return LambdaUtils.eval(
- new CompletableFuture<>(), () -> open(path, bufferSize));
+ new CompletableFuture<>(), () ->
+ open(path, parameters.getBufferSize()));
}
/**
* Execute the actual open file operation.
* The base implementation performs a blocking
- * call to {@link #open(Path, int)}in this call;
+ * call to {@link #open(Path, int)} in this call;
* the actual outcome is in the returned {@code CompletableFuture}.
* This avoids having to create some thread pool, while still
* setting up the expectation that the {@code get()} call
* is needed to evaluate the result.
* @param pathHandle path to the file
- * @param mandatoryKeys set of options declared as mandatory.
- * @param options options set during the build sequence.
- * @param bufferSize buffer size
+ * @param parameters open file parameters from the builder.
* @return a future which will evaluate to the opened file.
* @throws IOException failure to resolve the link.
* @throws IllegalArgumentException unknown mandatory key
@@ -4494,14 +4520,13 @@ protected CompletableFuture<FSDataInputStream> openFileWithOptions(
*/
protected CompletableFuture<FSDataInputStream> openFileWithOptions(
final PathHandle pathHandle,
- final Set<String> mandatoryKeys,
- final Configuration options,
- final int bufferSize) throws IOException {
- AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(mandatoryKeys,
+ final OpenFileParameters parameters) throws IOException {
+ AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(
+ parameters.getMandatoryKeys(),
Collections.emptySet(), "");
CompletableFuture<FSDataInputStream> result = new CompletableFuture<>();
try {
- result.complete(open(pathHandle, bufferSize));
+ result.complete(open(pathHandle, parameters.getBufferSize()));
} catch (UnsupportedOperationException tx) {
// fail fast here
throw tx;
@@ -4603,12 +4628,17 @@ protected FSDataInputStreamBuilder(
@Override
public CompletableFuture<FSDataInputStream> build() throws IOException {
Optional optionalPath = getOptionalPath();
+ OpenFileParameters parameters = new OpenFileParameters()
+ .withMandatoryKeys(getMandatoryKeys())
+ .withOptions(getOptions())
+ .withBufferSize(getBufferSize())
+ .withStatus(super.getStatus()); // explicit to avoid IDE warnings
if(optionalPath.isPresent()) {
return getFS().openFileWithOptions(optionalPath.get(),
- getMandatoryKeys(), getOptions(), getBufferSize());
+ parameters);
} else {
return getFS().openFileWithOptions(getPathHandle(),
- getMandatoryKeys(), getOptions(), getBufferSize());
+ parameters);
}
}
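The builder changes above are caller-visible through openFile(); a hedged usage sketch, relying only on the public FileSystem API shown in this hunk:

```java
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OpenFileSketch {
  public static void main(String[] args) throws Exception {
    Path path = new Path(args[0]);
    FileSystem fs = path.getFileSystem(new Configuration());
    // Passing the status is optional; stores may use it to skip their own
    // getFileStatus()/HEAD call, or ignore it entirely.
    FileStatus status = fs.getFileStatus(path);
    CompletableFuture<FSDataInputStream> future = fs.openFile(path)
        .withFileStatus(status)
        .build();
    // The blocking work happens when the future is evaluated.
    try (FSDataInputStream in = future.get()) {
      System.out.println("first byte: " + in.read());
    }
  }
}
```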
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 4566686a126fe..7bc93f9bf5db8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -21,16 +21,20 @@
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
+import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
+import java.io.OutputStreamWriter;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.charset.Charset;
+import java.nio.charset.CharsetEncoder;
+import java.nio.charset.StandardCharsets;
import java.nio.file.AccessDeniedException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
@@ -38,6 +42,7 @@
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -1633,4 +1638,235 @@ public static boolean compareFs(FileSystem srcFs, FileSystem destFs) {
// check for ports
return srcUri.getPort()==dstUri.getPort();
}
+
+ /**
+ * Writes bytes to a file. This utility method opens the file for writing,
+ * creating the file if it does not exist, or overwrites an existing file. All
+ * bytes in the byte array are written to the file.
+ *
+ * @param fs the file system with which to create the file
+ * @param path the path to the file
+ * @param bytes the byte array with the bytes to write
+ *
+ * @return the file system
+ *
+ * @throws NullPointerException if any of the arguments are {@code null}
+ * @throws IOException if an I/O error occurs creating or writing to the file
+ */
+ public static FileSystem write(final FileSystem fs, final Path path,
+ final byte[] bytes) throws IOException {
+
+ Objects.requireNonNull(path);
+ Objects.requireNonNull(bytes);
+
+ try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build()) {
+ out.write(bytes);
+ }
+
+ return fs;
+ }
+
+ /**
+ * Writes bytes to a file. This utility method opens the file for writing,
+ * creating the file if it does not exist, or overwrites an existing file. All
+ * bytes in the byte array are written to the file.
+ *
+ * @param fileContext the file context with which to create the file
+ * @param path the path to the file
+ * @param bytes the byte array with the bytes to write
+ *
+ * @return the file context
+ *
+ * @throws NullPointerException if any of the arguments are {@code null}
+ * @throws IOException if an I/O error occurs creating or writing to the file
+ */
+ public static FileContext write(final FileContext fileContext,
+ final Path path, final byte[] bytes) throws IOException {
+
+ Objects.requireNonNull(path);
+ Objects.requireNonNull(bytes);
+
+ try (FSDataOutputStream out =
+ fileContext.create(path).overwrite(true).build()) {
+ out.write(bytes);
+ }
+
+ return fileContext;
+ }
+
+ /**
+ * Write lines of text to a file. Each line is a char sequence and is written
+ * to the file in sequence with each line terminated by the platform's line
+ * separator, as defined by the system property {@code
+ * line.separator}. Characters are encoded into bytes using the specified
+ * charset. This utility method opens the file for writing, creating the file
+ * if it does not exist, or overwrites an existing file.
+ *
+ * @param fs the file system with which to create the file
+ * @param path the path to the file
+ * @param lines a Collection to iterate over the char sequences
+ * @param cs the charset to use for encoding
+ *
+ * @return the file system
+ *
+ * @throws NullPointerException if any of the arguments are {@code null}
+ * @throws IOException if an I/O error occurs creating or writing to the file
+ */
+ public static FileSystem write(final FileSystem fs, final Path path,
+ final Iterable<? extends CharSequence> lines, final Charset cs)
+ throws IOException {
+
+ Objects.requireNonNull(path);
+ Objects.requireNonNull(lines);
+ Objects.requireNonNull(cs);
+
+ CharsetEncoder encoder = cs.newEncoder();
+ try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build();
+ BufferedWriter writer =
+ new BufferedWriter(new OutputStreamWriter(out, encoder))) {
+ for (CharSequence line : lines) {
+ writer.append(line);
+ writer.newLine();
+ }
+ }
+ return fs;
+ }
+
+ /**
+ * Write lines of text to a file. Each line is a char sequence and is written
+ * to the file in sequence with each line terminated by the platform's line
+ * separator, as defined by the system property {@code
+ * line.separator}. Characters are encoded into bytes using the specified
+ * charset. This utility method opens the file for writing, creating the file
+ * if it does not exist, or overwrites an existing file.
+ *
+ * @param fileContext the file context with which to create the file
+ * @param path the path to the file
+ * @param lines a Collection to iterate over the char sequences
+ * @param cs the charset to use for encoding
+ *
+ * @return the file context
+ *
+ * @throws NullPointerException if any of the arguments are {@code null}
+ * @throws IOException if an I/O error occurs creating or writing to the file
+ */
+ public static FileContext write(final FileContext fileContext,
+ final Path path, final Iterable<? extends CharSequence> lines,
+ final Charset cs) throws IOException {
+
+ Objects.requireNonNull(path);
+ Objects.requireNonNull(lines);
+ Objects.requireNonNull(cs);
+
+ CharsetEncoder encoder = cs.newEncoder();
+ try (FSDataOutputStream out = fileContext.create(path).overwrite(true).build();
+ BufferedWriter writer =
+ new BufferedWriter(new OutputStreamWriter(out, encoder))) {
+ for (CharSequence line : lines) {
+ writer.append(line);
+ writer.newLine();
+ }
+ }
+ return fileContext;
+ }
+
+ /**
+ * Write a line of text to a file. Characters are encoded into bytes using the
+ * specified charset. This utility method opens the file for writing, creating
+ * the file if it does not exist, or overwrites an existing file.
+ *
+ * @param fs the file system with which to create the file
+ * @param path the path to the file
+ * @param charseq the char sequence to write to the file
+ * @param cs the charset to use for encoding
+ *
+ * @return the file system
+ *
+ * @throws NullPointerException if any of the arguments are {@code null}
+ * @throws IOException if an I/O error occurs creating or writing to the file
+ */
+ public static FileSystem write(final FileSystem fs, final Path path,
+ final CharSequence charseq, final Charset cs) throws IOException {
+
+ Objects.requireNonNull(path);
+ Objects.requireNonNull(charseq);
+ Objects.requireNonNull(cs);
+
+ CharsetEncoder encoder = cs.newEncoder();
+ try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build();
+ BufferedWriter writer =
+ new BufferedWriter(new OutputStreamWriter(out, encoder))) {
+ writer.append(charseq);
+ }
+ return fs;
+ }
+
+ /**
+ * Write a line of text to a file. Characters are encoded into bytes using the
+ * specified charset. This utility method opens the file for writing, creating
+ * the file if it does not exist, or overwrites an existing file.
+ *
+ * @param fs the file context with which to create the file
+ * @param path the path to the file
+ * @param charseq the char sequence to write to the file
+ * @param cs the charset to use for encoding
+ *
+ * @return the file context
+ *
+ * @throws NullPointerException if any of the arguments are {@code null}
+ * @throws IOException if an I/O error occurs creating or writing to the file
+ */
+ public static FileContext write(final FileContext fs, final Path path,
+ final CharSequence charseq, final Charset cs) throws IOException {
+
+ Objects.requireNonNull(path);
+ Objects.requireNonNull(charseq);
+ Objects.requireNonNull(cs);
+
+ CharsetEncoder encoder = cs.newEncoder();
+ try (FSDataOutputStream out = fs.create(path).overwrite(true).build();
+ BufferedWriter writer =
+ new BufferedWriter(new OutputStreamWriter(out, encoder))) {
+ writer.append(charseq);
+ }
+ return fs;
+ }
+
+ /**
+ * Write a line of text to a file. Characters are encoded into bytes using
+ * UTF-8. This utility method opens the file for writing, creating the file if
+ * it does not exist, or overwrites an existing file.
+ *
+ * @param fs the file system with which to create the file
+ * @param path the path to the file
+ * @param charseq the char sequence to write to the file
+ *
+ * @return the file system
+ *
+ * @throws NullPointerException if any of the arguments are {@code null}
+ * @throws IOException if an I/O error occurs creating or writing to the file
+ */
+ public static FileSystem write(final FileSystem fs, final Path path,
+ final CharSequence charseq) throws IOException {
+ return write(fs, path, charseq, StandardCharsets.UTF_8);
+ }
+
+ /**
+ * Write a line of text to a file. Characters are encoded into bytes using
+ * UTF-8. This utility method opens the file for writing, creating the file if
+ * it does not exist, or overwrites an existing file.
+ *
+ * @param fileContext the file context with which to create the file
+ * @param path the path to the file
+ * @param charseq the char sequence to write to the file
+ *
+ * @return the file context
+ *
+ * @throws NullPointerException if any of the arguments are {@code null}
+ * @throws IOException if an I/O error occurs creating or writing to the file
+ */
+ public static FileContext write(final FileContext fileContext,
+ final Path path, final CharSequence charseq) throws IOException {
+ return write(fileContext, path, charseq, StandardCharsets.UTF_8);
+ }
}
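A usage sketch of the new FileUtil.write() overloads against the local filesystem; the target directory is illustrative:

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class FileUtilWriteSketch {
  public static void main(String[] args) throws Exception {
    FileSystem local = FileSystem.getLocal(new Configuration());
    Path base = new Path("/tmp/fileutil-write-sketch");
    // Raw bytes, a single CharSequence (UTF-8 by default), and a list of
    // lines with an explicit charset; each call overwrites any existing file.
    FileUtil.write(local, new Path(base, "bytes.bin"), new byte[] {1, 2, 3});
    FileUtil.write(local, new Path(base, "greeting.txt"), "hello, world");
    FileUtil.write(local, new Path(base, "lines.txt"),
        Arrays.asList("first line", "second line"), StandardCharsets.UTF_8);
  }
}
```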
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 3bc3cb2e9b07a..cf12ea3898a7f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -25,12 +25,12 @@
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -714,20 +714,15 @@ public FutureDataInputStreamBuilder openFile(final PathHandle pathHandle)
@Override
protected CompletableFuture<FSDataInputStream> openFileWithOptions(
final Path path,
- final Set<String> mandatoryKeys,
- final Configuration options,
- final int bufferSize) throws IOException {
- return fs.openFileWithOptions(path, mandatoryKeys, options, bufferSize);
+ final OpenFileParameters parameters) throws IOException {
+ return fs.openFileWithOptions(path, parameters);
}
@Override
protected CompletableFuture<FSDataInputStream> openFileWithOptions(
final PathHandle pathHandle,
- final Set<String> mandatoryKeys,
- final Configuration options,
- final int bufferSize) throws IOException {
- return fs.openFileWithOptions(pathHandle, mandatoryKeys, options,
- bufferSize);
+ final OpenFileParameters parameters) throws IOException {
+ return fs.openFileWithOptions(pathHandle, parameters);
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 731a52a7b4137..e197506edc88b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -26,13 +26,12 @@
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -440,10 +439,8 @@ public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
@Override
public CompletableFuture<FSDataInputStream> openFileWithOptions(
final Path path,
- final Set<String> mandatoryKeys,
- final Configuration options,
- final int bufferSize) throws IOException {
- return myFs.openFileWithOptions(path, mandatoryKeys, options, bufferSize);
+ final OpenFileParameters parameters) throws IOException {
+ return myFs.openFileWithOptions(path, parameters);
}
public boolean hasPathCapability(final Path path,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
index cfef1c3827917..07c16b22358c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
@@ -42,4 +42,6 @@ public interface FsConstants {
*/
public static final URI VIEWFS_URI = URI.create("viewfs:///");
public static final String VIEWFS_SCHEME = "viewfs";
+ String FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN =
+ "fs.viewfs.overload.scheme.target.%s.impl";
}
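A small sketch of how the per-scheme pattern would typically be expanded; the ViewFS code that consumes the resulting key is outside this section, so the lookup is an assumption:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsConstants;

public class OverloadSchemeKeySketch {
  public static void main(String[] args) {
    String scheme = "hdfs";
    // Expands to fs.viewfs.overload.scheme.target.hdfs.impl
    String key = String.format(
        FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, scheme);
    Configuration conf = new Configuration();
    System.out.println(key + " = " + conf.get(key));
  }
}
```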
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FutureDataInputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FutureDataInputStreamBuilder.java
index 774d30927df2c..27a522e593001 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FutureDataInputStreamBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FutureDataInputStreamBuilder.java
@@ -47,4 +47,15 @@ public interface FutureDataInputStreamBuilder
CompletableFuture<FSDataInputStream> build()
throws IllegalArgumentException, UnsupportedOperationException,
IOException;
+
+ /**
+ * A FileStatus may be provided to the open request.
+ * It is up to the implementation whether to use this or not.
+ * @param status status.
+ * @return the builder.
+ */
+ default FutureDataInputStreamBuilder withFileStatus(FileStatus status) {
+ return this;
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java
index a6b37b32bb564..1f0a06d7dd98f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemPathHandle.java
@@ -16,7 +16,7 @@
*/
package org.apache.hadoop.fs;
-import com.google.protobuf.ByteString;
+import org.apache.hadoop.thirdparty.protobuf.ByteString;
import org.apache.hadoop.fs.FSProtos.LocalFileSystemPathHandleProto;
import java.io.IOException;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
new file mode 100644
index 0000000000000..043f84612dc8b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.RemoteException;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A partial listing of the children of a parent directory. Since it is a
+ * partial listing, multiple PartialListing instances may need to be combined
+ * to obtain the full listing of a parent directory.
+ *
+ * ListingBatch behaves similarly to a Future, in that getting the result via
+ * {@link #get()} will throw an Exception if there was a failure.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class PartialListing<T extends FileStatus> {
+ private final Path listedPath;
+ private final List<T> partialListing;
+ private final RemoteException exception;
+
+ public PartialListing(Path listedPath, List<T> partialListing) {
+ this(listedPath, partialListing, null);
+ }
+
+ public PartialListing(Path listedPath, RemoteException exception) {
+ this(listedPath, null, exception);
+ }
+
+ private PartialListing(Path listedPath, List<T> partialListing,
+ RemoteException exception) {
+ Preconditions.checkArgument(partialListing == null ^ exception == null);
+ this.partialListing = partialListing;
+ this.listedPath = listedPath;
+ this.exception = exception;
+ }
+
+ /**
+ * Partial listing of the path being listed. In the case where the path is
+ * a file, the list will be a singleton with the file itself.
+ *
+ * @return Partial listing of the path being listed.
+ * @throws IOException if there was an exception getting the listing.
+ */
+ public List<T> get() throws IOException {
+ if (exception != null) {
+ throw exception.unwrapRemoteException();
+ }
+ return partialListing;
+ }
+
+ /**
+ * Path being listed.
+ *
+ * @return the path being listed.
+ */
+ public Path getListedPath() {
+ return listedPath;
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(this)
+ .append("listedPath", listedPath)
+ .append("partialListing", partialListing)
+ .append("exception", exception)
+ .toString();
+ }
+}
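A consumer-side sketch of PartialListing error handling; the call that produces the batches lives in BatchListingOperations and is not shown here, so the listings parameter is an assumption:

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.PartialListing;

public final class PartialListingSketch {
  private PartialListingSketch() {
  }

  static void printAll(List<PartialListing<FileStatus>> listings) {
    for (PartialListing<FileStatus> listing : listings) {
      try {
        // get() rethrows the RemoteException recorded for this path, if any.
        for (FileStatus st : listing.get()) {
          System.out.println(st.getPath());
        }
      } catch (IOException e) {
        System.err.println("Listing of " + listing.getListedPath()
            + " failed: " + e);
      }
    }
  }
}
```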
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
index 3472362dc4792..11cc93401748e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
@@ -40,14 +40,13 @@ public class QuotaUsage {
/** Builder class for QuotaUsage. */
public static class Builder {
public Builder() {
- this.quota = -1;
- this.spaceQuota = -1;
+ this.quota = -1L;
+ this.spaceQuota = -1L;
typeConsumed = new long[StorageType.values().length];
typeQuota = new long[StorageType.values().length];
- for (int i = 0; i < typeQuota.length; i++) {
- typeQuota[i] = -1;
- }
+
+ Arrays.fill(typeQuota, -1L);
}
public Builder fileAndDirectoryCount(long count) {
@@ -71,9 +70,8 @@ public Builder spaceQuota(long spaceQuota) {
}
public Builder typeConsumed(long[] typeConsumed) {
- for (int i = 0; i < typeConsumed.length; i++) {
- this.typeConsumed[i] = typeConsumed[i];
- }
+ System.arraycopy(typeConsumed, 0, this.typeConsumed, 0,
+ typeConsumed.length);
return this;
}
@@ -88,9 +86,7 @@ public Builder typeConsumed(StorageType type, long consumed) {
}
public Builder typeQuota(long[] typeQuota) {
- for (int i = 0; i < typeQuota.length; i++) {
- this.typeQuota[i] = typeQuota[i];
- }
+ System.arraycopy(typeQuota, 0, this.typeQuota, 0, typeQuota.length);
return this;
}
@@ -153,32 +149,21 @@ public long getSpaceQuota() {
/** Return storage type quota. */
public long getTypeQuota(StorageType type) {
- return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
+ return (typeQuota != null) ? typeQuota[type.ordinal()] : -1L;
}
/** Return storage type consumed. */
public long getTypeConsumed(StorageType type) {
- return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
- }
-
- /** Return storage type quota. */
- private long[] getTypesQuota() {
- return typeQuota;
- }
-
- /** Return storage type quota. */
- private long[] getTypesConsumed() {
- return typeConsumed;
+ return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0L;
}
/** Return true if any storage type quota has been set. */
public boolean isTypeQuotaSet() {
- if (typeQuota == null) {
- return false;
- }
- for (StorageType t : StorageType.getTypesSupportingQuota()) {
- if (typeQuota[t.ordinal()] > 0) {
- return true;
+ if (typeQuota != null) {
+ for (StorageType t : StorageType.getTypesSupportingQuota()) {
+ if (typeQuota[t.ordinal()] > 0L) {
+ return true;
+ }
}
}
return false;
@@ -186,45 +171,58 @@ public boolean isTypeQuotaSet() {
/** Return true if any storage type consumption information is available. */
public boolean isTypeConsumedAvailable() {
- if (typeConsumed == null) {
- return false;
- }
- for (StorageType t : StorageType.getTypesSupportingQuota()) {
- if (typeConsumed[t.ordinal()] > 0) {
- return true;
+ if (typeConsumed != null) {
+ for (StorageType t : StorageType.getTypesSupportingQuota()) {
+ if (typeConsumed[t.ordinal()] > 0L) {
+ return true;
+ }
}
}
return false;
}
@Override
- public boolean equals(Object to) {
- return (this == to || (to instanceof QuotaUsage &&
- getFileAndDirectoryCount() ==
- ((QuotaUsage) to).getFileAndDirectoryCount() &&
- getQuota() == ((QuotaUsage) to).getQuota() &&
- getSpaceConsumed() == ((QuotaUsage) to).getSpaceConsumed() &&
- getSpaceQuota() == ((QuotaUsage) to).getSpaceQuota() &&
- Arrays.equals(getTypesQuota(), ((QuotaUsage) to).getTypesQuota()) &&
- Arrays.equals(getTypesConsumed(),
- ((QuotaUsage) to).getTypesConsumed())));
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result
+ + (int) (fileAndDirectoryCount ^ (fileAndDirectoryCount >>> 32));
+ result = prime * result + (int) (quota ^ (quota >>> 32));
+ result = prime * result + (int) (spaceConsumed ^ (spaceConsumed >>> 32));
+ result = prime * result + (int) (spaceQuota ^ (spaceQuota >>> 32));
+ result = prime * result + Arrays.hashCode(typeConsumed);
+ result = prime * result + Arrays.hashCode(typeQuota);
+ return result;
}
@Override
- public int hashCode() {
- long result = (getFileAndDirectoryCount() ^ getQuota() ^
- getSpaceConsumed() ^ getSpaceQuota());
- if (getTypesQuota() != null) {
- for (long quota : getTypesQuota()) {
- result ^= quota;
- }
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
}
- if (getTypesConsumed() != null) {
- for (long consumed : getTypesConsumed()) {
- result ^= consumed;
- }
+ if (!(obj instanceof QuotaUsage)) {
+ return false;
+ }
+ QuotaUsage other = (QuotaUsage) obj;
+ if (fileAndDirectoryCount != other.fileAndDirectoryCount) {
+ return false;
+ }
+ if (quota != other.quota) {
+ return false;
+ }
+ if (spaceConsumed != other.spaceConsumed) {
+ return false;
+ }
+ if (spaceQuota != other.spaceQuota) {
+ return false;
+ }
+ if (!Arrays.equals(typeConsumed, other.typeConsumed)) {
+ return false;
+ }
+ if (!Arrays.equals(typeQuota, other.typeQuota)) {
+ return false;
}
- return (int)result;
+ return true;
}
/**
@@ -292,11 +290,11 @@ protected String getQuotaUsage(boolean hOption) {
String spaceQuotaStr = QUOTA_NONE;
String spaceQuotaRem = QUOTA_INF;
- if (quota > 0) {
+ if (quota > 0L) {
quotaStr = formatSize(quota, hOption);
quotaRem = formatSize(quota-fileAndDirectoryCount, hOption);
}
- if (spaceQuota >= 0) {
+ if (spaceQuota >= 0L) {
spaceQuotaStr = formatSize(spaceQuota, hOption);
spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
}
@@ -307,20 +305,20 @@ protected String getQuotaUsage(boolean hOption) {
protected String getTypesQuotaUsage(boolean hOption,
List<StorageType> types) {
- StringBuffer content = new StringBuffer();
+ StringBuilder content = new StringBuilder();
for (StorageType st : types) {
long typeQuota = getTypeQuota(st);
long typeConsumed = getTypeConsumed(st);
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
- if (typeQuota >= 0) {
+ if (typeQuota >= 0L) {
quotaStr = formatSize(typeQuota, hOption);
quotaRem = formatSize(typeQuota - typeConsumed, hOption);
}
- content.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT,
- quotaStr, quotaRem));
+ content.append(
+ String.format(STORAGE_TYPE_SUMMARY_FORMAT, quotaStr, quotaRem));
}
return content.toString();
}
@@ -332,7 +330,7 @@ protected String getTypesQuotaUsage(boolean hOption,
* @return storage header string
*/
public static String getStorageTypeHeader(List<StorageType> storageTypes) {
- StringBuffer header = new StringBuffer();
+ StringBuilder header = new StringBuilder();
for (StorageType st : storageTypes) {
/* the field length is 13/17 for quota and remain quota
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 4b144bfddf6c6..28db2c9a1a227 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -20,6 +20,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.net.ConnectException;
import java.net.URI;
@@ -41,6 +42,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
@@ -110,7 +112,9 @@ public void initialize(URI uri, Configuration conf) throws IOException { // get
// get port information from uri, (overrides info in conf)
int port = uri.getPort();
- port = (port == -1) ? FTP.DEFAULT_PORT : port;
+ if (port == -1) {
+ port = conf.getInt(FS_FTP_HOST_PORT, FTP.DEFAULT_PORT);
+ }
conf.setInt(FS_FTP_HOST_PORT, port);
// get user/password information from URI (overrides info in conf)
@@ -340,8 +344,19 @@ public FSDataOutputStream create(Path file, FsPermission permission,
// file. The FTP client connection is closed when close() is called on the
// FSDataOutputStream.
client.changeWorkingDirectory(parent.toUri().getPath());
- FSDataOutputStream fos = new FSDataOutputStream(client.storeFileStream(file
- .getName()), statistics) {
+ OutputStream outputStream = client.storeFileStream(file.getName());
+
+ if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
+ // The ftpClient is an inconsistent state. Must close the stream
+ // which in turn will logout and disconnect from FTP server
+ if (outputStream != null) {
+ IOUtils.closeStream(outputStream);
+ }
+ disconnect(client);
+ throw new IOException("Unable to create file: " + file + ", Aborting");
+ }
+
+ FSDataOutputStream fos = new FSDataOutputStream(outputStream, statistics) {
@Override
public void close() throws IOException {
super.close();
@@ -356,12 +371,6 @@ public void close() throws IOException {
}
}
};
- if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
- // The ftpClient is an inconsistent state. Must close the stream
- // which in turn will logout and disconnect from FTP server
- fos.close();
- throw new IOException("Unable to create file: " + file + ", Aborting");
- }
return fos;
}
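A standalone sketch of the ordering the change enforces, using the Commons Net client directly (the helper name is illustrative; the real code also logs out via its own disconnect helper):

```java
import java.io.IOException;
import java.io.OutputStream;
import org.apache.commons.net.ftp.FTPClient;
import org.apache.commons.net.ftp.FTPReply;

final class FtpStoreSketch {
  private FtpStoreSketch() {
  }

  static OutputStream openForWrite(FTPClient client, String name)
      throws IOException {
    OutputStream out = client.storeFileStream(name);
    // storeFileStream() can return a stream even when the transfer was
    // refused; the reply code, not the stream, is the authoritative signal.
    if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
      if (out != null) {
        out.close();
      }
      client.disconnect();
      throw new IOException("Unable to create file: " + name + ", Aborting");
    }
    return out;
  }
}
```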
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureDataInputStreamBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureDataInputStreamBuilderImpl.java
index 2aa4a5d95fcc7..24a8d49747fe6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureDataInputStreamBuilderImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureDataInputStreamBuilderImpl.java
@@ -26,12 +26,13 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FutureDataInputStreamBuilder;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
-import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -60,6 +61,12 @@ public abstract class FutureDataInputStreamBuilderImpl
private int bufferSize;
+ /**
+ * File status passed in through a {@link #withFileStatus(FileStatus)}
+ * call; null otherwise.
+ */
+ private FileStatus status;
+
/**
* Construct from a {@link FileContext}.
*
@@ -69,8 +76,8 @@ public abstract class FutureDataInputStreamBuilderImpl
*/
protected FutureDataInputStreamBuilderImpl(@Nonnull FileContext fc,
@Nonnull Path path) throws IOException {
- super(checkNotNull(path));
- checkNotNull(fc);
+ super(requireNonNull(path, "path"));
+ requireNonNull(fc, "file context");
this.fileSystem = null;
bufferSize = IO_FILE_BUFFER_SIZE_DEFAULT;
}
@@ -82,8 +89,8 @@ protected FutureDataInputStreamBuilderImpl(@Nonnull FileContext fc,
*/
protected FutureDataInputStreamBuilderImpl(@Nonnull FileSystem fileSystem,
@Nonnull Path path) {
- super(checkNotNull(path));
- this.fileSystem = checkNotNull(fileSystem);
+ super(requireNonNull(path, "path"));
+ this.fileSystem = requireNonNull(fileSystem, "fileSystem");
initFromFS();
}
@@ -108,7 +115,7 @@ private void initFromFS() {
}
protected FileSystem getFS() {
- checkNotNull(fileSystem);
+ requireNonNull(fileSystem, "fileSystem");
return fileSystem;
}
@@ -138,4 +145,18 @@ public FutureDataInputStreamBuilder builder() {
public FutureDataInputStreamBuilder getThisBuilder() {
return this;
}
+
+ @Override
+ public FutureDataInputStreamBuilder withFileStatus(FileStatus st) {
+ this.status = requireNonNull(st, "status");
+ return this;
+ }
+
+ /**
+ * Get any status set in {@link #withFileStatus(FileStatus)}.
+ * @return a status value or null.
+ */
+ protected FileStatus getStatus() {
+ return status;
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/OpenFileParameters.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/OpenFileParameters.java
new file mode 100644
index 0000000000000..77b4ff52696a3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/OpenFileParameters.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.impl;
+
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * All the parameters from the openFile builder for the
+ * {@code openFileWithOptions} commands.
+ *
+ * If/when new attributes added to the builder, this class will be extended.
+ */
+public class OpenFileParameters {
+
+ /**
+ * Set of options declared as mandatory.
+ */
+ private Set<String> mandatoryKeys;
+
+ /**
+ * Options set during the build sequence.
+ */
+ private Configuration options;
+
+ /**
+ * Buffer size.
+ */
+ private int bufferSize;
+
+ /**
+ * Optional file status.
+ */
+ private FileStatus status;
+
+ public OpenFileParameters() {
+ }
+
+ public OpenFileParameters withMandatoryKeys(final Set<String> keys) {
+ this.mandatoryKeys = requireNonNull(keys);
+ return this;
+ }
+
+ public OpenFileParameters withOptions(final Configuration opts) {
+ this.options = requireNonNull(opts);
+ return this;
+ }
+
+ public OpenFileParameters withBufferSize(final int size) {
+ this.bufferSize = size;
+ return this;
+ }
+
+ public OpenFileParameters withStatus(final FileStatus st) {
+ this.status = st;
+ return this;
+ }
+
+ public Set<String> getMandatoryKeys() {
+ return mandatoryKeys;
+ }
+
+ public Configuration getOptions() {
+ return options;
+ }
+
+ public int getBufferSize() {
+ return bufferSize;
+ }
+
+ public FileStatus getStatus() {
+ return status;
+ }
+}
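A hedged sketch of how a FileSystem subclass might consume OpenFileParameters in an openFileWithOptions() override; the subclass is illustrative and only the accessors shown in this file are assumed:

```java
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.util.LambdaUtils;

public class ParameterAwareFileSystem extends RawLocalFileSystem {
  @Override
  protected CompletableFuture<FSDataInputStream> openFileWithOptions(
      final Path path,
      final OpenFileParameters parameters) throws IOException {
    // The optional status could let a store skip a redundant getFileStatus()
    // call; it may be null and is not used further in this sketch.
    FileStatus status = parameters.getStatus();
    int bufferSize = parameters.getBufferSize();
    // Defer the blocking open() into the returned future.
    return LambdaUtils.eval(new CompletableFuture<>(),
        () -> open(path, bufferSize));
  }
}
```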
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
index ed33357b51d2b..a91b50f2e9fa7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -19,7 +19,6 @@
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URLDecoder;
@@ -516,20 +515,21 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
disconnect(channel);
throw new IOException(String.format(E_PATH_DIR, f));
}
- InputStream is;
try {
// the path could be a symbolic link, so get the real path
absolute = new Path("/", channel.realpath(absolute.toUri().getPath()));
-
- is = channel.get(absolute.toUri().getPath());
} catch (SftpException e) {
throw new IOException(e);
}
- return new FSDataInputStream(new SFTPInputStream(is, statistics)){
+ return new FSDataInputStream(
+ new SFTPInputStream(channel, absolute, statistics)){
@Override
public void close() throws IOException {
- super.close();
- disconnect(channel);
+ try {
+ super.close();
+ } finally {
+ disconnect(channel);
+ }
}
};
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java
index 7af299bd113e1..d0f9a8d0887ca 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java
@@ -15,62 +15,107 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.hadoop.fs.sftp;
+import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
+import com.jcraft.jsch.ChannelSftp;
+import com.jcraft.jsch.SftpATTRS;
+import com.jcraft.jsch.SftpException;
+
+import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
/** SFTP FileSystem input stream. */
class SFTPInputStream extends FSInputStream {
- public static final String E_SEEK_NOTSUPPORTED = "Seek not supported";
- public static final String E_NULL_INPUTSTREAM = "Null InputStream";
- public static final String E_STREAM_CLOSED = "Stream closed";
-
+ private final ChannelSftp channel;
+ private final Path path;
private InputStream wrappedStream;
private FileSystem.Statistics stats;
private boolean closed;
private long pos;
+ private long nextPos;
+ private long contentLength;
- SFTPInputStream(InputStream stream, FileSystem.Statistics stats) {
-
- if (stream == null) {
- throw new IllegalArgumentException(E_NULL_INPUTSTREAM);
+ SFTPInputStream(ChannelSftp channel, Path path, FileSystem.Statistics stats)
+ throws IOException {
+ try {
+ this.channel = channel;
+ this.path = path;
+ this.stats = stats;
+ this.wrappedStream = channel.get(path.toUri().getPath());
+ SftpATTRS stat = channel.lstat(path.toString());
+ this.contentLength = stat.getSize();
+ } catch (SftpException e) {
+ throw new IOException(e);
}
- this.wrappedStream = stream;
- this.stats = stats;
+ }
- this.pos = 0;
- this.closed = false;
+ @Override
+ public synchronized void seek(long position) throws IOException {
+ checkNotClosed();
+ if (position < 0) {
+ throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
+ }
+ nextPos = position;
}
@Override
- public void seek(long position) throws IOException {
- throw new IOException(E_SEEK_NOTSUPPORTED);
+ public synchronized int available() throws IOException {
+ checkNotClosed();
+ long remaining = contentLength - nextPos;
+ if (remaining > Integer.MAX_VALUE) {
+ return Integer.MAX_VALUE;
+ }
+ return (int) remaining;
+ }
+
+ private void seekInternal() throws IOException {
+ if (pos == nextPos) {
+ return;
+ }
+ if (nextPos > pos) {
+ long skipped = wrappedStream.skip(nextPos - pos);
+ pos = pos + skipped;
+ }
+ if (nextPos < pos) {
+ wrappedStream.close();
+ try {
+ wrappedStream = channel.get(path.toUri().getPath());
+ pos = wrappedStream.skip(nextPos);
+ } catch (SftpException e) {
+ throw new IOException(e);
+ }
+ }
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
- throw new IOException(E_SEEK_NOTSUPPORTED);
+ return false;
}
@Override
- public long getPos() throws IOException {
- return pos;
+ public synchronized long getPos() throws IOException {
+ return nextPos;
}
@Override
public synchronized int read() throws IOException {
- if (closed) {
- throw new IOException(E_STREAM_CLOSED);
+ checkNotClosed();
+ if (this.contentLength == 0 || (nextPos >= contentLength)) {
+ return -1;
}
-
+ seekInternal();
int byteRead = wrappedStream.read();
if (byteRead >= 0) {
pos++;
+ nextPos++;
}
if (stats != null & byteRead >= 0) {
stats.incrementBytesRead(1);
@@ -78,23 +123,6 @@ public synchronized int read() throws IOException {
return byteRead;
}
- public synchronized int read(byte[] buf, int off, int len)
- throws IOException {
- if (closed) {
- throw new IOException(E_STREAM_CLOSED);
- }
-
- int result = wrappedStream.read(buf, off, len);
- if (result > 0) {
- pos += result;
- }
- if (stats != null & result > 0) {
- stats.incrementBytesRead(result);
- }
-
- return result;
- }
-
public synchronized void close() throws IOException {
if (closed) {
return;
@@ -103,4 +131,12 @@ public synchronized void close() throws IOException {
wrappedStream.close();
closed = true;
}
+
+ private void checkNotClosed() throws IOException {
+ if (closed) {
+ throw new IOException(
+ path.toUri() + ": " + FSExceptionMessages.STREAM_IS_CLOSED
+ );
+ }
+ }
}
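An illustrative, self-contained sketch (not SFTP-specific) of the lazy-seek pattern the new stream uses: seek() only records the requested offset, and the underlying stream is skipped, or reopened in the real code, on the next read:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

class LazySeekSketch {
  private final InputStream in;
  private long pos;      // position of the underlying stream
  private long nextPos;  // position most recently requested by seek()

  LazySeekSketch(InputStream in) {
    this.in = in;
  }

  void seek(long position) {
    nextPos = position;  // cheap: no I/O happens here
  }

  int read() throws IOException {
    if (nextPos > pos) {
      // Forward seek: skip on the already-open stream.
      pos += in.skip(nextPos - pos);
    } else if (nextPos < pos) {
      // The real SFTPInputStream reopens the channel here.
      throw new IOException("backward seek not shown in this sketch");
    }
    int b = in.read();
    if (b >= 0) {
      pos++;
      nextPos++;
    }
    return b;
  }

  public static void main(String[] args) throws IOException {
    LazySeekSketch s = new LazySeekSketch(
        new ByteArrayInputStream(new byte[] {10, 20, 30, 40}));
    s.seek(2);
    System.out.println(s.read()); // prints 30
  }
}
```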
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 0802a00b01bc8..ca9961aeb65a0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -415,7 +415,6 @@ protected void copyStreamToTarget(InputStream in, PathData target)
targetFs.setWriteChecksum(writeChecksum);
targetFs.writeStreamToFile(in, tempTarget, lazyPersist, direct);
if (!direct) {
- targetFs.deleteOnExit(tempTarget.path);
targetFs.rename(tempTarget, target);
}
} finally {
@@ -491,25 +490,18 @@ void writeStreamToFile(InputStream in, PathData target,
throws IOException {
FSDataOutputStream out = null;
try {
- out = create(target, lazyPersist, direct);
+ out = create(target, lazyPersist);
IOUtils.copyBytes(in, out, getConf(), true);
- } catch (IOException e) {
- // failure: clean up if we got as far as creating the file
- if (!direct && out != null) {
- try {
- fs.delete(target.path, false);
- } catch (IOException ignored) {
- }
- }
- throw e;
} finally {
+ if (!direct) {
+ deleteOnExit(target.path);
+ }
IOUtils.closeStream(out); // just in case copyBytes didn't
}
}
// tag created files as temp files
- FSDataOutputStream create(PathData item, boolean lazyPersist,
- boolean direct)
+ FSDataOutputStream create(PathData item, boolean lazyPersist)
throws IOException {
if (lazyPersist) {
long defaultBlockSize;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index 4622c75fbd410..39958a9cb1c9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -239,26 +239,35 @@ protected void processOptions(LinkedList<String> args)
* Copy local files to a remote filesystem
*/
public static class Put extends CommandWithDestination {
+ private ThreadPoolExecutor executor = null;
+ private int numThreads = 1;
+
+ private static final int MAX_THREADS =
+ Runtime.getRuntime().availableProcessors() * 2;
+
public static final String NAME = "put";
public static final String USAGE =
- "[-f] [-p] [-l] [-d] ... ";
+ "[-f] [-p] [-l] [-d] [-t ] ... ";
public static final String DESCRIPTION =
- "Copy files from the local file system " +
- "into fs. Copying fails if the file already " +
- "exists, unless the -f flag is given.\n" +
- "Flags:\n" +
- " -p : Preserves access and modification times, ownership and the mode.\n" +
- " -f : Overwrites the destination if it already exists.\n" +
- " -l : Allow DataNode to lazily persist the file to disk. Forces\n" +
- " replication factor of 1. This flag will result in reduced\n" +
- " durability. Use with care.\n" +
+ "Copy files from the local file system " +
+ "into fs. Copying fails if the file already " +
+ "exists, unless the -f flag is given.\n" +
+ "Flags:\n" +
+ " -p : Preserves timestamps, ownership and the mode.\n" +
+ " -f : Overwrites the destination if it already exists.\n" +
+ " -t : Number of threads to be used, default is 1.\n" +
+ " -l : Allow DataNode to lazily persist the file to disk. Forces" +
+ " replication factor of 1. This flag will result in reduced" +
+ " durability. Use with care.\n" +
" -d : Skip creation of temporary file(._COPYING_).\n";
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf =
new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
+ cf.addOptionWithValue("t");
cf.parse(args);
+ setNumberThreads(cf.getOptValue("t"));
setOverwrite(cf.getOpt("f"));
setPreserve(cf.getOpt("p"));
setLazyPersist(cf.getOpt("l"));
@@ -288,32 +297,22 @@ protected void processArguments(LinkedList args)
copyStreamToTarget(System.in, getTargetPath(args.get(0)));
return;
}
- super.processArguments(args);
- }
- }
- public static class CopyFromLocal extends Put {
- private ThreadPoolExecutor executor = null;
- private int numThreads = 1;
+ executor = new ThreadPoolExecutor(numThreads, numThreads, 1,
+ TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
+ new ThreadPoolExecutor.CallerRunsPolicy());
+ super.processArguments(args);
- private static final int MAX_THREADS =
- Runtime.getRuntime().availableProcessors() * 2;
- public static final String NAME = "copyFromLocal";
- public static final String USAGE =
- "[-f] [-p] [-l] [-d] [-t ] ... ";
- public static final String DESCRIPTION =
- "Copy files from the local file system " +
- "into fs. Copying fails if the file already " +
- "exists, unless the -f flag is given.\n" +
- "Flags:\n" +
- " -p : Preserves access and modification times, ownership and the" +
- " mode.\n" +
- " -f : Overwrites the destination if it already exists.\n" +
- " -t : Number of threads to be used, default is 1.\n" +
- " -l : Allow DataNode to lazily persist the file to disk. Forces" +
- " replication factor of 1. This flag will result in reduced" +
- " durability. Use with care.\n" +
- " -d : Skip creation of temporary file(._COPYING_).\n";
+ // issue the command and then wait for it to finish
+ executor.shutdown();
+ try {
+ executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
+ } catch (InterruptedException e) {
+ executor.shutdownNow();
+ displayError(e);
+ Thread.currentThread().interrupt();
+ }
+ }
private void setNumberThreads(String numberThreadsString) {
if (numberThreadsString == null) {
@@ -330,22 +329,6 @@ private void setNumberThreads(String numberThreadsString) {
}
}
- @Override
- protected void processOptions(LinkedList args) throws IOException {
- CommandFormat cf =
- new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
- cf.addOptionWithValue("t");
- cf.parse(args);
- setNumberThreads(cf.getOptValue("t"));
- setOverwrite(cf.getOpt("f"));
- setPreserve(cf.getOpt("p"));
- setLazyPersist(cf.getOpt("l"));
- setDirectWrite(cf.getOpt("d"));
- getRemoteDestination(args);
- // should have a -r option
- setRecursive(true);
- }
-
private void copyFile(PathData src, PathData target) throws IOException {
if (isPathRecursable(src)) {
throw new PathIsDirectoryException(src.toString());
@@ -372,25 +355,6 @@ protected void copyFileToTarget(PathData src, PathData target)
executor.submit(task);
}
- @Override
- protected void processArguments(LinkedList args)
- throws IOException {
- executor = new ThreadPoolExecutor(numThreads, numThreads, 1,
- TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
- new ThreadPoolExecutor.CallerRunsPolicy());
- super.processArguments(args);
-
- // issue the command and then wait for it to finish
- executor.shutdown();
- try {
- executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
- } catch (InterruptedException e) {
- executor.shutdownNow();
- displayError(e);
- Thread.currentThread().interrupt();
- }
- }
-
@VisibleForTesting
public int getNumThreads() {
return numThreads;
@@ -401,6 +365,12 @@ public ThreadPoolExecutor getExecutor() {
return executor;
}
}
+
+ public static class CopyFromLocal extends Put {
+ public static final String NAME = "copyFromLocal";
+ public static final String USAGE = Put.USAGE;
+ public static final String DESCRIPTION = "Identical to the -put command.";
+ }
public static class CopyToLocal extends Get {
public static final String NAME = "copyToLocal";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
index 22d8be53e97a6..ab7e1951bcd3f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
@@ -56,13 +56,14 @@ public static void registerCommands(CommandFactory factory) {
//return the quota, namespace count and disk space usage.
private static final String OPTION_QUOTA_AND_USAGE = "u";
private static final String OPTION_ECPOLICY = "e";
+ private static final String OPTION_SNAPSHOT_COUNT = "s";
public static final String NAME = "count";
public static final String USAGE =
"[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] [-" + OPTION_HEADER
+ "] [-" + OPTION_TYPE + " []] [-" +
OPTION_QUOTA_AND_USAGE + "] [-" + OPTION_EXCLUDE_SNAPSHOT
- + "] [-" + OPTION_ECPOLICY
+ + "] [-" + OPTION_ECPOLICY + "] [-" + OPTION_SNAPSHOT_COUNT
+ "] ...";
public static final String DESCRIPTION =
"Count the number of directories, files and bytes under the paths\n" +
@@ -93,7 +94,8 @@ public static void registerCommands(CommandFactory factory) {
"the storage types.\n" +
"The -" + OPTION_QUOTA_AND_USAGE + " option shows the quota and \n" +
"the usage against the quota without the detailed content summary."+
- "The -"+ OPTION_ECPOLICY +" option shows the erasure coding policy.";
+ "The -" + OPTION_ECPOLICY + " option shows the erasure coding policy."
+ + "The -" + OPTION_SNAPSHOT_COUNT + " option shows snapshot counts.";
private boolean showQuotas;
private boolean humanReadable;
@@ -102,6 +104,7 @@ public static void registerCommands(CommandFactory factory) {
private boolean showQuotasAndUsageOnly;
private boolean excludeSnapshots;
private boolean displayECPolicy;
+ private boolean showSnapshot;
/** Constructor */
public Count() {}
@@ -123,7 +126,7 @@ protected void processOptions(LinkedList args) {
CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER, OPTION_QUOTA_AND_USAGE,
OPTION_EXCLUDE_SNAPSHOT,
- OPTION_ECPOLICY);
+ OPTION_ECPOLICY, OPTION_SNAPSHOT_COUNT);
cf.addOptionWithValue(OPTION_TYPE);
cf.parse(args);
if (args.isEmpty()) { // default path is the current working directory
@@ -134,6 +137,7 @@ protected void processOptions(LinkedList args) {
showQuotasAndUsageOnly = cf.getOpt(OPTION_QUOTA_AND_USAGE);
excludeSnapshots = cf.getOpt(OPTION_EXCLUDE_SNAPSHOT);
displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
+ showSnapshot = cf.getOpt(OPTION_SNAPSHOT_COUNT);
if (showQuotas || showQuotasAndUsageOnly) {
String types = cf.getOptValue(OPTION_TYPE);
@@ -165,6 +169,9 @@ protected void processOptions(LinkedList args) {
if(displayECPolicy){
headString.append("ERASURECODING_POLICY ");
}
+ if (showSnapshot) {
+ headString.append(ContentSummary.getSnapshotHeader());
+ }
headString.append("PATHNAME");
out.println(headString.toString());
}
@@ -205,6 +212,10 @@ protected void processPath(PathData src) throws IOException {
outputString.append(summary.getErasureCodingPolicy())
.append(" ");
}
+ if (showSnapshot) {
+ ContentSummary summary = src.fs.getContentSummary(src.path);
+ outputString.append(summary.toSnapshot(isHumanReadable()));
+ }
outputString.append(src);
out.println(outputString.toString());
}
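
The new -s column relies on the ContentSummary helpers referenced in the hunk above (getSnapshotHeader() and toSnapshot()). A hedged sketch of how a client could render the same header and row; the path and default configuration are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotCountExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/user/data");                     // illustrative path
    ContentSummary summary = fs.getContentSummary(path);
    // Same pieces the -s option appends: snapshot header columns + per-path counts.
    System.out.println(ContentSummary.getSnapshotHeader() + "PATHNAME");
    System.out.println(summary.toSnapshot(true) + path.toString());
  }
}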
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index 784bbf33f7826..ea8378dc4551b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -108,7 +108,7 @@ protected void processRawArguments(LinkedList args)
HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY,
HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT);
if (displayWarnings) {
- final String defaultFs = getConf().get(FS_DEFAULT_NAME_KEY);
+ final String defaultFs = getConf().getTrimmed(FS_DEFAULT_NAME_KEY);
final boolean missingDefaultFs =
defaultFs == null || defaultFs.equals(FS_DEFAULT_NAME_DEFAULT);
if (missingDefaultFs) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 5ef42775ea58b..c20293e1a5adb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.PathExistsException;
-import org.apache.hadoop.fs.shell.CopyCommands.Put;
+import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal;
/** Various commands for moving files */
@InterfaceAudience.Private
@@ -41,12 +41,22 @@ public static void registerCommands(CommandFactory factory) {
/**
* Move local files to a remote filesystem
*/
- public static class MoveFromLocal extends Put {
+ public static class MoveFromLocal extends CopyFromLocal {
public static final String NAME = "moveFromLocal";
- public static final String USAGE = "<localsrc> ... <dst>";
+ public static final String USAGE =
+ "[-f] [-p] [-l] [-d] <localsrc> ... <dst>";
public static final String DESCRIPTION =
- "Same as -put, except that the source is " +
- "deleted after it's copied.";
+ "Same as -put, except that the source is " +
+ "deleted after it's copied\n" +
+ "and -t option has not yet implemented.";
+
+ @Override
+ protected void processOptions(LinkedList<String> args) throws IOException {
+ if (args.contains("-t")) {
+ throw new CommandFormat.UnknownOptionException("-t");
+ }
+ super.processOptions(args);
+ }
@Override
protected void processPath(PathData src, PathData target) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
index 4c3dae9a9f99b..6dd1f6589478e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
@@ -135,6 +135,17 @@ public static void addLinkMerge(Configuration conf, final URI[] targets) {
addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets);
}
+ /**
+ * Add nfly link to configuration for the given mount table.
+ */
+ public static void addLinkNfly(Configuration conf, String mountTableName,
+ String src, String settings, final String targets) {
+ conf.set(
+ getConfigViewFsPrefix(mountTableName) + "."
+ + Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." + src,
+ targets);
+ }
+
/**
*
* @param conf
@@ -149,9 +160,7 @@ public static void addLinkNfly(Configuration conf, String mountTableName,
settings = settings == null
? "minReplication=2,repairOnRead=true"
: settings;
-
- conf.set(getConfigViewFsPrefix(mountTableName) + "." +
- Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." + src,
+ addLinkNfly(conf, mountTableName, src, settings,
StringUtils.uriToString(targets));
}
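
The refactor above adds a String-targets overload of addLinkNfly that the existing URI[] variant now delegates to. A short hedged example of calling the new overload; the mount-table name, settings and target URIs are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class NflyLinkExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Writes fs.viewfs.mounttable.ClusterX.linkNfly.<settings>./data = <targets>
    ConfigUtil.addLinkNfly(conf, "ClusterX", "/data",
        "minReplication=2,repairOnRead=true",
        "hdfs://nn1/data,hdfs://nn2/data");
  }
}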
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 37f1a16800e7d..0a5d4b46ce2d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -30,6 +30,11 @@ public interface Constants {
* Prefix for the config variable prefix for the ViewFs mount-table
*/
public static final String CONFIG_VIEWFS_PREFIX = "fs.viewfs.mounttable";
+
+ /**
+ * Config variable name for the ViewFs mount-table path.
+ */
+ String CONFIG_VIEWFS_MOUNTTABLE_PATH = CONFIG_VIEWFS_PREFIX + ".path";
/**
* Prefix for the home dir for the mount table - if not specified
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java
new file mode 100644
index 0000000000000..071af11e63bf2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * File system instance getter.
+ */
+@Private
+class FsGetter {
+
+ /**
+ * Gets new file system instance of given uri.
+ */
+ public FileSystem getNewInstance(URI uri, Configuration conf)
+ throws IOException {
+ return FileSystem.newInstance(uri, conf);
+ }
+
+ /**
+ * Gets file system instance of given uri.
+ */
+ public FileSystem get(URI uri, Configuration conf) throws IOException {
+ return FileSystem.get(uri, conf);
+ }
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java
new file mode 100644
index 0000000000000..3968e3650cf39
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An implementation for Apache Hadoop compatible file system based mount-table
+ * file loading.
+ */
+public class HCFSMountTableConfigLoader implements MountTableConfigLoader {
+ private static final String REGEX_DOT = "[.]";
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(HCFSMountTableConfigLoader.class);
+ private Path mountTable = null;
+
+ /**
+ * Loads the mount-table configuration from a Hadoop compatible file system
+ * and adds the configuration items to the given configuration. The
+ * mount-table configuration file name should be suffixed with a version
+ * number, in the format: mount-table.<versionNumber>.xml
+ * Example: mount-table.1.xml
+ * To update the mount-table, upload a new configuration file with a
+ * monotonically increasing integer as the version number. This API loads the
+ * file with the highest version number. A single file path can also be
+ * configured directly.
+ *
+ * @param mountTableConfigPath : a directory where the mount-table version
+ * files are stored, or a single mount-table file path. Configuring a
+ * directory with versioned mount-table files is recommended.
+ * @param conf : configuration to which the mount table is added as a resource.
+ */
+ @Override
+ public void load(String mountTableConfigPath, Configuration conf)
+ throws IOException {
+ this.mountTable = new Path(mountTableConfigPath);
+ String scheme = mountTable.toUri().getScheme();
+ FsGetter fsGetter = new ViewFileSystemOverloadScheme.ChildFsGetter(scheme);
+ try (FileSystem fs = fsGetter.getNewInstance(mountTable.toUri(), conf)) {
+ RemoteIterator<LocatedFileStatus> listFiles =
+ fs.listFiles(mountTable, false);
+ LocatedFileStatus lfs = null;
+ int higherVersion = -1;
+ while (listFiles.hasNext()) {
+ LocatedFileStatus curLfs = listFiles.next();
+ String cur = curLfs.getPath().getName();
+ String[] nameParts = cur.split(REGEX_DOT);
+ if (nameParts.length < 2) {
+ logInvalidFileNameFormat(cur);
+ continue; // invalid file name
+ }
+ int curVersion = higherVersion;
+ try {
+ curVersion = Integer.parseInt(nameParts[nameParts.length - 2]);
+ } catch (NumberFormatException nfe) {
+ logInvalidFileNameFormat(cur);
+ continue;
+ }
+
+ if (curVersion > higherVersion) {
+ higherVersion = curVersion;
+ lfs = curLfs;
+ }
+ }
+
+ if (lfs == null) {
+ // No valid mount table file found.
+ // TODO: Should we fail? Currently viewfs init will fail anyway if no
+ // mount links are configured.
+ LOGGER.warn("No valid mount-table file exists at: {}. At least one "
+ + "mount-table file should be present with the name format: "
+ + "mount-table.<versionNumber>.xml", mountTableConfigPath);
+ return;
+ }
+ // Latest version file.
+ Path latestVersionMountTable = lfs.getPath();
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Loading the mount-table {} into configuration.",
+ latestVersionMountTable);
+ }
+ try (FSDataInputStream open = fs.open(latestVersionMountTable)) {
+ Configuration newConf = new Configuration(false);
+ newConf.addResource(open);
+ // This will add configuration props as resource, instead of stream
+ // itself. So, that stream can be closed now.
+ conf.addResource(newConf);
+ }
+ }
+ }
+
+ private void logInvalidFileNameFormat(String cur) {
+ LOGGER.warn("Invalid file name format for mount-table version file: {}. "
+ + "The valid file name format is mount-table-name..xml",
+ cur);
+ }
+
+}
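
Assuming a directory that holds versioned files such as mount-table.1.xml and mount-table.2.xml, the loader above adds the highest-numbered file as a configuration resource. A minimal hedged usage sketch; the HDFS directory path is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.HCFSMountTableConfigLoader;
import org.apache.hadoop.fs.viewfs.MountTableConfigLoader;

public class MountTableLoadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MountTableConfigLoader loader = new HCFSMountTableConfigLoader();
    // Directory containing mount-table.1.xml, mount-table.2.xml, ...;
    // the file with the highest version number wins.
    loader.load("hdfs://nn1/config/mount-tables", conf);
  }
}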
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 69923438ecc20..50c839b52b654 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -123,6 +123,7 @@ static class INodeDir extends INode {
private final Map<String, INode<T>> children = new HashMap<>();
private T internalDirFs = null; //filesystem of this internal directory
private boolean isRoot = false;
+ private INodeLink<T> fallbackLink = null;
INodeDir(final String pathToNode, final UserGroupInformation aUgi) {
super(pathToNode, aUgi);
@@ -149,6 +150,17 @@ boolean isRoot() {
return isRoot;
}
+ INodeLink<T> getFallbackLink() {
+ return fallbackLink;
+ }
+
+ void addFallbackLink(INodeLink<T> link) throws IOException {
+ if (!isRoot) {
+ throw new IOException("Fallback link can only be added for root");
+ }
+ this.fallbackLink = link;
+ }
+
Map<String, INode<T>> getChildren() {
return Collections.unmodifiableMap(children);
}
@@ -580,6 +592,7 @@ protected InodeTree(final Configuration config, final String viewName)
}
}
rootFallbackLink = fallbackLink;
+ getRootDir().addFallbackLink(rootFallbackLink);
}
if (!gotMountTableEntry) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java
new file mode 100644
index 0000000000000..bc2c3ea93c58c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * An interface for loading mount-table configuration. This class can have more
+ * APIs like refreshing mount tables automatically etc.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface MountTableConfigLoader {
+
+ /**
+ * Loads the mount-table configuration into given configuration.
+ *
+ * @param mountTableConfigPath - Path of the mount table. It can be a file or
+ * a directory in the case of multiple versions of mount-table
+ * files(Recommended option).
+ * @param conf - Configuration object to add mount table.
+ */
+ void load(String mountTableConfigPath, Configuration conf)
+ throws IOException;
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java
index 53966b8afbfcc..85af68af31434 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.fs.viewfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -59,7 +59,7 @@
*/
@Private
final class NflyFSystem extends FileSystem {
- private static final Log LOG = LogFactory.getLog(NflyFSystem.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NflyFSystem.class);
private static final String NFLY_TMP_PREFIX = "_nfly_tmp_";
enum NflyKey {
@@ -212,6 +212,21 @@ private static String getRack(String rackString) {
*/
private NflyFSystem(URI[] uris, Configuration conf, int minReplication,
EnumSet<NflyKey> nflyFlags) throws IOException {
+ this(uris, conf, minReplication, nflyFlags, null);
+ }
+
+ /**
+ * Creates a new Nfly instance.
+ *
+ * @param uris the list of uris in the mount point
+ * @param conf configuration object
+ * @param minReplication minimum copies to commit a write op
+ * @param nflyFlags modes such as readMostRecent
+ * @param fsGetter to get the file system instance with the given uri
+ * @throws IOException
+ */
+ private NflyFSystem(URI[] uris, Configuration conf, int minReplication,
+ EnumSet<NflyKey> nflyFlags, FsGetter fsGetter) throws IOException {
if (uris.length < minReplication) {
throw new IOException(minReplication + " < " + uris.length
+ ": Minimum replication < #destinations");
@@ -238,8 +253,14 @@ private NflyFSystem(URI[] uris, Configuration conf, int minReplication,
nodes = new NflyNode[uris.length];
final Iterator<String> rackIter = rackStrings.iterator();
for (int i = 0; i < nodes.length; i++) {
- nodes[i] = new NflyNode(hostStrings.get(i), rackIter.next(), uris[i],
- conf);
+ if (fsGetter != null) {
+ nodes[i] = new NflyNode(hostStrings.get(i), rackIter.next(),
+ new ChRootedFileSystem(fsGetter.getNewInstance(uris[i], conf),
+ uris[i]));
+ } else {
+ nodes[i] =
+ new NflyNode(hostStrings.get(i), rackIter.next(), uris[i], conf);
+ }
}
// sort all the uri's by distance from myNode, the local file system will
// automatically be the the first one.
@@ -921,7 +942,7 @@ private static void processThrowable(NflyNode nflyNode, String op,
* @throws IOException
*/
static FileSystem createFileSystem(URI[] uris, Configuration conf,
- String settings) throws IOException {
+ String settings, FsGetter fsGetter) throws IOException {
// assert settings != null
int minRepl = DEFAULT_MIN_REPLICATION;
EnumSet<NflyKey> nflyFlags = EnumSet.noneOf(NflyKey.class);
@@ -946,6 +967,6 @@ static FileSystem createFileSystem(URI[] uris, Configuration conf,
throw new IllegalArgumentException(nflyKey + ": Infeasible");
}
}
- return new NflyFSystem(uris, conf, minRepl, nflyFlags);
+ return new NflyFSystem(uris, conf, minRepl, nflyFlags, fsGetter);
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index faa374a39789b..4f02feeebec8b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.fs.viewfs;
import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
-import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT;
+import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -35,9 +35,9 @@
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
-import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -96,16 +96,28 @@ static AccessControlException readOnlyMountTable(final String operation,
return readOnlyMountTable(operation, p.toString());
}
+ /**
+ * Gets file system creator instance.
+ */
+ protected FsGetter fsGetter() {
+ return new FsGetter();
+ }
+
/**
* Caching children filesystems. HADOOP-15565.
*/
static class InnerCache {
private Map<Key, FileSystem> map = new HashMap<>();
+ private FsGetter fsCreator;
+
+ InnerCache(FsGetter fsCreator) {
+ this.fsCreator = fsCreator;
+ }
FileSystem get(URI uri, Configuration config) throws IOException {
Key key = new Key(uri);
if (map.get(key) == null) {
- FileSystem fs = FileSystem.newInstance(uri, config);
+ FileSystem fs = fsCreator.getNewInstance(uri, config);
map.put(key, fs);
return fs;
} else {
@@ -193,7 +205,7 @@ public URI[] getTargetFileSystemURIs() {
final long creationTime; // of the the mount table
final UserGroupInformation ugi; // the user/group of user who created mtable
- URI myUri;
+ private URI myUri;
private Path workingDir;
Configuration config;
InodeTree<FileSystem> fsState; // the fs state; ie the mount table
@@ -255,13 +267,13 @@ public void initialize(final URI theUri, final Configuration conf)
config = conf;
enableInnerCache = config.getBoolean(CONFIG_VIEWFS_ENABLE_INNER_CACHE,
CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT);
- final InnerCache innerCache = new InnerCache();
+ FsGetter fsGetter = fsGetter();
+ final InnerCache innerCache = new InnerCache(fsGetter);
// Now build client side view (i.e. client side mount table) from config.
final String authority = theUri.getAuthority();
try {
- myUri = new URI(FsConstants.VIEWFS_SCHEME, authority, "/", null, null);
+ myUri = new URI(getScheme(), authority, "/", null, null);
fsState = new InodeTree<FileSystem>(conf, authority) {
-
@Override
protected FileSystem getTargetFileSystem(final URI uri)
throws URISyntaxException, IOException {
@@ -269,7 +281,7 @@ protected FileSystem getTargetFileSystem(final URI uri)
if (enableInnerCache) {
fs = innerCache.get(uri, config);
} else {
- fs = FileSystem.get(uri, config);
+ fs = fsGetter.get(uri, config);
}
return new ChRootedFileSystem(fs, uri);
}
@@ -283,7 +295,8 @@ protected FileSystem getTargetFileSystem(final INodeDir dir)
@Override
protected FileSystem getTargetFileSystem(final String settings,
final URI[] uris) throws URISyntaxException, IOException {
- return NflyFSystem.createFileSystem(uris, config, settings);
+ return NflyFSystem.createFileSystem(uris, config, settings,
+ fsGetter);
}
};
workingDir = this.getHomeDirectory();
@@ -1167,10 +1180,19 @@ public FileStatus getFileStatus(Path f) throws IOException {
}
+ /**
+ * {@inheritDoc}
+ *
+ * Note: listStatus on root("/") considers listing from fallbackLink if
+ * available. If the same directory name is present in configured mount
+ * path as well as in fallback link, then only the configured mount path
+ * will be listed in the returned result.
+ */
@Override
public FileStatus[] listStatus(Path f) throws AccessControlException,
FileNotFoundException, IOException {
checkPathIsSlash(f);
+ FileStatus[] fallbackStatuses = listStatusForFallbackLink();
FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()];
int i = 0;
for (Entry<String, INode<FileSystem>> iEntry :
@@ -1193,7 +1215,45 @@ public FileStatus[] listStatus(Path f) throws AccessControlException,
myUri, null));
}
}
- return result;
+ if (fallbackStatuses.length > 0) {
+ return consolidateFileStatuses(fallbackStatuses, result);
+ } else {
+ return result;
+ }
+ }
+
+ private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses,
+ FileStatus[] mountPointStatuses) {
+ ArrayList<FileStatus> result = new ArrayList<>();
+ Set<String> pathSet = new HashSet<>();
+ for (FileStatus status : mountPointStatuses) {
+ result.add(status);
+ pathSet.add(status.getPath().getName());
+ }
+ for (FileStatus status : fallbackStatuses) {
+ if (!pathSet.contains(status.getPath().getName())) {
+ result.add(status);
+ }
+ }
+ return result.toArray(new FileStatus[0]);
+ }
+
+ private FileStatus[] listStatusForFallbackLink() throws IOException {
+ if (theInternalDir.isRoot() &&
+ theInternalDir.getFallbackLink() != null) {
+ FileSystem linkedFs =
+ theInternalDir.getFallbackLink().getTargetFileSystem();
+ // Fallback link is only applicable for root
+ FileStatus[] statuses = linkedFs.listStatus(new Path("/"));
+ for (FileStatus status : statuses) {
+ // Fix the path back to viewfs scheme
+ status.setPath(
+ new Path(myUri.toString(), status.getPath().getName()));
+ }
+ return statuses;
+ } else {
+ return new FileStatus[0];
+ }
}
@Override
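
To illustrate the new root listing behaviour: with a fallback link configured, listStatus("/") now returns the union of the mount points and the fallback target's top-level entries, with the mount-table entry winning on name collisions. A hedged sketch, assuming the standard linkFallback mount-table key and illustrative cluster and NameNode names:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FallbackListingExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative mount table: one explicit link plus a fallback target.
    conf.set("fs.viewfs.mounttable.ClusterX.link./user", "hdfs://nn1/user");
    conf.set("fs.viewfs.mounttable.ClusterX.linkFallback", "hdfs://nn1/");
    FileSystem viewFs =
        FileSystem.get(URI.create("viewfs://ClusterX/"), conf);
    // "/user" comes from the mount table; other top-level dirs under hdfs://nn1/
    // appear via the fallback, and duplicate names are listed only once.
    for (FileStatus st : viewFs.listStatus(new Path("/"))) {
      System.out.println(st.getPath());
    }
  }
}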
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
new file mode 100644
index 0000000000000..36f9cd104cb6b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.net.URI;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+
+/******************************************************************************
+ * This class is extended from the ViewFileSystem for the overloaded scheme
+ * file system. Mount link configurations and in-memory mount table
+ * building behaviors are inherited from ViewFileSystem. Unlike ViewFileSystem
+ * scheme (viewfs://), the users would be able to use any scheme.
+ *
+ * To use this class, the following configurations need to be added in
+ * core-site.xml file.
+ * 1) fs.<scheme>.impl
+ * = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme
+ * 2) fs.viewfs.overload.scheme.target.<scheme>.impl
+ * = <hadoop compatible file system implementation class>
+ *
+ * Here <scheme> can be any scheme, but with that scheme there should be a
+ * hadoop compatible file system available. The second configuration value
+ * should be the respective scheme's file system implementation class.
+ * Example: if the scheme is configured as "hdfs", then the second
+ * configuration class name will be org.apache.hadoop.hdfs.DistributedFileSystem;
+ * if the scheme is configured as "s3a", then the second configuration class
+ * name will be org.apache.hadoop.fs.s3a.S3AFileSystem.
+ *
+ * Use Case 1:
+ * ===========
+ * If users want some of their existing cluster (hdfs://Cluster)
+ * data to mount with other hdfs and object store clusters(hdfs://NN1,
+ * o3fs://bucket1.volume1/, s3a://bucket1/)
+ *
+ * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user
+ * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data
+ * fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/
+ *
+ * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA
+ * Op2: Create file hdfs://Cluster/data/datafile will go to
+ * o3fs://bucket1.volume1/data/datafile
+ * Op3: Create file hdfs://Cluster/backup/data.zip will go to
+ * s3a://bucket1/backup/data.zip
+ *
+ * Use Case 2:
+ * ===========
+ * If users want some of their existing cluster (s3a://bucketA/)
+ * data to mount with other hdfs and object store clusters
+ * (hdfs://NN1, o3fs://bucket1.volume1/)
+ *
+ * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user
+ * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data
+ * fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/
+ *
+ * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA
+ * Op2: Create file s3a://bucketA/data/datafile will go to
+ * o3fs://bucket1.volume1/data/datafile
+ * Op3: Create file s3a://bucketA/salesDB/dbfile will go to
+ * s3a://bucketA/salesDB/dbfile
+ *****************************************************************************/
+@InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" })
+@InterfaceStability.Evolving
+public class ViewFileSystemOverloadScheme extends ViewFileSystem {
+ private URI myUri;
+ public ViewFileSystemOverloadScheme() throws IOException {
+ super();
+ }
+
+ @Override
+ public String getScheme() {
+ return myUri.getScheme();
+ }
+
+ @Override
+ public void initialize(URI theUri, Configuration conf) throws IOException {
+ this.myUri = theUri;
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Initializing the ViewFileSystemOverloadScheme with the uri: "
+ + theUri);
+ }
+ String mountTableConfigPath =
+ conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH);
+ if (null != mountTableConfigPath) {
+ MountTableConfigLoader loader = new HCFSMountTableConfigLoader();
+ loader.load(mountTableConfigPath, conf);
+ } else {
+ // TODO: Should we fail here?
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Missing configuration for fs.viewfs.mounttable.path. Proceeding "
+ + "with core-site.xml mount-table information if available.");
+ }
+ }
+ super.initialize(theUri, conf);
+ }
+
+ /**
+ * This method is overridden because, in ViewFileSystemOverloadScheme, if the
+ * overloaded scheme matches the mounted target fs scheme, the file system
+ * should be created without going through fs.<scheme>.impl based resolution.
+ * Otherwise it would end up in an infinite loop, as the target would be
+ * resolved again to ViewFileSystemOverloadScheme because fs.<scheme>.impl
+ * points to ViewFileSystemOverloadScheme. So this method initializes such
+ * targets via fs.viewfs.overload.scheme.target.<scheme>.impl. Other schemes
+ * go through FileSystem.newInstance.
+ */
+ @Override
+ protected FsGetter fsGetter() {
+ return new ChildFsGetter(getScheme());
+ }
+
+ /**
+ * This class checks whether the rootScheme is the same as the URI scheme. If
+ * they are the same, it initializes file systems using the configured
+ * fs.viewfs.overload.scheme.target.<scheme>.impl class.
+ */
+ static class ChildFsGetter extends FsGetter {
+
+ private final String rootScheme;
+
+ ChildFsGetter(String rootScheme) {
+ this.rootScheme = rootScheme;
+ }
+
+ @Override
+ public FileSystem getNewInstance(URI uri, Configuration conf)
+ throws IOException {
+ if (uri.getScheme().equals(this.rootScheme)) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "The file system initialized uri scheme is matching with the "
+ + "given target uri scheme. The target uri is: " + uri);
+ }
+ /*
+ * Avoid looping when target fs scheme is matching to overloaded scheme.
+ */
+ return createFileSystem(uri, conf);
+ } else {
+ return FileSystem.newInstance(uri, conf);
+ }
+ }
+
+ /**
+ * When ViewFileSystemOverloadScheme scheme and target uri scheme are
+ * matching, it will not take advantage of FileSystem cache as it will
+ * create instance directly. For caching needs please set
+ * "fs.viewfs.enable.inner.cache" to true.
+ */
+ @Override
+ public FileSystem get(URI uri, Configuration conf) throws IOException {
+ if (uri.getScheme().equals(this.rootScheme)) {
+ // Avoid looping when target fs scheme is matching to overloaded
+ // scheme.
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "The file system initialized uri scheme is matching with the "
+ + "given target uri scheme. So, the target file system "
+ + "instances will not be cached. To cache fs instances, "
+ + "please set fs.viewfs.enable.inner.cache to true. "
+ + "The target uri is: " + uri);
+ }
+ return createFileSystem(uri, conf);
+ } else {
+ return FileSystem.get(uri, conf);
+ }
+ }
+
+ private FileSystem createFileSystem(URI uri, Configuration conf)
+ throws IOException {
+ final String fsImplConf = String.format(
+ FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN,
+ uri.getScheme());
+ Class<?> clazz = conf.getClass(fsImplConf, null);
+ if (clazz == null) {
+ throw new UnsupportedFileSystemException(
+ String.format("%s=null: %s: %s", fsImplConf,
+ "No overload scheme fs configured", uri.getScheme()));
+ }
+ FileSystem fs = (FileSystem) newInstance(clazz, uri, conf);
+ fs.initialize(uri, conf);
+ return fs;
+ }
+
+ private <T> T newInstance(Class<T> theClass, URI uri, Configuration conf) {
+ T result;
+ try {
+ Constructor<T> meth = theClass.getConstructor();
+ meth.setAccessible(true);
+ result = meth.newInstance();
+ } catch (InvocationTargetException e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof RuntimeException) {
+ throw (RuntimeException) cause;
+ } else {
+ throw new RuntimeException(cause);
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return result;
+ }
+
+ }
+
+ /**
+ * This is an admin-only API that gives access to the child raw file system
+ * when the path is a link. If the given path is an internal directory (a path
+ * within the mount tree), it initializes the file system of the given path
+ * URI directly. If the path cannot be resolved to any internal directory or
+ * link, it throws NotInMountpointException. Note that this API does not
+ * return a chrooted file system; instead, it returns the actual raw file
+ * system instance.
+ *
+ * @param path - fs uri path
+ * @param conf - configuration
+ * @throws IOException
+ */
+ public FileSystem getRawFileSystem(Path path, Configuration conf)
+ throws IOException {
+ InodeTree.ResolveResult<FileSystem> res;
+ try {
+ res = fsState.resolve(getUriPath(path), true);
+ return res.isInternalDir() ? fsGetter().get(path.toUri(), conf)
+ : ((ChRootedFileSystem) res.targetFileSystem).getMyFs();
+ } catch (FileNotFoundException e) {
+ // No link configured with passed path.
+ throw new NotInMountpointException(path,
+ "No link found for the given path.");
+ }
+ }
+
+}
\ No newline at end of file
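
A hedged end-to-end sketch of the configuration described in the class javadoc above, expressed programmatically. The cluster name, NameNode URI and mount link are illustrative, and the same keys would normally live in core-site.xml:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OverloadSchemeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Route the hdfs:// scheme through the overload-scheme view fs.
    conf.set("fs.hdfs.impl",
        "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
    // Real target implementation for hdfs when a link resolves back to hdfs.
    conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");
    // Illustrative mount link: hdfs://Cluster/user -> hdfs://NN1/user
    conf.set("fs.viewfs.mounttable.Cluster.link./user", "hdfs://NN1/user");
    FileSystem fs = FileSystem.get(URI.create("hdfs://Cluster/"), conf);
    System.out.println(fs.getFileStatus(new Path("/user")));
  }
}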
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 2c8c1a538e433..607bdb8d423a0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -25,10 +25,12 @@
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -950,10 +952,19 @@ public int getUriDefaultPort() {
return -1;
}
+ /**
+ * {@inheritDoc}
+ *
+ * Note: listStatus on root("/") considers listing from fallbackLink if
+ * available. If the same directory name is present in configured mount
+ * path as well as in fallback link, then only the configured mount path
+ * will be listed in the returned result.
+ */
@Override
public FileStatus[] listStatus(final Path f) throws AccessControlException,
IOException {
checkPathIsSlash(f);
+ FileStatus[] fallbackStatuses = listStatusForFallbackLink();
FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()];
int i = 0;
for (Entry<String, INode<AbstractFileSystem>> iEntry :
@@ -979,7 +990,45 @@ public FileStatus[] listStatus(final Path f) throws AccessControlException,
myUri, null));
}
}
- return result;
+ if (fallbackStatuses.length > 0) {
+ return consolidateFileStatuses(fallbackStatuses, result);
+ } else {
+ return result;
+ }
+ }
+
+ private FileStatus[] consolidateFileStatuses(FileStatus[] fallbackStatuses,
+ FileStatus[] mountPointStatuses) {
+ ArrayList<FileStatus> result = new ArrayList<>();
+ Set<String> pathSet = new HashSet<>();
+ for (FileStatus status : mountPointStatuses) {
+ result.add(status);
+ pathSet.add(status.getPath().getName());
+ }
+ for (FileStatus status : fallbackStatuses) {
+ if (!pathSet.contains(status.getPath().getName())) {
+ result.add(status);
+ }
+ }
+ return result.toArray(new FileStatus[0]);
+ }
+
+ private FileStatus[] listStatusForFallbackLink() throws IOException {
+ if (theInternalDir.isRoot() &&
+ theInternalDir.getFallbackLink() != null) {
+ AbstractFileSystem linkedFs =
+ theInternalDir.getFallbackLink().getTargetFileSystem();
+ // Fallback link is only applicable for root
+ FileStatus[] statuses = linkedFs.listStatus(new Path("/"));
+ for (FileStatus status : statuses) {
+ // Fix the path back to viewfs scheme
+ status.setPath(
+ new Path(myUri.toString(), status.getPath().getName()));
+ }
+ return statuses;
+ } else {
+ return new FileStatus[0];
+ }
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java
new file mode 100644
index 0000000000000..89986d0e5ef69
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * ViewFileSystem and ViewFileSystemOverloadScheme classes.
+ */
+@InterfaceAudience.LimitedPrivate({"MapReduce", "HBase", "Hive" })
+@InterfaceStability.Stable
+package org.apache.hadoop.fs.viewfs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index 12de2ef91c413..828a17bcb972e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -579,6 +579,11 @@ public synchronized void processResult(int rc, String path, Object ctx,
fatalError(errorMessage);
}
+ @VisibleForTesting
+ public boolean getWantToBeInElection() {
+ return wantToBeInElection;
+ }
+
/**
* We failed to become active. Re-join the election, but
* sleep for a few seconds after terminating our existing
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 0693dce4281ec..0950ea7e01c57 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -39,7 +39,6 @@
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -52,16 +51,15 @@
@InterfaceAudience.Private
public abstract class HAAdmin extends Configured implements Tool {
-
- private static final String FORCEFENCE = "forcefence";
- private static final String FORCEACTIVE = "forceactive";
-
+
+ protected static final String FORCEACTIVE = "forceactive";
+
/**
* Undocumented flag which allows an administrator to use manual failover
* state transitions even when auto-failover is enabled. This is an unsafe
* operation, which is why it is not documented in the usage below.
*/
- private static final String FORCEMANUAL = "forcemanual";
+ protected static final String FORCEMANUAL = "forcemanual";
private static final Logger LOG = LoggerFactory.getLogger(HAAdmin.class);
private int rpcTimeoutForChecks = -1;
@@ -72,15 +70,6 @@ public abstract class HAAdmin extends Configured implements Tool {
new UsageInfo("[--"+FORCEACTIVE+"] ", "Transitions the service into Active state"))
.put("-transitionToStandby",
new UsageInfo("", "Transitions the service into Standby state"))
- .put("-transitionToObserver",
- new UsageInfo("",
- "Transitions the service into Observer state"))
- .put("-failover",
- new UsageInfo("[--"+FORCEFENCE+"] [--"+FORCEACTIVE+"] ",
- "Failover from the first service to the second.\n" +
- "Unconditionally fence services if the --"+FORCEFENCE+" option is used.\n" +
- "Try to failover to the target service even if it is not ready if the " +
- "--" + FORCEACTIVE + " option is used."))
.put("-getServiceState",
new UsageInfo("", "Returns the state of the service"))
.put("-getAllServiceState",
@@ -99,6 +88,14 @@ public abstract class HAAdmin extends Configured implements Tool {
protected PrintStream out = System.out;
private RequestSource requestSource = RequestSource.REQUEST_BY_USER;
+ protected RequestSource getRequestSource() {
+ return requestSource;
+ }
+
+ protected void setRequestSource(RequestSource requestSource) {
+ this.requestSource = requestSource;
+ }
+
protected HAAdmin() {
super();
}
@@ -118,34 +115,44 @@ protected String getUsageString() {
return "Usage: HAAdmin";
}
- protected void printUsage(PrintStream errOut) {
- errOut.println(getUsageString());
- for (Map.Entry<String, UsageInfo> e : USAGE.entrySet()) {
+ protected void printUsage(PrintStream pStr,
+ Map<String, UsageInfo> helpEntries) {
+ pStr.println(getUsageString());
+ for (Map.Entry<String, UsageInfo> e : helpEntries.entrySet()) {
String cmd = e.getKey();
UsageInfo usage = e.getValue();
-
+
if (usage.args == null) {
- errOut.println(" [" + cmd + "]");
+ pStr.println(" [" + cmd + "]");
} else {
- errOut.println(" [" + cmd + " " + usage.args + "]");
+ pStr.println(" [" + cmd + " " + usage.args + "]");
}
}
- errOut.println();
- ToolRunner.printGenericCommandUsage(errOut);
+ pStr.println();
+ ToolRunner.printGenericCommandUsage(pStr);
}
-
- private void printUsage(PrintStream errOut, String cmd) {
- UsageInfo usage = USAGE.get(cmd);
+
+ protected void printUsage(PrintStream pStr) {
+ printUsage(pStr, USAGE);
+ }
+
+ protected void printUsage(PrintStream pStr, String cmd,
+ Map<String, UsageInfo> helpEntries) {
+ UsageInfo usage = helpEntries.get(cmd);
if (usage == null) {
throw new RuntimeException("No usage for cmd " + cmd);
}
if (usage.args == null) {
- errOut.println(getUsageString() + " [" + cmd + "]");
+ pStr.println(getUsageString() + " [" + cmd + "]");
} else {
- errOut.println(getUsageString() + " [" + cmd + " " + usage.args + "]");
+ pStr.println(getUsageString() + " [" + cmd + " " + usage.args + "]");
}
}
+ protected void printUsage(PrintStream pStr, String cmd) {
+ printUsage(pStr, cmd, USAGE);
+ }
+
private int transitionToActive(final CommandLine cmd)
throws IOException, ServiceFailedException {
String[] argv = cmd.getArgs();
@@ -225,27 +232,6 @@ private int transitionToStandby(final CommandLine cmd)
return 0;
}
- private int transitionToObserver(final CommandLine cmd)
- throws IOException, ServiceFailedException {
- String[] argv = cmd.getArgs();
- if (argv.length != 1) {
- errOut.println("transitionToObserver: incorrect number of arguments");
- printUsage(errOut, "-transitionToObserver");
- return -1;
- }
-
- HAServiceTarget target = resolveTarget(argv[0]);
- if (!checkSupportObserver(target)) {
- return -1;
- }
- if (!checkManualStateManagementOK(target)) {
- return -1;
- }
- HAServiceProtocol proto = target.getProxy(getConf(), 0);
- HAServiceProtocolHelper.transitionToObserver(proto, createReqInfo());
- return 0;
- }
-
/**
* Ensure that we are allowed to manually manage the HA state of the target
* service. If automatic failover is configured, then the automatic
@@ -255,7 +241,7 @@ private int transitionToObserver(final CommandLine cmd)
* @param target the target to check
* @return true if manual state management is allowed
*/
- private boolean checkManualStateManagementOK(HAServiceTarget target) {
+ protected boolean checkManualStateManagementOK(HAServiceTarget target) {
if (target.isAutoFailoverEnabled()) {
if (requestSource != RequestSource.REQUEST_BY_USER_FORCED) {
errOut.println(
@@ -274,93 +260,19 @@ private boolean checkManualStateManagementOK(HAServiceTarget target) {
return true;
}
- /**
- * Check if the target supports the Observer state.
- * @param target the target to check
- * @return true if the target support Observer state, false otherwise.
- */
- private boolean checkSupportObserver(HAServiceTarget target) {
- if (target.supportObserver()) {
- return true;
- } else {
- errOut.println(
- "The target " + target + " doesn't support Observer state.");
- return false;
- }
- }
-
- private StateChangeRequestInfo createReqInfo() {
+ protected StateChangeRequestInfo createReqInfo() {
return new StateChangeRequestInfo(requestSource);
}
- private int failover(CommandLine cmd)
- throws IOException, ServiceFailedException {
- boolean forceFence = cmd.hasOption(FORCEFENCE);
- boolean forceActive = cmd.hasOption(FORCEACTIVE);
-
- int numOpts = cmd.getOptions() == null ? 0 : cmd.getOptions().length;
- final String[] args = cmd.getArgs();
-
- if (numOpts > 3 || args.length != 2) {
- errOut.println("failover: incorrect arguments");
- printUsage(errOut, "-failover");
- return -1;
- }
-
- HAServiceTarget fromNode = resolveTarget(args[0]);
- HAServiceTarget toNode = resolveTarget(args[1]);
-
- // Check that auto-failover is consistently configured for both nodes.
- Preconditions.checkState(
- fromNode.isAutoFailoverEnabled() ==
- toNode.isAutoFailoverEnabled(),
- "Inconsistent auto-failover configs between %s and %s!",
- fromNode, toNode);
-
- if (fromNode.isAutoFailoverEnabled()) {
- if (forceFence || forceActive) {
- // -forceActive doesn't make sense with auto-HA, since, if the node
- // is not healthy, then its ZKFC will immediately quit the election
- // again the next time a health check runs.
- //
- // -forceFence doesn't seem to have any real use cases with auto-HA
- // so it isn't implemented.
- errOut.println(FORCEFENCE + " and " + FORCEACTIVE + " flags not " +
- "supported with auto-failover enabled.");
- return -1;
- }
- try {
- return gracefulFailoverThroughZKFCs(toNode);
- } catch (UnsupportedOperationException e){
- errOut.println("Failover command is not supported with " +
- "auto-failover enabled: " + e.getLocalizedMessage());
- return -1;
- }
- }
-
- FailoverController fc = new FailoverController(getConf(),
- requestSource);
-
- try {
- fc.failover(fromNode, toNode, forceFence, forceActive);
- out.println("Failover from "+args[0]+" to "+args[1]+" successful");
- } catch (FailoverFailedException ffe) {
- errOut.println("Failover failed: " + ffe.getLocalizedMessage());
- return -1;
- }
- return 0;
- }
-
-
/**
* Initiate a graceful failover by talking to the target node's ZKFC.
* This sends an RPC to the ZKFC, which coordinates the failover.
- *
+ *
* @param toNode the node to fail to
* @return status code (0 for success)
* @throws IOException if failover does not succeed
*/
- private int gracefulFailoverThroughZKFCs(HAServiceTarget toNode)
+ protected int gracefulFailoverThroughZKFCs(HAServiceTarget toNode)
throws IOException {
int timeout = FailoverController.getRpcTimeoutToNewActive(getConf());
@@ -443,45 +355,52 @@ public int run(String[] argv) throws Exception {
return -1;
}
}
-
- protected int runCmd(String[] argv) throws Exception {
+
+ protected boolean checkParameterValidity(String[] argv,
+ Map<String, UsageInfo> helpEntries) {
+
if (argv.length < 1) {
- printUsage(errOut);
- return -1;
+ printUsage(errOut, helpEntries);
+ return false;
}
String cmd = argv[0];
-
if (!cmd.startsWith("-")) {
- errOut.println("Bad command '" + cmd + "': expected command starting with '-'");
- printUsage(errOut);
- return -1;
+ errOut.println("Bad command '" + cmd +
+ "': expected command starting with '-'");
+ printUsage(errOut, helpEntries);
+ return false;
}
-
- if (!USAGE.containsKey(cmd)) {
+
+ if (!helpEntries.containsKey(cmd)) {
errOut.println(cmd.substring(1) + ": Unknown command");
- printUsage(errOut);
+ printUsage(errOut, helpEntries);
+ return false;
+ }
+ return true;
+ }
+
+ protected boolean checkParameterValidity(String[] argv){
+ return checkParameterValidity(argv, USAGE);
+ }
+
+ protected int runCmd(String[] argv) throws Exception {
+ if (!checkParameterValidity(argv, USAGE)){
return -1;
}
-
- Options opts = new Options();
+ String cmd = argv[0];
+ Options opts = new Options();
// Add command-specific options
- if ("-failover".equals(cmd)) {
- addFailoverCliOpts(opts);
- }
if("-transitionToActive".equals(cmd)) {
addTransitionToActiveCliOpts(opts);
}
// Mutative commands take FORCEMANUAL option
if ("-transitionToActive".equals(cmd) ||
- "-transitionToStandby".equals(cmd) ||
- "-transitionToObserver".equals(cmd) ||
- "-failover".equals(cmd)) {
+ "-transitionToStandby".equals(cmd)) {
opts.addOption(FORCEMANUAL, false,
"force manual control even if auto-failover is enabled");
}
-
CommandLine cmdLine = parseOpts(cmd, opts, argv);
if (cmdLine == null) {
// error already printed
@@ -502,10 +421,6 @@ protected int runCmd(String[] argv) throws Exception {
return transitionToActive(cmdLine);
} else if ("-transitionToStandby".equals(cmd)) {
return transitionToStandby(cmdLine);
- } else if ("-transitionToObserver".equals(cmd)) {
- return transitionToObserver(cmdLine);
- } else if ("-failover".equals(cmd)) {
- return failover(cmdLine);
} else if ("-getServiceState".equals(cmd)) {
return getServiceState(cmdLine);
} else if ("-getAllServiceState".equals(cmd)) {
@@ -544,7 +459,7 @@ protected int getAllServiceState() {
return 0;
}
- private boolean confirmForceManual() throws IOException {
+ protected boolean confirmForceManual() throws IOException {
return ToolRunner.confirmPrompt(
"You have specified the --" + FORCEMANUAL + " flag. This flag is " +
"dangerous, as it can induce a split-brain scenario that WILL " +
@@ -559,16 +474,7 @@ private boolean confirmForceManual() throws IOException {
"Are you sure you want to continue?");
}
- /**
- * Add CLI options which are specific to the failover command and no
- * others.
- */
- private void addFailoverCliOpts(Options failoverOpts) {
- failoverOpts.addOption(FORCEFENCE, false, "force fencing");
- failoverOpts.addOption(FORCEACTIVE, false, "force failover");
- // Don't add FORCEMANUAL, since that's added separately for all commands
- // that change state.
- }
+
/**
* Add CLI options which are specific to the transitionToActive command and
@@ -577,39 +483,47 @@ private void addFailoverCliOpts(Options failoverOpts) {
private void addTransitionToActiveCliOpts(Options transitionToActiveCliOpts) {
transitionToActiveCliOpts.addOption(FORCEACTIVE, false, "force active");
}
-
- private CommandLine parseOpts(String cmdName, Options opts, String[] argv) {
+
+ protected CommandLine parseOpts(String cmdName, Options opts, String[] argv,
+ Map<String, UsageInfo> helpEntries) {
try {
// Strip off the first arg, since that's just the command name
- argv = Arrays.copyOfRange(argv, 1, argv.length);
+ argv = Arrays.copyOfRange(argv, 1, argv.length);
return new GnuParser().parse(opts, argv);
} catch (ParseException pe) {
errOut.println(cmdName.substring(1) +
": incorrect arguments");
- printUsage(errOut, cmdName);
+ printUsage(errOut, cmdName, helpEntries);
return null;
}
}
- private int help(String[] argv) {
+ protected CommandLine parseOpts(String cmdName, Options opts, String[] argv) {
+ return parseOpts(cmdName, opts, argv, USAGE);
+ }
+ protected int help(String[] argv) {
+ return help(argv, USAGE);
+ }
+
+ protected int help(String[] argv, Map<String, UsageInfo> helpEntries) {
if (argv.length == 1) { // only -help
- printUsage(out);
+ printUsage(out, helpEntries);
return 0;
} else if (argv.length != 2) {
- printUsage(errOut, "-help");
+ printUsage(errOut, "-help", helpEntries);
return -1;
}
String cmd = argv[1];
if (!cmd.startsWith("-")) {
cmd = "-" + cmd;
}
- UsageInfo usageInfo = USAGE.get(cmd);
+ UsageInfo usageInfo = helpEntries.get(cmd);
if (usageInfo == null) {
errOut.println(cmd + ": Unknown command");
- printUsage(errOut);
+ printUsage(errOut, helpEntries);
return -1;
}
-
+
if (usageInfo.args == null) {
out.println(cmd + ": " + usageInfo.help);
} else {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
index 7ea5188ad8338..61ea53c420ab1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -33,7 +34,7 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authorize.PolicyProvider;
-import com.google.protobuf.BlockingService;
+import org.apache.hadoop.thirdparty.protobuf.BlockingService;
@InterfaceAudience.LimitedPrivate("HDFS")
@InterfaceStability.Evolving
@@ -63,6 +64,12 @@ public class ZKFCRpcServer implements ZKFCProtocol {
// set service-level authorization security policy
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+ if (policy == null) {
+ throw new HadoopIllegalArgumentException(
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
+ + "is configured to true but service-level"
+ + "authorization security policy is null.");
+ }
server.refreshServiceAcl(conf, policy);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index ee4ca1a6084a9..3718b7cdb0cc0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -157,7 +157,10 @@ public HAServiceTarget getLocalTarget() {
return localTarget;
}
- HAServiceState getServiceState() { return serviceState; }
+ @VisibleForTesting
+ public HAServiceState getServiceState() {
+ return serviceState;
+ }
public int run(final String[] args) throws Exception {
if (!localTarget.isAutoFailoverEnabled()) {
@@ -315,9 +318,10 @@ private void initHM() {
healthMonitor.addServiceStateCallback(new ServiceStateCallBacks());
healthMonitor.start();
}
-
+
protected void initRPC() throws IOException {
InetSocketAddress bindAddr = getRpcAddressToBindTo();
+ LOG.info("ZKFC RpcServer binding to {}", bindAddr);
rpcServer = new ZKFCRpcServer(conf, bindAddr, this, getPolicyProvider());
}
@@ -799,7 +803,9 @@ private void recheckElectability() {
switch (lastHealthState) {
case SERVICE_HEALTHY:
- elector.joinElection(targetToData(localTarget));
+ if(serviceState != HAServiceState.OBSERVER) {
+ elector.joinElection(targetToData(localTarget));
+ }
if (quitElectionOnBadState) {
quitElectionOnBadState = false;
}
@@ -909,7 +915,7 @@ protected synchronized void setLastHealthState(HealthMonitor.State newState) {
}
@VisibleForTesting
- ActiveStandbyElector getElectorForTests() {
+ public ActiveStandbyElector getElectorForTests() {
return elector;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
index fec519f3761d4..e53820cd13107 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
@@ -43,8 +43,8 @@
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.RpcController;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
/**
* This class is the client side translator to translate the requests made on
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
index 72787cfe9937a..8613a469779f3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
@@ -40,8 +40,8 @@
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.RpcController;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
index 62896fa8e7418..7001d93995f0f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
@@ -34,8 +34,8 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.RpcController;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
public class ZKFCProtocolClientSideTranslatorPB implements
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java
index 549499885df41..f822200ab9fa0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java
@@ -29,8 +29,8 @@
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.RpcController;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
@InterfaceAudience.Private
@InterfaceStability.Stable
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 7e7d64423f169..3fd74f0e89a27 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -154,6 +154,10 @@ public final class HttpServer2 implements FilterContainer {
public static final String FILTER_INITIALIZER_PROPERTY
= "hadoop.http.filter.initializers";
+ public static final String HTTP_SNI_HOST_CHECK_ENABLED_KEY
+ = "hadoop.http.sni.host.check.enabled";
+ public static final boolean HTTP_SNI_HOST_CHECK_ENABLED_DEFAULT = false;
+
// The ServletContext attribute where the daemon Configuration
// gets stored.
public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
@@ -233,6 +237,8 @@ public static class Builder {
private boolean xFrameEnabled;
private XFrameOption xFrameOption = XFrameOption.SAMEORIGIN;
+ private boolean sniHostCheckEnabled;
+
public Builder setName(String name){
this.name = name;
return this;
@@ -377,6 +383,17 @@ public Builder setXFrameOption(String option) {
return this;
}
+ /**
+ * Enable or disable sniHostCheck.
+ *
+ * @param sniHostCheckEnabled Enable sniHostCheck if true, else disable it.
+ * @return Builder.
+ */
+ public Builder setSniHostCheckEnabled(boolean sniHostCheckEnabled) {
+ this.sniHostCheckEnabled = sniHostCheckEnabled;
+ return this;
+ }
+
/**
* A wrapper of {@link Configuration#getPassword(String)}. It returns
* String instead of char[].
@@ -471,6 +488,13 @@ public HttpServer2 build() throws IOException {
int backlogSize = conf.getInt(HTTP_SOCKET_BACKLOG_SIZE_KEY,
HTTP_SOCKET_BACKLOG_SIZE_DEFAULT);
+ // If setSniHostCheckEnabled() is used to enable SNI hostname check,
+ // configuration lookup is skipped.
+ if (!sniHostCheckEnabled) {
+ sniHostCheckEnabled = conf.getBoolean(HTTP_SNI_HOST_CHECK_ENABLED_KEY,
+ HTTP_SNI_HOST_CHECK_ENABLED_DEFAULT);
+ }
+
for (URI ep : endpoints) {
final ServerConnector connector;
String scheme = ep.getScheme();
@@ -514,22 +538,29 @@ private ServerConnector createHttpChannelConnector(
private ServerConnector createHttpsChannelConnector(
Server server, HttpConfiguration httpConfig) {
httpConfig.setSecureScheme(HTTPS_SCHEME);
- httpConfig.addCustomizer(new SecureRequestCustomizer());
+ httpConfig.addCustomizer(
+ new SecureRequestCustomizer(sniHostCheckEnabled));
ServerConnector conn = createHttpChannelConnector(server, httpConfig);
SslContextFactory.Server sslContextFactory =
new SslContextFactory.Server();
sslContextFactory.setNeedClientAuth(needsClientAuth);
- sslContextFactory.setKeyManagerPassword(keyPassword);
+ if (keyPassword != null) {
+ sslContextFactory.setKeyManagerPassword(keyPassword);
+ }
if (keyStore != null) {
sslContextFactory.setKeyStorePath(keyStore);
sslContextFactory.setKeyStoreType(keyStoreType);
- sslContextFactory.setKeyStorePassword(keyStorePassword);
+ if (keyStorePassword != null) {
+ sslContextFactory.setKeyStorePassword(keyStorePassword);
+ }
}
if (trustStore != null) {
sslContextFactory.setTrustStorePath(trustStore);
sslContextFactory.setTrustStoreType(trustStoreType);
- sslContextFactory.setTrustStorePassword(trustStorePassword);
+ if (trustStorePassword != null) {
+ sslContextFactory.setTrustStorePassword(trustStorePassword);
+ }
}
if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
sslContextFactory.setExcludeCipherSuites(
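Reviewer note (not part of the patch): a minimal sketch of the two ways the new SNI host check can be switched on, either through the new configuration key or through the new Builder setter which skips the configuration lookup. The server name and endpoint are placeholders.

package org.example;

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public final class SniCheckExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Option 1: rely on the configuration lookup added in build().
    conf.setBoolean(HttpServer2.HTTP_SNI_HOST_CHECK_ENABLED_KEY, true);

    HttpServer2 server = new HttpServer2.Builder()
        .setName("example")
        .addEndpoint(URI.create("http://localhost:0"))
        .setConf(conf)
        // Option 2: set it explicitly; build() then skips the config lookup.
        .setSniHostCheckEnabled(true)
        .build();
    server.start();
    server.stop();
  }
}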
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
index 44b6aaa7af310..c6bc0536473e9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
@@ -18,8 +18,10 @@
package org.apache.hadoop.io;
-import java.io.*;
-import java.lang.reflect.Array;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -42,7 +44,7 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ArrayWritable implements Writable {
- private Class<? extends Writable> valueClass;
+ private final Class<? extends Writable> valueClass;
private Writable[] values;
public ArrayWritable(Class<? extends Writable> valueClass) {
@@ -64,7 +66,7 @@ public ArrayWritable(String[] strings) {
}
}
- public Class getValueClass() {
+ public Class<? extends Writable> getValueClass() {
return valueClass;
}
@@ -77,16 +79,16 @@ public String[] toStrings() {
}
public Object toArray() {
- Object result = Array.newInstance(valueClass, values.length);
- for (int i = 0; i < values.length; i++) {
- Array.set(result, i, values[i]);
- }
- return result;
+ return Arrays.copyOf(values, values.length);
}
- public void set(Writable[] values) { this.values = values; }
+ public void set(Writable[] values) {
+ this.values = values;
+ }
- public Writable[] get() { return values; }
+ public Writable[] get() {
+ return values;
+ }
@Override
public void readFields(DataInput in) throws IOException {
@@ -106,5 +108,11 @@ public void write(DataOutput out) throws IOException {
}
}
+ @Override
+ public String toString() {
+ return "ArrayWritable [valueClass=" + valueClass + ", values="
+ + Arrays.toString(values) + "]";
+ }
+
}
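Reviewer note (not part of the patch): a minimal sketch of the behaviour touched above; toArray() now returns a defensive Writable[] copy, and the class gains a toString(). Class and file names are placeholders.

package org.example;

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

public final class ArrayWritableExample {
  public static void main(String[] args) {
    ArrayWritable aw = new ArrayWritable(IntWritable.class);
    aw.set(new Writable[] {new IntWritable(1), new IntWritable(2)});

    // toArray() is a copy: mutating it does not affect the backing values.
    Writable[] copy = (Writable[]) aw.toArray();
    copy[0] = new IntWritable(42);

    System.out.println(aw);           // uses the new toString()
    System.out.println(aw.get()[0]);  // still 1
  }
}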
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
index 7d7b75ba05a00..c5538c9e56e85 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
@@ -19,6 +19,9 @@
package org.apache.hadoop.io;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import java.io.DataInput;
import java.io.DataOutput;
@@ -35,17 +38,22 @@
@InterfaceStability.Stable
public class BytesWritable extends BinaryComparable
implements WritableComparable<BinaryComparable> {
+ private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
private static final int LENGTH_BYTES = 4;
- private static final byte[] EMPTY_BYTES = {};
+
+ private static final byte[] EMPTY_BYTES = new byte[0];
private int size;
private byte[] bytes;
-
+
/**
* Create a zero-size sequence.
*/
- public BytesWritable() {this(EMPTY_BYTES);}
-
+ public BytesWritable() {
+ this.bytes = EMPTY_BYTES;
+ this.size = 0;
+ }
+
/**
* Create a BytesWritable using the byte array as the initial value.
* @param bytes This array becomes the backing storage for the object.
@@ -65,17 +73,15 @@ public BytesWritable(byte[] bytes, int length) {
this.bytes = bytes;
this.size = length;
}
-
+
/**
* Get a copy of the bytes that is exactly the length of the data.
* See {@link #getBytes()} for faster access to the underlying array.
*/
public byte[] copyBytes() {
- byte[] result = new byte[size];
- System.arraycopy(bytes, 0, result, 0, size);
- return result;
+ return Arrays.copyOf(bytes, size);
}
-
+
/**
* Get the data backing the BytesWritable. Please use {@link #copyBytes()}
* if you need the returned array to be precisely the length of the data.
@@ -111,7 +117,7 @@ public int getLength() {
public int getSize() {
return getLength();
}
-
+
/**
* Change the size of the buffer. The values in the old range are preserved
* and any new values are undefined. The capacity is changed if it is
@@ -121,41 +127,37 @@ public int getSize() {
public void setSize(int size) {
if (size > getCapacity()) {
// Avoid overflowing the int too early by casting to a long.
- long newSize = Math.min(Integer.MAX_VALUE, (3L * size) / 2L);
+ long newSize = Math.min(MAX_ARRAY_SIZE, (3L * size) / 2L);
setCapacity((int) newSize);
}
this.size = size;
}
-
+
/**
* Get the capacity, which is the maximum size that could handled without
* resizing the backing storage.
+ *
* @return The number of bytes
*/
public int getCapacity() {
return bytes.length;
}
-
+
/**
- * Change the capacity of the backing storage.
- * The data is preserved.
- * @param new_cap The new capacity in bytes.
+ * Change the capacity of the backing storage. The data is preserved.
+ *
+ * @param capacity The new capacity in bytes.
*/
- public void setCapacity(int new_cap) {
- if (new_cap != getCapacity()) {
- byte[] new_data = new byte[new_cap];
- if (new_cap < size) {
- size = new_cap;
- }
- if (size != 0) {
- System.arraycopy(bytes, 0, new_data, 0, size);
- }
- bytes = new_data;
+ public void setCapacity(final int capacity) {
+ if (capacity != getCapacity()) {
+ this.size = Math.min(size, capacity);
+ this.bytes = Arrays.copyOf(this.bytes, capacity);
}
}
/**
* Set the BytesWritable to the contents of the given newData.
+ *
* @param newData the value to set this BytesWritable to.
*/
public void set(BytesWritable newData) {
@@ -163,7 +165,8 @@ public void set(BytesWritable newData) {
}
/**
- * Set the value to a copy of the given byte range
+ * Set the value to a copy of the given byte range.
+ *
* @param newData the new values to copy in
* @param offset the offset in newData to start at
* @param length the number of bytes to copy
@@ -174,25 +177,18 @@ public void set(byte[] newData, int offset, int length) {
System.arraycopy(newData, offset, bytes, 0, size);
}
- // inherit javadoc
@Override
public void readFields(DataInput in) throws IOException {
setSize(0); // clear the old data
setSize(in.readInt());
in.readFully(bytes, 0, size);
}
-
- // inherit javadoc
+
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(size);
out.write(bytes, 0, size);
}
-
- @Override
- public int hashCode() {
- return super.hashCode();
- }
/**
* Are the two byte sequences equal?
@@ -204,25 +200,19 @@ public boolean equals(Object right_obj) {
return false;
}
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+
/**
* Generate the stream of bytes as hex pairs separated by ' '.
*/
@Override
- public String toString() {
- StringBuilder sb = new StringBuilder(3*size);
- for (int idx = 0; idx < size; idx++) {
- // if not the first, put a blank separator in
- if (idx != 0) {
- sb.append(' ');
- }
- String num = Integer.toHexString(0xff & bytes[idx]);
- // if it is only one digit, add a leading 0.
- if (num.length() < 2) {
- sb.append('0');
- }
- sb.append(num);
- }
- return sb.toString();
+ public String toString() {
+ return IntStream.range(0, size)
+ .mapToObj(idx -> String.format("%02x", bytes[idx]))
+ .collect(Collectors.joining(" "));
}
/** A Comparator optimized for BytesWritable. */
@@ -230,20 +220,20 @@ public static class Comparator extends WritableComparator {
public Comparator() {
super(BytesWritable.class);
}
-
+
/**
* Compare the buffers in serialized form.
*/
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
- return compareBytes(b1, s1+LENGTH_BYTES, l1-LENGTH_BYTES,
- b2, s2+LENGTH_BYTES, l2-LENGTH_BYTES);
+ return compareBytes(b1, s1 + LENGTH_BYTES, l1 - LENGTH_BYTES,
+ b2, s2 + LENGTH_BYTES, l2 - LENGTH_BYTES);
}
}
-
+
static { // register this comparator
WritableComparator.define(BytesWritable.class, new Comparator());
}
-
+
}
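Reviewer note (not part of the patch): a minimal sketch of the BytesWritable behaviour this refactor preserves; setCapacity() now shrinks via Arrays.copyOf and toString() renders the contents as space-separated hex pairs.

package org.example;

import org.apache.hadoop.io.BytesWritable;

public final class BytesWritableExample {
  public static void main(String[] args) {
    BytesWritable bw = new BytesWritable(new byte[] {0x0a, 0x1b, 0x2c});
    System.out.println(bw);              // "0a 1b 2c"

    bw.setCapacity(2);                   // shrinking the capacity also shrinks the size
    System.out.println(bw.getLength());  // 2
    System.out.println(bw);              // "0a 1b"
  }
}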
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
index 0f0f5c7405a6c..b35a32f288b4b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.ProtoUtil;
-import com.google.protobuf.Message;
+import org.apache.hadoop.thirdparty.protobuf.Message;
/** A polymorphic Writable that writes an instance with it's class name.
* Handles arrays, strings and primitive types without a Writable wrapper.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index 3ab327fe76a30..716de3deb4278 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -24,11 +24,11 @@
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
-import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.MalformedInputException;
+import java.nio.charset.StandardCharsets;
import java.text.CharacterIterator;
import java.text.StringCharacterIterator;
import java.util.Arrays;
@@ -52,63 +52,67 @@
@InterfaceStability.Stable
public class Text extends BinaryComparable
implements WritableComparable<BinaryComparable> {
-
+
private static final ThreadLocal<CharsetEncoder> ENCODER_FACTORY =
new ThreadLocal<CharsetEncoder>() {
@Override
protected CharsetEncoder initialValue() {
- return Charset.forName("UTF-8").newEncoder().
+ return StandardCharsets.UTF_8.newEncoder().
onMalformedInput(CodingErrorAction.REPORT).
onUnmappableCharacter(CodingErrorAction.REPORT);
}
};
-
+
private static final ThreadLocal<CharsetDecoder> DECODER_FACTORY =
new ThreadLocal<CharsetDecoder>() {
@Override
protected CharsetDecoder initialValue() {
- return Charset.forName("UTF-8").newDecoder().
+ return StandardCharsets.UTF_8.newDecoder().
onMalformedInput(CodingErrorAction.REPORT).
onUnmappableCharacter(CodingErrorAction.REPORT);
}
};
-
- private static final byte [] EMPTY_BYTES = new byte[0];
-
- private byte[] bytes;
- private int length;
+ private static final byte[] EMPTY_BYTES = new byte[0];
+
+ private byte[] bytes = EMPTY_BYTES;
+ private int length = 0;
+
+ /**
+ * Construct an empty text string.
+ */
public Text() {
- bytes = EMPTY_BYTES;
}
- /** Construct from a string.
+ /**
+ * Construct from a string.
*/
public Text(String string) {
set(string);
}
- /** Construct from another text. */
+ /**
+ * Construct from another text.
+ */
public Text(Text utf8) {
set(utf8);
}
- /** Construct from a byte array.
+ /**
+ * Construct from a byte array.
*/
public Text(byte[] utf8) {
set(utf8);
}
-
+
/**
* Get a copy of the bytes that is exactly the length of the data.
* See {@link #getBytes()} for faster access to the underlying array.
*/
public byte[] copyBytes() {
- byte[] result = new byte[length];
- System.arraycopy(bytes, 0, result, 0, length);
- return result;
+ return Arrays.copyOf(bytes, length);
}
-
+
/**
* Returns the raw bytes; however, only data up to {@link #getLength()} is
* valid. Please use {@link #copyBytes()} if you
@@ -119,12 +123,14 @@ public byte[] getBytes() {
return bytes;
}
- /** Returns the number of bytes in the byte array */
+ /**
+ * Returns the number of bytes in the byte array.
+ */
@Override
public int getLength() {
return length;
}
-
+
/**
* Returns the Unicode Scalar Value (32-bit integer value)
* for the character at position. Note that this
@@ -136,15 +142,15 @@ public int getLength() {
public int charAt(int position) {
if (position > this.length) return -1; // too long
if (position < 0) return -1; // duh.
-
+
ByteBuffer bb = (ByteBuffer)ByteBuffer.wrap(bytes).position(position);
return bytesToCodePoint(bb.slice());
}
-
+
public int find(String what) {
return find(what, 0);
}
-
+
/**
* Finds any occurrence of what in the backing
* buffer, starting as position start. The starting
@@ -156,11 +162,11 @@ public int find(String what) {
*/
public int find(String what, int start) {
try {
- ByteBuffer src = ByteBuffer.wrap(this.bytes,0,this.length);
+ ByteBuffer src = ByteBuffer.wrap(this.bytes, 0, this.length);
ByteBuffer tgt = encode(what);
byte b = tgt.get();
src.position(start);
-
+
while (src.hasRemaining()) {
if (b == src.get()) { // matching first byte
src.mark(); // save position in loop
@@ -186,54 +192,63 @@ public int find(String what, int start) {
}
return -1; // not found
} catch (CharacterCodingException e) {
- // can't get here
- e.printStackTrace();
- return -1;
+ throw new RuntimeException("Should not have happened", e);
}
- }
- /** Set to contain the contents of a string.
+ }
+
+ /**
+ * Set to contain the contents of a string.
*/
public void set(String string) {
try {
ByteBuffer bb = encode(string, true);
bytes = bb.array();
length = bb.limit();
- }catch(CharacterCodingException e) {
- throw new RuntimeException("Should not have happened ", e);
+ } catch (CharacterCodingException e) {
+ throw new RuntimeException("Should not have happened", e);
}
}
- /** Set to a utf8 byte array
+ /**
+ * Set to a utf8 byte array.
*/
public void set(byte[] utf8) {
set(utf8, 0, utf8.length);
}
-
- /** copy a text. */
+
+ /**
+ * Copy a text.
+ */
public void set(Text other) {
set(other.getBytes(), 0, other.getLength());
}
/**
- * Set the Text to range of bytes
+ * Set the Text to range of bytes.
+ *
* @param utf8 the data to copy from
* @param start the first position of the new string
* @param len the number of bytes of the new string
*/
public void set(byte[] utf8, int start, int len) {
- setCapacity(len, false);
+ ensureCapacity(len);
System.arraycopy(utf8, start, bytes, 0, len);
this.length = len;
}
/**
- * Append a range of bytes to the end of the given text
+ * Append a range of bytes to the end of the given text.
+ *
* @param utf8 the data to copy from
* @param start the first position to append from utf8
* @param len the number of bytes to append
*/
public void append(byte[] utf8, int start, int len) {
- setCapacity(length + len, true);
+ byte[] original = bytes;
+ int capacity = Math.max(length + len, length + (length >> 1));
+ if (ensureCapacity(capacity)) {
+ System.arraycopy(original, 0, bytes, 0, length);
+ }
System.arraycopy(utf8, start, bytes, length, len);
length += len;
}
@@ -250,47 +265,39 @@ public void clear() {
length = 0;
}
- /*
+ /**
* Sets the capacity of this Text object to at least
- * len bytes. If the current buffer is longer,
- * then the capacity and existing content of the buffer are
- * unchanged. If len is larger
- * than the current capacity, the Text object's capacity is
- * increased to match.
- * @param len the number of bytes we need
- * @param keepData should the old data be kept
- */
- private void setCapacity(int len, boolean keepData) {
- if (bytes == null || bytes.length < len) {
- if (bytes != null && keepData) {
- bytes = Arrays.copyOf(bytes, Math.max(len,length << 1));
- } else {
- bytes = new byte[len];
- }
+ * capacity bytes. If the current buffer is longer, then the
+ * capacity and existing content of the buffer are unchanged. If
+ * capacity is larger than the current capacity, the Text
+ * object's capacity is increased to match and any existing data is lost.
+ *
+ * @param capacity the number of bytes we need
+ * @return true if the internal array was resized or false otherwise
+ */
+ private boolean ensureCapacity(final int capacity) {
+ if (bytes.length < capacity) {
+ bytes = new byte[capacity];
+ return true;
}
+ return false;
}
-
- /**
- * Convert text back to string
- * @see java.lang.Object#toString()
- */
+
@Override
public String toString() {
try {
return decode(bytes, 0, length);
} catch (CharacterCodingException e) {
- throw new RuntimeException("Should not have happened " , e);
+ throw new RuntimeException("Should not have happened", e);
}
}
-
- /** deserialize
- */
+
@Override
public void readFields(DataInput in) throws IOException {
int newLength = WritableUtils.readVInt(in);
readWithKnownLength(in, newLength);
}
-
+
public void readFields(DataInput in, int maxLength) throws IOException {
int newLength = WritableUtils.readVInt(in);
if (newLength < 0) {
@@ -303,7 +310,9 @@ public void readFields(DataInput in, int maxLength) throws IOException {
readWithKnownLength(in, newLength);
}
- /** Skips over one Text in the input. */
+ /**
+ * Skips over one Text in the input.
+ */
public static void skip(DataInput in) throws IOException {
int length = WritableUtils.readVInt(in);
WritableUtils.skipFully(in, length);
@@ -315,14 +324,14 @@ public static void skip(DataInput in) throws IOException {
* format.
*/
public void readWithKnownLength(DataInput in, int len) throws IOException {
- setCapacity(len, false);
+ ensureCapacity(len);
in.readFully(bytes, 0, len);
length = len;
}
- /** serialize
- * write this object to out
- * length uses zero-compressed encoding
+ /**
+ * Serialize; write this object to out. The length uses zero-compressed encoding.
+ *
* @see Writable#write(DataOutput)
*/
@Override
@@ -341,7 +350,10 @@ public void write(DataOutput out, int maxLength) throws IOException {
out.write(bytes, 0, length);
}
- /** Returns true iff o is a Text with the same contents. */
+ /**
+ * Returns true iff o is a Text with the same length and same
+ * contents.
+ */
@Override
public boolean equals(Object o) {
if (o instanceof Text)
@@ -365,7 +377,7 @@ public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int n1 = WritableUtils.decodeVIntSize(b1[s1]);
int n2 = WritableUtils.decodeVIntSize(b2[s2]);
- return compareBytes(b1, s1+n1, l1-n1, b2, s2+n2, l2-n2);
+ return compareBytes(b1, s1 + n1, l1 - n1, b2, s2 + n2, l2 - n2);
}
}
@@ -383,12 +395,12 @@ public int compare(byte[] b1, int s1, int l1,
public static String decode(byte[] utf8) throws CharacterCodingException {
return decode(ByteBuffer.wrap(utf8), true);
}
-
+
public static String decode(byte[] utf8, int start, int length)
throws CharacterCodingException {
return decode(ByteBuffer.wrap(utf8, start, length), true);
}
-
+
/**
* Converts the provided byte array to a String using the
* UTF-8 encoding. If replace is true, then
@@ -400,7 +412,7 @@ public static String decode(byte[] utf8, int start, int length, boolean replace)
throws CharacterCodingException {
return decode(ByteBuffer.wrap(utf8, start, length), replace);
}
-
+
private static String decode(ByteBuffer utf8, boolean replace)
throws CharacterCodingException {
CharsetDecoder decoder = DECODER_FACTORY.get();
@@ -463,7 +475,7 @@ public static ByteBuffer encode(String string, boolean replace)
public static String readString(DataInput in) throws IOException {
return readString(in, Integer.MAX_VALUE);
}
-
+
/** Read a UTF8 encoded string with a maximum size
*/
public static String readString(DataInput in, int maxLength)
@@ -473,8 +485,9 @@ public static String readString(DataInput in, int maxLength)
in.readFully(bytes, 0, length);
return decode(bytes);
}
-
- /** Write a UTF8 encoded string to out
+
+ /**
+ * Write a UTF8 encoded string to out.
*/
public static int writeString(DataOutput out, String s) throws IOException {
ByteBuffer bytes = encode(s);
@@ -484,7 +497,8 @@ public static int writeString(DataOutput out, String s) throws IOException {
return length;
}
- /** Write a UTF8 encoded string with a maximum size to out
+ /**
+ * Write a UTF8 encoded string with a maximum size to out.
*/
public static int writeString(DataOutput out, String s, int maxLength)
throws IOException {
@@ -501,24 +515,26 @@ public static int writeString(DataOutput out, String s, int maxLength)
}
////// states for validateUTF8
-
+
private static final int LEAD_BYTE = 0;
private static final int TRAIL_BYTE_1 = 1;
private static final int TRAIL_BYTE = 2;
- /**
- * Check if a byte array contains valid utf-8
+ /**
+ * Check if a byte array contains valid UTF-8.
+ *
* @param utf8 byte array
- * @throws MalformedInputException if the byte array contains invalid utf-8
+ * @throws MalformedInputException if the byte array contains invalid UTF-8
*/
public static void validateUTF8(byte[] utf8) throws MalformedInputException {
- validateUTF8(utf8, 0, utf8.length);
+ validateUTF8(utf8, 0, utf8.length);
}
-
+
/**
- * Check to see if a byte array is valid utf-8
+ * Check to see if a byte array is valid UTF-8.
+ *
* @param utf8 the array of bytes
* @param start the offset of the first byte in the array
* @param len the length of the byte sequence
@@ -641,7 +657,6 @@ public static int bytesToCodePoint(ByteBuffer bytes) {
return ch;
}
-
static final int offsetsFromUTF8[] =
{ 0x00000000, 0x00003080,
0x000E2080, 0x03C82080, 0xFA082080, 0x82082080 };
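Reviewer note (not part of the patch): a minimal sketch of the append() path served by the new ensureCapacity() helper; appending grows the buffer while preserving the existing bytes, and copyBytes() returns exactly getLength() bytes.

package org.example;

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.Text;

public final class TextAppendExample {
  public static void main(String[] args) {
    Text t = new Text("foo");
    byte[] bar = "bar".getBytes(StandardCharsets.UTF_8);
    t.append(bar, 0, bar.length);              // existing content is copied on resize

    System.out.println(t);                     // foobar
    System.out.println(t.getLength());         // 6
    System.out.println(t.copyBytes().length);  // 6, even if the raw buffer is larger
  }
}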
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 99590eda679af..7fd5633daa698 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -236,7 +236,7 @@ public Decompressor createDecompressor() {
*/
@Override
public String getDefaultExtension() {
- return ".bz2";
+ return CodecConstants.BZIP2_CODEC_EXTENSION;
}
private static class BZip2CompressionOutputStream extends
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java
new file mode 100644
index 0000000000000..96410a18ebcb5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Codec related constants.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class CodecConstants {
+
+ private CodecConstants() {
+ }
+ /**
+ * Default extension for {@link org.apache.hadoop.io.compress.DefaultCodec}.
+ */
+ public static final String DEFAULT_CODEC_EXTENSION = ".deflate";
+
+ /**
+ * Default extension for {@link org.apache.hadoop.io.compress.BZip2Codec}.
+ */
+ public static final String BZIP2_CODEC_EXTENSION = ".bz2";
+
+ /**
+ * Default extension for {@link org.apache.hadoop.io.compress.GzipCodec}.
+ */
+ public static final String GZIP_CODEC_EXTENSION = ".gz";
+
+ /**
+ * Default extension for {@link org.apache.hadoop.io.compress.Lz4Codec}.
+ */
+ public static final String LZ4_CODEC_EXTENSION = ".lz4";
+
+ /**
+ * Default extension for
+ * {@link org.apache.hadoop.io.compress.PassthroughCodec}.
+ */
+ public static final String PASSTHROUGH_CODEC_EXTENSION = ".passthrough";
+
+ /**
+ * Default extension for {@link org.apache.hadoop.io.compress.SnappyCodec}.
+ */
+ public static final String SNAPPY_CODEC_EXTENSION = ".snappy";
+
+ /**
+ * Default extension for {@link org.apache.hadoop.io.compress.ZStandardCodec}.
+ */
+ public static final String ZSTANDARD_CODEC_EXTENSION = ".zst";
+}
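Reviewer note (not part of the patch): a minimal sketch of how the new CodecConstants values can replace hard-coded extension literals, for example when matching file names to codecs. The file name is a placeholder.

package org.example;

import org.apache.hadoop.io.compress.CodecConstants;

public final class ExtensionExample {
  public static void main(String[] args) {
    String file = "part-00000" + CodecConstants.GZIP_CODEC_EXTENSION;
    if (file.endsWith(CodecConstants.GZIP_CODEC_EXTENSION)) {
      System.out.println(file + " looks gzip-compressed");
    }
  }
}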
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
index 33f39ef9297fb..d2ffb22eaafb3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
@@ -114,7 +114,7 @@ public DirectDecompressor createDirectDecompressor() {
@Override
public String getDefaultExtension() {
- return ".deflate";
+ return CodecConstants.DEFAULT_CODEC_EXTENSION;
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index 9bd861da9e890..1535e8c3d386e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -206,7 +206,7 @@ public DirectDecompressor createDirectDecompressor() {
@Override
public String getDefaultExtension() {
- return ".gz";
+ return CodecConstants.GZIP_CODEC_EXTENSION;
}
static final class GzipZlibCompressor extends ZlibCompressor {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
index 45b5e9cdabd28..ba6b487150501 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
@@ -221,6 +221,6 @@ public Decompressor createDecompressor() {
*/
@Override
public String getDefaultExtension() {
- return ".lz4";
+ return CodecConstants.LZ4_CODEC_EXTENSION;
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java
index a3f0bffeebc0f..074762c0e8f7a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java
@@ -77,7 +77,8 @@ public class PassthroughCodec
* This default extension is here so that if no extension has been defined,
* some value is still returned: {@value}..
*/
- public static final String DEFAULT_EXTENSION = ".passthrough";
+ public static final String DEFAULT_EXTENSION =
+ CodecConstants.PASSTHROUGH_CODEC_EXTENSION;
private Configuration conf;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index cd0c7880376bf..686f30c9f89a2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -225,6 +225,6 @@ public DirectDecompressor createDirectDecompressor() {
*/
@Override
public String getDefaultExtension() {
- return ".snappy";
+ return CodecConstants.SNAPPY_CODEC_EXTENSION;
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java
index c56bbba3b5959..a7afebc0c49ae 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java
@@ -230,7 +230,7 @@ public Decompressor createDecompressor() {
*/
@Override
public String getDefaultExtension() {
- return ".zst";
+ return CodecConstants.ZSTANDARD_CODEC_EXTENSION;
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index 973afa33e3f35..160b8e029e56b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -224,28 +224,31 @@ public long getLength() {
* JNI wrapper of persist memory operations.
*/
public static class Pmem {
- // check whether the address is a Pmem address or DIMM address
+ // Check whether the address is a Pmem address or DIMM address
public static boolean isPmem(long address, long length) {
return NativeIO.POSIX.isPmemCheck(address, length);
}
- // create a pmem file and memory map it
- public static PmemMappedRegion mapBlock(String path, long length) {
- return NativeIO.POSIX.pmemCreateMapFile(path, length);
+ // Map a file in persistent memory, if the given file exists,
+ // directly map it. If not, create the named file on persistent memory
+ // and then map it.
+ public static PmemMappedRegion mapBlock(
+ String path, long length, boolean isFileExist) {
+ return NativeIO.POSIX.pmemMapFile(path, length, isFileExist);
}
- // unmap a pmem file
+ // Unmap a pmem file
public static boolean unmapBlock(long address, long length) {
return NativeIO.POSIX.pmemUnMap(address, length);
}
- // copy data from disk file(src) to pmem file(dest), without flush
+ // Copy data from disk file(src) to pmem file(dest), without flush
public static void memCopy(byte[] src, long dest, boolean isPmem,
long length) {
NativeIO.POSIX.pmemCopy(src, dest, isPmem, length);
}
- // flush the memory content to persistent storage
+ // Flush the memory content to persistent storage
public static void memSync(PmemMappedRegion region) {
if (region.isPmem()) {
NativeIO.POSIX.pmemDrain();
@@ -261,8 +264,8 @@ public static String getPmdkLibPath() {
private static native String getPmdkLibPath();
private static native boolean isPmemCheck(long address, long length);
- private static native PmemMappedRegion pmemCreateMapFile(String path,
- long length);
+ private static native PmemMappedRegion pmemMapFile(String path,
+ long length, boolean isFileExist);
private static native boolean pmemUnMap(long address, long length);
private static native void pmemCopy(byte[] src, long dest, boolean isPmem,
long length);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index ba07db4c2ae5e..fcbcc868cf6dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -34,6 +34,7 @@
import javax.security.sasl.SaslException;
+import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
@@ -678,7 +679,7 @@ public RetryAction shouldRetry(Exception e, int retries,
e instanceof UnknownHostException ||
e instanceof StandbyException ||
e instanceof ConnectTimeoutException ||
- isWrappedStandbyException(e)) {
+ shouldFailoverOnException(e)) {
return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
getFailoverOrRetrySleepTime(failovers));
} else if (e instanceof RetriableException
@@ -689,7 +690,8 @@ public RetryAction shouldRetry(Exception e, int retries,
} else if (e instanceof InvalidToken) {
return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
"Invalid or Cancelled Token");
- } else if (e instanceof AccessControlException) {
+ } else if (e instanceof AccessControlException ||
+ hasWrappedAccessControlException(e)) {
return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
"Access denied");
} else if (e instanceof SocketException
@@ -729,12 +731,13 @@ private static long calculateExponentialTime(long time, int retries) {
return calculateExponentialTime(time, retries, Long.MAX_VALUE);
}
- private static boolean isWrappedStandbyException(Exception e) {
+ private static boolean shouldFailoverOnException(Exception e) {
if (!(e instanceof RemoteException)) {
return false;
}
Exception unwrapped = ((RemoteException)e).unwrapRemoteException(
- StandbyException.class);
+ StandbyException.class,
+ ObserverRetryOnActiveException.class);
return unwrapped instanceof StandbyException;
}
@@ -759,4 +762,13 @@ static RetriableException getWrappedRetriableException(Exception e) {
return unwrapped instanceof RetriableException ?
(RetriableException) unwrapped : null;
}
+
+ private static boolean hasWrappedAccessControlException(Exception e) {
+ Throwable throwable = e;
+ while (!(throwable instanceof AccessControlException) &&
+ throwable.getCause() != null) {
+ throwable = throwable.getCause();
+ }
+ return throwable instanceof AccessControlException;
+ }
}
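Reviewer note (not part of the patch): a minimal sketch of the cause-chain walk that the new hasWrappedAccessControlException() check performs; an AccessControlException buried under wrapper exceptions now leads to a fail-fast decision. The helper method re-implements the (private) walk for illustration only.

package org.example;

import java.io.IOException;
import org.apache.hadoop.security.AccessControlException;

public final class CauseChainExample {
  // Same walk as the new private helper: follow getCause() until an
  // AccessControlException is found or the chain ends.
  static boolean wrapsAccessControl(Exception e) {
    Throwable t = e;
    while (!(t instanceof AccessControlException) && t.getCause() != null) {
      t = t.getCause();
    }
    return t instanceof AccessControlException;
  }

  public static void main(String[] args) {
    Exception wrapped =
        new IOException(new RuntimeException(new AccessControlException("denied")));
    System.out.println(wrapsAccessControl(wrapped)); // true
  }
}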
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
index 7e43974ba9cb3..c035a42d4a751 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
import org.apache.hadoop.ipc.RemoteException;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.apache.hadoop.ipc.RetriableException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
index 81b7d34d0d1e0..53ac34b61272f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
@@ -22,6 +22,7 @@
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.AbstractQueue;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.concurrent.BlockingQueue;
@@ -77,8 +78,10 @@ public CallQueueManager(Class<? extends BlockingQueue<E>> backingClass,
int priorityLevels = parseNumLevels(namespace, conf);
this.scheduler = createScheduler(schedulerClass, priorityLevels,
namespace, conf);
+ int[] capacityWeights = parseCapacityWeights(priorityLevels,
+ namespace, conf);
BlockingQueue<E> bq = createCallQueueInstance(backingClass,
- priorityLevels, maxQueueSize, namespace, conf);
+ priorityLevels, maxQueueSize, namespace, capacityWeights, conf);
this.clientBackOffEnabled = clientBackOffEnabled;
this.serverFailOverEnabled = conf.getBoolean(
namespace + "." +
@@ -146,13 +149,14 @@ private static T createScheduler(
private <T extends BlockingQueue<E>> T createCallQueueInstance(
Class<T> theClass, int priorityLevels, int maxLen, String ns,
- Configuration conf) {
+ int[] capacityWeights, Configuration conf) {
// Used for custom, configurable callqueues
try {
Constructor<T> ctor = theClass.getDeclaredConstructor(int.class,
- int.class, String.class, Configuration.class);
- return ctor.newInstance(priorityLevels, maxLen, ns, conf);
+ int.class, String.class, int[].class, Configuration.class);
+ return ctor.newInstance(priorityLevels, maxLen, ns,
+ capacityWeights, conf);
} catch (RuntimeException e) {
throw e;
} catch (InvocationTargetException e) {
@@ -343,6 +347,47 @@ private static int parseNumLevels(String ns, Configuration conf) {
return retval;
}
+ /**
+ * Read the weights of capacity in callqueue and pass the value to
+ * callqueue constructions.
+ */
+ private static int[] parseCapacityWeights(
+ int priorityLevels, String ns, Configuration conf) {
+ int[] weights = conf.getInts(ns + "." +
+ CommonConfigurationKeys.IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY);
+ if (weights.length == 0) {
+ weights = getDefaultQueueCapacityWeights(priorityLevels);
+ } else if (weights.length != priorityLevels) {
+ throw new IllegalArgumentException(
+ CommonConfigurationKeys.IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY + " must "
+ + "specify " + priorityLevels + " capacity weights: one for each "
+ + "priority level");
+ } else {
+ // only allow positive numbers
+ for (int w : weights) {
+ if (w <= 0) {
+ throw new IllegalArgumentException(
+ CommonConfigurationKeys.IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY +
+ " only takes positive weights. " + w + " capacity weight " +
+ "found");
+ }
+ }
+ }
+ return weights;
+ }
+
+ /**
+ * By default, queue capacity is the same for all priority levels.
+ *
+ * @param priorityLevels number of levels
+ * @return default weights
+ */
+ public static int[] getDefaultQueueCapacityWeights(int priorityLevels) {
+ int[] weights = new int[priorityLevels];
+ Arrays.fill(weights, 1);
+ return weights;
+ }
+
/**
* Replaces active queue with the newly requested one and transfers
* all calls to the newQ before returning.
@@ -355,8 +400,9 @@ public synchronized void swapQueue(
this.scheduler.stop();
RpcScheduler newScheduler = createScheduler(schedulerClass, priorityLevels,
ns, conf);
+ int[] capacityWeights = parseCapacityWeights(priorityLevels, ns, conf);
BlockingQueue<E> newQ = createCallQueueInstance(queueClassToUse,
- priorityLevels, maxSize, ns, conf);
+ priorityLevels, maxSize, ns, capacityWeights, conf);
// Our current queue becomes the old queue
BlockingQueue<E> oldQ = putRef.get();
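Reviewer note (not part of the patch): a minimal sketch of configuring the per-priority capacity weights that parseCapacityWeights() reads. The namespace "ipc.8020" and the weight values are placeholders; the weight count must match the number of priority levels and all weights must be positive.

package org.example;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public final class CapacityWeightsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String ns = "ipc.8020";  // namespace of the RPC server's call queue
    // Four priority levels; level 0 gets half of the total queue capacity.
    String key = ns + "." +
        CommonConfigurationKeys.IPC_CALLQUEUE_CAPACITY_WEIGHTS_KEY;
    conf.set(key, "4,2,1,1");
    System.out.println(key + " = " + conf.get(key));
  }
}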
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 358c0d7ac3448..688eed647c209 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -761,8 +761,17 @@ public Object run() throws IOException, InterruptedException {
throw (IOException) new IOException(msg).initCause(ex);
}
} else {
- LOG.warn("Exception encountered while connecting to "
- + "the server : " + ex);
+ // With RequestHedgingProxyProvider, one rpc call will send multiple
+ // requests to all namenodes. After one request return successfully,
+ // all other requests will be interrupted. It's not a big problem,
+ // and should not print a warning log.
+ if (ex instanceof InterruptedIOException) {
+ LOG.debug("Exception encountered while connecting to the server",
+ ex);
+ } else {
+ LOG.warn("Exception encountered while connecting to the server ",
+ ex);
+ }
}
if (ex instanceof RemoteException)
throw (RemoteException) ex;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index ffeafb5c0dc70..3e952eb63c3ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -42,6 +42,7 @@
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ipc.metrics.DecayRpcSchedulerDetailedMetrics;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
@@ -154,6 +155,10 @@ public class DecayRpcScheduler implements RpcScheduler,
private final AtomicDoubleArray responseTimeAvgInLastWindow;
private final AtomicLongArray responseTimeCountInLastWindow;
+ // RPC queue time rates per queue
+ private final DecayRpcSchedulerDetailedMetrics
+ decayRpcSchedulerDetailedMetrics;
+
// Pre-computed scheduling decisions during the decay sweep are
// atomically swapped in as a read-only map
private final AtomicReference
+<property>
+  <name>hadoop.http.idle_timeout.ms</name>
+  <value>1000</value>
+  <description>
+    NN/JN/DN Server connection timeout in milliseconds.
+  </description>
+</property>
+
@@ -904,7 +912,7 @@
  <name>fs.protected.directories</name>
  <description>A comma-separated list of directories which cannot
-  be deleted even by the superuser unless they are empty. This
+  be deleted or renamed even by the superuser unless they are empty. This
  setting can be used to guard important system directories
  against accidental deletion due to administrator error.
@@ -1090,8 +1098,8 @@
Uses the values of fs.s3a.access.key and fs.s3a.secret.key.
* com.amazonaws.auth.EnvironmentVariableCredentialsProvider: supports
configuration of AWS access key ID and secret access key in
- environment variables named AWS_ACCESS_KEY_ID and
- AWS_SECRET_ACCESS_KEY, as documented in the AWS SDK.
+ environment variables named AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
+ and AWS_SESSION_TOKEN as documented in the AWS SDK.
* org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider: picks up
IAM credentials of any EC2 VM or AWS container in which the process is running.
@@ -1553,7 +1561,6 @@
-
fs.s3a.s3guard.cli.prune.age86400000
@@ -1624,6 +1631,27 @@
+
+ fs.s3a.s3guard.ddb.table.sse.enabled
+ false
+
+ Whether server-side encryption (SSE) is enabled or disabled on the table.
+ By default it is disabled, meaning SSE is set to the AWS-owned CMK.
+
+
+
+
+ fs.s3a.s3guard.ddb.table.sse.cmk
+
+
+ The KMS Customer Master Key (CMK) used for the KMS encryption on the table.
+ To specify a CMK, this config value can be its key ID, Amazon Resource Name
+ (ARN), alias name, or alias ARN. Users only need to provide this config if
+ the key is different from the default DynamoDB KMS Master Key, which is
+ alias/aws/dynamodb.
+
+
+
fs.s3a.s3guard.ddb.max.retries9
@@ -1678,7 +1706,7 @@
fs.s3a.retry.throttle.limit
- ${fs.s3a.attempts.maximum}
+ 20
Number of times to retry any throttled request.
@@ -1686,9 +1714,12 @@
fs.s3a.retry.throttle.interval
- 1000ms
+ 100ms
- Interval between retry attempts on throttled requests.
+ Initial interval between retry attempts on throttled requests, +/- 50% chosen at random.
+ i.e. for an initial value of 3000ms, the initial delay would be in the range 1500ms to 4500ms.
+ Backoffs are exponential; again, randomness is used to avoid the thundering herd problem.
+ 500ms is the default value used by the AWS S3 retry policy.
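
As a rough sketch of the behaviour this description implies (exponential growth plus a +/-50% random jitter), and not the actual S3A retry policy implementation, the arithmetic looks roughly like this; the class and method names are invented for illustration.

```java
import java.util.Random;

/** Sketch of exponential backoff with +/-50% jitter for throttled requests. */
public class ThrottleBackoffSketch {
  private static final Random RANDOM = new Random();

  /** Delay for a given retry attempt, based on a configured initial interval in ms. */
  static long delayMillis(long initialIntervalMs, int attempt) {
    long exponential = initialIntervalMs << Math.min(attempt, 20); // doubles each attempt, capped shift
    double jitter = 0.5 + RANDOM.nextDouble();                     // random factor in [0.5, 1.5)
    return (long) (exponential * jitter);
  }

  public static void main(String[] args) {
    for (int attempt = 0; attempt < 4; attempt++) {
      System.out.printf("attempt %d -> sleep ~%d ms%n", attempt, delayMillis(100, attempt));
    }
  }
}
```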
@@ -1920,6 +1951,23 @@
+
+ fs.s3a.connection.request.timeout
+ 0
+
+ Time out on HTTP requests to the AWS service; 0 means no timeout.
+ Measured in seconds; the usual time suffixes are all supported
+
+ Important: this is the maximum duration of any AWS service call,
+ including upload and copy operations. If non-zero, it must be larger
+ than the time to upload multi-megabyte blocks to S3 from the client,
+ and to rename many-GB files. Use with care.
+
+ Values that are larger than Integer.MAX_VALUE milliseconds are
+ converted to Integer.MAX_VALUE milliseconds.
+
+
+
fs.s3a.etag.checksum.enabledfalse
@@ -1979,11 +2027,16 @@
If secure connections to S3 are enabled, configures the SSL
implementation used to encrypt connections to S3. Supported values are:
- "default_jsse" and "default_jsse_with_gcm". "default_jsse" uses the Java
- Secure Socket Extension package (JSSE). However, when running on Java 8,
- the GCM cipher is removed from the list of enabled ciphers. This is due
- to performance issues with GCM in Java 8. "default_jsse_with_gcm" uses
- the JSSE with the default list of cipher suites.
+ "default_jsse", "default_jsse_with_gcm", "default", and "openssl".
+ "default_jsse" uses the Java Secure Socket Extension package (JSSE).
+ However, when running on Java 8, the GCM cipher is removed from the list
+ of enabled ciphers. This is due to performance issues with GCM in Java 8.
+ "default_jsse_with_gcm" uses the JSSE with the default list of cipher
+ suites. "default_jsse_with_gcm" is equivalent to the behavior prior to
+ this feature being introduced. "default" attempts to use OpenSSL rather
+ than the JSSE for SSL encryption, if OpenSSL libraries cannot be loaded,
+ it falls back to the "default_jsse" behavior. "openssl" attempts to use
+ OpenSSL as well, but fails if OpenSSL libraries cannot be loaded.
@@ -2674,7 +2727,7 @@
${user.home}/hadoop-http-auth-signature-secret
The signature secret for signing the authentication tokens.
- The same secret should be used for JT/NN/DN/TT configurations.
+ The same secret should be used for RM/NM/NN/DN configurations.
@@ -2929,6 +2982,7 @@
The supported SSL protocols. The parameter will only be used from
DatanodeHttpServer.
+ Starting from Hadoop 3.3.0, TLSv1.3 is supported with Java 11 Runtime.
@@ -3018,6 +3072,14 @@
+
+ ha.failover-controller.active-standby-elector.zk.op.retries
+ 3
+
+ The number of times to retry ZooKeeper operations in the ActiveStandbyElector.
+
+
+
ha.failover-controller.cli-check.rpc-timeout.ms20000
@@ -3782,4 +3844,21 @@
fs space usage statistics refresh jitter in msec.
+
+
+ hadoop.http.sni.host.check.enabled
+ false
+
+ Enable Server Name Indication (SNI) host check for HTTPS enabled server.
+
+
+
+
+ hadoop.metrics.jvm.use-thread-mxbean
+ false
+
+ Whether or not ThreadMXBean is used for getting thread info in JvmMetrics.
+ The ThreadGroup approach is preferred for better performance.
+
+
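
A small sketch of the two strategies this property selects between; the class below is not Hadoop's JvmMetrics, it only shows the standard JDK calls involved in each approach.

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

/** Sketch of ThreadMXBean vs ThreadGroup thread counting. */
public class ThreadCountSketch {
  public static void main(String[] args) {
    // ThreadMXBean: richer information, but more overhead per call.
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    System.out.println("ThreadMXBean live threads: " + mx.getThreadCount());

    // ThreadGroup: walk up to the root group and use its estimated count; cheaper.
    ThreadGroup group = Thread.currentThread().getThreadGroup();
    while (group.getParent() != null) {
      group = group.getParent();
    }
    System.out.println("Root ThreadGroup active threads: " + group.activeCount());
  }
}
```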
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
index cbbb88764d1f8..dc37949851cf5 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
+++ b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
@@ -19,7 +19,7 @@
# contains key properties for setting up the application classloader
system.classes.default=java.,\
javax.accessibility.,\
- javax.activation.,\
+ -javax.activation.,\
javax.activity.,\
javax.annotation.,\
javax.annotation.processing.,\
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index 7be6a192c9c57..7f61d3bd45592 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -27,7 +27,7 @@ This document does not cover advanced topics such as [Security](./SecureMode.htm
Prerequisites
-------------
-* Install Java. See the [Hadoop Wiki](http://wiki.apache.org/hadoop/HadoopJavaVersions) for known good versions.
+* Install Java. See the [Hadoop Wiki](https://cwiki.apache.org/confluence/display/HADOOP/Hadoop+Java+Versions) for known good versions.
* Download a stable version of Hadoop from Apache mirrors.
Installation
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DownstreamDev.md b/hadoop-common-project/hadoop-common/src/site/markdown/DownstreamDev.md
index b47f83bbadf50..b04bc2488f8ae 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/DownstreamDev.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/DownstreamDev.md
@@ -430,4 +430,3 @@ please contact the developer mailing list for the relevant component(s):
* [hdfs-dev](mailto:hdfs-dev@hadoop.apache.org)
* [mapreduce-dev](mailto:mapreduce-dev@hadoop.apache.org)
* [yarn-dev](mailto:yarn-dev@hadoop.apache.org)
-* [submarine-dev](mailto:submarine-dev@hadoop.apache.org)
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FairCallQueue.md b/hadoop-common-project/hadoop-common/src/site/markdown/FairCallQueue.md
index 22ac05a53b951..887d3053d263e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FairCallQueue.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FairCallQueue.md
@@ -126,6 +126,7 @@ omitted.
|:---- |:---- |:---- |:--- |
| backoff.enable | General | Whether or not to enable client backoff when a queue is full. | false |
| callqueue.impl | General | The fully qualified name of a class to use as the implementation of a call queue. Use `org.apache.hadoop.ipc.FairCallQueue` for the Fair Call Queue. | `java.util.concurrent.LinkedBlockingQueue` (FIFO queue) |
| callqueue.capacity.weights | General | The capacity allocation weights among all subqueues. A positive int array whose length is equal to the `scheduler.priority.levels` is expected, where each int is the relative weight out of total capacity. i.e., for a queue with capacity weight `w`, its queue capacity is `capacity * w/sum(weights)` |
| scheduler.impl | General | The fully qualified name of a class to use as the implementation of the scheduler. Use `org.apache.hadoop.ipc.DecayRpcScheduler` in conjunction with the Fair Call Queue. | `org.apache.hadoop.ipc.DefaultRpcScheduler` (no-op scheduler) If using FairCallQueue, defaults to `org.apache.hadoop.ipc.DecayRpcScheduler` |
| scheduler.priority.levels | RpcScheduler, CallQueue | How many priority levels to use within the scheduler and call queue. | 4 |
| faircallqueue.multiplexer.weights | WeightedRoundRobinMultiplexer | How much weight to give to each priority queue. This should be a comma-separated list of length equal to the number of priority levels. | Weights descend by a factor of 2 (e.g., for 4 levels: `8,4,2,1`) |
@@ -151,6 +152,10 @@ processed.
ipc.8020.callqueue.implorg.apache.hadoop.ipc.FairCallQueue
+<property>
+     <name>ipc.8020.callqueue.capacity.weights</name>
+     <value>7,3</value>
+</property>
+ ipc.8020.scheduler.implorg.apache.hadoop.ipc.DecayRpcScheduler
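
A small arithmetic sketch (not FairCallQueue source) of how the `7,3` weights in the example above split a total queue capacity, following the `capacity * w/sum(weights)` rule from the table; the class and values are illustrative only.

```java
/** Sketch of the capacity split implied by callqueue.capacity.weights. */
public class CapacityWeightSketch {
  static int[] subQueueCapacities(int totalCapacity, int[] weights) {
    int sum = 0;
    for (int w : weights) {
      sum += w;
    }
    int[] capacities = new int[weights.length];
    for (int i = 0; i < weights.length; i++) {
      capacities[i] = totalCapacity * weights[i] / sum;  // capacity * w / sum(weights)
    }
    return capacities;
  }

  public static void main(String[] args) {
    // Weights 7,3: a total capacity of 1000 splits into 700 and 300.
    int[] split = subQueueCapacities(1000, new int[] {7, 3});
    System.out.println(split[0] + " / " + split[1]);
  }
}
```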
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 7df2cce574b68..9c5586913d900 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -122,30 +122,17 @@ Options
copyFromLocal
-------------
-Usage: `hadoop fs -copyFromLocal URI`
-
-Similar to the `fs -put` command, except that the source is restricted to a local file reference.
-
-Options:
-
-* `-p` : Preserves access and modification times, ownership and the permissions.
-(assuming the permissions can be propagated across filesystems)
-* `-f` : Overwrites the destination if it already exists.
-* `-l` : Allow DataNode to lazily persist the file to disk, Forces a replication
- factor of 1. This flag will result in reduced durability. Use with care.
-* `-d` : Skip creation of temporary file with the suffix `._COPYING_`.
+Identical to the -put command.
copyToLocal
-----------
-Usage: `hadoop fs -copyToLocal [-ignorecrc] [-crc] URI `
-
-Similar to get command, except that the destination is restricted to a local file reference.
+Identical to the -get command.
count
-----
-Usage: `hadoop fs -count [-q] [-h] [-v] [-x] [-t []] [-u] [-e] `
+Usage: `hadoop fs -count [-q] [-h] [-v] [-x] [-t [<storage type>]] [-u] [-e] [-s] <paths>`
Count the number of directories, files and bytes under the paths that match the specified file pattern. Get the quota and the usage. The output columns with -count are: DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
@@ -169,6 +156,8 @@ The output columns with -count -e are: DIR\_COUNT, FILE\_COUNT, CONTENT_SIZE, ER
The ERASURECODING\_POLICY is the name of the policy for the file. If an erasure coding policy is set on that file, it will return the name of the policy. If no erasure coding policy is set, it will return \"Replicated\", which means it uses the replication storage strategy.
+The -s option shows the snapshot counts for each directory.
+
Example:
* `hadoop fs -count hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2`
@@ -179,6 +168,7 @@ Example:
* `hadoop fs -count -u -h hdfs://nn1.example.com/file1`
* `hadoop fs -count -u -h -v hdfs://nn1.example.com/file1`
* `hadoop fs -count -e hdfs://nn1.example.com/file1`
+* `hadoop fs -count -s hdfs://nn1.example.com/file1`
Exit Code:
@@ -535,7 +525,7 @@ Returns 0 on success and -1 on error.
put
---
-Usage: `hadoop fs -put [-f] [-p] [-l] [-d] [ - | .. ]. `
+Usage: `hadoop fs -put [-f] [-p] [-l] [-d] [-t <thread count>] [ - | <localsrc1> .. ]. <dst>`
Copy single src, or multiple srcs from local file system to the destination file system.
Also reads input from stdin and writes to destination file system if the source is set to "-"
@@ -547,6 +537,8 @@ Options:
* `-p` : Preserves access and modification times, ownership and the permissions.
(assuming the permissions can be propagated across filesystems)
* `-f` : Overwrites the destination if it already exists.
+* `-t <thread count>` : Number of threads to be used, default is 1. Useful
+ when uploading a directory containing more than 1 file.
* `-l` : Allow DataNode to lazily persist the file to disk, Forces a replication
factor of 1. This flag will result in reduced durability. Use with care.
* `-d` : Skip creation of temporary file with the suffix `._COPYING_`.
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 2d0f23293bfa3..43a3f331f0f30 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -56,6 +56,7 @@ Each metrics record contains tags such as ProcessName, SessionID and Hostname as
| `GcNumWarnThresholdExceeded` | Number of times that the GC warn threshold is exceeded |
| `GcNumInfoThresholdExceeded` | Number of times that the GC info threshold is exceeded |
| `GcTotalExtraSleepTime` | Total GC extra sleep time in msec |
+| `GcTimePercentage` | The percentage (0..100) of time that the JVM spent in GC pauses within the observation window if `dfs.namenode.gc.time.monitor.enable` is set to true. Use `dfs.namenode.gc.time.monitor.sleep.interval.ms` to specify the sleep interval in msec. Use `dfs.namenode.gc.time.monitor.observation.window.ms` to specify the observation window in msec. |
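
A hedged sketch of wiring up the monitor keys named in the `GcTimePercentage` row programmatically; only the key names come from the row above, while the window and interval values are arbitrary examples.

```java
import org.apache.hadoop.conf.Configuration;

/** Sketch: enabling the GC time monitor behind the GcTimePercentage metric. */
public class GcTimeMonitorConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.namenode.gc.time.monitor.enable", true);
    // Example values: a 60s observation window, sampled every 5s.
    conf.setLong("dfs.namenode.gc.time.monitor.observation.window.ms", 60_000L);
    conf.setLong("dfs.namenode.gc.time.monitor.sleep.interval.ms", 5_000L);
    System.out.println(conf.get("dfs.namenode.gc.time.monitor.observation.window.ms"));
  }
}
```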
rpc context
===========
@@ -122,6 +123,17 @@ FairCallQueue metrics will only exist if FairCallQueue is enabled. Each metric e
| `FairCallQueueSize_p`*Priority* | Current number of calls in priority queue |
| `FairCallQueueOverflowedCalls_p`*Priority* | Total number of overflowed calls in priority queue |
+DecayRpcSchedulerDetailed
+-------------------------
+
+DecayRpcSchedulerDetailed metrics only exist when DecayRpcScheduler is used (FairCallQueue enabled). It is an addition
+to FairCallQueue metrics. For each priority level, detailed RPC queue-time and processing-time metrics are exposed.
+
+| Name | Description |
+|:---- | :---- |
+| `DecayRPCSchedulerPriority.`*Priority*`.RpcQueueTime` | RpcQueueTime metrics for each priority |
+| `DecayRPCSchedulerPriority.`*Priority*`.RpcProcessingTime` | RpcProcessingTime metrics for each priority |
+
rpcdetailed context
===================
@@ -441,6 +453,22 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
| `EcReconstructionBytesRead` | Total number of bytes read by erasure coding worker |
| `EcReconstructionBytesWritten` | Total number of bytes written by erasure coding worker |
| `EcReconstructionRemoteBytesRead` | Total number of bytes remote read by erasure coding worker |
+| `CreateRbwOpNumOps` | Total number of create rbw operations |
+| `CreateRbwOpAvgTime` | Average time of create rbw operations in milliseconds |
+| `RecoverRbwOpNumOps` | Total number of recovery rbw operations |
+| `RecoverRbwOpAvgTime` | Average time of recovery rbw operations in milliseconds |
+| `ConvertTemporaryToRbwOpNumOps` | Total number of convert temporary to rbw operations |
+| `ConvertTemporaryToRbwOpAvgTime` | Average time of convert temporary to rbw operations in milliseconds |
+| `CreateTemporaryOpNumOps` | Total number of create temporary operations |
+| `CreateTemporaryOpAvgTime` | Average time of create temporary operations in milliseconds |
+| `FinalizeBlockOpNumOps` | Total number of finalize block operations |
+| `FinalizeBlockOpAvgTime` | Average time of finalize block operations in milliseconds |
+| `UnfinalizeBlockOpNumOps` | Total number of un-finalize block operations |
+| `UnfinalizeBlockOpAvgTime` | Average time of un-finalize block operations in milliseconds |
+| `CheckAndUpdateOpNumOps` | Total number of check and update operations |
+| `CheckAndUpdateOpAvgTime` | Average time of check and update operations in milliseconds |
+| `UpdateReplicaUnderRecoveryOpNumOps` | Total number of update replica under recovery operations |
+| `UpdateReplicaUnderRecoveryOpAvgTime` | Average time of update replica under recovery operations in milliseconds |
FsVolume
--------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md b/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md
index 89946d18308a0..9b3b1d78ed126 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md
@@ -64,7 +64,7 @@ rack and is unable to do so as there is only a single rack named
python Example
--------------
```python
-#!/usr/bin/python
+#!/usr/bin/python3
# this script makes assumptions about the physical environment.
# 1) each rack is its own layer 3 network with a /24 subnet, which
# could be typical where each rack has its own
@@ -94,9 +94,9 @@ for ip in sys.argv: # loop over lis
address = '{0}/{1}'.format(ip, netmask) # format address string so it looks like 'ip/netmask' to make netaddr work
try:
network_address = netaddr.IPNetwork(address).network # calculate and print network address
- print "/{0}".format(network_address)
+ print("/{0}".format(network_address))
except:
- print "/rack-unknown" # print catch-all value if unable to calculate network address
+ print("/rack-unknown") # print catch-all value if unable to calculate network address
```
bash Example
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
index 18fb52dd55c09..45c084bb543be 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm
@@ -39,7 +39,7 @@ $H3 Required Software
Required software for Linux include:
-1. Java™ must be installed. Recommended Java versions are described at [HadoopJavaVersions](http://wiki.apache.org/hadoop/HadoopJavaVersions).
+1. Java™ must be installed. Recommended Java versions are described at [HadoopJavaVersions](https://cwiki.apache.org/confluence/display/HADOOP/Hadoop+Java+Versions).
2. ssh must be installed and sshd must be running to use the Hadoop scripts that manage remote Hadoop daemons if the optional start and stop scripts are to be used. Additionally, it is recommmended that pdsh also be installed for better ssh resource management.
@@ -130,7 +130,7 @@ If you cannot ssh to localhost without a passphrase, execute the following comma
$H3 Execution
-The following instructions are to run a MapReduce job locally. If you want to execute a job on YARN, see [YARN on Single Node](#YARN_on_Single_Node).
+The following instructions are to run a MapReduce job locally. If you want to execute a job on YARN, see [YARN on Single Node](#YARN_on_a_Single_Node).
1. Format the filesystem:
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index a2458ee891448..665e328447d5b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -43,6 +43,15 @@ The implementations of `FileSystem` shipped with Apache Hadoop
All the requirements of a valid FileSystem are considered implicit preconditions and postconditions:
all operations on a valid FileSystem MUST result in a new FileSystem that is also valid.
+## Feasible features
+
+### Protected directories
+
+HDFS has the notion of *Protected Directories*, which are declared in
+the option `fs.protected.directories`. Any attempt to delete or rename
+such a directory or a parent thereof raises an `AccessControlException`.
+Accordingly, any attempt to delete the root directory SHALL, if there is
+a protected directory, result in such an exception being raised.
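
A minimal sketch of declaring protected directories through configuration; the paths below are examples only, and this only shows setting the option named in the paragraph above.

```java
import org.apache.hadoop.conf.Configuration;

/** Sketch: declaring protected directories via fs.protected.directories. */
public class ProtectedDirectoriesSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated list; on HDFS, deleting or renaming these directories
    // (or deleting "/", their parent) should then fail with AccessControlException.
    conf.set("fs.protected.directories", "/user,/warehouse");
    System.out.println(conf.get("fs.protected.directories"));
  }
}
```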
## Predicates and other state access operations
@@ -477,11 +486,11 @@ running out of memory as it calculates the partitions.
Any FileSystem that does not actually break files into blocks SHOULD
return a number for this that results in efficient processing.
-A FileSystem MAY make this user-configurable (the S3 and Swift filesystem clients do this).
+A FileSystem MAY make this user-configurable (the object store connectors usually do this).
### `long getDefaultBlockSize(Path p)`
-Get the "default" block size for a path —that is, the block size to be used
+Get the "default" block size for a path --that is, the block size to be used
when writing objects to a path in the filesystem.
#### Preconditions
@@ -530,14 +539,21 @@ on the filesystem.
### `boolean mkdirs(Path p, FsPermission permission)`
-Create a directory and all its parents
+Create a directory and all its parents.
#### Preconditions
+The path must either be a directory or not exist
+
if exists(FS, p) and not isDir(FS, p) :
raise [ParentNotDirectoryException, FileAlreadyExistsException, IOException]
+No ancestor may be a file
+
+ forall d = ancestors(FS, p) :
+ if exists(FS, d) and not isDir(FS, d) :
+ raise [ParentNotDirectoryException, FileAlreadyExistsException, IOException]
#### Postconditions
@@ -577,6 +593,11 @@ Writing to or overwriting a directory must fail.
if isDir(FS, p) : raise {FileAlreadyExistsException, FileNotFoundException, IOException}
+No ancestor may be a file
+
+ forall d = ancestors(FS, p) :
+ if exists(FS, d) and not isDir(FS, d) :
+ raise [ParentNotDirectoryException, FileAlreadyExistsException, IOException]
FileSystems may reject the request for other
reasons, such as the FS being read-only (HDFS),
@@ -584,7 +605,8 @@ the block size being below the minimum permitted (HDFS),
the replication count being out of range (HDFS),
quotas on namespace or filesystem being exceeded, reserved
names, etc. All rejections SHOULD be `IOException` or a subclass thereof
-and MAY be a `RuntimeException` or subclass. For instance, HDFS may raise a `InvalidPathException`.
+and MAY be a `RuntimeException` or subclass.
+For instance, HDFS may raise an `InvalidPathException`.
#### Postconditions
@@ -709,24 +731,29 @@ exists in the metadata, but no copies of any its blocks can be located;
Creates a [`FSDataInputStreamBuilder`](fsdatainputstreambuilder.html)
to construct a operation to open the file at `path` for reading.
-
When `build()` is invoked on the returned `FSDataInputStreamBuilder` instance,
the builder parameters are verified and
-`openFileWithOptions(Path, Set, Configuration, int)` invoked.
+`openFileWithOptions(Path, OpenFileParameters)` invoked.
This (protected) operation returns a `CompletableFuture`
which, when its `get()` method is called, either returns an input
stream of the contents of opened file, or raises an exception.
-The base implementation of the `openFileWithOptions(PathHandle, Set, Configuration, int)`
+The base implementation of the `openFileWithOptions(PathHandle, OpenFileParameters)`
ultimately invokes `open(Path, int)`.
Thus the chain `openFile(path).build().get()` has the same preconditions
and postconditions as `open(Path p, int bufferSize)`
+However, there is one difference which implementations are free to
+take advantage of:
+
+The returned stream MAY implement a lazy open where file non-existence or
+access permission failures may not surface until the first `read()` of the
+actual data.
-The `openFile()` operation may check the state of the filesystem during this
-call, but as the state of the filesystem may change betwen this call and
+The `openFile()` operation may check the state of the filesystem during its
+invocation, but as the state of the filesystem may change between this call and
the actual `build()` and `get()` operations, this file-specific
preconditions (file exists, file is readable, etc) MUST NOT be checked here.
@@ -757,6 +784,10 @@ It SHOULD be possible to always open a file without specifying any options,
so as to present a consistent model to users. However, an implementation MAY
opt to require one or more mandatory options to be set.
+The returned stream may perform "lazy" evaluation of file access. This is
+relevant for object stores where the probes for existence are expensive, and,
+even with an asynchronous open, may be considered needless.
+
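
A sketch of the builder chain with these lazy-open semantics in mind; the path is an example, and the fadvise option is the same one used in the builder documentation. With a lazy-open store, a missing or unreadable file may only be reported at the first `read()`.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Sketch: openFile() builder chain where failures may surface lazily. */
public class LazyOpenSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/data/part-0000");                 // example path
    try (FSDataInputStream in = fs.openFile(path)
        .opt("fs.s3a.experimental.input.fadvise", "sequential")
        .build()
        .get()) {                                            // may not have probed the file yet
      // A FileNotFoundException or permission failure may surface here instead.
      System.out.println("first byte: " + in.read());
    }
  }
}
```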
### `FSDataInputStreamBuilder openFile(PathHandle)`
Creates a `FSDataInputStreamBuilder` to build an operation to open a file.
@@ -765,13 +796,13 @@ to construct a operation to open the file identified by the given `PathHandle` f
When `build()` is invoked on the returned `FSDataInputStreamBuilder` instance,
the builder parameters are verified and
-`openFileWithOptions(PathHandle, Set, Configuration, int)` invoked.
+`openFileWithOptions(PathHandle, OpenFileParameters)` invoked.
This (protected) operation returns a `CompletableFuture`
which, when its `get()` method is called, either returns an input
stream of the contents of opened file, or raises an exception.
-The base implementation of the `openFileWithOptions(Path,PathHandle, Set, Configuration, int)` method
+The base implementation of the `openFileWithOptions(PathHandle, OpenFileParameters)` method
returns a future which invokes `open(Path, int)`.
Thus the chain `openFile(pathhandle).build().get()` has the same preconditions
@@ -1009,12 +1040,6 @@ filesystem is desired.
1. Object Stores: see [Object Stores: root directory deletion](#object-stores-rm-root).
-HDFS has the notion of *Protected Directories*, which are declared in
-the option `fs.protected.directories`. Any attempt to delete such a directory
-or a parent thereof raises an `AccessControlException`. Accordingly, any
-attempt to delete the root directory SHALL, if there is a protected directory,
-result in such an exception being raised.
-
This specification does not recommend any specific action. Do note, however,
that the POSIX model assumes that there is a permissions model such that normal
users do not have the permission to delete that root directory; it is an action
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md
index a7c393d9a41c1..eadba174fc1a6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdatainputstreambuilder.md
@@ -43,6 +43,31 @@ path validation.
Set the size of the buffer to be used.
+### `FSDataInputStreamBuilder withFileStatus(FileStatus status)`
+
+A `FileStatus` instance which refers to the file being opened.
+
+This MAY be used by implementations to short-circuit checks for the file,
+so potentially saving on remote calls, especially to object stores.
+
+Requirements:
+
+* `status != null`
+* `status.getPath()` == the resolved path of the file being opened.
+
+The path validation MUST take place if the store uses the `FileStatus` when
+it opens files, and MAY be performed otherwise. The validation
+SHOULD be postponed until the `build()` operation.
+
+This operation should be considered a hint to the filesystem.
+
+If a filesystem implementation extends the `FileStatus` returned in its own
+listing and `getFileStatus()` calls, it MAY use this information when opening the file.
+
+This is relevant for those stores which return version/etag information,
+including the S3A and ABFS connectors; they MAY use this to guarantee that
+the file they opened is exactly the one returned in the listing.
+
### Set optional or mandatory parameters
FSDataInputStreamBuilder opt(String key, ...)
@@ -56,6 +81,7 @@ of `FileSystem`.
out = fs.openFile(path)
.opt("fs.s3a.experimental.input.fadvise", "random")
.must("fs.s3a.readahead.range", 256 * 1024)
+ .withFileStatus(statusFromListing)
.build()
.get();
```
@@ -76,6 +102,21 @@ builder methods (i.e., `bufferSize()`) and `opt()`/`must()` is as follows:
> The last option specified defines the value and its optional/mandatory state.
+If the `FileStatus` option passed in `withFileStatus()` is used, implementations
+MUST accept all subclasses of `FileStatus`, including `LocatedFileStatus`,
+rather than just any FS-specific subclass implemented by the implementation
+(e.g. `S3AFileStatus`). They MAY simply ignore those which are not the
+custom subclasses.
+
+This is critical to ensure safe use of the feature: directory listing/
+status serialization/deserialization can result in the `withFileStatus()`
+argument not being the custom subclass returned by the Filesystem instance's
+own `getFileStatus()`, `listFiles()`, `listLocatedStatus()` calls, etc.
+
+In such a situation the implementations must:
+
+1. Validate the path (always).
+1. Use the status/convert to the custom type, *or* simply discard it.
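
A hedged sketch of those two steps as an implementation-side helper; the class and method below are invented for illustration, and `S3AFileStatus` is only the custom subclass the text itself names (it would be passed as `customType`).

```java
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

/** Sketch of the validate-then-use-or-discard rule for withFileStatus() hints. */
final class FileStatusHintHandling {

  /** Returns the hint if it is usable for {@code resolvedPath}, otherwise null. */
  static FileStatus acceptHint(Path resolvedPath, FileStatus hint,
      Class<? extends FileStatus> customType) {
    if (hint == null) {
      return null;                      // no hint supplied
    }
    // 1. Validate the path (always).
    if (!resolvedPath.equals(hint.getPath())) {
      throw new IllegalArgumentException("FileStatus path " + hint.getPath()
          + " does not match " + resolvedPath);
    }
    // 2. Use the status if it is the store's own subclass, or simply discard it.
    return customType.isInstance(hint) ? hint : null;
  }
}
```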
## Builder interface
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 3a4bccede069f..1ce23a0eb81f2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -135,6 +135,30 @@ public void initializeMemberVariables() {
xmlPropsToSkipCompare.add("fs.azure.saskey.usecontainersaskeyforallaccess");
xmlPropsToSkipCompare.add("fs.azure.user.agent.prefix");
+ // FairCallQueue configs that includes dynamic ports in its keys
+ xmlPropsToSkipCompare.add("ipc.[port_number].backoff.enable");
+ xmlPropsToSkipCompare.add("ipc.[port_number].callqueue.impl");
+ xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.impl");
+ xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.priority.levels");
+ xmlPropsToSkipCompare.add(
+ "ipc.[port_number].faircallqueue.multiplexer.weights");
+ xmlPropsToSkipCompare.add("ipc.[port_number].identity-provider.impl");
+ xmlPropsToSkipCompare.add("ipc.[port_number].cost-provider.impl");
+ xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.period-ms");
+ xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.decay-factor");
+ xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.thresholds");
+ xmlPropsToSkipCompare.add(
+ "ipc.[port_number].decay-scheduler.backoff.responsetime.enable");
+ xmlPropsToSkipCompare.add(
+ "ipc.[port_number].decay-scheduler.backoff.responsetime.thresholds");
+ xmlPropsToSkipCompare.add(
+ "ipc.[port_number].decay-scheduler.metrics.top.user.count");
+ xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockshared");
+ xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockexclusive");
+ xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.handler");
+ xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockfree");
+ xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.response");
+
// Deprecated properties. These should eventually be removed from the
// class.
configurationPropsToSkipCompare
@@ -202,6 +226,6 @@ public void initializeMemberVariables() {
// - org.apache.hadoop.io.SequenceFile
xmlPropsToSkipCompare.add("io.seqfile.local.dir");
-
+ xmlPropsToSkipCompare.add("hadoop.http.sni.host.check.enabled");
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index a6adb9f20a3ef..81c53959478b4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -2553,4 +2553,41 @@ public void testResourceRace() {
// Thread 1
config.get("secondParse");
}
+
+ @Test
+ public void testCDATA() throws IOException {
+ String xml = new String(
+ "" +
+ "" +
+ "cdata" +
+ "cdata]]>" +
+ "\n" +
+ "" +
+ "cdata-multiple" +
+ "cdata1]]> and cdata2]]>" +
+ "\n" +
+ "" +
+ "cdata-multiline" +
+ "cdata\nmultiline<>]]>" +
+ "\n" +
+ "" +
+ "cdata-whitespace" +
+ " prefix cdata]]>\nsuffix " +
+ "\n" +
+ "");
+ Configuration conf = checkCDATA(xml.getBytes());
+ ByteArrayOutputStream os = new ByteArrayOutputStream();
+ conf.writeXml(os);
+ checkCDATA(os.toByteArray());
+ }
+
+ private static Configuration checkCDATA(byte[] bytes) {
+ Configuration conf = new Configuration(false);
+ conf.addResource(new ByteArrayInputStream(bytes));
+ assertEquals(">cdata", conf.get("cdata"));
+ assertEquals(">cdata1 and >cdata2", conf.get("cdata-multiple"));
+ assertEquals(">cdata\nmultiline<>", conf.get("cdata-multiline"));
+ assertEquals(" prefix >cdata\nsuffix ", conf.get("cdata-whitespace"));
+ return conf;
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index a4ccee3f7f58e..8065b3f61f52c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -862,7 +862,8 @@ private void assertListFilesFinds(Path dir, Path subdir) throws IOException {
found);
}
- private void assertListStatusFinds(Path dir, Path subdir) throws IOException {
+ protected void assertListStatusFinds(Path dir, Path subdir)
+ throws IOException {
FileStatus[] stats = fs.listStatus(dir);
boolean found = false;
StringBuilder builder = new StringBuilder();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
index 7cc7ae4094974..98f9f2021f8b4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
@@ -253,4 +253,40 @@ public void testToStringHumanNoShowQuota() {
String expected = " 32.6 K 211.9 M 8.0 E ";
assertEquals(expected, contentSummary.toString(false, true));
}
+
+ // check the toSnapshot method with human readable.
+ @Test
+ public void testToSnapshotHumanReadable() {
+ long snapshotLength = Long.MAX_VALUE;
+ long snapshotFileCount = 222222222;
+ long snapshotDirectoryCount = 33333;
+ long snapshotSpaceConsumed = 222256578;
+
+ ContentSummary contentSummary = new ContentSummary.Builder()
+ .snapshotLength(snapshotLength).snapshotFileCount(snapshotFileCount)
+ .snapshotDirectoryCount(snapshotDirectoryCount)
+ .snapshotSpaceConsumed(snapshotSpaceConsumed).build();
+ String expected =
+ " 8.0 E 211.9 M 32.6 K "
+ + " 212.0 M ";
+ assertEquals(expected, contentSummary.toSnapshot(true));
+ }
+
+ // check the toSnapshot method with human readable disabled.
+ @Test
+ public void testToSnapshotNotHumanReadable() {
+ long snapshotLength = 1111;
+ long snapshotFileCount = 2222;
+ long snapshotDirectoryCount = 3333;
+ long snapshotSpaceConsumed = 4444;
+
+ ContentSummary contentSummary = new ContentSummary.Builder()
+ .snapshotLength(snapshotLength).snapshotFileCount(snapshotFileCount)
+ .snapshotDirectoryCount(snapshotDirectoryCount)
+ .snapshotSpaceConsumed(snapshotSpaceConsumed).build();
+ String expected =
+ " 1111 2222 3333 "
+ + " 4444 ";
+ assertEquals(expected, contentSummary.toSnapshot(false));
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
index 96fac57518bfd..2919de20bffd9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
@@ -61,8 +61,8 @@ public void testFileContextResolveAfs() throws IOException {
fc.createSymlink(localPath, linkPath, true);
Set afsList = fc.resolveAbstractFileSystems(linkPath);
Assert.assertEquals(1, afsList.size());
- localFs.deleteOnExit(localPath);
- localFs.deleteOnExit(linkPath);
+ localFs.delete(linkPath, true);
+ localFs.delete(localPath, true);
localFs.close();
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
index 4d627a5e8e256..10ad8a14487ef 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemInitialization.java
@@ -18,14 +18,24 @@
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+import java.io.FileNotFoundException;
import java.io.IOException;
+import java.net.URI;
import java.net.URL;
import java.util.ServiceConfigurationError;
import org.junit.Test;
+
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.*;
+/**
+ * Tests related to filesystem creation and lifecycle.
+ */
public class TestFileSystemInitialization {
/**
@@ -55,4 +65,119 @@ public void testMissingLibraries() {
} catch (Exception | ServiceConfigurationError expected) {
}
}
+
+ @Test
+ public void testNewInstanceFailure() throws Throwable {
+ intercept(IOException.class, FailingFileSystem.INITIALIZE, () ->
+ FileSystem.newInstance(new URI("failing://localhost"), FailingFileSystem
+ .failingConf()));
+ assertThat(FailingFileSystem.initCount).describedAs("init count")
+ .isEqualTo(1);
+ assertThat(FailingFileSystem.closeCount).describedAs("close count")
+ .isEqualTo(1);
+ }
+
+ /**
+ * An FS which will fail on both init and close, and update
+ * counters of invocations as it does so.
+ */
+ public static class FailingFileSystem extends FileSystem {
+
+ public static final String INITIALIZE = "initialize()";
+
+ public static final String CLOSE = "close()";
+
+ private static int initCount;
+
+ private static int closeCount;
+
+ private static Configuration failingConf() {
+ final Configuration conf = new Configuration(false);
+ conf.setClass("fs.failing.impl", FailingFileSystem.class,
+ FileSystem.class);
+ return conf;
+ }
+
+ @Override
+ public void initialize(final URI name, final Configuration conf)
+ throws IOException {
+ super.initialize(name, conf);
+ initCount++;
+ throw new IOException(INITIALIZE);
+ }
+
+ @Override
+ public void close() throws IOException {
+ closeCount++;
+ throw new IOException(CLOSE);
+ }
+
+ @Override
+ public URI getUri() {
+ return null;
+ }
+
+ @Override
+ public FSDataInputStream open(final Path f, final int bufferSize)
+ throws IOException {
+ return null;
+ }
+
+ @Override
+ public FSDataOutputStream create(final Path f,
+ final FsPermission permission,
+ final boolean overwrite,
+ final int bufferSize,
+ final short replication,
+ final long blockSize,
+ final Progressable progress) throws IOException {
+ return null;
+ }
+
+ @Override
+ public FSDataOutputStream append(final Path f,
+ final int bufferSize,
+ final Progressable progress) throws IOException {
+ return null;
+ }
+
+ @Override
+ public boolean rename(final Path src, final Path dst) throws IOException {
+ return false;
+ }
+
+ @Override
+ public boolean delete(final Path f, final boolean recursive)
+ throws IOException {
+ return false;
+ }
+
+ @Override
+ public FileStatus[] listStatus(final Path f)
+ throws FileNotFoundException, IOException {
+ return new FileStatus[0];
+ }
+
+ @Override
+ public void setWorkingDirectory(final Path new_dir) {
+
+ }
+
+ @Override
+ public Path getWorkingDirectory() {
+ return null;
+ }
+
+ @Override
+ public boolean mkdirs(final Path f, final FsPermission permission)
+ throws IOException {
+ return false;
+ }
+
+ @Override
+ public FileStatus getFileStatus(final Path f) throws IOException {
+ return null;
+ }
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index 5d22a6a2a4896..1ca1f241e5e9d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
+import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
@@ -44,6 +45,7 @@
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.jar.Attributes;
@@ -64,22 +66,38 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestFileUtil {
private static final Logger LOG = LoggerFactory.getLogger(TestFileUtil.class);
- private static final File TEST_DIR = GenericTestUtils.getTestDir("fu");
+ @Rule
+ public TemporaryFolder testFolder = new TemporaryFolder();
+
private static final String FILE = "x";
private static final String LINK = "y";
private static final String DIR = "dir";
- private final File del = new File(TEST_DIR, "del");
- private final File tmp = new File(TEST_DIR, "tmp");
- private final File dir1 = new File(del, DIR + "1");
- private final File dir2 = new File(del, DIR + "2");
- private final File partitioned = new File(TEST_DIR, "partitioned");
+
+ private static final String FILE_1_NAME = "file1";
+
+ private File del;
+ private File tmp;
+ private File dir1;
+ private File dir2;
+ private File partitioned;
+
+ private File xSubDir;
+ private File xSubSubDir;
+ private File ySubDir;
+
+ private File file2;
+ private File file22;
+ private File file3;
+ private File zlink;
private InetAddress inet1;
private InetAddress inet2;
@@ -116,21 +134,34 @@ public class TestFileUtil {
* file: part-r-00000, contents: "foo"
* file: part-r-00001, contents: "bar"
*/
- @Ignore
- private void setupDirs() throws IOException {
- Assert.assertFalse(del.exists());
- Assert.assertFalse(tmp.exists());
- Assert.assertFalse(partitioned.exists());
- del.mkdirs();
- tmp.mkdirs();
- partitioned.mkdirs();
+ @Before
+ public void setup() throws IOException {
+ del = testFolder.newFolder("del");
+ tmp = testFolder.newFolder("tmp");
+ partitioned = testFolder.newFolder("partitioned");
+
+ zlink = new File(del, "zlink");
+
+ xSubDir = new File(del, "xSubDir");
+ xSubSubDir = new File(xSubDir, "xSubSubDir");
+ ySubDir = new File(del, "ySubDir");
+
+
+ file2 = new File(xSubDir, "file2");
+ file22 = new File(xSubSubDir, "file22");
+ file3 = new File(ySubDir, "file3");
+
+ dir1 = new File(del, DIR + "1");
+ dir2 = new File(del, DIR + "2");
+
+ FileUtils.forceMkdir(dir1);
+ FileUtils.forceMkdir(dir2);
+
new File(del, FILE).createNewFile();
File tmpFile = new File(tmp, FILE);
tmpFile.createNewFile();
- // create directories
- dir1.mkdirs();
- dir2.mkdirs();
+ // create files
new File(dir1, FILE).createNewFile();
new File(dir2, FILE).createNewFile();
@@ -151,6 +182,11 @@ private void setupDirs() throws IOException {
FileUtil.symLink(del.toString(), dir1.toString() + "/cycle");
}
+ @After
+ public void tearDown() throws IOException {
+ testFolder.delete();
+ }
+
/**
* Creates a new file in the specified directory, with the specified name and
* the specified file contents. This method will add a newline terminator to
@@ -175,7 +211,6 @@ private File createFile(File directory, String name, String contents)
@Test (timeout = 30000)
public void testListFiles() throws IOException {
- setupDirs();
//Test existing files case
File[] files = FileUtil.listFiles(partitioned);
Assert.assertEquals(2, files.length);
@@ -202,7 +237,6 @@ public void testListFiles() throws IOException {
@Test (timeout = 30000)
public void testListAPI() throws IOException {
- setupDirs();
//Test existing files case
String[] files = FileUtil.list(partitioned);
Assert.assertEquals("Unexpected number of pre-existing files", 2, files.length);
@@ -227,30 +261,8 @@ public void testListAPI() throws IOException {
}
}
- @Before
- public void before() throws IOException {
- cleanupImpl();
- }
-
- @After
- public void tearDown() throws IOException {
- cleanupImpl();
- }
-
- private void cleanupImpl() throws IOException {
- FileUtil.fullyDelete(del, true);
- Assert.assertTrue(!del.exists());
-
- FileUtil.fullyDelete(tmp, true);
- Assert.assertTrue(!tmp.exists());
-
- FileUtil.fullyDelete(partitioned, true);
- Assert.assertTrue(!partitioned.exists());
- }
-
@Test (timeout = 30000)
public void testFullyDelete() throws IOException {
- setupDirs();
boolean ret = FileUtil.fullyDelete(del);
Assert.assertTrue(ret);
Assert.assertFalse(del.exists());
@@ -265,8 +277,6 @@ public void testFullyDelete() throws IOException {
*/
@Test (timeout = 30000)
public void testFullyDeleteSymlinks() throws IOException {
- setupDirs();
-
File link = new File(del, LINK);
Assert.assertEquals(5, del.list().length);
// Since tmpDir is symlink to tmp, fullyDelete(tmpDir) should not
@@ -295,7 +305,6 @@ public void testFullyDeleteSymlinks() throws IOException {
*/
@Test (timeout = 30000)
public void testFullyDeleteDanglingSymlinks() throws IOException {
- setupDirs();
// delete the directory tmp to make tmpDir a dangling link to dir tmp and
// to make y as a dangling link to file tmp/x
boolean ret = FileUtil.fullyDelete(tmp);
@@ -322,7 +331,6 @@ public void testFullyDeleteDanglingSymlinks() throws IOException {
@Test (timeout = 30000)
public void testFullyDeleteContents() throws IOException {
- setupDirs();
boolean ret = FileUtil.fullyDeleteContents(del);
Assert.assertTrue(ret);
Assert.assertTrue(del.exists());
@@ -336,15 +344,6 @@ private void validateTmpDir() {
Assert.assertTrue(new File(tmp, FILE).exists());
}
- private final File xSubDir = new File(del, "xSubDir");
- private final File xSubSubDir = new File(xSubDir, "xSubSubDir");
- private final File ySubDir = new File(del, "ySubDir");
- private static final String file1Name = "file1";
- private final File file2 = new File(xSubDir, "file2");
- private final File file22 = new File(xSubSubDir, "file22");
- private final File file3 = new File(ySubDir, "file3");
- private final File zlink = new File(del, "zlink");
-
/**
* Creates a directory which can not be deleted completely.
*
@@ -366,36 +365,30 @@ private void validateTmpDir() {
* @throws IOException
*/
private void setupDirsAndNonWritablePermissions() throws IOException {
- Assert.assertFalse("The directory del should not have existed!",
- del.exists());
- del.mkdirs();
- new MyFile(del, file1Name).createNewFile();
+ new MyFile(del, FILE_1_NAME).createNewFile();
// "file1" is non-deletable by default, see MyFile.delete().
xSubDir.mkdirs();
file2.createNewFile();
-
+
xSubSubDir.mkdirs();
file22.createNewFile();
-
+
revokePermissions(file22);
revokePermissions(xSubSubDir);
-
+
revokePermissions(file2);
revokePermissions(xSubDir);
-
+
ySubDir.mkdirs();
file3.createNewFile();
- Assert.assertFalse("The directory tmp should not have existed!",
- tmp.exists());
- tmp.mkdirs();
File tmpFile = new File(tmp, FILE);
tmpFile.createNewFile();
FileUtil.symLink(tmpFile.toString(), zlink.toString());
}
-
+
private static void grantPermissions(final File f) {
FileUtil.setReadable(f, true);
FileUtil.setWritable(f, true);
@@ -417,7 +410,7 @@ private void validateAndSetWritablePermissions(
Assert.assertFalse("The return value should have been false.", ret);
Assert.assertTrue("The file file1 should not have been deleted.",
- new File(del, file1Name).exists());
+ new File(del, FILE_1_NAME).exists());
Assert.assertEquals(
"The directory xSubDir *should* not have been deleted.",
@@ -445,7 +438,7 @@ public void testFailFullyDelete() throws IOException {
boolean ret = FileUtil.fullyDelete(new MyFile(del));
validateAndSetWritablePermissions(true, ret);
}
-
+
@Test (timeout = 30000)
public void testFailFullyDeleteGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
@@ -482,7 +475,7 @@ public MyFile(File parent, String child) {
public boolean delete() {
LOG.info("Trying to delete myFile " + getAbsolutePath());
boolean bool = false;
- if (getName().equals(file1Name)) {
+ if (getName().equals(FILE_1_NAME)) {
bool = false;
} else {
bool = super.delete();
@@ -532,7 +525,7 @@ public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
// this time the directories with revoked permissions *should* be deleted:
validateAndSetWritablePermissions(false, ret);
}
-
+
/**
* Test that getDU is able to handle cycles caused due to symbolic links
* and that directory sizes are not added to the final calculated size
@@ -540,9 +533,7 @@ public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
*/
@Test (timeout = 30000)
public void testGetDU() throws Exception {
- setupDirs();
-
- long du = FileUtil.getDU(TEST_DIR);
+ long du = FileUtil.getDU(testFolder.getRoot());
// Only two files (in partitioned). Each has 3 characters + system-specific
// line separator.
final long expected = 2 * (3 + System.getProperty("line.separator").length());
@@ -591,8 +582,6 @@ public void testGetDU() throws Exception {
@Test (timeout = 30000)
public void testUnTar() throws IOException {
- setupDirs();
-
// make a simple tar:
final File simpleTar = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleTar);
@@ -629,7 +618,6 @@ public void testUnTar() throws IOException {
@Test (timeout = 30000)
public void testReplaceFile() throws IOException {
- setupDirs();
final File srcFile = new File(tmp, "src");
// src exists, and target does not exist:
@@ -671,7 +659,6 @@ public void testReplaceFile() throws IOException {
@Test (timeout = 30000)
public void testCreateLocalTempFile() throws IOException {
- setupDirs();
final File baseFile = new File(tmp, "base");
File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false);
File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true);
@@ -687,8 +674,7 @@ public void testCreateLocalTempFile() throws IOException {
@Test (timeout = 30000)
public void testUnZip() throws IOException {
- setupDirs();
- // make a simple zip
+ // make a simple zip
final File simpleZip = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleZip);
ZipOutputStream tos = new ZipOutputStream(os);
@@ -724,7 +710,6 @@ public void testUnZip() throws IOException {
@Test (timeout = 30000)
public void testUnZip2() throws IOException {
- setupDirs();
// make a simple zip
final File simpleZip = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleZip);
@@ -755,8 +740,6 @@ public void testUnZip2() throws IOException {
* Test method copy(FileSystem srcFS, Path src, File dst, boolean deleteSource, Configuration conf)
*/
public void testCopy5() throws IOException {
- setupDirs();
-
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.newInstance(uri, conf);
@@ -846,9 +829,6 @@ public void testStat2Paths2() {
@Test (timeout = 30000)
public void testSymlink() throws Exception {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
byte[] data = "testSymLink".getBytes();
File file = new File(del, FILE);
@@ -881,9 +861,6 @@ public void testSymlink() throws Exception {
*/
@Test (timeout = 30000)
public void testSymlinkRenameTo() throws Exception {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
File file = new File(del, FILE);
file.createNewFile();
File link = new File(del, "_link");
@@ -913,9 +890,6 @@ public void testSymlinkRenameTo() throws Exception {
*/
@Test (timeout = 30000)
public void testSymlinkDelete() throws Exception {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
File file = new File(del, FILE);
file.createNewFile();
File link = new File(del, "_link");
@@ -937,9 +911,6 @@ public void testSymlinkDelete() throws Exception {
*/
@Test (timeout = 30000)
public void testSymlinkLength() throws Exception {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
byte[] data = "testSymLinkData".getBytes();
File file = new File(del, FILE);
@@ -976,9 +947,6 @@ public void testSymlinkLength() throws Exception {
*/
@Test
public void testSymlinkWithNullInput() throws IOException {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
File file = new File(del, FILE);
File link = new File(del, "_link");
@@ -996,9 +964,6 @@ public void testSymlinkWithNullInput() throws IOException {
// The operation should fail and returns 1
result = FileUtil.symLink(null, link.getAbsolutePath());
Assert.assertEquals(1, result);
-
- file.delete();
- link.delete();
}
/**
@@ -1009,9 +974,6 @@ public void testSymlinkWithNullInput() throws IOException {
*/
@Test
public void testSymlinkFileAlreadyExists() throws IOException {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
File file = new File(del, FILE);
File link = new File(del, "_link");
@@ -1027,9 +989,6 @@ public void testSymlinkFileAlreadyExists() throws IOException {
result1 = FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertEquals(1, result1);
-
- file.delete();
- link.delete();
}
/**
@@ -1041,19 +1000,16 @@ public void testSymlinkFileAlreadyExists() throws IOException {
*/
@Test
public void testSymlinkSameFile() throws IOException {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
File file = new File(del, FILE);
+ file.delete();
+
// Create a symbolic link
// The operation should succeed
int result =
FileUtil.symLink(file.getAbsolutePath(), file.getAbsolutePath());
Assert.assertEquals(0, result);
-
- file.delete();
}
/**
@@ -1065,8 +1021,6 @@ public void testSymlinkSameFile() throws IOException {
*/
@Test
public void testSymlink2DifferentFile() throws IOException {
- Assert.assertFalse(del.exists());
- del.mkdirs();
File file = new File(del, FILE);
File fileSecond = new File(del, FILE + "_1");
File link = new File(del, "_link");
@@ -1083,10 +1037,6 @@ public void testSymlink2DifferentFile() throws IOException {
FileUtil.symLink(fileSecond.getAbsolutePath(), link.getAbsolutePath());
Assert.assertEquals(1, result);
-
- file.delete();
- fileSecond.delete();
- link.delete();
}
/**
@@ -1098,8 +1048,6 @@ public void testSymlink2DifferentFile() throws IOException {
*/
@Test
public void testSymlink2DifferentLinks() throws IOException {
- Assert.assertFalse(del.exists());
- del.mkdirs();
File file = new File(del, FILE);
File link = new File(del, "_link");
File linkSecond = new File(del, "_link_1");
@@ -1116,10 +1064,6 @@ public void testSymlink2DifferentLinks() throws IOException {
FileUtil.symLink(file.getAbsolutePath(), linkSecond.getAbsolutePath());
Assert.assertEquals(0, result);
-
- file.delete();
- link.delete();
- linkSecond.delete();
}
private void doUntarAndVerify(File tarFile, File untarDir)
@@ -1164,10 +1108,6 @@ public void testUntar() throws IOException {
@Test (timeout = 30000)
public void testCreateJarWithClassPath() throws Exception {
- // setup test directory for files
- Assert.assertFalse(tmp.exists());
- Assert.assertTrue(tmp.mkdirs());
-
// create files expected to match a wildcard
List wildcardMatches = Arrays.asList(new File(tmp, "wildcard1.jar"),
new File(tmp, "wildcard2.jar"), new File(tmp, "wildcard3.JAR"),
@@ -1256,9 +1196,6 @@ public void testGetJarsInDirectory() throws Exception {
assertTrue("no jars should be returned for a bogus path",
jars.isEmpty());
- // setup test directory for files
- assertFalse(tmp.exists());
- assertTrue(tmp.mkdirs());
// create jar files to be returned
File jar1 = new File(tmp, "wildcard1.jar");
@@ -1364,7 +1301,6 @@ public void testCompareFsDirectories() throws Exception {
@Test(timeout = 8000)
public void testCreateSymbolicLinkUsingJava() throws IOException {
- setupDirs();
final File simpleTar = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleTar);
TarArchiveOutputStream tos = new TarArchiveOutputStream(os);
@@ -1458,9 +1394,6 @@ public void testReadSymlinkWithNullInput() {
*/
@Test
public void testReadSymlink() throws IOException {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
File file = new File(del, FILE);
File link = new File(del, "_link");
@@ -1469,9 +1402,6 @@ public void testReadSymlink() throws IOException {
String result = FileUtil.readLink(link);
Assert.assertEquals(file.getAbsolutePath(), result);
-
- file.delete();
- link.delete();
}
/**
@@ -1482,9 +1412,6 @@ public void testReadSymlink() throws IOException {
*/
@Test
public void testReadSymlinkWithAFileAsInput() throws IOException {
- Assert.assertFalse(del.exists());
- del.mkdirs();
-
File file = new File(del, FILE);
String result = FileUtil.readLink(file);
@@ -1493,6 +1420,166 @@ public void testReadSymlinkWithAFileAsInput() throws IOException {
file.delete();
}
+ /**
+ * Test that bytes are written out correctly to the local file system.
+ */
+ @Test
+ public void testWriteBytesFileSystem() throws IOException {
+ URI uri = tmp.toURI();
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(uri, conf);
+ Path testPath = new Path(new Path(uri), "writebytes.out");
+
+ byte[] write = new byte[] {0x00, 0x01, 0x02, 0x03};
+
+ FileUtil.write(fs, testPath, write);
+
+ byte[] read = FileUtils.readFileToByteArray(new File(testPath.toUri()));
+
+ assertArrayEquals(write, read);
+ }
+
+ /**
+ * Test that a Collection of Strings are written out correctly to the local
+ * file system.
+ */
+ @Test
+ public void testWriteStringsFileSystem() throws IOException {
+ URI uri = tmp.toURI();
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(uri, conf);
+ Path testPath = new Path(new Path(uri), "writestrings.out");
+
+ Collection<String> write = Arrays.asList("over", "the", "lazy", "dog");
+
+ FileUtil.write(fs, testPath, write, StandardCharsets.UTF_8);
+
+ List<String> read =
+ FileUtils.readLines(new File(testPath.toUri()), StandardCharsets.UTF_8);
+
+ assertEquals(write, read);
+ }
+
+ /**
+ * Test that a String is written out correctly to the local file system.
+ */
+ @Test
+ public void testWriteStringFileSystem() throws IOException {
+ URI uri = tmp.toURI();
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(uri, conf);
+ Path testPath = new Path(new Path(uri), "writestring.out");
+
+ String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
+
+ FileUtil.write(fs, testPath, write, StandardCharsets.UTF_8);
+
+ String read = FileUtils.readFileToString(new File(testPath.toUri()),
+ StandardCharsets.UTF_8);
+
+ assertEquals(write, read);
+ }
+
+ /**
+ * Test that a String is written out correctly to the local file system
+ * without specifying a character set.
+ */
+ @Test
+ public void testWriteStringNoCharSetFileSystem() throws IOException {
+ URI uri = tmp.toURI();
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(uri, conf);
+ Path testPath = new Path(new Path(uri), "writestring.out");
+
+ String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
+ FileUtil.write(fs, testPath, write);
+
+ String read = FileUtils.readFileToString(new File(testPath.toUri()),
+ StandardCharsets.UTF_8);
+
+ assertEquals(write, read);
+ }
+
+ /**
+ * Test that bytes are written out correctly to the local file system.
+ */
+ @Test
+ public void testWriteBytesFileContext() throws IOException {
+ URI uri = tmp.toURI();
+ Configuration conf = new Configuration();
+ FileContext fc = FileContext.getFileContext(uri, conf);
+ Path testPath = new Path(new Path(uri), "writebytes.out");
+
+ byte[] write = new byte[] {0x00, 0x01, 0x02, 0x03};
+
+ FileUtil.write(fc, testPath, write);
+
+ byte[] read = FileUtils.readFileToByteArray(new File(testPath.toUri()));
+
+ assertArrayEquals(write, read);
+ }
+
+ /**
+ * Test that a Collection of Strings are written out correctly to the local
+ * file system.
+ */
+ @Test
+ public void testWriteStringsFileContext() throws IOException {
+ URI uri = tmp.toURI();
+ Configuration conf = new Configuration();
+ FileContext fc = FileContext.getFileContext(uri, conf);
+ Path testPath = new Path(new Path(uri), "writestrings.out");
+
+ Collection<String> write = Arrays.asList("over", "the", "lazy", "dog");
+
+ FileUtil.write(fc, testPath, write, StandardCharsets.UTF_8);
+
+ List<String> read =
+ FileUtils.readLines(new File(testPath.toUri()), StandardCharsets.UTF_8);
+
+ assertEquals(write, read);
+ }
+
+ /**
+ * Test that a String is written out correctly to the local file system.
+ */
+ @Test
+ public void testWriteStringFileContext() throws IOException {
+ URI uri = tmp.toURI();
+ Configuration conf = new Configuration();
+ FileContext fc = FileContext.getFileContext(uri, conf);
+ Path testPath = new Path(new Path(uri), "writestring.out");
+
+ String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
+
+ FileUtil.write(fc, testPath, write, StandardCharsets.UTF_8);
+
+ String read = FileUtils.readFileToString(new File(testPath.toUri()),
+ StandardCharsets.UTF_8);
+
+ assertEquals(write, read);
+ }
+
+ /**
+ * Test that a String is written out correctly to the local file system
+ * without specifying a character set.
+ */
+ @Test
+ public void testWriteStringNoCharSetFileContext() throws IOException {
+ URI uri = tmp.toURI();
+ Configuration conf = new Configuration();
+ FileContext fc = FileContext.getFileContext(uri, conf);
+ Path testPath = new Path(new Path(uri), "writestring.out");
+
+ String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
+ FileUtil.write(fc, testPath, write);
+
+ String read = FileUtils.readFileToString(new File(testPath.toUri()),
+ StandardCharsets.UTF_8);
+
+ assertEquals(write, read);
+ }
+
/**
* The size of FileSystem cache.
*/
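The new TestFileUtil cases above exercise the FileUtil.write() overloads for both FileSystem and FileContext targets. A minimal sketch of how the FileSystem-side overloads shown in these tests might be called outside the test harness, assuming a local filesystem and a scratch directory chosen purely for illustration:

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public final class FileUtilWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Local filesystem; the target directory below is an assumption for this sketch.
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path("/tmp/fileutil-write-sketch");

    // Write raw bytes, a collection of lines, and a single string,
    // mirroring the overloads exercised by the tests above.
    FileUtil.write(fs, new Path(dir, "bytes.out"), new byte[] {0x00, 0x01});
    FileUtil.write(fs, new Path(dir, "lines.out"),
        Arrays.asList("over", "the", "lazy", "dog"), StandardCharsets.UTF_8);
    FileUtil.write(fs, new Path(dir, "string.out"), "hello",
        StandardCharsets.UTF_8);
  }
}
```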
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index 3b923e05bd3a5..2097633839112 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -242,15 +243,11 @@ FutureDataInputStreamBuilder openFile(PathHandle pathHandle)
CompletableFuture<FSDataInputStream> openFileWithOptions(
PathHandle pathHandle,
- Set<String> mandatoryKeys,
- Configuration options,
- int bufferSize) throws IOException;
+ OpenFileParameters parameters) throws IOException;
CompletableFuture<FSDataInputStream> openFileWithOptions(
Path path,
- Set<String> mandatoryKeys,
- Configuration options,
- int bufferSize) throws IOException;
+ OpenFileParameters parameters) throws IOException;
}
@Test
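The TestHarFileSystem change above tracks a refactoring in which the three loose arguments of openFileWithOptions() (mandatory keys, options, buffer size) are bundled into a single OpenFileParameters value from org.apache.hadoop.fs.impl, the package named in the new import. A sketch of how a caller might populate such a parameter object; the fluent withXxx() setter names are an assumption made for illustration and are not confirmed by this patch:

```java
// Sketch only: the setter names on OpenFileParameters are assumed here,
// not taken from this patch.
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.impl.OpenFileParameters;

public final class OpenFileParametersSketch {
  static OpenFileParameters buildParameters(Configuration options) {
    return new OpenFileParameters()
        .withMandatoryKeys(Collections.emptySet()) // assumed setter name
        .withOptions(options)                      // assumed setter name
        .withBufferSize(4096);                     // assumed setter name
  }
}
```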
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index 07c99e0b6a528..79222ce67d6cf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -22,11 +22,11 @@
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
-import org.junit.internal.AssumptionViolatedException;
+import org.junit.AssumptionViolatedException;
-import java.io.FileNotFoundException;
import java.io.IOException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -40,7 +40,7 @@
* Test creating files, overwrite options etc.
*/
public abstract class AbstractContractCreateTest extends
- AbstractFSContractTestBase {
+ AbstractFSContractTestBase {
/**
* How long to wait for a path to become visible.
@@ -113,7 +113,6 @@ private void testOverwriteExistingFile(boolean useBuilder) throws Throwable {
* This test catches some eventual consistency problems that blobstores exhibit,
* as we are implicitly verifying that updates are consistent. This
* is why different file lengths and datasets are used
- * @throws Throwable
*/
@Test
public void testOverwriteExistingFile() throws Throwable {
@@ -137,10 +136,6 @@ private void testOverwriteEmptyDirectory(boolean useBuilder)
} catch (FileAlreadyExistsException expected) {
//expected
handleExpectedException(expected);
- } catch (FileNotFoundException e) {
- handleRelaxedException("overwriting a dir with a file ",
- "FileAlreadyExistsException",
- e);
} catch (IOException e) {
handleRelaxedException("overwriting a dir with a file ",
"FileAlreadyExistsException",
@@ -189,10 +184,6 @@ private void testOverwriteNonEmptyDirectory(boolean useBuilder)
} catch (FileAlreadyExistsException expected) {
//expected
handleExpectedException(expected);
- } catch (FileNotFoundException e) {
- handleRelaxedException("overwriting a dir with a file ",
- "FileAlreadyExistsException",
- e);
} catch (IOException e) {
handleRelaxedException("overwriting a dir with a file ",
"FileAlreadyExistsException",
@@ -332,4 +323,117 @@ public void testCreateMakesParentDirs() throws Throwable {
assertTrue("Grandparent directory does not appear to be a directory",
fs.getFileStatus(grandparent).isDirectory());
}
+
+ @Test
+ public void testCreateFileUnderFile() throws Throwable {
+ describe("Verify that it is forbidden to create file/file");
+ if (isSupported(CREATE_FILE_UNDER_FILE_ALLOWED)) {
+ // object store or some file systems: downgrade to a skip so that the
+ // failure is visible in test results
+ skip("This filesystem supports creating files under files");
+ }
+ Path grandparent = methodPath();
+ Path parent = new Path(grandparent, "parent");
+ expectCreateUnderFileFails(
+ "creating a file under a file",
+ grandparent,
+ parent);
+ }
+
+ @Test
+ public void testCreateUnderFileSubdir() throws Throwable {
+ describe("Verify that it is forbidden to create file/dir/file");
+ if (isSupported(CREATE_FILE_UNDER_FILE_ALLOWED)) {
+ // object store or some file systems: downgrade to a skip so that the
+ // failure is visible in test results
+ skip("This filesystem supports creating files under files");
+ }
+ Path grandparent = methodPath();
+ Path parent = new Path(grandparent, "parent");
+ Path child = new Path(parent, "child");
+ expectCreateUnderFileFails(
+ "creating a file under a subdirectory of a file",
+ grandparent,
+ child);
+ }
+
+
+ @Test
+ public void testMkdirUnderFile() throws Throwable {
+ describe("Verify that it is forbidden to create file/dir");
+ Path grandparent = methodPath();
+ Path parent = new Path(grandparent, "parent");
+ expectMkdirsUnderFileFails("mkdirs() under a file",
+ grandparent, parent);
+ }
+
+ @Test
+ public void testMkdirUnderFileSubdir() throws Throwable {
+ describe("Verify that it is forbidden to create file/dir/dir");
+ Path grandparent = methodPath();
+ Path parent = new Path(grandparent, "parent");
+ Path child = new Path(parent, "child");
+ expectMkdirsUnderFileFails("mkdirs() file/dir",
+ grandparent, child);
+
+ try {
+ // create the child
+ mkdirs(child);
+ } catch (FileAlreadyExistsException | ParentNotDirectoryException ex) {
+ // either of these may be raised.
+ handleExpectedException(ex);
+ } catch (IOException e) {
+ handleRelaxedException("creating a file under a subdirectory of a file ",
+ "FileAlreadyExistsException",
+ e);
+ }
+ }
+
+ /**
+ * Expect that touch() will fail because the parent is a file.
+ * @param action action for message
+ * @param file filename to create
+ * @param descendant path under file
+ * @throws Exception failure
+ */
+ protected void expectCreateUnderFileFails(String action,
+ Path file, Path descendant)
+ throws Exception {
+ createFile(file);
+ try {
+ // create the child
+ createFile(descendant);
+ } catch (FileAlreadyExistsException | ParentNotDirectoryException ex) {
+ //expected
+ handleExpectedException(ex);
+ } catch (IOException e) {
+ handleRelaxedException(action,
+ "ParentNotDirectoryException",
+ e);
+ }
+ }
+
+ protected void expectMkdirsUnderFileFails(String action,
+ Path file, Path descendant)
+ throws Exception {
+ createFile(file);
+ try {
+ // now mkdirs
+ mkdirs(descendant);
+ } catch (FileAlreadyExistsException | ParentNotDirectoryException ex) {
+ //expected
+ handleExpectedException(ex);
+ } catch (IOException e) {
+ handleRelaxedException(action,
+ "ParentNotDirectoryException",
+ e);
+ }
+ }
+
+ private void createFile(Path path) throws IOException {
+ byte[] data = dataset(256, 'a', 'z');
+ FileSystem fs = getFileSystem();
+ writeDataset(fs, path, data, data.length, 1024 * 1024,
+ true);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index 6809fb339b562..328c8e1377904 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -86,7 +86,7 @@ public void testDeleteNonEmptyDirNonRecursive() throws Throwable {
@Test
public void testDeleteNonEmptyDirRecursive() throws Throwable {
- Path path = path("testDeleteNonEmptyDirNonRecursive");
+ Path path = path("testDeleteNonEmptyDirRecursive");
mkdirs(path);
Path file = new Path(path, "childfile");
ContractTestUtils.writeTextFile(getFileSystem(), file, "goodbye, world",
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
index 85bd137813f66..f63314d39292e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
@@ -279,6 +279,14 @@ public void testListFilesNoDir() throws Throwable {
}
}
+ @Test
+ public void testListStatusIteratorNoDir() throws Throwable {
+ describe("test the listStatusIterator call on a path which is not " +
+ "present");
+ intercept(FileNotFoundException.class,
+ () -> getFileSystem().listStatusIterator(path("missing")));
+ }
+
@Test
public void testLocatedStatusNoDir() throws Throwable {
describe("test the LocatedStatus call on a path which is not present");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
index b6e94a664165e..a43053180fbf8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
@@ -281,6 +281,7 @@ public void testOpenFileApplyRead() throws Throwable {
createFile(fs, path, true,
dataset(len, 0x40, 0x80));
CompletableFuture<Long> readAllBytes = fs.openFile(path)
+ .withFileStatus(fs.getFileStatus(path))
.build()
.thenApply(ContractTestUtils::readStream);
assertEquals("Wrong number of bytes read value",
@@ -302,4 +303,12 @@ public void testOpenFileApplyAsyncRead() throws Throwable {
accepted.get());
}
+ @Test
+ public void testOpenFileNullStatus() throws Throwable {
+ describe("use openFile() with a null status");
+ Path path = path("testOpenFileNullStatus");
+ intercept(NullPointerException.class,
+ () -> getFileSystem().openFile(path).withFileStatus(null));
+ }
+
}
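The open-contract additions above exercise the openFile() builder: supplying the already known FileStatus may let a store skip a second metadata lookup, while a null status is rejected immediately with a NullPointerException. A minimal usage sketch, assuming an initialised FileSystem and an existing path:

```java
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class OpenFileBuilderSketch {
  static long openWithKnownStatus(FileSystem fs, Path path) throws Exception {
    FileStatus status = fs.getFileStatus(path);
    // Passing the status up front mirrors the withFileStatus() call added
    // in the test above.
    CompletableFuture<FSDataInputStream> future = fs.openFile(path)
        .withFileStatus(status)
        .build();
    try (FSDataInputStream in = future.get()) {
      return status.getLen();
    }
  }
}
```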
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
index 2751294beb92c..78ff2541483a3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
@@ -29,10 +29,10 @@
import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
/**
- * Test creating files, overwrite options &c
+ * Test renaming files.
*/
public abstract class AbstractContractRenameTest extends
- AbstractFSContractTestBase {
+ AbstractFSContractTestBase {
@Test
public void testRenameNewFileSameDir() throws Throwable {
@@ -83,7 +83,8 @@ public void testRenameNonexistentFile() throws Throwable {
"FileNotFoundException",
e);
}
- assertPathDoesNotExist("rename nonexistent file created a destination file", target);
+ assertPathDoesNotExist("rename nonexistent file created a destination file",
+ target);
}
/**
@@ -112,7 +113,7 @@ public void testRenameFileOverExistingFile() throws Throwable {
// the filesystem supports rename(file, file2) by overwriting file2
assertTrue("Rename returned false", renamed);
- destUnchanged = false;
+ destUnchanged = false;
} else {
// rename is rejected by returning 'false' or throwing an exception
if (renamed && !renameReturnsFalseOnRenameDestExists) {
@@ -129,12 +130,13 @@ public void testRenameFileOverExistingFile() throws Throwable {
// verify that the destination file is as expected based on the expected
// outcome
verifyFileContents(getFileSystem(), destFile,
- destUnchanged? destData: srcData);
+ destUnchanged ? destData: srcData);
}
@Test
public void testRenameDirIntoExistingDir() throws Throwable {
- describe("Verify renaming a dir into an existing dir puts it underneath"
+ describe("Verify renaming a dir into an existing dir puts it"
+ + " underneath"
+" and leaves existing files alone");
FileSystem fs = getFileSystem();
String sourceSubdir = "source";
@@ -145,15 +147,15 @@ public void testRenameDirIntoExistingDir() throws Throwable {
Path destDir = path("dest");
Path destFilePath = new Path(destDir, "dest-512.txt");
- byte[] destDateset = dataset(512, 'A', 'Z');
- writeDataset(fs, destFilePath, destDateset, destDateset.length, 1024, false);
+ byte[] destData = dataset(512, 'A', 'Z');
+ writeDataset(fs, destFilePath, destData, destData.length, 1024, false);
assertIsFile(destFilePath);
boolean rename = rename(srcDir, destDir);
Path renamedSrc = new Path(destDir, sourceSubdir);
assertIsFile(destFilePath);
assertIsDirectory(renamedSrc);
- verifyFileContents(fs, destFilePath, destDateset);
+ verifyFileContents(fs, destFilePath, destData);
assertTrue("rename returned false though the contents were copied", rename);
}
@@ -204,7 +206,8 @@ public void testRenameWithNonEmptySubDir() throws Throwable {
assertPathExists("not created in src/sub dir",
new Path(srcSubDir, "subfile.txt"));
- fs.rename(srcDir, finalDir);
+ rename(srcDir, finalDir);
+
// Accept both POSIX rename behavior and CLI rename behavior
if (renameRemoveEmptyDest) {
// POSIX rename behavior
@@ -285,4 +288,54 @@ protected void validateAncestorsMoved(Path src, Path dst, String nestedPath)
}
}
+ @Test
+ public void testRenameFileUnderFile() throws Exception {
+ String action = "rename directly under file";
+ describe(action);
+ Path base = methodPath();
+ Path grandparent = new Path(base, "file");
+ expectRenameUnderFileFails(action,
+ grandparent,
+ new Path(base, "testRenameSrc"),
+ new Path(grandparent, "testRenameTarget"));
+ }
+
+ @Test
+ public void testRenameFileUnderFileSubdir() throws Exception {
+ String action = "rename directly under file/subdir";
+ describe(action);
+ Path base = methodPath();
+ Path grandparent = new Path(base, "file");
+ Path parent = new Path(grandparent, "parent");
+ expectRenameUnderFileFails(action,
+ grandparent,
+ new Path(base, "testRenameSrc"),
+ new Path(parent, "testRenameTarget"));
+ }
+
+ protected void expectRenameUnderFileFails(String action,
+ Path file, Path renameSrc, Path renameTarget)
+ throws Exception {
+ byte[] data = dataset(256, 'a', 'z');
+ FileSystem fs = getFileSystem();
+ writeDataset(fs, file, data, data.length, 1024 * 1024,
+ true);
+ writeDataset(fs, renameSrc, data, data.length, 1024 * 1024,
+ true);
+ String outcome;
+ boolean renamed;
+ try {
+ renamed = rename(renameSrc, renameTarget);
+ outcome = action + ": rename (" + renameSrc + ", " + renameTarget
+ + ")= " + renamed;
+ } catch (IOException e) {
+ // raw local raises an exception here
+ renamed = false;
+ outcome = "rename raised an exception: " + e;
+ }
+ assertPathDoesNotExist("after " + outcome, renameTarget);
+ assertFalse(outcome, renamed);
+ assertPathExists(action, renameSrc);
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
index 7ba32bafa552b..5eb1e892f83d5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
@@ -18,12 +18,13 @@
package org.apache.hadoop.fs.contract;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.Path;
-
import org.junit.Test;
import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -34,21 +35,22 @@
public abstract class AbstractContractUnbufferTest extends AbstractFSContractTestBase {
private Path file;
+ private byte[] fileBytes;
@Override
public void setup() throws Exception {
super.setup();
skipIfUnsupported(SUPPORTS_UNBUFFER);
file = path("unbufferFile");
- createFile(getFileSystem(), file, true,
- dataset(TEST_FILE_LEN, 0, 255));
+ fileBytes = dataset(TEST_FILE_LEN, 0, 255);
+ createFile(getFileSystem(), file, true, fileBytes);
}
@Test
public void testUnbufferAfterRead() throws IOException {
describe("unbuffer a file after a single read");
try (FSDataInputStream stream = getFileSystem().open(file)) {
- assertEquals(128, stream.read(new byte[128]));
+ validateFullFileContents(stream);
unbuffer(stream);
}
}
@@ -58,15 +60,14 @@ public void testUnbufferBeforeRead() throws IOException {
describe("unbuffer a file before a read");
try (FSDataInputStream stream = getFileSystem().open(file)) {
unbuffer(stream);
- assertEquals(128, stream.read(new byte[128]));
+ validateFullFileContents(stream);
}
}
@Test
public void testUnbufferEmptyFile() throws IOException {
Path emptyFile = path("emptyUnbufferFile");
- createFile(getFileSystem(), emptyFile, true,
- dataset(TEST_FILE_LEN, 0, 255));
+ getFileSystem().create(emptyFile, true).close();
describe("unbuffer an empty file");
try (FSDataInputStream stream = getFileSystem().open(emptyFile)) {
unbuffer(stream);
@@ -79,13 +80,15 @@ public void testUnbufferOnClosedFile() throws IOException {
FSDataInputStream stream = null;
try {
stream = getFileSystem().open(file);
- assertEquals(128, stream.read(new byte[128]));
+ validateFullFileContents(stream);
} finally {
if (stream != null) {
stream.close();
}
}
- unbuffer(stream);
+ if (stream != null) {
+ unbuffer(stream);
+ }
}
@Test
@@ -94,32 +97,58 @@ public void testMultipleUnbuffers() throws IOException {
try (FSDataInputStream stream = getFileSystem().open(file)) {
unbuffer(stream);
unbuffer(stream);
- assertEquals(128, stream.read(new byte[128]));
+ validateFullFileContents(stream);
unbuffer(stream);
unbuffer(stream);
}
}
- @Test
+ @Test
public void testUnbufferMultipleReads() throws IOException {
describe("unbuffer a file multiple times");
try (FSDataInputStream stream = getFileSystem().open(file)) {
unbuffer(stream);
- assertEquals(128, stream.read(new byte[128]));
+ validateFileContents(stream, TEST_FILE_LEN / 8, 0);
unbuffer(stream);
- assertEquals(128, stream.read(new byte[128]));
- assertEquals(128, stream.read(new byte[128]));
+ validateFileContents(stream, TEST_FILE_LEN / 8, TEST_FILE_LEN / 8);
+ validateFileContents(stream, TEST_FILE_LEN / 4, TEST_FILE_LEN / 4);
unbuffer(stream);
- assertEquals(128, stream.read(new byte[128]));
- assertEquals(128, stream.read(new byte[128]));
- assertEquals(128, stream.read(new byte[128]));
+ validateFileContents(stream, TEST_FILE_LEN / 2, TEST_FILE_LEN / 2);
unbuffer(stream);
+ assertEquals("stream should be at end of file", TEST_FILE_LEN,
+ stream.getPos());
}
}
private void unbuffer(FSDataInputStream stream) throws IOException {
long pos = stream.getPos();
stream.unbuffer();
- assertEquals(pos, stream.getPos());
+ assertEquals("unbuffer unexpectedly changed the stream position", pos,
+ stream.getPos());
+ }
+
+ protected void validateFullFileContents(FSDataInputStream stream)
+ throws IOException {
+ validateFileContents(stream, TEST_FILE_LEN, 0);
+ }
+
+ protected void validateFileContents(FSDataInputStream stream, int length,
+ int startIndex)
+ throws IOException {
+ byte[] streamData = new byte[length];
+ assertEquals("failed to read expected number of bytes from "
+ + "stream", length, stream.read(streamData));
+ byte[] validateFileBytes;
+ if (startIndex == 0 && length == fileBytes.length) {
+ validateFileBytes = fileBytes;
+ } else {
+ validateFileBytes = Arrays.copyOfRange(fileBytes, startIndex,
+ startIndex + length);
+ }
+ assertArrayEquals("invalid file contents", validateFileBytes, streamData);
+ }
+
+ protected Path getFile() {
+ return file;
}
}
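The unbuffer contract test now validates the full file contents around each unbuffer() call instead of fixed 128-byte reads. For reference, a short sketch of the calling pattern the test exercises: release buffers while a stream sits idle, then continue reading from the same position. The capability probe is one way a caller might guard the call; the test itself relies on the contract option instead.

```java
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;

public final class UnbufferSketch {
  static void readThenIdle(FileSystem fs, Path path) throws Exception {
    try (FSDataInputStream in = fs.open(path)) {
      byte[] chunk = new byte[4096];
      in.read(chunk);
      // Drop socket/heap buffers while the stream is idle; the position
      // is preserved, as the contract test asserts.
      if (in.hasCapability(StreamCapabilities.UNBUFFER)) {
        in.unbuffer();
      }
      in.read(chunk);  // subsequent reads continue from the same offset
    }
  }
}
```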
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java
index f09496a6082c8..76d3116c3abdc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java
@@ -69,6 +69,14 @@ public void init() throws IOException {
}
+ /**
+ * Any teardown logic can go here.
+ * @throws IOException IO problems
+ */
+ public void teardown() throws IOException {
+
+ }
+
/**
* Add a configuration resource to this instance's configuration
* @param resource resource reference
@@ -113,7 +121,7 @@ public FileSystem getFileSystem(URI uri) throws IOException {
public abstract FileSystem getTestFileSystem() throws IOException;
/**
- * Get the scheme of this FS
+ * Get the scheme of this FS.
* @return the scheme this FS supports
*/
public abstract String getScheme();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
index 1cd2164fad300..ac9de6d7bfe8c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
@@ -82,6 +82,15 @@ public static void nameTestThread() {
Thread.currentThread().setName("JUnit");
}
+ @Before
+ public void nameThread() {
+ Thread.currentThread().setName("JUnit-" + getMethodName());
+ }
+
+ protected String getMethodName() {
+ return methodName.getMethodName();
+ }
+
/**
* This must be implemented by all instantiated test cases.
* -provide the FS contract
@@ -172,6 +181,7 @@ protected int getTestTimeoutMillis() {
*/
@Before
public void setup() throws Exception {
+ Thread.currentThread().setName("setup");
LOG.debug("== Setup ==");
contract = createContract(createConfiguration());
contract.init();
@@ -200,8 +210,12 @@ public void setup() throws Exception {
*/
@After
public void teardown() throws Exception {
+ Thread.currentThread().setName("teardown");
LOG.debug("== Teardown ==");
deleteTestDirInTeardown();
+ if (contract != null) {
+ contract.teardown();
+ }
LOG.debug("== Teardown complete ==");
}
@@ -225,6 +239,15 @@ protected Path path(String filepath) throws IOException {
new Path(getContract().getTestPath(), filepath));
}
+ /**
+ * Get a path whose name ends with the name of this method.
+ * @return a path implicitly unique amongst all methods in this class
+ * @throws IOException IO problems
+ */
+ protected Path methodPath() throws IOException {
+ return path(methodName.getMethodName());
+ }
+
/**
* Take a simple path like "/something" and turn it into
* a qualified path against the test FS.
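methodPath() builds on the existing methodName rule, so each test method gets its own path under the contract test directory. A short sketch of the intended usage in a contract test subclass; the local contract is only a stand-in chosen for this sketch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.localfs.LocalFSContract;
import org.junit.Test;

public class ExampleMethodPathTest extends AbstractFSContractTestBase {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    // Stand-in contract for illustration only.
    return new LocalFSContract(conf);
  }

  @Test
  public void testIsolatedDirectory() throws Throwable {
    // Resolves to <contract test root>/testIsolatedDirectory.
    Path base = methodPath();
    mkdirs(base);
    assertIsDirectory(base);
  }
}
```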
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
index 91a112141e987..3f31c07742c59 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
@@ -51,6 +51,15 @@ public interface ContractOptions {
*/
String CREATE_VISIBILITY_DELAYED = "create-visibility-delayed";
+ /**
+ * Flag to indicate that it is possible to create a file under a file.
+ * This is a complete violation of the filesystem rules, but it is one
+ * which object stores have been known to do for performance
+ * and because nobody has ever noticed.
+ * {@value}
+ */
+ String CREATE_FILE_UNDER_FILE_ALLOWED = "create-file-under-file-allowed";
+
/**
* Is a filesystem case sensitive.
* Some of the filesystems that say "no" here may mean
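Contract options such as the new CREATE_FILE_UNDER_FILE_ALLOWED flag are normally declared in a filesystem's contract XML resource and probed with isSupported() in the tests above. A sketch of how a store whose semantics allow file-under-file creation might advertise the flag programmatically; the "fs.contract." key prefix follows the existing option-key convention and is otherwise an assumption of this sketch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.ContractOptions;

public final class ContractOptionSketch {
  static Configuration enableCreateUnderFile(Configuration conf) {
    // Mirrors a <property> entry in the store's contract XML resource;
    // the fs.contract. prefix is assumed from the existing option convention.
    conf.setBoolean(
        "fs.contract." + ContractOptions.CREATE_FILE_UNDER_FILE_ALLOWED, true);
    return conf;
  }
}
```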
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index f61634943bb7f..4789630f95f1c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -418,8 +418,9 @@ public static boolean rm(FileSystem fileSystem,
public static void rename(FileSystem fileSystem, Path src, Path dst)
throws IOException {
rejectRootOperation(src, false);
- assertTrue(fileSystem.rename(src, dst));
- assertPathDoesNotExist(fileSystem, "renamed", src);
+ assertTrue("rename(" + src + ", " + dst + ") failed",
+ fileSystem.rename(src, dst));
+ assertPathDoesNotExist(fileSystem, "renamed source dir", src);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java
new file mode 100644
index 0000000000000..f72a2aec86242
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.contract.sftp;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.sftp.SFTPFileSystem;
+import org.apache.sshd.common.NamedFactory;
+import org.apache.sshd.server.SshServer;
+import org.apache.sshd.server.auth.UserAuth;
+import org.apache.sshd.server.auth.password.UserAuthPasswordFactory;
+import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider;
+import org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory;
+
+public class SFTPContract extends AbstractFSContract {
+
+ private static final String CONTRACT_XML = "contract/sftp.xml";
+ private static final URI TEST_URI =
+ URI.create("sftp://user:password@localhost");
+ private final String testDataDir =
+ new FileSystemTestHelper().getTestRootDir();
+ private final Configuration conf;
+ private SshServer sshd;
+
+ public SFTPContract(Configuration conf) {
+ super(conf);
+ addConfResource(CONTRACT_XML);
+ this.conf = conf;
+ }
+
+ @Override
+ public void init() throws IOException {
+ sshd = SshServer.setUpDefaultServer();
+ // ask OS to assign a port
+ sshd.setPort(0);
+ sshd.setKeyPairProvider(new SimpleGeneratorHostKeyProvider());
+
+ List<NamedFactory<UserAuth>> userAuthFactories = new ArrayList<>();
+ userAuthFactories.add(new UserAuthPasswordFactory());
+
+ sshd.setUserAuthFactories(userAuthFactories);
+ sshd.setPasswordAuthenticator((username, password, session) ->
+ username.equals("user") && password.equals("password")
+ );
+
+ sshd.setSubsystemFactories(
+ Collections.singletonList(new SftpSubsystemFactory()));
+
+ sshd.start();
+ int port = sshd.getPort();
+
+ conf.setClass("fs.sftp.impl", SFTPFileSystem.class, FileSystem.class);
+ conf.setInt("fs.sftp.host.port", port);
+ conf.setBoolean("fs.sftp.impl.disable.cache", true);
+ }
+
+ @Override
+ public void teardown() throws IOException {
+ if (sshd != null) {
+ sshd.stop();
+ }
+ }
+
+ @Override
+ public FileSystem getTestFileSystem() throws IOException {
+ return FileSystem.get(TEST_URI, conf);
+ }
+
+ @Override
+ public String getScheme() {
+ return "sftp";
+ }
+
+ @Override
+ public Path getTestPath() {
+ try {
+ FileSystem fs = FileSystem.get(
+ URI.create("sftp://user:password@localhost"), conf
+ );
+ return fs.makeQualified(new Path(testDataDir));
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java
new file mode 100644
index 0000000000000..20f4116b98019
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.contract.sftp;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+public class TestSFTPContractSeek extends AbstractContractSeekTest {
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return new SFTPContract(conf);
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java
new file mode 100644
index 0000000000000..eca26dea5b39b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ftp;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+
+import org.apache.ftpserver.FtpServer;
+import org.apache.ftpserver.FtpServerFactory;
+import org.apache.ftpserver.ftplet.Authority;
+import org.apache.ftpserver.ftplet.FtpException;
+import org.apache.ftpserver.ftplet.UserManager;
+import org.apache.ftpserver.impl.DefaultFtpServer;
+import org.apache.ftpserver.listener.Listener;
+import org.apache.ftpserver.listener.ListenerFactory;
+import org.apache.ftpserver.usermanager.PropertiesUserManagerFactory;
+import org.apache.ftpserver.usermanager.impl.BaseUser;
+
+/**
+ * Helper class for managing a local FTP server
+ * for unit test purposes only.
+ */
+public class FtpTestServer {
+
+ private int port;
+ private Path ftpRoot;
+ private UserManager userManager;
+ private FtpServer server;
+
+ public FtpTestServer(Path ftpRoot) {
+ this.ftpRoot = ftpRoot;
+ this.userManager = new PropertiesUserManagerFactory().createUserManager();
+ FtpServerFactory serverFactory = createServerFactory();
+ serverFactory.setUserManager(userManager);
+ this.server = serverFactory.createServer();
+ }
+
+ public FtpTestServer start() throws Exception {
+ server.start();
+ Listener listener = ((DefaultFtpServer) server)
+ .getListeners()
+ .get("default");
+ port = listener.getPort();
+ return this;
+ }
+
+ public Path getFtpRoot() {
+ return ftpRoot;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public void stop() {
+ if (!server.isStopped()) {
+ server.stop();
+ }
+ }
+
+ public BaseUser addUser(String name, String password,
+ Authority... authorities) throws IOException, FtpException {
+
+ BaseUser user = new BaseUser();
+ user.setName(name);
+ user.setPassword(password);
+ Path userHome = Files.createDirectory(ftpRoot.resolve(name));
+ user.setHomeDirectory(userHome.toString());
+ user.setAuthorities(Arrays.asList(authorities));
+ userManager.save(user);
+ return user;
+ }
+
+ private FtpServerFactory createServerFactory() {
+ FtpServerFactory serverFactory = new FtpServerFactory();
+ ListenerFactory defaultListener = new ListenerFactory();
+ defaultListener.setPort(0);
+ serverFactory.addListener("default", defaultListener.createListener());
+ return serverFactory;
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
index 3d41ccb91d6c4..d3750e64469b2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
@@ -17,18 +17,35 @@
*/
package org.apache.hadoop.fs.ftp;
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.util.Comparator;
+
import com.google.common.base.Preconditions;
import org.apache.commons.net.ftp.FTP;
-
import org.apache.commons.net.ftp.FTPClient;
import org.apache.commons.net.ftp.FTPFile;
+import org.apache.ftpserver.usermanager.impl.BaseUser;
+import org.apache.ftpserver.usermanager.impl.WritePermission;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
-
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
/**
@@ -37,9 +54,75 @@
*/
public class TestFTPFileSystem {
+ private FtpTestServer server;
+ private java.nio.file.Path testDir;
@Rule
public Timeout testTimeout = new Timeout(180000);
+ @Before
+ public void setUp() throws Exception {
+ testDir = Files.createTempDirectory(
+ GenericTestUtils.getTestDir().toPath(), getClass().getName()
+ );
+ server = new FtpTestServer(testDir).start();
+ }
+
+ @After
+ @SuppressWarnings("ResultOfMethodCallIgnored")
+ public void tearDown() throws Exception {
+ if (server != null) {
+ server.stop();
+ Files.walk(testDir)
+ .sorted(Comparator.reverseOrder())
+ .map(java.nio.file.Path::toFile)
+ .forEach(File::delete);
+ }
+ }
+
+ @Test
+ public void testCreateWithWritePermissions() throws Exception {
+ BaseUser user = server.addUser("test", "password", new WritePermission());
+ Configuration configuration = new Configuration();
+ configuration.set("fs.defaultFS", "ftp:///");
+ configuration.set("fs.ftp.host", "localhost");
+ configuration.setInt("fs.ftp.host.port", server.getPort());
+ configuration.set("fs.ftp.user.localhost", user.getName());
+ configuration.set("fs.ftp.password.localhost", user.getPassword());
+ configuration.setBoolean("fs.ftp.impl.disable.cache", true);
+
+ FileSystem fs = FileSystem.get(configuration);
+ byte[] bytesExpected = "hello world".getBytes(StandardCharsets.UTF_8);
+ try (FSDataOutputStream outputStream = fs.create(new Path("test1.txt"))) {
+ outputStream.write(bytesExpected);
+ }
+ try (FSDataInputStream input = fs.open(new Path("test1.txt"))) {
+ assertThat(bytesExpected, equalTo(IOUtils.readFullyToByteArray(input)));
+ }
+ }
+
+ @Test
+ public void testCreateWithoutWritePermissions() throws Exception {
+ BaseUser user = server.addUser("test", "password");
+ Configuration configuration = new Configuration();
+ configuration.set("fs.defaultFS", "ftp:///");
+ configuration.set("fs.ftp.host", "localhost");
+ configuration.setInt("fs.ftp.host.port", server.getPort());
+ configuration.set("fs.ftp.user.localhost", user.getName());
+ configuration.set("fs.ftp.password.localhost", user.getPassword());
+ configuration.setBoolean("fs.ftp.impl.disable.cache", true);
+
+ FileSystem fs = FileSystem.get(configuration);
+ byte[] bytesExpected = "hello world".getBytes(StandardCharsets.UTF_8);
+ LambdaTestUtils.intercept(
+ IOException.class, "Unable to create file: test1.txt, Aborting",
+ () -> {
+ try (FSDataOutputStream out = fs.create(new Path("test1.txt"))) {
+ out.write(bytesExpected);
+ }
+ }
+ );
+ }
+
@Test
public void testFTPDefaultPort() throws Exception {
FTPFileSystem ftp = new FTPFileSystem();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index b74e75d9ef73d..4b3bd2f94075c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -31,6 +31,7 @@
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Random;
+import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -295,7 +296,8 @@ private void read() throws IOException {
*/
private void write() throws IOException {
String dirName = dirs.get(r.nextInt(dirs.size()));
- Path file = new Path(dirName, hostname+id);
+ Path file =
+ new Path(dirName, hostname + id + UUID.randomUUID().toString());
double fileSize = 0;
while ((fileSize = r.nextGaussian()+2)<=0) {}
genFile(file, (long)(fileSize*BLOCK_SIZE));
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java
index f73e83d858bc7..9172f85eb9cb7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopy.java
@@ -121,7 +121,7 @@ public void testInterruptedCreate() throws Exception {
tryCopyStream(in, false);
verify(mockFs, never()).rename(any(Path.class), any(Path.class));
- verify(mockFs, never()).delete(eq(tmpPath), anyBoolean());
+ verify(mockFs).delete(eq(tmpPath), anyBoolean());
verify(mockFs, never()).delete(eq(path), anyBoolean());
verify(mockFs, never()).close();
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index b5adfcf76157c..f101fed26bbf8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -411,6 +411,25 @@ public void processPathWithQuotasByMultipleStorageTypes() throws Exception {
verifyNoMoreInteractions(out);
}
+ @Test
+ public void processPathWithSnapshotHeader() throws Exception {
+ Path path = new Path("mockfs:/test");
+ when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
+ PrintStream out = mock(PrintStream.class);
+ Count count = new Count();
+ count.out = out;
+ LinkedList<String> options = new LinkedList<String>();
+ options.add("-s");
+ options.add("-v");
+ options.add("dummy");
+ count.processOptions(options);
+ String withSnapshotHeader = " DIR_COUNT FILE_COUNT CONTENT_SIZE "
+ + " SNAPSHOT_LENGTH SNAPSHOT_FILE_COUNT "
+ + " SNAPSHOT_DIR_COUNT SNAPSHOT_SPACE_CONSUMED PATHNAME";
+ verify(out).println(withSnapshotHeader);
+ verifyNoMoreInteractions(out);
+ }
+
@Test
public void getCommandName() {
Count count = new Count();
@@ -448,7 +467,8 @@ public void getUsage() {
Count count = new Count();
String actual = count.getUsage();
String expected =
- "-count [-q] [-h] [-v] [-t []] [-u] [-x] [-e] ...";
+ "-count [-q] [-h] [-v] [-t []]"
+ + " [-u] [-x] [-e] [-s] ...";
assertEquals("Count.getUsage", expected, actual);
}
@@ -480,7 +500,8 @@ public void getDescription() {
+ "storage types.\n"
+ "The -u option shows the quota and \n"
+ "the usage against the quota without the detailed content summary."
- + "The -e option shows the erasure coding policy.";
+ + "The -e option shows the erasure coding policy."
+ + "The -s option shows snapshot counts.";
assertEquals("Count.getDescription", expected, actual);
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java
index 1f379448ee86c..b9e87d3dacefe 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.PathExistsException;
+import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -93,6 +94,12 @@ public void testMoveTargetExistsWithoutExplicitRename() throws Exception {
assertTrue("Rename should have failed with path exists exception",
cmd.error instanceof PathExistsException);
}
+
+ @Test(expected = UnknownOptionException.class)
+ public void testMoveFromLocalDoesNotAllowTOption() {
+ new MoveCommands.MoveFromLocal().run("-t", "2",
+ null, null);
+ }
static class MockFileSystem extends FilterFileSystem {
Configuration conf;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java
new file mode 100644
index 0000000000000..bf7a6e32c8e93
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests the mount table loading.
+ */
+public class TestHCFSMountTableConfigLoader {
+
+ private static final String DOT = ".";
+
+ private static final String TARGET_TWO = "/tar2";
+
+ private static final String TARGET_ONE = "/tar1";
+
+ private static final String SRC_TWO = "/src2";
+
+ private static final String SRC_ONE = "/src1";
+
+ private static final String TABLE_NAME = "test";
+
+ private MountTableConfigLoader loader = new HCFSMountTableConfigLoader();
+
+ private static FileSystem fsTarget;
+ private static Configuration conf;
+ private static Path targetTestRoot;
+ private static FileSystemTestHelper fileSystemTestHelper =
+ new FileSystemTestHelper();
+ private static File oldVersionMountTableFile;
+ private static File newVersionMountTableFile;
+ private static final String MOUNT_LINK_KEY_SRC_ONE =
+ new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(DOT)
+ .append(TABLE_NAME).append(DOT).append(Constants.CONFIG_VIEWFS_LINK)
+ .append(DOT).append(SRC_ONE).toString();
+ private static final String MOUNT_LINK_KEY_SRC_TWO =
+ new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(DOT)
+ .append(TABLE_NAME).append(DOT).append(Constants.CONFIG_VIEWFS_LINK)
+ .append(DOT).append(SRC_TWO).toString();
+
+ @BeforeClass
+ public static void init() throws Exception {
+ fsTarget = new LocalFileSystem();
+ fsTarget.initialize(new URI("file:///"), new Configuration());
+ targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+ fsTarget.delete(targetTestRoot, true);
+ fsTarget.mkdirs(targetTestRoot);
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new Configuration();
+ conf.set(String.format(
+ FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, "file"),
+ LocalFileSystem.class.getName());
+ oldVersionMountTableFile =
+ new File(new URI(targetTestRoot.toString() + "/table.1.xml"));
+ oldVersionMountTableFile.createNewFile();
+ newVersionMountTableFile =
+ new File(new URI(targetTestRoot.toString() + "/table.2.xml"));
+ newVersionMountTableFile.createNewFile();
+ }
+
+ @Test
+ public void testMountTableFileLoadingWhenMultipleFilesExist()
+ throws Exception {
+ ViewFsTestSetup.addMountLinksToFile(TABLE_NAME,
+ new String[] {SRC_ONE, SRC_TWO }, new String[] {TARGET_ONE,
+ TARGET_TWO },
+ new Path(newVersionMountTableFile.toURI()), conf);
+ loader.load(targetTestRoot.toString(), conf);
+ Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_TWO), TARGET_TWO);
+ Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_ONE), TARGET_ONE);
+ }
+
+ @Test
+ public void testMountTableFileWithInvalidFormat() throws Exception {
+ Path path = new Path(new URI(
+ targetTestRoot.toString() + "/testMountTableFileWithInvalidFormat/"));
+ fsTarget.mkdirs(path);
+ File invalidMountFileName =
+ new File(new URI(path.toString() + "/table.InvalidVersion.xml"));
+ invalidMountFileName.createNewFile();
+ // Adding mount links to make sure it will not read it.
+ ViewFsTestSetup.addMountLinksToFile(TABLE_NAME,
+ new String[] {SRC_ONE, SRC_TWO }, new String[] {TARGET_ONE,
+ TARGET_TWO },
+ new Path(invalidMountFileName.toURI()), conf);
+ // Pass mount table directory
+ loader.load(path.toString(), conf);
+ Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_TWO));
+ Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_ONE));
+ invalidMountFileName.delete();
+ }
+
+ @Test
+ public void testMountTableFileWithInvalidFormatWithNoDotsInName()
+ throws Exception {
+ Path path = new Path(new URI(targetTestRoot.toString()
+ + "/testMountTableFileWithInvalidFormatWithNoDots/"));
+ fsTarget.mkdirs(path);
+ File invalidMountFileName =
+ new File(new URI(path.toString() + "/tableInvalidVersionxml"));
+ invalidMountFileName.createNewFile();
+ // Pass mount table directory
+ loader.load(path.toString(), conf);
+ Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_TWO));
+ Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_ONE));
+ invalidMountFileName.delete();
+ }
+
+ @Test(expected = FileNotFoundException.class)
+ public void testLoadWithMountFile() throws Exception {
+ loader.load(new URI(targetTestRoot.toString() + "/Non-Existent-File.xml")
+ .toString(), conf);
+ }
+
+ @Test
+ public void testLoadWithNonExistentMountFile() throws Exception {
+ ViewFsTestSetup.addMountLinksToFile(TABLE_NAME,
+ new String[] {SRC_ONE, SRC_TWO },
+ new String[] {TARGET_ONE, TARGET_TWO },
+ new Path(oldVersionMountTableFile.toURI()), conf);
+ loader.load(oldVersionMountTableFile.toURI().toString(), conf);
+ Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_TWO), TARGET_TWO);
+ Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_ONE), TARGET_ONE);
+ }
+
+ @AfterClass
+ public static void tearDown() throws IOException {
+ fsTarget.delete(targetTestRoot, true);
+ }
+
+}
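The loader tests above rely on the versioned file-name convention (mount-table.<versionNumber>.xml, where the highest version wins) and on the loader populating the viewfs link keys in the Configuration. A small sketch of driving the loader directly against a directory of mount-table files; the directory path is a placeholder, not a real deployment location:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.HCFSMountTableConfigLoader;
import org.apache.hadoop.fs.viewfs.MountTableConfigLoader;

public final class MountTableLoaderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MountTableConfigLoader loader = new HCFSMountTableConfigLoader();
    // Directory holding mount-table.<N>.xml files; the path below is a
    // placeholder chosen for this sketch.
    loader.load("file:///etc/hadoop/mount-tables", conf);
    // After loading, the fs.viewfs.mounttable.<name>.link.<src> keys from the
    // highest-numbered file are available in conf, as the tests above verify.
  }
}
```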
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java
new file mode 100644
index 0000000000000..1527e3c1f30d8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.junit.Before;
+
+/**
+ * Test the TestViewFSOverloadSchemeCentralMountTableConfig with mount-table
+ * configuration files in the configured fs location.
+ */
+public class TestViewFSOverloadSchemeCentralMountTableConfig
+ extends TestViewFileSystemOverloadSchemeLocalFileSystem {
+ private Path oldMountTablePath;
+ private Path latestMountTablepath;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ // Mount table name format: mount-table.<versionNumber>.xml
+ String mountTableFileName1 = "mount-table.1.xml";
+ String mountTableFileName2 = "mount-table.2.xml";
+ oldMountTablePath =
+ new Path(getTestRoot() + File.separator + mountTableFileName1);
+ latestMountTablepath =
+ new Path(getTestRoot() + File.separator + mountTableFileName2);
+ getConf().set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH,
+ getTestRoot().toString());
+ File f = new File(oldMountTablePath.toUri());
+ f.createNewFile(); // Just creating empty mount-table file.
+ File f2 = new File(latestMountTablepath.toUri());
+ latestMountTablepath = new Path(f2.toURI());
+ f2.createNewFile();
+ }
+
+ /**
+ * This method saves the mount links in local files.
+ */
+ @Override
+ void addMountLinks(String mountTable, String[] sources, String[] targets,
+ Configuration conf) throws IOException, URISyntaxException {
+ // we don't use conf here, instead we use config paths to store links.
+ // Mount-table old version file: mount-table.<versionNumber>.xml
+ try (BufferedWriter out = new BufferedWriter(
+ new FileWriter(new File(oldMountTablePath.toUri())))) {
+ out.write("\n");
+ // Invalid tag. This file should not be read.
+ out.write("\\\name//\\>");
+ out.write("\n");
+ out.flush();
+ }
+ ViewFsTestSetup.addMountLinksToFile(mountTable, sources, targets,
+ latestMountTablepath, conf);
+ }
+}
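
For context, the setup above hinges on Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH pointing at a directory of versioned mount-table files. A minimal sketch, assuming a hypothetical central directory path:

package org.apache.hadoop.fs.viewfs;

import org.apache.hadoop.conf.Configuration;

public class CentralMountTableConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical central location holding mount-table.<versionNumber>.xml
    // files; the highest version present is expected to be picked up.
    conf.set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH,
        "file:///etc/hadoop/mount-tables");
  }
}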
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java
new file mode 100644
index 0000000000000..ac7a1a6899425
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests ViewFileSystemOverloadScheme on the local file system using a URI
+ * with an authority, file://mountTableName/, i.e. the authority is used to
+ * load the mount table.
+ */
+public class TestViewFileSystemOverloadSchemeLocalFileSystem {
+ private static final String FILE = "file";
+ private static final Log LOG =
+ LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
+ private FileSystem fsTarget;
+ private Configuration conf;
+ private Path targetTestRoot;
+ private FileSystemTestHelper fileSystemTestHelper =
+ new FileSystemTestHelper();
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new Configuration();
+ conf.set(String.format("fs.%s.impl", FILE),
+ ViewFileSystemOverloadScheme.class.getName());
+ conf.set(String.format(
+ FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, FILE),
+ LocalFileSystem.class.getName());
+ fsTarget = new LocalFileSystem();
+ fsTarget.initialize(new URI("file:///"), conf);
+ // create the test root on local_fs
+ targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+ fsTarget.delete(targetTestRoot, true);
+ fsTarget.mkdirs(targetTestRoot);
+ }
+
+ /**
+ * Adds the given mount links to the config. Each entry in sources is a mount
+ * link src, and the entry at the same index in targets is its target URI.
+ */
+ void addMountLinks(String mountTable, String[] sources, String[] targets,
+ Configuration config) throws IOException, URISyntaxException {
+ ViewFsTestSetup.addMountLinksToConf(mountTable, sources, targets, config);
+ }
+
+ /**
+ * Tests write file and read file with ViewFileSystemOverloadScheme.
+ */
+ @Test
+ public void testLocalTargetLinkWriteSimple()
+ throws IOException, URISyntaxException {
+ LOG.info("Starting testLocalTargetLinkWriteSimple");
+ final String testString = "Hello Local!...";
+ final Path lfsRoot = new Path("/lfsRoot");
+ addMountLinks(null, new String[] {lfsRoot.toString() },
+ new String[] {targetTestRoot + "/local" }, conf);
+ try (FileSystem lViewFs = FileSystem.get(URI.create("file:///"), conf)) {
+ final Path testPath = new Path(lfsRoot, "test.txt");
+ try (FSDataOutputStream fsDos = lViewFs.create(testPath)) {
+ fsDos.writeUTF(testString);
+ }
+
+ try (FSDataInputStream lViewIs = lViewFs.open(testPath)) {
+ Assert.assertEquals(testString, lViewIs.readUTF());
+ }
+ }
+ }
+
+ /**
+ * Tests create file and delete file with ViewFileSystemOverloadScheme.
+ */
+ @Test
+ public void testLocalFsCreateAndDelete() throws Exception {
+ LOG.info("Starting testLocalFsCreateAndDelete");
+ addMountLinks("mt", new String[] {"/lfsroot" },
+ new String[] {targetTestRoot + "/wd2" }, conf);
+ final URI mountURI = URI.create("file://mt/");
+ try (FileSystem lViewFS = FileSystem.get(mountURI, conf)) {
+ Path testPath = new Path(mountURI.toString() + "/lfsroot/test");
+ lViewFS.createNewFile(testPath);
+ Assert.assertTrue(lViewFS.exists(testPath));
+ lViewFS.delete(testPath, true);
+ Assert.assertFalse(lViewFS.exists(testPath));
+ }
+ }
+
+ /**
+ * Tests root level file with linkMergeSlash with
+ * ViewFileSystemOverloadScheme.
+ */
+ @Test
+ public void testLocalFsLinkSlashMerge() throws Exception {
+ LOG.info("Starting testLocalFsLinkSlashMerge");
+ addMountLinks("mt",
+ new String[] {Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH },
+ new String[] {targetTestRoot + "/wd2" }, conf);
+ final URI mountURI = URI.create("file://mt/");
+ try (FileSystem lViewFS = FileSystem.get(mountURI, conf)) {
+ Path fileOnRoot = new Path(mountURI.toString() + "/NewFile");
+ lViewFS.createNewFile(fileOnRoot);
+ Assert.assertTrue(lViewFS.exists(fileOnRoot));
+ }
+ }
+
+ /**
+ * Tests with linkMergeSlash and other mounts in
+ * ViewFileSystemOverloadScheme.
+ */
+ @Test(expected = IOException.class)
+ public void testLocalFsLinkSlashMergeWithOtherMountLinks() throws Exception {
+ LOG.info("Starting testLocalFsLinkSlashMergeWithOtherMountLinks");
+ addMountLinks("mt",
+ new String[] {"/lfsroot", Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH },
+ new String[] {targetTestRoot + "/wd2", targetTestRoot + "/wd2" }, conf);
+ final URI mountURI = URI.create("file://mt/");
+ FileSystem.get(mountURI, conf);
+ Assert.fail("A merge slash cannot be configured with other mount links.");
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (null != fsTarget) {
+ fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
+ fsTarget.close();
+ }
+ }
+
+ /**
+ * Returns the test root dir.
+ */
+ public Path getTestRoot() {
+ return this.targetTestRoot;
+ }
+
+ /**
+ * Returns the conf.
+ */
+ public Configuration getConf() {
+ return this.conf;
+ }
+}
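
As a usage sketch of the wiring exercised above (class names are real, the mount table name and paths are illustrative only): fs.<scheme>.impl is pointed at ViewFileSystemOverloadScheme, the target impl pattern at the real file system, and mount links then resolve paths under the authority's mount table.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;

public class OverloadSchemeUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Route the "file" scheme through ViewFileSystemOverloadScheme and keep
    // LocalFileSystem as the underlying target implementation.
    conf.set("fs.file.impl", ViewFileSystemOverloadScheme.class.getName());
    conf.set(String.format(
        FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, "file"),
        LocalFileSystem.class.getName());
    // Hypothetical mount table "mt": /mnt resolves to a local target dir.
    ConfigUtil.addLink(conf, "mt", "/mnt", new URI("file:///tmp/mnt-target"));
    try (FileSystem fs = FileSystem.get(URI.create("file://mt/"), conf)) {
      fs.mkdirs(new Path("/mnt/example"));
    }
  }
}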
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 4902d733e954b..59588a527f46e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -1279,7 +1279,8 @@ public void testLinkTarget() throws Exception {
@Test
public void testViewFileSystemInnerCache() throws Exception {
- ViewFileSystem.InnerCache cache = new ViewFileSystem.InnerCache();
+ ViewFileSystem.InnerCache cache =
+ new ViewFileSystem.InnerCache(new FsGetter());
FileSystem fs = cache.get(fsTarget.getUri(), conf);
// InnerCache caches filesystem.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
index 9b7e17f4a601a..efced73943ed5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
@@ -17,16 +17,21 @@
*/
package org.apache.hadoop.fs.viewfs;
+import java.io.IOException;
import java.net.URI;
+import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme.ChildFsGetter;
import org.apache.hadoop.util.Shell;
import org.eclipse.jetty.util.log.Log;
+import org.junit.Assert;
/**
@@ -132,4 +137,84 @@ static void linkUpFirstComponents(Configuration conf, String path,
+ firstComponent + "->" + linkTarget);
}
+ /**
+ * Adds the given mount links to a mount-table file at the given Hadoop
+ * compatible file system path. Each entry in sources is a mount link src,
+ * and the entry at the same index in targets is its target URI.
+ */
+ static void addMountLinksToFile(String mountTable, String[] sources,
+ String[] targets, Path mountTableConfPath, Configuration conf)
+ throws IOException, URISyntaxException {
+ ChildFsGetter cfs = new ViewFileSystemOverloadScheme.ChildFsGetter(
+ mountTableConfPath.toUri().getScheme());
+ try (FileSystem fs = cfs.getNewInstance(mountTableConfPath.toUri(),
+ conf)) {
+ try (FSDataOutputStream out = fs.create(mountTableConfPath)) {
+ String prefix =
+ new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(".")
+ .append((mountTable == null
+ ? Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE
+ : mountTable))
+ .append(".").toString();
+ out.writeBytes("");
+ for (int i = 0; i < sources.length; i++) {
+ String src = sources[i];
+ String target = targets[i];
+ boolean isNfly = src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY);
+ out.writeBytes("");
+ if (isNfly) {
+ String[] srcParts = src.split("[.]");
+ Assert.assertEquals("Invalid NFlyLink format", 3, srcParts.length);
+ String actualSrc = srcParts[srcParts.length - 1];
+ String params = srcParts[srcParts.length - 2];
+ out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_NFLY + "."
+ + params + "." + actualSrc);
+ } else if (Constants.CONFIG_VIEWFS_LINK_FALLBACK.equals(src)) {
+ out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_FALLBACK);
+ } else if (Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH.equals(src)) {
+ out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH);
+ } else {
+ out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK + "." + src);
+ }
+ out.writeBytes("");
+ out.writeBytes("");
+ out.writeBytes(target);
+ out.writeBytes("");
+ out.flush();
+ }
+ out.writeBytes((""));
+ out.flush();
+ }
+ }
+ }
+
+ /**
+ * Adds the given mount links to the configuration. Each entry in sources is
+ * a mount link src, and the entry at the same index in targets is its
+ * target URI.
+ */
+ public static void addMountLinksToConf(String mountTable, String[] sources,
+ String[] targets, Configuration config) throws URISyntaxException {
+ for (int i = 0; i < sources.length; i++) {
+ String src = sources[i];
+ String target = targets[i];
+ String mountTableName = mountTable == null ?
+ Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE : mountTable;
+ boolean isNfly = src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY);
+ if (isNfly) {
+ String[] srcParts = src.split("[.]");
+ Assert.assertEquals("Invalid NFlyLink format", 3, srcParts.length);
+ String actualSrc = srcParts[srcParts.length - 1];
+ String params = srcParts[srcParts.length - 2];
+ ConfigUtil.addLinkNfly(config, mountTableName, actualSrc, params,
+ target);
+ } else if (src.equals(Constants.CONFIG_VIEWFS_LINK_FALLBACK)) {
+ ConfigUtil.addLinkFallback(config, mountTableName, new URI(target));
+ } else if (src.equals(Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH)) {
+ ConfigUtil.addLinkMergeSlash(config, mountTableName, new URI(target));
+ } else {
+ ConfigUtil.addLink(config, mountTableName, src, new URI(target));
+ }
+ }
+ }
+
}
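
The helper above serializes mount links into a Hadoop configuration XML file. A minimal read-back sketch, assuming a hypothetical local file produced that way for a mount table named "mt":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class MountTableReadBackSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Hypothetical file written by addMountLinksToFile, containing e.g.
    // <configuration><property>
    //   <name>fs.viewfs.mounttable.mt.link./data</name>
    //   <value>file:///tmp/data</value>
    // </property></configuration>
    conf.addResource(new Path("file:///tmp/mount-table.1.xml"));
    System.out.println(conf.get("fs.viewfs.mounttable.mt.link./data"));
  }
}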
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
index 064527c3fed6d..6505fbb8224f8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
@@ -22,7 +22,7 @@
import java.net.InetSocketAddress;
import java.util.ArrayList;
-import com.google.protobuf.BlockingService;
+import org.apache.hadoop.thirdparty.protobuf.BlockingService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
index 0e59aa1004666..63b9c63646d8b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
@@ -83,11 +83,6 @@ public void testAdminUsage() throws Exception {
assertOutputContains("transitionToActive: incorrect number of arguments");
assertEquals(-1, runTool("-transitionToActive", "x", "y"));
assertOutputContains("transitionToActive: incorrect number of arguments");
- assertEquals(-1, runTool("-failover"));
- assertOutputContains("failover: incorrect arguments");
- assertOutputContains("failover: incorrect arguments");
- assertEquals(-1, runTool("-failover", "foo:1234"));
- assertOutputContains("failover: incorrect arguments");
}
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
index cc1174b2d2c72..63c87830b4529 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
@@ -19,15 +19,22 @@
import static org.junit.Assert.*;
+import java.net.InetSocketAddress;
import java.security.NoSuchAlgorithmException;
import com.google.common.base.Supplier;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HealthMonitor.State;
import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
@@ -128,6 +135,46 @@ public void testNoZK() throws Exception {
runFC(svc));
}
+ @Test
+ public void testPolicyProviderForZKFCRpcServer() throws Exception {
+ Configuration myconf = new Configuration();
+ myconf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+ true);
+
+ DummyHAService dummyHAService = new DummyHAService(HAServiceState.ACTIVE,
+ new InetSocketAddress(0), false);
+ MiniZKFCCluster.DummyZKFC dummyZKFC =
+ new MiniZKFCCluster.DummyZKFC(myconf, dummyHAService);
+
+ // initialize ZKFCRpcServer with null policy
+ LambdaTestUtils.intercept(HadoopIllegalArgumentException.class,
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
+ + "is configured to true but service-level"
+ + "authorization security policy is null.",
+ () -> new ZKFCRpcServer(myconf, new InetSocketAddress(0),
+ dummyZKFC, null));
+
+ // initialize ZKFCRpcServer with dummy policy
+ PolicyProvider dummyPolicy = new PolicyProvider() {
+ private final Service[] services = new Service[] {
+ new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
+ ZKFCProtocol.class),
+ new Service(
+ CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY,
+ RefreshAuthorizationPolicyProtocol.class),
+ };
+ @Override
+ public Service[] getServices() {
+ return this.services;
+ }
+ };
+
+ ZKFCRpcServer server = new ZKFCRpcServer(myconf,
+ new InetSocketAddress(0), dummyZKFC, dummyPolicy);
+ server.start();
+ server.stopAndJoin();
+ }
+
@Test
public void testFormatOneClusterLeavesOtherClustersAlone() throws Exception {
DummyHAService svc = cluster.getService(1);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 5f7a264190953..cc76b4ad6d975 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -62,16 +63,15 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
LoggerFactory.getLogger(TestSSLHttpServer.class);
private static final String HTTPS_CIPHER_SUITES_KEY = "https.cipherSuites";
private static final String JAVAX_NET_DEBUG_KEY = "javax.net.debug";
- private static final String SSL_SERVER_KEYSTORE_PROP_PREFIX = "ssl.server" +
- ".keystore";
- private static final String SSL_SERVER_TRUSTSTORE_PROP_PREFIX = "ssl.server" +
- ".truststore";
+ static final String SSL_SERVER_KEYSTORE_PROP_PREFIX = "ssl.server.keystore";
+ static final String SSL_SERVER_TRUSTSTORE_PROP_PREFIX = "ssl.server" +
+ ".truststore";
- private static final String SERVLET_NAME_LONGHEADER = "longheader";
- private static final String SERVLET_PATH_LONGHEADER =
+ static final String SERVLET_NAME_LONGHEADER = "longheader";
+ static final String SERVLET_PATH_LONGHEADER =
"/" + SERVLET_NAME_LONGHEADER;
- private static final String SERVLET_NAME_ECHO = "echo";
- private static final String SERVLET_PATH_ECHO = "/" + SERVLET_NAME_ECHO;
+ static final String SERVLET_NAME_ECHO = "echo";
+ static final String SERVLET_PATH_ECHO = "/" + SERVLET_NAME_ECHO;
private static HttpServer2 server;
private static String keystoreDir;
@@ -79,7 +79,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
private static SSLFactory clientSslFactory;
private static String cipherSuitesPropertyValue;
private static String sslDebugPropertyValue;
- private static final String EXCLUDED_CIPHERS =
+ static final String EXCLUDED_CIPHERS =
"TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
+ "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
+ "SSL_RSA_WITH_DES_CBC_SHA,"
@@ -87,9 +87,11 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
+ "SSL_RSA_EXPORT_WITH_RC4_40_MD5,\t \n"
+ "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,"
+ "SSL_RSA_WITH_RC4_128_MD5 \t";
- private static final String ONE_ENABLED_CIPHERS = EXCLUDED_CIPHERS
+ private static final String ONE_ENABLED_CIPHERS_TLS1_2 = EXCLUDED_CIPHERS
+ ",TLS_RSA_WITH_AES_128_CBC_SHA";
- private static final String EXCLUSIVE_ENABLED_CIPHERS
+ private static final String ONE_ENABLED_CIPHERS_TLS1_3 = EXCLUDED_CIPHERS
+ + ",TLS_AES_128_GCM_SHA256";
+ private static final String EXCLUSIVE_ENABLED_CIPHERS_TLS1_2
= "\tTLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, \n"
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,"
+ "TLS_RSA_WITH_AES_128_CBC_SHA,"
@@ -97,8 +99,12 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
+ "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,"
+ "TLS_DHE_RSA_WITH_AES_128_CBC_SHA,\t\n "
+ "TLS_DHE_DSS_WITH_AES_128_CBC_SHA";
+ private static final String EXCLUSIVE_ENABLED_CIPHERS_TLS1_3 =
+ EXCLUSIVE_ENABLED_CIPHERS_TLS1_2 + ",TLS_AES_128_GCM_SHA256";
- private static final String INCLUDED_PROTOCOLS = "SSLv2Hello,TLSv1.1";
+
+ static final String INCLUDED_PROTOCOLS = "TLSv1.2";
+ static final String INCLUDED_PROTOCOLS_JDK11 = "TLSv1.3,TLSv1.2";
@BeforeClass
public static void setup() throws Exception {
@@ -166,7 +172,7 @@ public static void cleanup() throws Exception {
* This ensures that the value https.cipherSuites does
* not affect the result of tests.
*/
- private static void storeHttpsCipherSuites() {
+ static void storeHttpsCipherSuites() {
String cipherSuites = System.getProperty(HTTPS_CIPHER_SUITES_KEY);
if (cipherSuites != null) {
LOG.info(
@@ -177,7 +183,7 @@ private static void storeHttpsCipherSuites() {
System.clearProperty(HTTPS_CIPHER_SUITES_KEY);
}
- private static void restoreHttpsCipherSuites() {
+ static void restoreHttpsCipherSuites() {
if (cipherSuitesPropertyValue != null) {
LOG.info("Restoring property {} to value: {}", HTTPS_CIPHER_SUITES_KEY,
cipherSuitesPropertyValue);
@@ -186,7 +192,7 @@ private static void restoreHttpsCipherSuites() {
}
}
- private static void turnOnSSLDebugLogging() {
+ static void turnOnSSLDebugLogging() {
String sslDebug = System.getProperty(JAVAX_NET_DEBUG_KEY);
if (sslDebug != null) {
sslDebugPropertyValue = sslDebug;
@@ -194,7 +200,7 @@ private static void turnOnSSLDebugLogging() {
System.setProperty(JAVAX_NET_DEBUG_KEY, "all");
}
- private static void restoreSSLDebugLogging() {
+ static void restoreSSLDebugLogging() {
if (sslDebugPropertyValue != null) {
System.setProperty(JAVAX_NET_DEBUG_KEY, sslDebugPropertyValue);
sslDebugPropertyValue = null;
@@ -292,22 +298,41 @@ public void testExcludedCiphers() throws Exception {
@Test
public void testIncludedProtocols() throws Exception {
URL url = new URL(baseUrl, SERVLET_PATH_ECHO + "?a=b&c=d");
+
+ String includedProtocols = INCLUDED_PROTOCOLS;
+ if (Shell.isJavaVersionAtLeast(11)) {
+ includedProtocols = INCLUDED_PROTOCOLS_JDK11;
+ }
HttpsURLConnection conn =
getConnectionWithPreferredProtocolSSLSocketFactory(url,
- INCLUDED_PROTOCOLS);
+ includedProtocols);
assertFalse("included protocol list is empty",
- INCLUDED_PROTOCOLS.isEmpty());
+ includedProtocols.isEmpty());
readFromConnection(conn);
+
+ PreferredProtocolSSLSocketFactory factory =
+ (PreferredProtocolSSLSocketFactory)conn.getSSLSocketFactory();
+
+ if (Shell.isJavaVersionAtLeast(11)) {
+ assertEquals("TLSv1.3", factory.getSocket().getSession().getProtocol());
+ } else {
+ assertEquals("TLSv1.2", factory.getSocket().getSession().getProtocol());
+ }
}
/** Test that verified that additionally included cipher
- * TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA is only available cipher for working
+ * TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (TLS 1.2) or
+ * TLS_AES_128_GCM_SHA256 (TLS 1.3) is only available cipher for working
* TLS connection from client to server disabled for all other common ciphers.
*/
@Test
public void testOneEnabledCiphers() throws Exception {
- testEnabledCiphers(ONE_ENABLED_CIPHERS);
+ if (Shell.isJavaVersionAtLeast(11)) {
+ testEnabledCiphers(ONE_ENABLED_CIPHERS_TLS1_3);
+ } else {
+ testEnabledCiphers(ONE_ENABLED_CIPHERS_TLS1_2);
+ }
}
/** Test verifies that mutually exclusive server's disabled cipher suites and
@@ -315,7 +340,11 @@ public void testOneEnabledCiphers() throws Exception {
*/
@Test
public void testExclusiveEnabledCiphers() throws Exception {
- testEnabledCiphers(EXCLUSIVE_ENABLED_CIPHERS);
+ if (Shell.isJavaVersionAtLeast(11)) {
+ testEnabledCiphers(EXCLUSIVE_ENABLED_CIPHERS_TLS1_3);
+ } else {
+ testEnabledCiphers(EXCLUSIVE_ENABLED_CIPHERS_TLS1_2);
+ }
}
private void testEnabledCiphers(String ciphers) throws
@@ -406,6 +435,7 @@ private void setEnabledCipherSuites(SSLSocket sslSocket) {
private class PreferredProtocolSSLSocketFactory extends SSLSocketFactory {
private final SSLSocketFactory delegateSocketFactory;
private final String[] enabledProtocols;
+ private SSLSocket sslSocket;
PreferredProtocolSSLSocketFactory(SSLSocketFactory sslSocketFactory,
String[] enabledProtocols) {
@@ -417,6 +447,10 @@ private class PreferredProtocolSSLSocketFactory extends SSLSocketFactory {
}
}
+ public SSLSocket getSocket() {
+ return sslSocket;
+ }
+
@Override
public String[] getDefaultCipherSuites() {
return delegateSocketFactory.getDefaultCipherSuites();
@@ -430,7 +464,7 @@ public String[] getSupportedCipherSuites() {
@Override
public Socket createSocket(Socket socket, String string, int i, boolean bln)
throws IOException {
- SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket(
+ sslSocket = (SSLSocket) delegateSocketFactory.createSocket(
socket, string, i, bln);
setEnabledProtocols(sslSocket);
return sslSocket;
@@ -438,7 +472,7 @@ public Socket createSocket(Socket socket, String string, int i, boolean bln)
@Override
public Socket createSocket(String string, int i) throws IOException {
- SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket(
+ sslSocket = (SSLSocket) delegateSocketFactory.createSocket(
string, i);
setEnabledProtocols(sslSocket);
return sslSocket;
@@ -447,7 +481,7 @@ public Socket createSocket(String string, int i) throws IOException {
@Override
public Socket createSocket(String string, int i, InetAddress ia, int i1)
throws IOException {
- SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket(
+ sslSocket = (SSLSocket) delegateSocketFactory.createSocket(
string, i, ia, i1);
setEnabledProtocols(sslSocket);
return sslSocket;
@@ -455,7 +489,7 @@ public Socket createSocket(String string, int i, InetAddress ia, int i1)
@Override
public Socket createSocket(InetAddress ia, int i) throws IOException {
- SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket(ia,
+ sslSocket = (SSLSocket) delegateSocketFactory.createSocket(ia,
i);
setEnabledProtocols(sslSocket);
return sslSocket;
@@ -464,7 +498,7 @@ public Socket createSocket(InetAddress ia, int i) throws IOException {
@Override
public Socket createSocket(InetAddress ia, int i, InetAddress ia1, int i1)
throws IOException {
- SSLSocket sslSocket = (SSLSocket) delegateSocketFactory.createSocket(ia,
+ sslSocket = (SSLSocket) delegateSocketFactory.createSocket(ia,
i, ia1, i1);
setEnabledProtocols(sslSocket);
return sslSocket;
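
A compact restatement of the protocol-selection logic added above, assuming the same Shell.isJavaVersionAtLeast helper and the two protocol constants introduced by this change:

import org.apache.hadoop.util.Shell;

public class TlsProtocolSelectionSketch {
  public static void main(String[] args) {
    // Prefer TLSv1.3 on JDK 11+, otherwise fall back to TLSv1.2 only,
    // mirroring INCLUDED_PROTOCOLS / INCLUDED_PROTOCOLS_JDK11 above.
    String enabledProtocols = Shell.isJavaVersionAtLeast(11)
        ? "TLSv1.3,TLSv1.2"
        : "TLSv1.2";
    System.out.println("Enabled protocols: " + enabledProtocols);
  }
}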
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java
new file mode 100644
index 0000000000000..e88eba342874c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.http;
+
+import com.google.common.base.Supplier;
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.hadoop.http.TestSSLHttpServer.EXCLUDED_CIPHERS;
+import static org.apache.hadoop.http.TestSSLHttpServer.INCLUDED_PROTOCOLS;
+import static org.apache.hadoop.http.TestSSLHttpServer.SSL_SERVER_KEYSTORE_PROP_PREFIX;
+import static org.apache.hadoop.http.TestSSLHttpServer.SSL_SERVER_TRUSTSTORE_PROP_PREFIX;
+import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.CLIENT_KEY_STORE_PASSWORD_DEFAULT;
+import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.SERVER_KEY_STORE_PASSWORD_DEFAULT;
+import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.TRUST_STORE_PASSWORD_DEFAULT;
+
+/**
+ * Test suite for KeyStore and TrustStore password settings.
+ */
+public class TestSSLHttpServerConfigs {
+
+ private static final String BASEDIR =
+ GenericTestUtils.getTempPath(TestSSLHttpServer.class.getSimpleName());
+
+ private static Configuration conf;
+ private static Configuration sslConf;
+ private static String keystoreDir;
+ private static String sslConfDir;
+ private static final String SERVER_PWD = SERVER_KEY_STORE_PASSWORD_DEFAULT;
+ private static final String CLIENT_PWD = CLIENT_KEY_STORE_PASSWORD_DEFAULT;
+ private static final String TRUST_STORE_PWD = TRUST_STORE_PASSWORD_DEFAULT;
+
+ @Before
+ public void start() throws Exception {
+ TestSSLHttpServer.turnOnSSLDebugLogging();
+ TestSSLHttpServer.storeHttpsCipherSuites();
+
+ conf = new Configuration();
+ conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+
+ File base = new File(BASEDIR);
+ FileUtil.fullyDelete(base);
+ base.mkdirs();
+ keystoreDir = new File(BASEDIR).getAbsolutePath();
+ sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
+ }
+
+ @After
+ public void shutdown() throws Exception {
+ FileUtil.fullyDelete(new File(BASEDIR));
+ KeyStoreTestUtil.cleanupSSLConfig(keystoreDir, sslConfDir);
+ TestSSLHttpServer.restoreHttpsCipherSuites();
+ TestSSLHttpServer.restoreSSLDebugLogging();
+ }
+
+ /**
+ * Setup KeyStore and TrustStore with given passwords.
+ */
+ private void setupKeyStores(String serverPassword,
+ String clientPassword, String trustStorePassword) throws Exception {
+
+ KeyStoreTestUtil.setupSSLConfig(keystoreDir, sslConfDir, conf, false, true,
+ EXCLUDED_CIPHERS, serverPassword, clientPassword, trustStorePassword);
+
+ sslConf = KeyStoreTestUtil.getSslConfig();
+ sslConf.set(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, INCLUDED_PROTOCOLS);
+ conf.set(SSLFactory.SSL_ENABLED_PROTOCOLS_KEY, INCLUDED_PROTOCOLS);
+ }
+
+ /**
+ * Builds HttpServer2 using the given passwords to access the KeyStore/TrustStore.
+ */
+ private HttpServer2 setupServer(String keyStoreKeyPassword,
+ String keyStorePassword, String trustStorePassword) throws Exception {
+
+ HttpServer2 server = new HttpServer2.Builder().setName("test")
+ .addEndpoint(new URI("https://localhost")).setConf(conf)
+ .keyPassword(keyStoreKeyPassword)
+ .keyStore(sslConf.get(SSL_SERVER_KEYSTORE_PROP_PREFIX + ".location"),
+ keyStorePassword,
+ sslConf.get(SSL_SERVER_KEYSTORE_PROP_PREFIX + ".type", "jks"))
+ .trustStore(
+ sslConf.get(SSL_SERVER_TRUSTSTORE_PROP_PREFIX + ".location"),
+ trustStorePassword,
+ sslConf.get(SSL_SERVER_TRUSTSTORE_PROP_PREFIX + ".type", "jks"))
+ .excludeCiphers(sslConf.get("ssl.server.exclude.cipher.list")).build();
+
+ return server;
+ }
+
+ /**
+ * Tests whether HttpServer2 starts successfully when the KeyStore/TrustStore
+ * is validated with the given passwords.
+ */
+ private void testServerStart(String keyStoreKeyPassword,
+ String keyStorePassword, String trustStorePassword) throws Exception {
+ HttpServer2 server = setupServer(keyStoreKeyPassword, keyStorePassword,
+ trustStorePassword);
+ try {
+ server.start();
+
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ return server.isAlive();
+ }
+ }, 200, 100000);
+ } finally {
+ server.stop();
+ }
+ }
+
+ @Test(timeout=120000)
+ public void testServerSetup() throws Exception {
+ setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD);
+ testServerStart(SERVER_PWD, SERVER_PWD, TRUST_STORE_PWD);
+ }
+
+ @Test(timeout=120000)
+ public void testServerSetupWithoutTrustPassword() throws Exception {
+ setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD);
+ testServerStart(SERVER_PWD, SERVER_PWD, null);
+ }
+
+ @Test(timeout=120000)
+ public void testServerSetupWithoutKeyStorePassword() throws Exception {
+ setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD);
+ testServerStart(SERVER_PWD, null, null);
+ }
+
+ @Test(timeout=120000)
+ public void testServerSetupWithoutKeyStoreKeyPassword() throws Exception {
+ setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD);
+ testServerStart(null, SERVER_PWD, null);
+ }
+
+ @Test(timeout=120000)
+ public void testServerSetupWithNoKeyStorePassword() throws Exception {
+ setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD);
+ // Accessing KeyStore without either of KeyStore.KeyPassword or KeyStore
+ // .password should fail.
+ try {
+ testServerStart(null, null, null);
+ Assert.fail("Server should have failed to start without any " +
+ "KeyStore password.");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Problem starting http server",
+ e);
+ }
+ }
+
+ @Test(timeout=120000)
+ public void testServerSetupWithWrongKeyStorePassword() throws Exception {
+ setupKeyStores(SERVER_PWD, CLIENT_PWD, TRUST_STORE_PWD);
+
+ // Accessing KeyStore with wrong keyStore password/ keyPassword should fail.
+ try {
+ testServerStart(SERVER_PWD, "wrongPassword", null);
+ Assert.fail("Server should have failed to start with wrong " +
+ "KeyStore password.");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Keystore was tampered with, " +
+ "or password was incorrect", e);
+ }
+
+ try {
+ testServerStart("wrongPassword", SERVER_PWD, null);
+ Assert.fail("Server should have failed to start with wrong " +
+ "KeyStore password.");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Problem starting http server",
+ e);
+ GenericTestUtils.assertExceptionContains("Cannot recover key",
+ e.getCause());
+ }
+ }
+
+ @Test(timeout=120000)
+ public void testKeyStoreSetupWithoutTrustStorePassword() throws Exception {
+ // Setup TrustStore without TrustStore password
+ setupKeyStores(SERVER_PWD, CLIENT_PWD, "");
+
+ // Accessing TrustStore without password (null password) should succeed
+ testServerStart(SERVER_PWD, SERVER_PWD, null);
+
+ // Accessing TrustStore with wrong password (even if password is not
+ // set) should fail.
+ try {
+ testServerStart(SERVER_PWD, SERVER_PWD, "wrongPassword");
+ Assert.fail("Server should have failed to start with wrong " +
+ "TrustStore password.");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Keystore was tampered with, " +
+ "or password was incorrect", e);
+ }
+ }
+
+ @Test(timeout=120000)
+ public void testKeyStoreSetupWithoutKeyStorePassword() throws Exception {
+ // Setup KeyStore without KeyStore password
+ setupKeyStores(SERVER_PWD, "", TRUST_STORE_PWD);
+
+ // Accessing KeyStore without password (null password) should succeed
+ testServerStart(SERVER_PWD, null, TRUST_STORE_PWD);
+
+ // Accessing KeyStore with wrong password (even if password is not
+ // set) should fail.
+ try {
+ testServerStart(SERVER_PWD, "wrongPassword", TRUST_STORE_PWD);
+ Assert.fail("Server should have failed to start with wrong " +
+ "KeyStore password.");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Keystore was tampered with, " +
+ "or password was incorrect", e);
+ }
+ }
+
+ @Test(timeout=120000)
+ public void testKeyStoreSetupWithoutPassword() throws Exception {
+ // Setup KeyStore without any password
+ setupKeyStores("", "", "");
+
+ // Accessing KeyStore with either one of KeyStore.Password or KeyStore
+ // .KeyPassword as empty string should pass. If the password is null, it
+ // is not set in SSLContextFactory while setting up the server.
+ testServerStart("", null, null);
+ testServerStart(null, "", null);
+
+ try {
+ testServerStart(null, null, null);
+ Assert.fail("Server should have failed to start without " +
+ "KeyStore password.");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Problem starting http server",
+ e);
+ GenericTestUtils.assertExceptionContains("Password must not be null",
+ e.getCause());
+ }
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java
index ac8ad2e725920..20d4f08612964 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.io;
-import java.io.*;
-
+import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertArrayEquals;
+
+import java.io.IOException;
+
import org.junit.Test;
@@ -84,23 +84,14 @@ public void testArrayWritableToArray() {
/**
* test {@link ArrayWritable} constructor with null
*/
- @Test
+ @Test(expected = IllegalArgumentException.class)
public void testNullArgument() {
- try {
- Class<? extends Writable> valueClass = null;
- new ArrayWritable(valueClass);
- fail("testNullArgument error !!!");
- } catch (IllegalArgumentException exp) {
- //should be for test pass
- } catch (Exception e) {
- fail("testNullArgument error !!!");
- }
+ new ArrayWritable((Class<? extends Writable>) null);
}
/**
* test {@link ArrayWritable} constructor with {@code String[]} as a parameter
*/
- @SuppressWarnings("deprecation")
@Test
public void testArrayWritableStringConstructor() {
String[] original = { "test1", "test2", "test3" };
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java
index 740540d5d23c8..a80f6e07b3878 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java
@@ -27,6 +27,8 @@
import java.util.Collections;
import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
@@ -49,6 +51,8 @@
import org.junit.Test;
public class TestBloomMapFile {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestBloomMapFile.class);
private static Configuration conf = new Configuration();
private static final Path TEST_ROOT = new Path(GenericTestUtils.getTempPath(
TestMapFile.class.getSimpleName()));
@@ -107,7 +111,7 @@ public void testMembershipTest() throws Exception {
System.out.println("False positives: " + falsePos);
assertTrue(falsePos < 2);
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -136,7 +140,7 @@ private void checkMembershipVaryingSizedKeys(List<Text> keys)
reader.close();
fs.delete(qualifiedDirName, true);
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -173,7 +177,7 @@ public void testDeleteFile() {
} catch (Exception ex) {
fail("unexpect ex in testDeleteFile !!!");
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -202,7 +206,7 @@ public void testIOExceptionInWriterConstructor() {
} catch (Exception ex) {
fail("unexpect ex in testIOExceptionInWriterConstructor !!!");
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -237,7 +241,7 @@ public void testGetBloomMapFile() {
} catch (Exception ex) {
fail("unexpect ex in testGetBloomMapFile !!!");
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -286,7 +290,7 @@ public void testBloomMapFileConstructors() {
} catch (Exception ex) {
fail("testBloomMapFileConstructors error !!!");
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
index 7ec422732ecd8..d8a22f358adaa 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
@@ -26,6 +26,8 @@
import java.util.Iterator;
import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
@@ -48,7 +50,7 @@
import static org.mockito.Mockito.*;
public class TestMapFile {
-
+ private static final Logger LOG = LoggerFactory.getLogger(TestMapFile.class);
private static final Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
TestMapFile.class.getSimpleName()));
@@ -187,7 +189,7 @@ public void testGetClosestOnCurrentApi() throws Exception {
closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("91"), closest);
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -211,7 +213,7 @@ public void testMidKeyOnCurrentApi() throws Exception {
reader = createReader(TEST_PREFIX, IntWritable.class);
assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -233,7 +235,7 @@ public void testRename() {
} catch (IOException ex) {
fail("testRename error " + ex);
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -265,7 +267,7 @@ public void testRenameWithException() {
assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
ex.getMessage(), ERROR_MESSAGE);
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -292,7 +294,7 @@ public void testRenameWithFalse() {
assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex
.getMessage().startsWith(ERROR_MESSAGE));
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -319,7 +321,7 @@ public void testWriteWithFailDirCreation() {
assertTrue("testWriteWithFailDirCreation ex error !!!", ex.getMessage()
.startsWith(ERROR_MESSAGE));
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -347,7 +349,7 @@ public void testOnFinalKey() {
} catch (IOException ex) {
fail("testOnFinalKey error !!!");
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -392,7 +394,7 @@ public void testReaderGetClosest() throws Exception {
} catch (IOException ex) {
/* Should be thrown to pass the test */
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -410,7 +412,7 @@ public void testReaderWithWrongValueClass() {
} catch (IOException ex) {
/* Should be thrown to pass the test */
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -451,7 +453,7 @@ public void testReaderKeyIteration() {
} catch (IOException ex) {
fail("reader seek error !!!");
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -482,7 +484,7 @@ public void testFix() {
} catch (Exception ex) {
fail("testFix error !!!");
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -588,7 +590,7 @@ public void testDeprecatedConstructors() {
} catch (IOException e) {
fail(e.getMessage());
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -607,7 +609,7 @@ public void testKeyLessWriterCreation() {
} catch (Exception e) {
fail("fail in testKeyLessWriterCreation. Other ex !!!");
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
/**
@@ -636,7 +638,7 @@ public void testPathExplosionWriterCreation() {
} catch (Exception e) {
fail("fail in testPathExplosionWriterCreation. Other ex !!!");
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -657,7 +659,7 @@ public void testDescOrderWithThrowExceptionWriterAppend() {
} catch (Exception e) {
fail("testDescOrderWithThrowExceptionWriterAppend other ex throw !!!");
} finally {
- IOUtils.cleanup(null, writer);
+ IOUtils.cleanupWithLogger(LOG, writer);
}
}
@@ -745,7 +747,7 @@ public void testGetClosest() throws Exception {
closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("90"), closest);
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
@@ -768,7 +770,7 @@ public void testMidKey() throws Exception {
reader = new MapFile.Reader(qualifiedDirName, conf);
assertEquals(new IntWritable(1), reader.midKey());
} finally {
- IOUtils.cleanup(null, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java
index 93704fb5fc676..f3012ded25bb5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java
@@ -24,8 +24,8 @@
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
-import com.google.protobuf.DescriptorProtos;
-import com.google.protobuf.Message;
+import org.apache.hadoop.thirdparty.protobuf.DescriptorProtos;
+import org.apache.hadoop.thirdparty.protobuf.Message;
/**
* Test case for the use of Protocol Buffers within ObjectWritable.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
index 044824356ed30..cf64bbc0f9457 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
@@ -649,8 +649,9 @@ public void testCreateWriterOnExistingFile() throws IOException {
@Test
public void testRecursiveSeqFileCreate() throws IOException {
FileSystem fs = FileSystem.getLocal(conf);
- Path name = new Path(new Path(GenericTestUtils.getTempPath(
- "recursiveCreateDir")), "file");
+ Path parentDir = new Path(GenericTestUtils.getTempPath(
+ "recursiveCreateDir"));
+ Path name = new Path(parentDir, "file");
boolean createParent = false;
try {
@@ -662,11 +663,16 @@ public void testRecursiveSeqFileCreate() throws IOException {
// Expected
}
- createParent = true;
- SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
- RandomDatum.class, 512, (short) 1, 4096, createParent,
- CompressionType.NONE, null, new Metadata());
- // should succeed, fails if exception thrown
+ try {
+ createParent = true;
+ SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
+ RandomDatum.class, 512, (short) 1, 4096, createParent,
+ CompressionType.NONE, null, new Metadata());
+ // should succeed, fails if exception thrown
+ } finally {
+ fs.deleteOnExit(parentDir);
+ fs.close();
+ }
}
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
index 59856a4de11f9..54df39955d6cf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
@@ -291,9 +291,9 @@ public void testTextText() throws CharacterCodingException {
a.append("xdefgxxx".getBytes(), 1, 4);
assertEquals("modified aliased string", "abc", b.toString());
assertEquals("appended string incorrectly", "abcdefg", a.toString());
- // add an extra byte so that capacity = 14 and length = 8
+ // add an extra byte so that capacity = 10 and length = 8
a.append(new byte[]{'d'}, 0, 1);
- assertEquals(14, a.getBytes().length);
+ assertEquals(10, a.getBytes().length);
assertEquals(8, a.copyBytes().length);
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
index 35f84b950e427..8be2dce06d1fe 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
@@ -126,7 +126,7 @@ private void addPair(T compressor, E decompressor, String name) {
builder.add(new TesterPair<T, E>(name, compressor, decompressor));
}
- public void test() throws InstantiationException, IllegalAccessException {
+ public void test() throws Exception {
pairs = builder.build();
pairs = assertionDelegate.filterOnAssumeWhat(pairs);
@@ -287,47 +287,45 @@ private boolean checkSetInputArrayIndexOutOfBoundsException(
@Override
public void assertCompression(String name, Compressor compressor,
- Decompressor decompressor, byte[] rawData) {
+ Decompressor decompressor, byte[] rawData) throws Exception {
int cSize = 0;
int decompressedSize = 0;
- byte[] compressedResult = new byte[rawData.length];
+ // Snappy compression can increase data size
+ int maxCompressedLength = 32 + rawData.length + rawData.length/6;
+ byte[] compressedResult = new byte[maxCompressedLength];
byte[] decompressedBytes = new byte[rawData.length];
- try {
- assertTrue(
- joiner.join(name, "compressor.needsInput before error !!!"),
- compressor.needsInput());
- assertTrue(
+ assertTrue(
+ joiner.join(name, "compressor.needsInput before error !!!"),
+ compressor.needsInput());
+ assertEquals(
joiner.join(name, "compressor.getBytesWritten before error !!!"),
- compressor.getBytesWritten() == 0);
- compressor.setInput(rawData, 0, rawData.length);
- compressor.finish();
- while (!compressor.finished()) {
- cSize += compressor.compress(compressedResult, 0,
- compressedResult.length);
- }
- compressor.reset();
-
- assertTrue(
- joiner.join(name, "decompressor.needsInput() before error !!!"),
- decompressor.needsInput());
- decompressor.setInput(compressedResult, 0, cSize);
- assertFalse(
- joiner.join(name, "decompressor.needsInput() after error !!!"),
- decompressor.needsInput());
- while (!decompressor.finished()) {
- decompressedSize = decompressor.decompress(decompressedBytes, 0,
- decompressedBytes.length);
- }
- decompressor.reset();
- assertTrue(joiner.join(name, " byte size not equals error !!!"),
- decompressedSize == rawData.length);
- assertArrayEquals(
- joiner.join(name, " byte arrays not equals error !!!"), rawData,
- decompressedBytes);
- } catch (Exception ex) {
- fail(joiner.join(name, ex.getMessage()));
+ 0, compressor.getBytesWritten());
+ compressor.setInput(rawData, 0, rawData.length);
+ compressor.finish();
+ while (!compressor.finished()) {
+ cSize += compressor.compress(compressedResult, 0,
+ compressedResult.length);
+ }
+ compressor.reset();
+
+ assertTrue(
+ joiner.join(name, "decompressor.needsInput() before error !!!"),
+ decompressor.needsInput());
+ decompressor.setInput(compressedResult, 0, cSize);
+ assertFalse(
+ joiner.join(name, "decompressor.needsInput() after error !!!"),
+ decompressor.needsInput());
+ while (!decompressor.finished()) {
+ decompressedSize = decompressor.decompress(decompressedBytes, 0,
+ decompressedBytes.length);
}
+ decompressor.reset();
+ assertEquals(joiner.join(name, " byte size not equals error !!!"),
+ rawData.length, decompressedSize);
+ assertArrayEquals(
+ joiner.join(name, " byte arrays not equals error !!!"), rawData,
+ decompressedBytes);
}
}),
@@ -519,6 +517,6 @@ abstract static class TesterCompressionStrategy {
protected final Logger logger = Logger.getLogger(getClass());
abstract void assertCompression(String name, Compressor compressor,
- Decompressor decompressor, byte[] originalRawData);
+ Decompressor decompressor, byte[] originalRawData) throws Exception;
}
}
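
The worst-case output buffer size used above comes from the Snappy bound cited in the new comment; a tiny worked example of the same arithmetic, using the test's input size:

public class SnappyMaxLengthSketch {
  // Same bound as used in the tests above, following the comment that cites
  // Snappy::MaxCompressedLength(size_t): 32 + len + len / 6.
  static int maxCompressedLength(int rawLength) {
    return 32 + rawLength + rawLength / 6;
  }

  public static void main(String[] args) {
    int raw = 1024 * 54;                          // 55296 bytes, as in the test
    System.out.println(maxCompressedLength(raw)); // 32 + 55296 + 9216 = 64544
  }
}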
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
index cc986c7e0aea4..c8900bad1df56 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.io.compress.snappy;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -44,11 +45,16 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assume.*;
public class TestSnappyCompressorDecompressor {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestSnappyCompressorDecompressor.class);
+
@Before
public void before() {
assumeTrue(SnappyCodec.isNativeCodeLoaded());
@@ -167,40 +173,41 @@ public void testSnappyDecompressorCompressAIOBException() {
}
@Test
- public void testSnappyCompressDecompress() {
+ public void testSnappyCompressDecompress() throws Exception {
int BYTE_SIZE = 1024 * 54;
byte[] bytes = BytesGenerator.get(BYTE_SIZE);
SnappyCompressor compressor = new SnappyCompressor();
- try {
- compressor.setInput(bytes, 0, bytes.length);
- assertTrue("SnappyCompressDecompress getBytesRead error !!!",
- compressor.getBytesRead() > 0);
- assertTrue(
- "SnappyCompressDecompress getBytesWritten before compress error !!!",
- compressor.getBytesWritten() == 0);
-
- byte[] compressed = new byte[BYTE_SIZE];
- int cSize = compressor.compress(compressed, 0, compressed.length);
- assertTrue(
- "SnappyCompressDecompress getBytesWritten after compress error !!!",
- compressor.getBytesWritten() > 0);
-
- SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE);
- // set as input for decompressor only compressed data indicated with cSize
- decompressor.setInput(compressed, 0, cSize);
- byte[] decompressed = new byte[BYTE_SIZE];
- decompressor.decompress(decompressed, 0, decompressed.length);
-
- assertTrue("testSnappyCompressDecompress finished error !!!",
- decompressor.finished());
- Assert.assertArrayEquals(bytes, decompressed);
- compressor.reset();
- decompressor.reset();
- assertTrue("decompressor getRemaining error !!!",
- decompressor.getRemaining() == 0);
- } catch (Exception e) {
- fail("testSnappyCompressDecompress ex error!!!");
- }
+ compressor.setInput(bytes, 0, bytes.length);
+ assertTrue("SnappyCompressDecompress getBytesRead error !!!",
+ compressor.getBytesRead() > 0);
+ assertEquals(
+ "SnappyCompressDecompress getBytesWritten before compress error !!!",
+ 0, compressor.getBytesWritten());
+
+ // snappy compression may increase data size.
+ // This calculation comes from "Snappy::MaxCompressedLength(size_t)"
+ int maxSize = 32 + BYTE_SIZE + BYTE_SIZE / 6;
+ byte[] compressed = new byte[maxSize];
+ int cSize = compressor.compress(compressed, 0, compressed.length);
+ LOG.info("input size: {}", BYTE_SIZE);
+ LOG.info("compressed size: {}", cSize);
+ assertTrue(
+ "SnappyCompressDecompress getBytesWritten after compress error !!!",
+ compressor.getBytesWritten() > 0);
+
+ SnappyDecompressor decompressor = new SnappyDecompressor();
+ // set as input for decompressor only compressed data indicated with cSize
+ decompressor.setInput(compressed, 0, cSize);
+ byte[] decompressed = new byte[BYTE_SIZE];
+ decompressor.decompress(decompressed, 0, decompressed.length);
+
+ assertTrue("testSnappyCompressDecompress finished error !!!",
+ decompressor.finished());
+ Assert.assertArrayEquals(bytes, decompressed);
+ compressor.reset();
+ decompressor.reset();
+ assertEquals("decompressor getRemaining error !!!",
+ 0, decompressor.getRemaining());
}
@Test
@@ -278,7 +285,38 @@ public void testSnappyBlockCompression() {
fail("testSnappyBlockCompression ex error !!!");
}
}
-
+
+ @Test
+ // The buffer size is smaller than the input.
+ public void testSnappyCompressDecompressWithSmallBuffer() throws Exception {
+ int inputSize = 1024 * 50;
+ int bufferSize = 512;
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ byte[] buffer = new byte[bufferSize];
+ byte[] input = BytesGenerator.get(inputSize);
+
+ SnappyCompressor compressor = new SnappyCompressor();
+ compressor.setInput(input, 0, inputSize);
+ compressor.finish();
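+ // Drain the compressor through the 512-byte buffer; the 50 KB input needs multiple passes.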
+ while (!compressor.finished()) {
+ int len = compressor.compress(buffer, 0, buffer.length);
+ out.write(buffer, 0, len);
+ }
+ byte[] compressed = out.toByteArray();
+ assertThat(compressed).hasSizeGreaterThan(0);
+ out.reset();
+
+ SnappyDecompressor decompressor = new SnappyDecompressor();
+ decompressor.setInput(compressed, 0, compressed.length);
+ while (!decompressor.finished()) {
+ int len = decompressor.decompress(buffer, 0, buffer.length);
+ out.write(buffer, 0, len);
+ }
+ byte[] decompressed = out.toByteArray();
+
+ assertThat(decompressed).isEqualTo(input);
+ }
+
private void compressDecompressLoop(int rawDataSize) throws IOException {
byte[] rawData = BytesGenerator.get(rawDataSize);
byte[] compressedResult = new byte[rawDataSize+20];
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java
index 80aeef2d63672..ea20fbeda3d05 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java
@@ -109,7 +109,7 @@ private int readAndCheckbytes(Scanner scanner, int start, int n)
byte[] val = readValue(scanner);
String keyStr = String.format(localFormatter, i);
String valStr = value + keyStr;
- assertTrue("btyes for keys do not match " + keyStr + " "
+ assertTrue("bytes for keys do not match " + keyStr + " "
+ new String(key), Arrays.equals(keyStr.getBytes(), key));
assertTrue("bytes for vals do not match " + valStr + " "
+ new String(val), Arrays.equals(
@@ -117,7 +117,7 @@ private int readAndCheckbytes(Scanner scanner, int start, int n)
assertTrue(scanner.advance());
key = readKey(scanner);
val = readValue(scanner);
- assertTrue("btyes for keys do not match", Arrays.equals(
+ assertTrue("bytes for keys do not match", Arrays.equals(
keyStr.getBytes(), key));
assertTrue("bytes for vals do not match", Arrays.equals(
valStr.getBytes(), val));
@@ -146,11 +146,11 @@ private int readLargeRecords(Scanner scanner, int start, int n)
for (int i = start; i < (start + n); i++) {
byte[] key = readKey(scanner);
String keyStr = String.format(localFormatter, i);
- assertTrue("btyes for keys do not match", Arrays.equals(
+ assertTrue("bytes for keys do not match", Arrays.equals(
keyStr.getBytes(), key));
scanner.advance();
key = readKey(scanner);
- assertTrue("btyes for keys do not match", Arrays.equals(
+ assertTrue("bytes for keys do not match", Arrays.equals(
keyStr.getBytes(), key));
scanner.advance();
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
index a14928c7b4e24..c21fa443ddcc4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
@@ -800,7 +800,7 @@ public void testPmemCheckParameters() {
// Incorrect file length
try {
- NativeIO.POSIX.Pmem.mapBlock(filePath, length);
+ NativeIO.POSIX.Pmem.mapBlock(filePath, length, false);
fail("Illegal length parameter should be detected");
} catch (Exception e) {
LOG.info(e.getMessage());
@@ -810,7 +810,7 @@ public void testPmemCheckParameters() {
filePath = "/mnt/pmem0/test_native_io";
length = -1L;
try {
- NativeIO.POSIX.Pmem.mapBlock(filePath, length);
+ NativeIO.POSIX.Pmem.mapBlock(filePath, length, false);
fail("Illegal length parameter should be detected");
}catch (Exception e) {
LOG.info(e.getMessage());
@@ -837,10 +837,10 @@ public void testPmemMapMultipleFiles() {
for (int i = 0; i < fileNumber; i++) {
String path = filePath + i;
LOG.info("File path = " + path);
- NativeIO.POSIX.Pmem.mapBlock(path, length);
+ NativeIO.POSIX.Pmem.mapBlock(path, length, false);
}
try {
- NativeIO.POSIX.Pmem.mapBlock(filePath, length);
+ NativeIO.POSIX.Pmem.mapBlock(filePath, length, false);
fail("Request map extra file when persistent memory is all occupied");
} catch (Exception e) {
LOG.info(e.getMessage());
@@ -863,7 +863,7 @@ public void testPmemMapBigFile() {
length = volumeSize + 1024L;
try {
LOG.info("File length = " + length);
- NativeIO.POSIX.Pmem.mapBlock(filePath, length);
+ NativeIO.POSIX.Pmem.mapBlock(filePath, length, false);
fail("File length exceeds persistent memory total volume size");
}catch (Exception e) {
LOG.info(e.getMessage());
@@ -881,7 +881,8 @@ public void testPmemCopy() throws IOException {
// memory device.
String filePath = "/mnt/pmem0/copy";
long length = 4096;
- PmemMappedRegion region = NativeIO.POSIX.Pmem.mapBlock(filePath, length);
+ PmemMappedRegion region = NativeIO.POSIX.Pmem.mapBlock(
+ filePath, length, false);
assertTrue(NativeIO.POSIX.Pmem.isPmem(region.getAddress(), length));
assertFalse(NativeIO.POSIX.Pmem.isPmem(region.getAddress(), length + 100));
assertFalse(NativeIO.POSIX.Pmem.isPmem(region.getAddress() + 100, length));
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 3b42bb46e828c..e1fc29f88126b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -377,4 +377,23 @@ public void testNoRetryOnAccessControlException() throws Exception {
assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
}
}
+
+ @Test
+ public void testWrappedAccessControlException() throws Exception {
+ RetryPolicy policy = mock(RetryPolicy.class);
+ RetryPolicy realPolicy = RetryPolicies.failoverOnNetworkException(5);
+ setupMockPolicy(policy, realPolicy);
+
+ UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
+ UnreliableInterface.class, unreliableImpl, policy);
+
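+ // An AccessControlException wrapped inside IOExceptions should be treated as
+ // non-retriable: shouldRetry is consulted once and the decision is FAIL.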
+ try {
+ unreliable.failsWithWrappedAccessControlException();
+ fail("Should fail");
+ } catch (IOException expected) {
+ verify(policy, times(1)).shouldRetry(any(Exception.class), anyInt(),
+ anyInt(), anyBoolean());
+ assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
+ }
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
index a20d898988400..15a84bbad4a66 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
@@ -139,6 +139,13 @@ public void failsWithAccessControlExceptionEightTimes()
}
}
+ public void failsWithWrappedAccessControlException()
+ throws IOException {
+ AccessControlException ace = new AccessControlException();
+ IOException ioe = new IOException(ace);
+ throw new IOException(ioe);
+ }
+
@Override
public String succeedsOnceThenFailsReturningString()
throws UnreliableException, IOException, StandbyException {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
index 738a76086bae2..80bf47dc23bea 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
@@ -83,6 +83,10 @@ public static class FatalException extends UnreliableException {
void failsWithAccessControlExceptionEightTimes()
throws AccessControlException;
+ @Idempotent
+ void failsWithWrappedAccessControlException()
+ throws IOException;
+
public String succeedsOnceThenFailsReturningString()
throws UnreliableException, StandbyException, IOException;
@Idempotent
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
index 9356dabe2f701..bbb4ec21812e3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ipc;
import com.google.common.base.Joiner;
-import com.google.protobuf.BlockingService;
+import org.apache.hadoop.thirdparty.protobuf.BlockingService;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
index bb4717ed36d35..38b3fe5681024 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
@@ -176,6 +176,12 @@ public void assertCanPut(CallQueueManager cq, int numberOfPuts,
private static final Class<? extends RpcScheduler> schedulerClass
= CallQueueManager.convertSchedulerClass(DefaultRpcScheduler.class);
+ private static final Class<? extends BlockingQueue<FakeCall>> fcqueueClass
+ = CallQueueManager.convertQueueClass(FairCallQueue.class, FakeCall.class);
+
+ private static final Class<? extends RpcScheduler> rpcSchedulerClass
+ = CallQueueManager.convertSchedulerClass(DecayRpcScheduler.class);
+
@Test
public void testCallQueueCapacity() throws InterruptedException {
manager = new CallQueueManager<FakeCall>(queueClass, schedulerClass, false,
@@ -319,6 +325,55 @@ public void testSwapUnderContention() throws InterruptedException {
assertEquals(totalCallsConsumed, totalCallsCreated);
}
+ @Test
+ public void testQueueCapacity() throws InterruptedException {
+ int capacity = 4;
+ String ns = "ipc.8020";
+ conf.setInt("ipc.8020.scheduler.priority.levels", 2);
+ conf.set("ipc.8020.callqueue.capacity.weights", "1,3");
+ manager = new CallQueueManager<>(fcqueueClass, rpcSchedulerClass, false,
+ capacity, ns, conf);
+
+ // insert 4 calls with 2 at each priority
+ // since the queue with priority 0 has only 1 capacity, the second call
+ // with p0 will be overflowed to queue with priority 1
+ for (int i = 0; i < capacity; i++) {
+ FakeCall fc = new FakeCall(i);
+ fc.setPriorityLevel(i%2);
+ manager.put(fc);
+ }
+
+ // get calls, the order should be
+ // call 0 with p0
+ // call 1 with p1
+ // call 2 with p0 since overflow
+ // call 3 with p1
+ assertEquals(manager.take().priorityLevel, 0);
+ assertEquals(manager.take().priorityLevel, 1);
+ assertEquals(manager.take().priorityLevel, 0);
+ assertEquals(manager.take().priorityLevel, 1);
+
+ conf.set("ipc.8020.callqueue.capacity.weights", "1,1");
+ manager = new CallQueueManager<>(fcqueueClass, rpcSchedulerClass, false,
+ capacity, ns, conf);
+
+ for (int i = 0; i < capacity; i++) {
+ FakeCall fc = new FakeCall(i);
+ fc.setPriorityLevel(i%2);
+ manager.put(fc);
+ }
+
+ // get calls, the order should be
+ // call 0 with p0
+ // call 2 with p0
+ // call 1 with p1
+ // call 3 with p1
+ assertEquals(manager.take().priorityLevel, 0);
+ assertEquals(manager.take().priorityLevel, 0);
+ assertEquals(manager.take().priorityLevel, 1);
+ assertEquals(manager.take().priorityLevel, 1);
+ }
+
public static class ExceptionFakeCall implements Schedulable {
public ExceptionFakeCall() {
throw new IllegalArgumentException("Exception caused by call queue " +
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
index 7bdc6b5e96d0c..71723325e2c86 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
@@ -66,15 +66,15 @@ public void testZeroScheduler() {
@SuppressWarnings("deprecation")
public void testParsePeriod() {
// By default
- scheduler = new DecayRpcScheduler(1, "", new Configuration());
+ scheduler = new DecayRpcScheduler(1, "ipc.1", new Configuration());
assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_DEFAULT,
scheduler.getDecayPeriodMillis());
// Custom
Configuration conf = new Configuration();
- conf.setLong("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY,
+ conf.setLong("ipc.2." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY,
1058);
- scheduler = new DecayRpcScheduler(1, "ns", conf);
+ scheduler = new DecayRpcScheduler(1, "ipc.2", conf);
assertEquals(1058L, scheduler.getDecayPeriodMillis());
}
@@ -82,15 +82,15 @@ public void testParsePeriod() {
@SuppressWarnings("deprecation")
public void testParseFactor() {
// Default
- scheduler = new DecayRpcScheduler(1, "", new Configuration());
+ scheduler = new DecayRpcScheduler(1, "ipc.3", new Configuration());
assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_FACTOR_DEFAULT,
scheduler.getDecayFactor(), 0.00001);
// Custom
Configuration conf = new Configuration();
- conf.set("prefix." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY,
+ conf.set("ipc.4." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY,
"0.125");
- scheduler = new DecayRpcScheduler(1, "prefix", conf);
+ scheduler = new DecayRpcScheduler(1, "ipc.4", conf);
assertEquals(0.125, scheduler.getDecayFactor(), 0.00001);
}
@@ -106,23 +106,23 @@ public void assertEqualDecimalArrays(double[] a, double[] b) {
public void testParseThresholds() {
// Defaults vary by number of queues
Configuration conf = new Configuration();
- scheduler = new DecayRpcScheduler(1, "", conf);
+ scheduler = new DecayRpcScheduler(1, "ipc.5", conf);
assertEqualDecimalArrays(new double[]{}, scheduler.getThresholds());
- scheduler = new DecayRpcScheduler(2, "", conf);
+ scheduler = new DecayRpcScheduler(2, "ipc.6", conf);
assertEqualDecimalArrays(new double[]{0.5}, scheduler.getThresholds());
- scheduler = new DecayRpcScheduler(3, "", conf);
+ scheduler = new DecayRpcScheduler(3, "ipc.7", conf);
assertEqualDecimalArrays(new double[]{0.25, 0.5}, scheduler.getThresholds());
- scheduler = new DecayRpcScheduler(4, "", conf);
+ scheduler = new DecayRpcScheduler(4, "ipc.8", conf);
assertEqualDecimalArrays(new double[]{0.125, 0.25, 0.5}, scheduler.getThresholds());
// Custom
conf = new Configuration();
- conf.set("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_THRESHOLDS_KEY,
+ conf.set("ipc.9." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_THRESHOLDS_KEY,
"1, 10, 20, 50, 85");
- scheduler = new DecayRpcScheduler(6, "ns", conf);
+ scheduler = new DecayRpcScheduler(6, "ipc.9", conf);
assertEqualDecimalArrays(new double[]{0.01, 0.1, 0.2, 0.5, 0.85}, scheduler.getThresholds());
}
@@ -130,8 +130,9 @@ public void testParseThresholds() {
@SuppressWarnings("deprecation")
public void testAccumulate() {
Configuration conf = new Configuration();
- conf.set("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, "99999999"); // Never flush
- scheduler = new DecayRpcScheduler(1, "ns", conf);
+ conf.set("ipc.10." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY,
+ "99999999"); // Never flush
+ scheduler = new DecayRpcScheduler(1, "ipc.10", conf);
assertEquals(0, scheduler.getCallCostSnapshot().size()); // empty first
@@ -151,11 +152,11 @@ public void testAccumulate() {
@SuppressWarnings("deprecation")
public void testDecay() throws Exception {
Configuration conf = new Configuration();
- conf.setLong("ns." // Never decay
+ conf.setLong("ipc.11." // Never decay
+ DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_KEY, 999999999);
- conf.setDouble("ns."
+ conf.setDouble("ipc.11."
+ DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_FACTOR_KEY, 0.5);
- scheduler = new DecayRpcScheduler(1, "ns", conf);
+ scheduler = new DecayRpcScheduler(1, "ipc.11", conf);
assertEquals(0, scheduler.getTotalCallSnapshot());
@@ -202,7 +203,7 @@ public void testDecay() throws Exception {
@SuppressWarnings("deprecation")
public void testPriority() throws Exception {
Configuration conf = new Configuration();
- final String namespace = "ns";
+ final String namespace = "ipc.12";
conf.set(namespace + "." + DecayRpcScheduler
.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, "99999999"); // Never flush
conf.set(namespace + "." + DecayRpcScheduler
@@ -239,9 +240,11 @@ public void testPriority() throws Exception {
@SuppressWarnings("deprecation")
public void testPeriodic() throws InterruptedException {
Configuration conf = new Configuration();
- conf.set("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, "10");
- conf.set("ns." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY, "0.5");
- scheduler = new DecayRpcScheduler(1, "ns", conf);
+ conf.set(
+ "ipc.13." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, "10");
+ conf.set(
+ "ipc.13." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY, "0.5");
+ scheduler = new DecayRpcScheduler(1, "ipc.13", conf);
assertEquals(10, scheduler.getDecayPeriodMillis());
assertEquals(0, scheduler.getTotalCallSnapshot());
@@ -269,7 +272,7 @@ public void testNPEatInitialization() throws InterruptedException {
// MetricsSystemImpl to true
DefaultMetricsSystem.initialize("NameNode");
Configuration conf = new Configuration();
- scheduler = new DecayRpcScheduler(1, "ns", conf);
+ scheduler = new DecayRpcScheduler(1, "ipc.14", conf);
// check if there is npe in log
assertFalse(bytes.toString().contains("NullPointerException"));
} finally {
@@ -280,7 +283,7 @@ public void testNPEatInitialization() throws InterruptedException {
@Test
public void testUsingWeightedTimeCostProvider() {
- scheduler = getSchedulerWithWeightedTimeCostProvider(3);
+ scheduler = getSchedulerWithWeightedTimeCostProvider(3, "ipc.15");
// 3 details in increasing order of cost. Although medium has a longer
// duration, the shared lock is weighted less than the exclusive lock
@@ -330,7 +333,7 @@ public void testUsingWeightedTimeCostProvider() {
@Test
public void testUsingWeightedTimeCostProviderWithZeroCostCalls() {
- scheduler = getSchedulerWithWeightedTimeCostProvider(2);
+ scheduler = getSchedulerWithWeightedTimeCostProvider(2, "ipc.16");
ProcessingDetails emptyDetails =
new ProcessingDetails(TimeUnit.MILLISECONDS);
@@ -347,7 +350,7 @@ public void testUsingWeightedTimeCostProviderWithZeroCostCalls() {
@Test
public void testUsingWeightedTimeCostProviderNoRequests() {
- scheduler = getSchedulerWithWeightedTimeCostProvider(2);
+ scheduler = getSchedulerWithWeightedTimeCostProvider(2, "ipc.18");
assertEquals(0, scheduler.getPriorityLevel(mockCall("A")));
}
@@ -357,13 +360,13 @@ public void testUsingWeightedTimeCostProviderNoRequests() {
* normal decaying disabled.
*/
private static DecayRpcScheduler getSchedulerWithWeightedTimeCostProvider(
- int priorityLevels) {
+ int priorityLevels, String ns) {
Configuration conf = new Configuration();
- conf.setClass("ns." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY,
+ conf.setClass(ns + "." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY,
WeightedTimeCostProvider.class, CostProvider.class);
- conf.setLong("ns."
+ conf.setLong(ns + "."
+ DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_KEY, 999999);
- return new DecayRpcScheduler(priorityLevels, "ns", conf);
+ return new DecayRpcScheduler(priorityLevels, ns, conf);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
index e6a5f5e564c1f..1fed9a317642a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
@@ -104,6 +104,9 @@ public void testTotalCapacityOfSubQueues() {
assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025);
fairCallQueue = new FairCallQueue<Schedulable>(7, 1025, "ns", conf);
assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025);
+ fairCallQueue = new FairCallQueue<Schedulable>(7, 1025, "ns",
+ new int[]{7, 6, 5, 4, 3, 2, 1}, conf);
+ assertThat(fairCallQueue.remainingCapacity()).isEqualTo(1025);
}
@Test
@@ -157,6 +160,61 @@ public int getAndAdvanceCurrentIndex() {
assertNull(fcq.poll());
}
+ @Test
+ public void testQueueCapacity() {
+ int numQueues = 2;
+ int capacity = 4;
+ Configuration conf = new Configuration();
+ List<Schedulable> calls = new ArrayList<>();
+
+ // default weights i.e. all queues share capacity
+ fcq = new FairCallQueue<Schedulable>(numQueues, 4, "ns", conf);
+ FairCallQueue<Schedulable> fcq1 = new FairCallQueue<Schedulable>(
+ numQueues, capacity, "ns", new int[]{1, 3}, conf);
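+ // fcq1 splits the total capacity of 4 between the two sub-queues as 1 (p0) and 3 (p1).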
+
+ for (int i=0; i < capacity; i++) {
+ Schedulable call = mockCall("u", i%2);
+ calls.add(call);
+ fcq.add(call);
+ fcq1.add(call);
+ }
+
+ final AtomicInteger currentIndex = new AtomicInteger();
+ fcq.setMultiplexer(new RpcMultiplexer(){
+ @Override
+ public int getAndAdvanceCurrentIndex() {
+ return currentIndex.get();
+ }
+ });
+ fcq1.setMultiplexer(new RpcMultiplexer(){
+ @Override
+ public int getAndAdvanceCurrentIndex() {
+ return currentIndex.get();
+ }
+ });
+
+ // either queue will have two calls
+ // v
+ // 0 1
+ // 2 3
+ currentIndex.set(1);
+ assertSame(calls.get(1), fcq.poll());
+ assertSame(calls.get(3), fcq.poll());
+ assertSame(calls.get(0), fcq.poll());
+ assertSame(calls.get(2), fcq.poll());
+
+ // queues with different number of calls
+ // v
+ // 0 1
+ // 2
+ // 3
+ currentIndex.set(1);
+ assertSame(calls.get(1), fcq1.poll());
+ assertSame(calls.get(2), fcq1.poll());
+ assertSame(calls.get(3), fcq1.poll());
+ assertSame(calls.get(0), fcq1.poll());
+ }
+
@SuppressWarnings("unchecked")
@Test
public void testInsertionWithFailover() {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
index 76a93cf71b03c..dfb9e934f6055 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
@@ -39,9 +39,9 @@
import org.junit.Assert;
import org.junit.Test;
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.BlockingService;
+import org.apache.hadoop.thirdparty.protobuf.RpcController;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
public class TestProtoBufRPCCompatibility {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
index 3053f87511885..facb8fdd8b191 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
@@ -17,9 +17,9 @@
*/
package org.apache.hadoop.ipc;
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.BlockingService;
+import org.apache.hadoop.thirdparty.protobuf.RpcController;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java
index 476b1979b2a54..32300d4f876e1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java
@@ -26,9 +26,9 @@
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.BlockingService;
+import org.apache.hadoop.thirdparty.protobuf.RpcController;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.protobuf.TestProtos;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcHandoffProto;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 0da0b47529f99..640ca3d2b89ed 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ipc;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java
index aee8893538330..39705b06c67c0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ipc;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Before;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
index 2f2d36f7b45d7..bf24d680dde2e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.ipc;
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.BlockingService;
+import org.apache.hadoop.thirdparty.protobuf.RpcController;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcWritable.java
index 837f5797121eb..6beae7d12b4c7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcWritable.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcWritable.java
@@ -29,7 +29,7 @@
import org.junit.Assert;
import org.junit.Test;
-import com.google.protobuf.Message;
+import org.apache.hadoop.thirdparty.protobuf.Message;
public class TestRpcWritable {//extends TestRpcBase {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index 4f8a6d29f7244..72f73822b6fd0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ipc;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/metrics/TestDecayRpcSchedulerDetailedMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/metrics/TestDecayRpcSchedulerDetailedMetrics.java
new file mode 100644
index 0000000000000..01d407ba26010
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/metrics/TestDecayRpcSchedulerDetailedMetrics.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc.metrics;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.DecayRpcScheduler;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.junit.Test;
+
+public class TestDecayRpcSchedulerDetailedMetrics {
+
+ @Test
+ public void metricsRegistered() {
+ Configuration conf = new Configuration();
+ DecayRpcScheduler scheduler = new DecayRpcScheduler(4, "ipc.8020", conf);
+ MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
+ DecayRpcSchedulerDetailedMetrics metrics =
+ scheduler.getDecayRpcSchedulerDetailedMetrics();
+
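+ // The detailed metrics source should be registered while the scheduler runs
+ // and unregistered once the scheduler is stopped.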
+ assertNotNull(metricsSystem.getSource(metrics.getName()));
+
+ scheduler.stop();
+
+ assertNull(metricsSystem.getSource(metrics.getName()));
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
index b5f62b189040e..5d20abdd8bf10 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
@@ -149,6 +149,19 @@ interface TestProtocol {
assertGauge("BarAvgTime", 0.0, rb);
}
+ @Test public void testMutableRatesWithAggregationInitWithArray() {
+ MetricsRecordBuilder rb = mockMetricsRecordBuilder();
+ MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
+
+ rates.init(new String[]{"Foo", "Bar"});
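+ // Metrics pre-registered via init() should be emitted with zero values even before any rate is recorded.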
+ rates.snapshot(rb, false);
+
+ assertCounter("FooNumOps", 0L, rb);
+ assertGauge("FooAvgTime", 0.0, rb);
+ assertCounter("BarNumOps", 0L, rb);
+ assertGauge("BarAvgTime", 0.0, rb);
+ }
+
@Test public void testMutableRatesWithAggregationSingleThread() {
MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
index 37a3a2affccfe..6fdd64dca7c30 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.metrics2.source;
+import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
import org.apache.hadoop.util.GcTimeMonitor;
import org.junit.After;
import org.junit.Assert;
@@ -37,6 +38,7 @@
import org.apache.hadoop.util.JvmPauseMonitor;
import java.util.ArrayList;
+import java.util.Iterator;
import java.util.List;
import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*;
@@ -65,7 +67,7 @@ public void testJvmPauseMonitorPresence() {
pauseMonitor = new JvmPauseMonitor();
pauseMonitor.init(new Configuration());
pauseMonitor.start();
- JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
+ JvmMetrics jvmMetrics = new JvmMetrics("test", "test", false);
jvmMetrics.setPauseMonitor(pauseMonitor);
MetricsRecordBuilder rb = getMetrics(jvmMetrics);
MetricsCollector mc = rb.parent();
@@ -91,7 +93,7 @@ public void testJvmPauseMonitorPresence() {
public void testGcTimeMonitorPresence() {
gcTimeMonitor = new GcTimeMonitor(60000, 1000, 70, null);
gcTimeMonitor.start();
- JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
+ JvmMetrics jvmMetrics = new JvmMetrics("test", "test", false);
jvmMetrics.setGcTimeMonitor(gcTimeMonitor);
MetricsRecordBuilder rb = getMetrics(jvmMetrics);
MetricsCollector mc = rb.parent();
@@ -226,4 +228,89 @@ public void testJvmMetricsSingletonWithDifferentProcessNames() {
Assert.assertEquals("unexpected process name of the singleton instance",
process1Name, jvmMetrics2.processName);
}
+
+ /**
+ * Performance test for JvmMetrics#getMetrics, comparing performance of
+ * getting thread usage from ThreadMXBean with that from ThreadGroup.
+ */
+ @Test
+ public void testGetMetricsPerf() {
+ JvmMetrics jvmMetricsUseMXBean = new JvmMetrics("test", "test", true);
+ JvmMetrics jvmMetrics = new JvmMetrics("test", "test", false);
+ MetricsCollectorImpl collector = new MetricsCollectorImpl();
+ // warm up
+ jvmMetrics.getMetrics(collector, true);
+ jvmMetricsUseMXBean.getMetrics(collector, true);
+ // test cases with different numbers of threads
+ int[] numThreadsCases = {100, 200, 500, 1000, 2000, 3000};
+ List threads = new ArrayList();
+ for (int numThreads : numThreadsCases) {
+ updateThreadsAndWait(threads, numThreads);
+ long startNs = System.nanoTime();
+ jvmMetricsUseMXBean.getMetrics(collector, true);
+ long processingNsFromMXBean = System.nanoTime() - startNs;
+ startNs = System.nanoTime();
+ jvmMetrics.getMetrics(collector, true);
+ long processingNsFromGroup = System.nanoTime() - startNs;
+ System.out.println(
+ "#Threads=" + numThreads + ", ThreadMXBean=" + processingNsFromMXBean
+ + " ns, ThreadGroup=" + processingNsFromGroup + " ns, ratio: " + (
+ processingNsFromMXBean / processingNsFromGroup));
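+ // Note: the ratio is computed with long division, so it is only a rough factor.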
+ }
+ // cleanup
+ updateThreadsAndWait(threads, 0);
+ }
+
+ private static void updateThreadsAndWait(List<TestThread> threads,
+ int expectedNumThreads) {
+ // add/remove threads according to expected number
+ int addNum = expectedNumThreads - threads.size();
+ if (addNum > 0) {
+ for (int i = 0; i < addNum; i++) {
+ TestThread testThread = new TestThread();
+ testThread.start();
+ threads.add(testThread);
+ }
+ } else if (addNum < 0) {
+ for (int i = 0; i < Math.abs(addNum); i++) {
+ threads.get(i).exit = true;
+ }
+ } else {
+ return;
+ }
+ // wait for threads to reach the expected number
+ while (true) {
+ Iterator<TestThread> it = threads.iterator();
+ while (it.hasNext()) {
+ if (it.next().exited) {
+ it.remove();
+ }
+ }
+ if (threads.size() == expectedNumThreads) {
+ break;
+ } else {
+ try {
+ Thread.sleep(500);
+ } catch (InterruptedException e) {
+ //ignore
+ }
+ }
+ }
+ }
+
+ static class TestThread extends Thread {
+ private volatile boolean exit = false;
+ private volatile boolean exited = false;
+ @Override
+ public void run() {
+ while (!exit) {
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ exited = true;
+ }
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java
index aa9370933722f..3e436f3a22f8c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java
@@ -39,8 +39,8 @@ public class MockDomainNameResolver implements DomainNameResolver {
public static final byte[] BYTE_ADDR_2 = new byte[]{10, 1, 1, 2};
public static final String ADDR_1 = "10.1.1.1";
public static final String ADDR_2 = "10.1.1.2";
- public static final String FQDN_1 = "host01.com";
- public static final String FQDN_2 = "host02.com";
+ public static final String FQDN_1 = "host01.test";
+ public static final String FQDN_2 = "host02.test";
/** Internal mapping of domain names and IP addresses. */
private Map<String, InetAddress[]> addrs = new TreeMap<>();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
index fbed6052a5c03..328cf11c20fa6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.net;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Arrays;
@@ -193,6 +194,10 @@ public void testChooseRandom() {
}
assertEquals("Random is not selecting the nodes it should",
2, histogram.size());
+
+ Node val = cluster.chooseRandom("/d1", "/d", Collections.emptyList());
+ assertNotNull(val);
+
}
@Test
@@ -229,6 +234,15 @@ public void testChooseRandomExcluded() {
assertSame("node3", node.getName());
}
+ @Test
+ public void testNodeBaseNormalizeRemoveLeadingSlash() {
+ assertEquals("/d1", NodeBase.normalize("/d1///"));
+ assertEquals("/d1", NodeBase.normalize("/d1/"));
+ assertEquals("/d1", NodeBase.normalize("/d1"));
+ assertEquals("", NodeBase.normalize("///"));
+ assertEquals("", NodeBase.normalize("/"));
+ }
+
private NodeElement getNewNode(String name, String rackLocation) {
NodeElement node = new NodeElement(name);
node.setNetworkLocation(rackLocation);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index b11b1e96ded59..76284932c43de 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -95,7 +95,25 @@ public void testAvoidLoopbackTcpSockets() throws Throwable {
assertInException(se, "Invalid argument");
}
}
-
+
+ @Test
+ public void testInvalidAddress() throws Throwable {
+ Configuration conf = new Configuration();
+
+ Socket socket = NetUtils.getDefaultSocketFactory(conf)
+ .createSocket();
+ socket.bind(new InetSocketAddress("127.0.0.1", 0));
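+ // Connecting to a host name that cannot be resolved should fail fast with UnknownHostException.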
+ try {
+ NetUtils.connect(socket,
+ new InetSocketAddress("invalid-test-host",
+ 0), 20000);
+ socket.close();
+ fail("Should not have connected");
+ } catch (UnknownHostException uhe) {
+ LOG.info("Got exception: ", uhe);
+ }
+ }
+
@Test
public void testSocketReadTimeoutWithChannel() throws Exception {
doSocketReadTimeoutTest(true);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
index f1c03cf5df470..76c74a37a0695 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
@@ -40,7 +40,7 @@
import static org.junit.Assert.*;
/**
- * This tests timout out from SocketInputStream and
+ * This tests timeouts from SocketInputStream and
* SocketOutputStream using pipes.
*
* Normal read and write using these streams are tested by pretty much
@@ -185,4 +185,42 @@ public void doWork() throws Exception {
}
}
}
+
+ @Test
+ public void testSocketIOWithTimeoutInterrupted() throws Exception {
+ Pipe pipe = Pipe.open();
+ final int timeout = TIMEOUT * 10;
+
+ try (Pipe.SourceChannel source = pipe.source();
+ InputStream in = new SocketInputStream(source, timeout)) {
+
+ TestingThread thread = new TestingThread(ctx) {
+ @Override
+ public void doWork() throws Exception {
+ try {
+ in.read();
+ fail("Did not fail with interrupt");
+ } catch (InterruptedIOException ste) {
+ String detail = ste.getMessage();
+ String totalString = "Total timeout mills is " + timeout;
+ String leftString = "millis timeout left";
+
+ assertTrue(detail.contains(totalString));
+ assertTrue(detail.contains(leftString));
+ }
+ }
+ };
+
+ ctx.addThread(thread);
+ ctx.startThreads();
+ // If the thread is interrupted before it calls read()
+ // then it throws ClosedByInterruptException due to
+ // some Java quirk. Waiting for it to call read()
+ // gets it into select(), so we get the expected
+ // InterruptedIOException.
+ Thread.sleep(1000);
+ thread.interrupt();
+ ctx.stop();
+ }
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
index 3293903e6470b..c86b9ae344195 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.security;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.thirdparty.protobuf.ServiceException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 4c471da4e8c35..46e9f92258502 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -22,6 +22,7 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
+import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
@@ -66,7 +67,7 @@ public void setup() throws IOException {
public static class FakeGroupMapping extends ShellBasedUnixGroupsMapping {
// any to n mapping
- private static Set<String> allGroups = new HashSet<String>();
+ private static Set<String> allGroups = new LinkedHashSet<String>();
private static Set<String> blackList = new HashSet<String>();
private static int requestCount = 0;
private static long getGroupsDelayMs = 0;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
index 0d30e6e410be1..f027d3b39bec1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
@@ -68,6 +68,10 @@
public class KeyStoreTestUtil {
+ public final static String SERVER_KEY_STORE_PASSWORD_DEFAULT = "serverP";
+ public final static String CLIENT_KEY_STORE_PASSWORD_DEFAULT = "clientP";
+ public final static String TRUST_STORE_PASSWORD_DEFAULT = "trustP";
+
public static String getClasspathDir(Class klass) throws Exception {
String file = klass.getName();
file = file.replace('.', '/') + ".class";
@@ -257,30 +261,57 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir,
setupSSLConfig(keystoresDir, sslConfDir, conf, useClientCert, true,"");
}
- /**
- * Performs complete setup of SSL configuration in preparation for testing an
- * SSLFactory. This includes keys, certs, keystores, truststores, the server
- * SSL configuration file, the client SSL configuration file, and the master
- * configuration file read by the SSLFactory.
- *
- * @param keystoresDir
- * @param sslConfDir
- * @param conf
- * @param useClientCert
- * @param trustStore
- * @param excludeCiphers
- * @throws Exception
- */
- public static void setupSSLConfig(String keystoresDir, String sslConfDir,
- Configuration conf, boolean useClientCert,
- boolean trustStore, String excludeCiphers)
- throws Exception {
+ /**
+ * Performs complete setup of SSL configuration in preparation for testing an
+ * SSLFactory. This includes keys, certs, keystores, truststores, the server
+ * SSL configuration file, the client SSL configuration file, and the master
+ * configuration file read by the SSLFactory.
+ *
+ * @param keystoresDir
+ * @param sslConfDir
+ * @param conf
+ * @param useClientCert
+ * @param trustStore
+ * @param excludeCiphers
+ * @throws Exception
+ */
+ public static void setupSSLConfig(String keystoresDir, String sslConfDir,
+ Configuration conf, boolean useClientCert, boolean trustStore,
+ String excludeCiphers) throws Exception {
+ setupSSLConfig(keystoresDir, sslConfDir, conf, useClientCert, trustStore,
+ excludeCiphers, SERVER_KEY_STORE_PASSWORD_DEFAULT,
+ CLIENT_KEY_STORE_PASSWORD_DEFAULT, TRUST_STORE_PASSWORD_DEFAULT);
+ }
+
+
+ /**
+ * Performs complete setup of SSL configuration in preparation for testing an
+ * SSLFactory. This includes keys, certs, keystores, truststores, the server
+ * SSL configuration file, the client SSL configuration file, and the master
+ * configuration file read by the SSLFactory and the passwords required to
+ * access the keyStores (Server and Client KeyStore Passwords and
+ * TrustStore Password).
+ *
+ * @param keystoresDir
+ * @param sslConfDir
+ * @param conf
+ * @param useClientCert
+ * @param trustStore
+ * @param excludeCiphers
+ * @param serverPassword
+ * @param clientPassword
+ * @param trustPassword
+ * @throws Exception
+ */
+ @SuppressWarnings("checkstyle:parameternumber")
+ public static void setupSSLConfig(String keystoresDir, String sslConfDir,
+ Configuration conf, boolean useClientCert, boolean trustStore,
+ String excludeCiphers, String serverPassword, String clientPassword,
+ String trustPassword) throws Exception {
+
String clientKS = keystoresDir + "/clientKS.jks";
- String clientPassword = "clientP";
String serverKS = keystoresDir + "/serverKS.jks";
- String serverPassword = "serverP";
String trustKS = null;
- String trustPassword = "trustP";
File sslClientConfFile = new File(sslConfDir, getClientSSLConfigFileName());
File sslServerConfFile = new File(sslConfDir, getServerSSLConfigFileName());
@@ -310,10 +341,10 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir,
KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
}
- Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword,
- clientPassword, trustKS, excludeCiphers);
- Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword,
- serverPassword, trustKS, excludeCiphers);
+ Configuration clientSSLConf = createClientSSLConfig(clientKS,
+ clientPassword, clientPassword, trustKS, trustPassword, excludeCiphers);
+ Configuration serverSSLConf = createServerSSLConfig(serverKS,
+ serverPassword, serverPassword, trustKS, trustPassword, excludeCiphers);
saveConfig(sslClientConfFile, clientSSLConf);
saveConfig(sslServerConfFile, serverSSLConf);
@@ -336,9 +367,10 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir,
* @return Configuration for client SSL
*/
public static Configuration createClientSSLConfig(String clientKS,
- String password, String keyPassword, String trustKS) {
+ String password, String keyPassword, String trustKS,
+ String trustPassword) {
return createSSLConfig(SSLFactory.Mode.CLIENT,
- clientKS, password, keyPassword, trustKS, "");
+ clientKS, password, keyPassword, trustKS, trustPassword, "");
}
/**
@@ -353,10 +385,11 @@ public static Configuration createClientSSLConfig(String clientKS,
* @param excludeCiphers String comma separated ciphers to exclude
* @return Configuration for client SSL
*/
- public static Configuration createClientSSLConfig(String clientKS,
- String password, String keyPassword, String trustKS, String excludeCiphers) {
+ public static Configuration createClientSSLConfig(String clientKS,
+ String password, String keyPassword, String trustKS,
+ String trustPassword, String excludeCiphers) {
return createSSLConfig(SSLFactory.Mode.CLIENT,
- clientKS, password, keyPassword, trustKS, excludeCiphers);
+ clientKS, password, keyPassword, trustKS, trustPassword, excludeCiphers);
}
/**
@@ -372,9 +405,10 @@ public static Configuration createClientSSLConfig(String clientKS,
* @throws java.io.IOException
*/
public static Configuration createServerSSLConfig(String serverKS,
- String password, String keyPassword, String trustKS) throws IOException {
+ String password, String keyPassword, String trustKS, String trustPassword)
+ throws IOException {
return createSSLConfig(SSLFactory.Mode.SERVER,
- serverKS, password, keyPassword, trustKS, "");
+ serverKS, password, keyPassword, trustKS, trustPassword, "");
}
/**
@@ -390,10 +424,11 @@ public static Configuration createServerSSLConfig(String serverKS,
* @return
* @throws IOException
*/
- public static Configuration createServerSSLConfig(String serverKS,
- String password, String keyPassword, String trustKS, String excludeCiphers) throws IOException {
+ public static Configuration createServerSSLConfig(String serverKS,
+ String password, String keyPassword, String trustKS, String trustPassword,
+ String excludeCiphers) throws IOException {
return createSSLConfig(SSLFactory.Mode.SERVER,
- serverKS, password, keyPassword, trustKS, excludeCiphers);
+ serverKS, password, keyPassword, trustKS, trustPassword, excludeCiphers);
}
/**
@@ -445,8 +480,8 @@ private static String getSSLConfigFileName(String base) {
* @return Configuration for SSL
*/
private static Configuration createSSLConfig(SSLFactory.Mode mode,
- String keystore, String password, String keyPassword, String trustKS, String excludeCiphers) {
- String trustPassword = "trustP";
+ String keystore, String password, String keyPassword, String trustKS,
+ String trustStorePwd, String excludeCiphers) {
Configuration sslConf = new Configuration(false);
if (keystore != null) {
@@ -466,10 +501,10 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode,
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
}
- if (trustPassword != null) {
+ if (trustStorePwd != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY),
- trustPassword);
+ trustStorePwd);
}
if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
index 9f149b74277e9..9b4d1f205ff58 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.security.ssl;
+import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.TRUST_STORE_PASSWORD_DEFAULT;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
@@ -407,7 +408,7 @@ private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode,
String keystore = new File(KEYSTORES_DIR, "keystore.jks").getAbsolutePath();
String truststore = new File(KEYSTORES_DIR, "truststore.jks")
.getAbsolutePath();
- String trustPassword = "trustP";
+ String trustPassword = TRUST_STORE_PASSWORD_DEFAULT;
// Create keys, certs, keystore, and truststore.
KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
@@ -433,7 +434,7 @@ private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode,
if (mode == SSLFactory.Mode.SERVER) {
sslConfFileName = "ssl-server.xml";
sslConf = KeyStoreTestUtil.createServerSSLConfig(keystore, confPassword,
- confKeyPassword, truststore);
+ confKeyPassword, truststore, trustPassword);
if (useCredProvider) {
File testDir = GenericTestUtils.getTestDir();
final Path jksPath = new Path(testDir.toString(), "test.jks");
@@ -444,7 +445,7 @@ private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode,
} else {
sslConfFileName = "ssl-client.xml";
sslConf = KeyStoreTestUtil.createClientSSLConfig(keystore, confPassword,
- confKeyPassword, truststore);
+ confKeyPassword, truststore, trustPassword);
}
KeyStoreTestUtil.saveConfig(new File(sslConfsDir, sslConfFileName), sslConf);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
index c9571ff21e847..b2e177976b6d5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
@@ -217,6 +217,58 @@ public void testNodeUpAferAWhile() throws Exception {
}
}
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testMultiNodeCompeteForSeqNum() throws Exception {
+ DelegationTokenManager tm1, tm2 = null;
+ String connectString = zkServer.getConnectString();
+ Configuration conf = getSecretConf(connectString);
+ conf.setInt(
+ ZKDelegationTokenSecretManager.ZK_DTSM_TOKEN_SEQNUM_BATCH_SIZE, 1000);
+ tm1 = new DelegationTokenManager(conf, new Text("bla"));
+ tm1.init();
+
+ Token token1 =
+ (Token) tm1.createToken(
+ UserGroupInformation.getCurrentUser(), "foo");
+ Assert.assertNotNull(token1);
+ AbstractDelegationTokenIdentifier id1 =
+ tm1.getDelegationTokenSecretManager().decodeTokenIdentifier(token1);
+ Assert.assertEquals(
+ "Token seq should be the same", 1, id1.getSequenceNumber());
+ Token token2 =
+ (Token) tm1.createToken(
+ UserGroupInformation.getCurrentUser(), "foo");
+ Assert.assertNotNull(token2);
+ AbstractDelegationTokenIdentifier id2 =
+ tm1.getDelegationTokenSecretManager().decodeTokenIdentifier(token2);
+ Assert.assertEquals(
+ "Token seq should be the same", 2, id2.getSequenceNumber());
+
+ tm2 = new DelegationTokenManager(conf, new Text("bla"));
+ tm2.init();
+
+ Token token3 =
+ (Token) tm2.createToken(
+ UserGroupInformation.getCurrentUser(), "foo");
+ Assert.assertNotNull(token3);
+ AbstractDelegationTokenIdentifier id3 =
+ tm2.getDelegationTokenSecretManager().decodeTokenIdentifier(token3);
+ Assert.assertEquals(
+ "Token seq should be the same", 1001, id3.getSequenceNumber());
+ Token token4 =
+ (Token) tm2.createToken(
+ UserGroupInformation.getCurrentUser(), "foo");
+ Assert.assertNotNull(token4);
+ AbstractDelegationTokenIdentifier id4 =
+ tm2.getDelegationTokenSecretManager().decodeTokenIdentifier(token4);
+ Assert.assertEquals(
+ "Token seq should be the same", 1002, id4.getSequenceNumber());
+
+ verifyDestroy(tm1, conf);
+ verifyDestroy(tm2, conf);
+ }
+
@SuppressWarnings("unchecked")
@Test
public void testRenewTokenSingleManager() throws Exception {
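For orientation, the new test expects the two managers to hand out 1, 2 and then 1001, 1002, which is the observable effect of each manager reserving a block of ZK_DTSM_TOKEN_SEQNUM_BATCH_SIZE sequence numbers from the shared ZooKeeper counter and then serving them locally. A minimal standalone sketch of that batching idea; the BatchedSequence class is hypothetical and an AtomicInteger stands in for the ZooKeeper-backed shared counter:

import java.util.concurrent.atomic.AtomicInteger;

/** Sketch: serve sequence numbers from locally reserved batches. */
public class BatchedSequence {
  private final AtomicInteger sharedCounter; // stand-in for the ZK counter
  private final int batchSize;
  private int next;      // last value handed out within the reserved range
  private int rangeEnd;  // end of the currently reserved range

  public BatchedSequence(AtomicInteger sharedCounter, int batchSize) {
    this.sharedCounter = sharedCounter;
    this.batchSize = batchSize;
  }

  /** Return the next sequence number, reserving a new batch when exhausted. */
  public synchronized int incrementAndGet() {
    if (next >= rangeEnd) {
      // Only this call touches the shared counter: one round trip per batch.
      next = sharedCounter.getAndAdd(batchSize);
      rangeEnd = next + batchSize;
    }
    return ++next;
  }

  public static void main(String[] args) {
    AtomicInteger shared = new AtomicInteger(0);
    BatchedSequence m1 = new BatchedSequence(shared, 1000);
    BatchedSequence m2 = new BatchedSequence(shared, 1000);
    System.out.println(m1.incrementAndGet()); // 1
    System.out.println(m1.incrementAndGet()); // 2
    System.out.println(m2.incrementAndGet()); // 1001
    System.out.println(m2.incrementAndGet()); // 1002
  }
}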
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java
new file mode 100644
index 0000000000000..e18119ccafcb8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AbstractHadoopTestBase.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.test;
+
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+
+/**
+ * A base class for JUnit4 tests that sets a default timeout for all tests
+ * that subclass this class.
+ *
+ * Threads are named after the method being executed, for ease of diagnostics
+ * in logs and thread dumps.
+ *
+ * Unlike {@link HadoopTestBase} this class does not extend JUnit Assert
+ * so is easier to use with AssertJ.
+ */
+public abstract class AbstractHadoopTestBase {
+
+ /**
+ * System property name to set the test timeout: {@value}.
+ */
+ public static final String PROPERTY_TEST_DEFAULT_TIMEOUT =
+ "test.default.timeout";
+
+ /**
+ * The default timeout (in milliseconds) if the system property
+ * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}
+ * is not set: {@value}.
+ */
+ public static final int TEST_DEFAULT_TIMEOUT_VALUE = 100000;
+
+ /**
+ * The JUnit rule that sets the default timeout for tests.
+ */
+ @Rule
+ public Timeout defaultTimeout = retrieveTestTimeout();
+
+ /**
+ * Retrieve the test timeout from the system property
+ * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}, falling back to
+ * the value in {@link #TEST_DEFAULT_TIMEOUT_VALUE} if the
+ * property is not defined.
+ * @return the recommended timeout for tests
+ */
+ public static Timeout retrieveTestTimeout() {
+ String propval = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT,
+ Integer.toString(
+ TEST_DEFAULT_TIMEOUT_VALUE));
+ int millis;
+ try {
+ millis = Integer.parseInt(propval);
+ } catch (NumberFormatException e) {
+ //fall back to the default value, as the property cannot be parsed
+ millis = TEST_DEFAULT_TIMEOUT_VALUE;
+ }
+ return new Timeout(millis, TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * The method name.
+ */
+ @Rule
+ public TestName methodName = new TestName();
+
+ /**
+ * Get the method name; defaults to the value of {@link #methodName}.
+ * Subclasses may wish to override it, which will tune the thread naming.
+ * @return the name of the method.
+ */
+ protected String getMethodName() {
+ return methodName.getMethodName();
+ }
+
+ /**
+ * Class setup: names this thread "JUnit".
+ */
+ @BeforeClass
+ public static void nameTestThread() {
+ Thread.currentThread().setName("JUnit");
+ }
+
+ /**
+ * Before each method, the thread is renamed to match the method name.
+ */
+ @Before
+ public void nameThreadToMethod() {
+ Thread.currentThread().setName("JUnit-" + getMethodName());
+ }
+}
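As a usage sketch (illustrative; assumes AssertJ and the hadoop-common test classes are on the test classpath), a subclass picks up the timeout rule and the per-method thread naming without extending JUnit's Assert:

package org.apache.hadoop.test;

import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;

/** Hypothetical subclass: inherits the timeout rule and thread naming. */
public class TestExampleHadoopTestBase extends AbstractHadoopTestBase {

  @Test
  public void testThreadIsNamedAfterMethod() {
    // nameThreadToMethod() has already renamed the thread running this test.
    assertThat(Thread.currentThread().getName())
        .isEqualTo("JUnit-testThreadIsNamedAfterMethod");
  }
}

The per-test timeout can be raised for a slow environment with -Dtest.default.timeout=<millis>.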
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 0082452e514b5..9e91634873607 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -61,7 +61,6 @@
import org.mockito.stubbing.Answer;
import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
@@ -378,11 +377,15 @@ public static void assertExceptionContains(String expectedText,
* time
* @throws InterruptedException if the method is interrupted while waiting
*/
- public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
- int waitForMillis) throws TimeoutException, InterruptedException {
- Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
- Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
- ERROR_INVALID_ARGUMENT);
+ public static void waitFor(final Supplier<Boolean> check,
+ final long checkEveryMillis, final long waitForMillis)
+ throws TimeoutException, InterruptedException {
+ if (check == null) {
+ throw new NullPointerException(ERROR_MISSING_ARGUMENT);
+ }
+ if (waitForMillis < checkEveryMillis) {
+ throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT);
+ }
long st = Time.monotonicNow();
boolean result = check.get();
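Typical call pattern against the widened signature, sketched on the assumption that the hadoop-common test artifact is available; the lambda satisfies the com.google.common.base.Supplier<Boolean> parameter and the millisecond arguments are now long:

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.test.GenericTestUtils;

public class WaitForExample {
  public static void main(String[] args)
      throws TimeoutException, InterruptedException {
    AtomicBoolean done = new AtomicBoolean(false);
    new Thread(() -> {
      try {
        Thread.sleep(200);
      } catch (InterruptedException ignored) {
        // not expected in this sketch
      }
      done.set(true);
    }).start();

    // Poll every 50 ms, give up after 10 seconds (throws TimeoutException).
    GenericTestUtils.waitFor(() -> done.get(), 50, 10_000);
    System.out.println("condition met");
  }
}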
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
index cb7df4b011a2f..2e34054d55322 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.test;
+import java.util.concurrent.TimeUnit;
+
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -59,7 +61,7 @@ public abstract class HadoopTestBase extends Assert {
* property is not defined.
* @return the recommended timeout for tests
*/
- public static Timeout retrieveTestTimeout() {
+ protected Timeout retrieveTestTimeout() {
String propval = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT,
Integer.toString(
TEST_DEFAULT_TIMEOUT_VALUE));
@@ -70,7 +72,7 @@ public static Timeout retrieveTestTimeout() {
//fall back to the default value, as the property cannot be parsed
millis = TEST_DEFAULT_TIMEOUT_VALUE;
}
- return new Timeout(millis);
+ return new Timeout(millis, TimeUnit.MILLISECONDS);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index db36154c158ac..ad265afc3a022 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
@@ -406,7 +406,7 @@ public static E intercept(
throws Exception {
try {
eval.call();
- throw new AssertionError("Expected an exception");
+ throw new AssertionError("Expected an exception of type " + clazz);
} catch (Throwable e) {
if (clazz.isAssignableFrom(e.getClass())) {
return (E)e;
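Illustrative use of intercept, whose failure message now names the expected exception class when nothing is thrown; the missing-file path is arbitrary and the hadoop-common test artifact is assumed on the classpath:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.hadoop.test.LambdaTestUtils;

public class InterceptExample {
  public static void main(String[] args) throws Exception {
    // NoSuchFileException is an IOException, so the call below is expected
    // to throw; intercept returns the caught exception for further checks.
    IOException ex = LambdaTestUtils.intercept(IOException.class,
        () -> Files.readAllBytes(Paths.get("/definitely/missing/file")));
    System.out.println("caught: " + ex);
  }
}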
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
index 1b17ce7cc9d8b..05d66d39f56ee 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
@@ -29,17 +29,19 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class FakeTimer extends Timer {
+ private long now;
private long nowNanos;
/** Constructs a FakeTimer with a non-zero value */
public FakeTimer() {
// Initialize with a non-trivial value.
+ now = 1577836800000L; // 2020-01-01 00:00:00,000+0000
nowNanos = TimeUnit.MILLISECONDS.toNanos(1000);
}
@Override
public long now() {
- return TimeUnit.NANOSECONDS.toMillis(nowNanos);
+ return now;
}
@Override
@@ -54,6 +56,7 @@ public long monotonicNowNanos() {
/** Increases the time by milliseconds */
public void advance(long advMillis) {
+ now += advMillis;
nowNanos += TimeUnit.MILLISECONDS.toNanos(advMillis);
}
@@ -62,6 +65,7 @@ public void advance(long advMillis) {
* @param advNanos Nanoseconds to advance by.
*/
public void advanceNanos(long advNanos) {
+ now += TimeUnit.NANOSECONDS.toMillis(advNanos);
nowNanos += advNanos;
}
}
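What the two clocks look like to a caller after this change, sketched assuming the hadoop-common test artifact is on the classpath: now() starts at a realistic wall-clock value while monotonicNow() keeps its small base, and advance() moves both together:

import org.apache.hadoop.util.FakeTimer;

public class FakeTimerExample {
  public static void main(String[] args) {
    FakeTimer timer = new FakeTimer();
    long wall = timer.now();           // wall-clock millis, 2020-01-01 base
    long mono = timer.monotonicNow();  // monotonic millis, small base
    timer.advance(5_000);              // both clocks advance by 5 seconds
    System.out.println(timer.now() - wall);          // 5000
    System.out.println(timer.monotonicNow() - mono); // 5000
  }
}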
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDurationInfo.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDurationInfo.java
index d1fa70319eb84..b6abde8762902 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDurationInfo.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDurationInfo.java
@@ -35,6 +35,14 @@ public void testDurationInfoCreation() throws Exception {
Thread.sleep(1000);
info.finished();
Assert.assertTrue(info.value() > 0);
+
+ info = new DurationInfo(log, true, "test format %s", "value");
+ Assert.assertEquals("test format value: duration 0:00.000s",
+ info.toString());
+
+ info = new DurationInfo(log, false, "test format %s", "value");
+ Assert.assertEquals("test format value: duration 0:00.000s",
+ info.toString());
}
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
index fd9966feb064e..60dda981d4a1a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
@@ -19,6 +19,7 @@
import java.io.File;
import java.io.FileWriter;
+import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.util.Map;
@@ -347,4 +348,62 @@ public void testHostFileReaderWithTimeout() throws Exception {
assertTrue(excludes.get("host5") == 1800);
assertTrue(excludes.get("host6") == 1800);
}
-}
+
+ @Test
+ public void testLazyRefresh() throws IOException {
+ FileWriter efw = new FileWriter(excludesFile);
+ FileWriter ifw = new FileWriter(includesFile);
+
+ efw.write("host1\n");
+ efw.write("host2\n");
+ efw.close();
+ ifw.write("host3\n");
+ ifw.write("host4\n");
+ ifw.close();
+
+ HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
+
+ ifw = new FileWriter(includesFile);
+ ifw.close();
+
+ efw = new FileWriter(excludesFile, true);
+ efw.write("host3\n");
+ efw.write("host4\n");
+ efw.close();
+
+ hfp.lazyRefresh(includesFile, excludesFile);
+
+ HostDetails details = hfp.getHostDetails();
+ HostDetails lazyDetails = hfp.getLazyLoadedHostDetails();
+
+ assertEquals("Details: no. of excluded hosts", 2,
+ details.getExcludedHosts().size());
+ assertEquals("Details: no. of included hosts", 2,
+ details.getIncludedHosts().size());
+ assertEquals("LazyDetails: no. of excluded hosts", 4,
+ lazyDetails.getExcludedHosts().size());
+ assertEquals("LayDetails: no. of included hosts", 0,
+ lazyDetails.getIncludedHosts().size());
+
+ hfp.finishRefresh();
+
+ details = hfp.getHostDetails();
+ assertEquals("Details: no. of excluded hosts", 4,
+ details.getExcludedHosts().size());
+ assertEquals("Details: no. of included hosts", 0,
+ details.getIncludedHosts().size());
+ assertNull("Lazy host details should be null",
+ hfp.getLazyLoadedHostDetails());
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFinishRefreshWithoutLazyRefresh() throws IOException {
+ FileWriter efw = new FileWriter(excludesFile);
+ FileWriter ifw = new FileWriter(includesFile);
+ efw.close();
+ ifw.close();
+
+ HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
+ hfp.finishRefresh();
+ }
+}
\ No newline at end of file
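A compact sketch of the lazy-refresh contract the new tests exercise: stage a refreshed snapshot without publishing it, swap it in on finishRefresh(), and fail if finishRefresh() is called with nothing staged. The class and field names below are illustrative, not the HostsFileReader implementation:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;

public class LazyRefreshSketch {
  /** Immutable snapshot of include/exclude lists. */
  static final class Hosts {
    final Set<String> includes;
    final Set<String> excludes;
    Hosts(Set<String> includes, Set<String> excludes) {
      this.includes = includes;
      this.excludes = excludes;
    }
  }

  private final AtomicReference<Hosts> current = new AtomicReference<>(
      new Hosts(Collections.emptySet(), Collections.emptySet()));
  private final AtomicReference<Hosts> pending = new AtomicReference<>();

  /** Stage new contents without making them visible to readers yet. */
  public void lazyRefresh(Set<String> includes, Set<String> excludes) {
    pending.set(new Hosts(includes, excludes));
  }

  /** Publish the staged snapshot; fails if nothing was staged. */
  public void finishRefresh() {
    Hosts staged = pending.getAndSet(null);
    if (staged == null) {
      throw new IllegalStateException("finishRefresh() without lazyRefresh()");
    }
    current.set(staged);
  }

  public Hosts getHostDetails() {
    return current.get();
  }

  public static void main(String[] args) {
    LazyRefreshSketch reader = new LazyRefreshSketch();
    reader.lazyRefresh(Collections.emptySet(),
        new HashSet<>(Arrays.asList("host3", "host4")));
    reader.finishRefresh();
    System.out.println(reader.getHostDetails().excludes); // [host3, host4]
  }
}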
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java
index 44158ec5b0f18..c47ff0712d201 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java
@@ -17,9 +17,11 @@
*/
package org.apache.hadoop.util;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -117,12 +119,14 @@ public long monotonicNow() {
final AtomicLong wlogged = new AtomicLong(0);
final AtomicLong wsuppresed = new AtomicLong(0);
+ final AtomicLong wMaxWait = new AtomicLong(0);
InstrumentedLock lock = new InstrumentedLock(
testname, LOG, mlock, 2000, 300, mclock) {
@Override
- void logWarning(long lockHeldTime, long suppressed) {
+ void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
wlogged.incrementAndGet();
- wsuppresed.set(suppressed);
+ wsuppresed.set(stats.getSuppressedCount());
+ wMaxWait.set(stats.getMaxSuppressedWait());
}
};
@@ -132,12 +136,14 @@ void logWarning(long lockHeldTime, long suppressed) {
lock.unlock(); // t = 200
assertEquals(0, wlogged.get());
assertEquals(0, wsuppresed.get());
+ assertEquals(0, wMaxWait.get());
lock.lock(); // t = 200
time.set(700);
lock.unlock(); // t = 700
assertEquals(1, wlogged.get());
assertEquals(0, wsuppresed.get());
+ assertEquals(0, wMaxWait.get());
// despite the lock held time is greater than threshold
// suppress the log warning due to the logging gap
@@ -147,6 +153,7 @@ void logWarning(long lockHeldTime, long suppressed) {
lock.unlock(); // t = 1100
assertEquals(1, wlogged.get());
assertEquals(0, wsuppresed.get());
+ assertEquals(0, wMaxWait.get());
// log a warning message when the lock held time is greater the threshold
// and the logging time gap is satisfied. Also should display suppressed
@@ -157,6 +164,106 @@ void logWarning(long lockHeldTime, long suppressed) {
lock.unlock(); // t = 2800
assertEquals(2, wlogged.get());
assertEquals(1, wsuppresed.get());
+ assertEquals(400, wMaxWait.get());
+ }
+
+ /**
+ * Test that the lock logs a warning when the lock wait / queue time is
+ * greater than the threshold, and does not log a warning otherwise.
+ * @throws Exception
+ */
+ @Test(timeout=10000)
+ public void testLockLongWaitReport() throws Exception {
+ String testname = name.getMethodName();
+ final AtomicLong time = new AtomicLong(0);
+ Timer mclock = new Timer() {
+ @Override
+ public long monotonicNow() {
+ return time.get();
+ }
+ };
+ Lock mlock = new ReentrantLock(true); //mock(Lock.class);
+
+ final AtomicLong wlogged = new AtomicLong(0);
+ final AtomicLong wsuppresed = new AtomicLong(0);
+ final AtomicLong wMaxWait = new AtomicLong(0);
+ InstrumentedLock lock = new InstrumentedLock(
+ testname, LOG, mlock, 2000, 300, mclock) {
+ @Override
+ void logWaitWarning(long lockHeldTime, SuppressedSnapshot stats) {
+ wlogged.incrementAndGet();
+ wsuppresed.set(stats.getSuppressedCount());
+ wMaxWait.set(stats.getMaxSuppressedWait());
+ }
+ };
+
+ // do not log warning when the lock held time is short
+ lock.lock(); // t = 0
+
+ Thread competingThread = lockUnlockThread(lock);
+ time.set(200);
+ lock.unlock(); // t = 200
+ competingThread.join();
+ assertEquals(0, wlogged.get());
+ assertEquals(0, wsuppresed.get());
+ assertEquals(0, wMaxWait.get());
+
+
+ lock.lock(); // t = 200
+ competingThread = lockUnlockThread(lock);
+ time.set(700);
+ lock.unlock(); // t = 700
+ competingThread.join();
+
+ // The competing thread will have waited for 500ms, so it should log
+ assertEquals(1, wlogged.get());
+ assertEquals(0, wsuppresed.get());
+ assertEquals(0, wMaxWait.get());
+
+ // although the lock wait time is greater than the threshold, the log
+ // warning is suppressed due to the logging gap
+ // (not recorded in wsuppressed until next log message)
+ lock.lock(); // t = 700
+ competingThread = lockUnlockThread(lock);
+ time.set(1100);
+ lock.unlock(); // t = 1100
+ competingThread.join();
+ assertEquals(1, wlogged.get());
+ assertEquals(0, wsuppresed.get());
+ assertEquals(0, wMaxWait.get());
+
+ // log a warning message when the lock wait time is greater than the
+ // threshold and the logging time gap is satisfied. Also should display
+ // previously suppressed warnings.
+ time.set(2400);
+ lock.lock(); // t = 2400
+ competingThread = lockUnlockThread(lock);
+ time.set(2800);
+ lock.unlock(); // t = 2800
+ competingThread.join();
+ assertEquals(2, wlogged.get());
+ assertEquals(1, wsuppresed.get());
+ assertEquals(400, wMaxWait.get());
+ }
+
+ private Thread lockUnlockThread(Lock lock) throws InterruptedException {
+ CountDownLatch countDownLatch = new CountDownLatch(1);
+ Thread t = new Thread(() -> {
+ try {
+ assertFalse(lock.tryLock());
+ countDownLatch.countDown();
+ lock.lock();
+ } finally {
+ lock.unlock();
+ }
+ });
+ t.start();
+ countDownLatch.await();
+ // Even with the countdown latch, the main thread releases the lock
+ // before this thread actually starts waiting on it, so introduce a
+ // short sleep to let the competing thread block on the lock as intended.
+ Thread.sleep(3);
+ return t;
}
}
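The production InstrumentedLock adds rate limiting plus the suppressed-count and max-suppressed-wait bookkeeping asserted above; the standalone sketch below only illustrates the core idea the new test checks, timing lock() and warning when the wait exceeds a threshold (class name and System.err output are illustrative):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/** Sketch: warn when acquiring the lock took longer than a threshold. */
public class WaitTimingLock {
  private final Lock lock = new ReentrantLock(true);
  private final long warnThresholdMs;

  public WaitTimingLock(long warnThresholdMs) {
    this.warnThresholdMs = warnThresholdMs;
  }

  public void lock() {
    long start = System.nanoTime();
    lock.lock();
    long waitedMs = (System.nanoTime() - start) / 1_000_000;
    if (waitedMs > warnThresholdMs) {
      // The real implementation rate-limits this message and records how many
      // warnings were suppressed and the longest suppressed wait.
      System.err.println("Waited " + waitedMs + " ms to acquire the lock");
    }
  }

  public void unlock() {
    lock.unlock();
  }

  public static void main(String[] args) {
    WaitTimingLock l = new WaitTimingLock(300);
    l.lock();   // uncontended, no warning
    l.unlock();
  }
}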
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java
index 3e1a88bd0ad22..1ea3ef1860860 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java
@@ -146,9 +146,10 @@ public long monotonicNow() {
InstrumentedReadLock readLock = new InstrumentedReadLock(testname, LOG,
readWriteLock, 2000, 300, mclock) {
@Override
- protected void logWarning(long lockHeldTime, long suppressed) {
+ protected void logWarning(
+ long lockHeldTime, SuppressedSnapshot stats) {
wlogged.incrementAndGet();
- wsuppresed.set(suppressed);
+ wsuppresed.set(stats.getSuppressedCount());
}
};
@@ -200,9 +201,9 @@ public long monotonicNow() {
InstrumentedWriteLock writeLock = new InstrumentedWriteLock(testname, LOG,
readWriteLock, 2000, 300, mclock) {
@Override
- protected void logWarning(long lockHeldTime, long suppressed) {
+ protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
wlogged.incrementAndGet();
- wsuppresed.set(suppressed);
+ wsuppresed.set(stats.getSuppressedCount());
}
};
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
deleted file mode 100644
index 2748c0b581a88..0000000000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.util;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.TimerTask;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestNodeHealthScriptRunner {
-
- protected static File testRootDir = new File("target",
- TestNodeHealthScriptRunner.class.getName() +
- "-localDir").getAbsoluteFile();
-
- private File nodeHealthscriptFile = new File(testRootDir,
- Shell.appendScriptExtension("failingscript"));
-
- @Before
- public void setup() {
- testRootDir.mkdirs();
- }
-
- @After
- public void tearDown() throws Exception {
- if (testRootDir.exists()) {
- FileContext.getLocalFSFileContext().delete(
- new Path(testRootDir.getAbsolutePath()), true);
- }
- }
-
- private void writeNodeHealthScriptFile(String scriptStr, boolean setExecutable)
- throws IOException {
- PrintWriter pw = null;
- try {
- FileUtil.setWritable(nodeHealthscriptFile, true);
- FileUtil.setReadable(nodeHealthscriptFile, true);
- pw = new PrintWriter(new FileOutputStream(nodeHealthscriptFile));
- pw.println(scriptStr);
- pw.flush();
- } finally {
- pw.close();
- }
- FileUtil.setExecutable(nodeHealthscriptFile, setExecutable);
- }
-
- @Test
- public void testNodeHealthScriptShouldRun() throws IOException {
- Assert.assertFalse("Node health script should start",
- NodeHealthScriptRunner.shouldRun(
- nodeHealthscriptFile.getAbsolutePath()));
- writeNodeHealthScriptFile("", false);
- // Node health script should not start if the node health script is not
- // executable.
- Assert.assertFalse("Node health script should start",
- NodeHealthScriptRunner.shouldRun(
- nodeHealthscriptFile.getAbsolutePath()));
- writeNodeHealthScriptFile("", true);
- Assert.assertTrue("Node health script should start",
- NodeHealthScriptRunner.shouldRun(
- nodeHealthscriptFile.getAbsolutePath()));
- }
-
- @Test
- public void testNodeHealthScript() throws Exception {
- String errorScript = "echo ERROR\n echo \"Tracker not healthy\"";
- String normalScript = "echo \"I am all fine\"";
- String timeOutScript =
- Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\""
- : "sleep 4\necho \"I am fine\"";
- String exitCodeScript = "exit 127";
-
- Configuration conf = new Configuration();
- writeNodeHealthScriptFile(normalScript, true);
- NodeHealthScriptRunner nodeHealthScriptRunner = new NodeHealthScriptRunner(
- nodeHealthscriptFile.getAbsolutePath(),
- 500, 1000, new String[] {});
- nodeHealthScriptRunner.init(conf);
- TimerTask timerTask = nodeHealthScriptRunner.getTimerTask();
-
- timerTask.run();
- // Normal Script runs successfully
- Assert.assertTrue("Node health status reported unhealthy",
- nodeHealthScriptRunner.isHealthy());
- Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());
-
- // Error script.
- writeNodeHealthScriptFile(errorScript, true);
- // Run timer
- timerTask.run();
- Assert.assertFalse("Node health status reported healthy",
- nodeHealthScriptRunner.isHealthy());
- Assert.assertTrue(
- nodeHealthScriptRunner.getHealthReport().contains("ERROR"));
-
- // Healthy script.
- writeNodeHealthScriptFile(normalScript, true);
- timerTask.run();
- Assert.assertTrue("Node health status reported unhealthy",
- nodeHealthScriptRunner.isHealthy());
- Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());
-
- // Timeout script.
- writeNodeHealthScriptFile(timeOutScript, true);
- timerTask.run();
- Assert.assertFalse("Node health status reported healthy even after timeout",
- nodeHealthScriptRunner.isHealthy());
- Assert.assertEquals(
- NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG,
- nodeHealthScriptRunner.getHealthReport());
-
- // Exit code 127
- writeNodeHealthScriptFile(exitCodeScript, true);
- timerTask.run();
- Assert.assertTrue("Node health status reported unhealthy",
- nodeHealthScriptRunner.isHealthy());
- Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());
- }
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java
index 6b72089faab84..4792fd49b98cf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
import org.junit.Test;
-import com.google.protobuf.CodedOutputStream;
+import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream;
public class TestProtoUtil {
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml
new file mode 100644
index 0000000000000..20a24b7e54061
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml
@@ -0,0 +1,79 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>fs.contract.test.root-tests-enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.is-case-sensitive</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-append</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-atomic-directory-delete</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-atomic-rename</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-block-locality</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-concat</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-seek</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rejects-seek-past-eof</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-strict-exceptions</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-unix-permissions</name>
+    <value>false</value>
+  </property>
+
+</configuration>
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 392d39170d5fe..cbc50b9d1c683 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -282,7 +282,7 @@
RegexpComparator
- ^-count \[-q\] \[-h\] \[-v\] \[-t \[<storage type>\]\] \[-u\] \[-x\] \[-e\] <path> \.\.\. :( )*
+ ^-count \[-q\] \[-h\] \[-v\] \[-t \[<storage type>\]\] \[-u\] \[-x\] \[-e\] \[-s\] <path> \.\.\. :( )*
RegexpComparator
@@ -496,7 +496,10 @@
RegexpComparator
- ^-put \[-f\] \[-p\] \[-l\] \[-d\] <localsrc> \.\.\. <dst> :( )*
+
+ RegexpComparator
+ ^-put \[-f\] \[-p\] \[-l\] \[-d\] \[-t <thread count>\] <localsrc> \.\.\. <dst> :\s*
+ RegexpComparator
@@ -512,15 +515,19 @@
RegexpComparator
- ^\s*-p Preserves access and modification times, ownership and the mode.( )*
+ ^\s*-p Preserves timestamps, ownership and the mode.( )*
+
+
+ RegexpComparator
+ ^\s*-f Overwrites the destination if it already exists.( )*
RegexpComparator
- ^\s*-f Overwrites the destination if it already exists.( )*
+ ^\s*-t <thread count> Number of threads to be used, default is 1.( )*
RegexpComparator
- ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )*
+ ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )*
RegexpComparator
@@ -532,7 +539,7 @@
RegexpComparator
- ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )*
+ ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )*
@@ -551,47 +558,7 @@
RegexpComparator
- ^\s*Copy files from the local file system into fs.( )*Copying fails if the file already( )*
-
-
- RegexpComparator
- ^\s*exists, unless the -f flag is given.( )*
-
-
- RegexpComparator
- ^\s*Flags:( )*
-
-
- RegexpComparator
- ^\s*-p Preserves access and modification times, ownership and the( )*
-
-
- RegexpComparator
- ^\s*mode.( )*
-
-
- RegexpComparator
- ^\s*-f Overwrites the destination if it already exists.( )*
-
-
- RegexpComparator
- ^\s*-t <thread count> Number of threads to be used, default is 1.( )*
-
-
- RegexpComparator
- ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )*
-
-
- RegexpComparator
- ^\s*replication factor of 1. This flag will result in reduced( )*
-
-
- RegexpComparator
- ^\s*durability. Use with care.( )*
-
-
- RegexpComparator
- ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )*
+ ^\s*Identical to the -put command\.\s*
@@ -606,11 +573,14 @@
RegexpComparator
- ^-moveFromLocal <localsrc> \.\.\. <dst> :\s*
+ ^-moveFromLocal \[-f\] \[-p\] \[-l\] \[-d\] <localsrc> \.\.\. <dst> :\s*
RegexpComparator
- ^( |\t)*Same as -put, except that the source is deleted after it's copied.
+ ^( |\t)*Same as -put, except that the source is deleted after it's copied
+
+ RegexpComparator
+ ^\s* and -t option has not yet implemented.
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml
index 6f4ff09952ac4..acfdeeac50b0a 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -22,11 +22,11 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.4.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <artifactId>hadoop-kms</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.4.0-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>Apache Hadoop KMS</name>
@@ -186,6 +186,7 @@
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-surefire-plugin</artifactId>
        <configuration>
+          <testFailureIgnore>${ignoreTestFailure}</testFailureIgnore>
          <forkCount>1</forkCount>
          <reuseForks>false</reuseForks>
          <threadCount>1</threadCount>
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index 0640e25b76c4b..da597b4da5f81 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.slf4j.bridge.SLF4JBridgeHandler;
@InterfaceAudience.Private
public class KMSWebApp implements ServletContextListener {
@@ -80,6 +81,11 @@ public class KMSWebApp implements ServletContextListener {
private static KMSAudit kmsAudit;
private static KeyProviderCryptoExtension keyProviderCryptoExtension;
+ static {
+ SLF4JBridgeHandler.removeHandlersForRootLogger();
+ SLF4JBridgeHandler.install();
+ }
+
@Override
public void contextInitialized(ServletContextEvent sce) {
try {
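The new static block routes java.util.logging output (e.g. from Jersey) into SLF4J by installing the jul-to-slf4j bridge. A minimal sketch of the same pattern, assuming the jul-to-slf4j artifact and an SLF4J binding are on the classpath:

import java.util.logging.Logger;

import org.slf4j.bridge.SLF4JBridgeHandler;

public class JulBridgeExample {
  public static void main(String[] args) {
    // Drop the default JUL handlers, then forward JUL records to SLF4J.
    SLF4JBridgeHandler.removeHandlersForRootLogger();
    SLF4JBridgeHandler.install();
    // This java.util.logging call now ends up in the bound SLF4J backend.
    Logger.getLogger(JulBridgeExample.class.getName())
        .info("hello via jul-to-slf4j");
  }
}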
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
index 036231de70da2..639d85521c3ce 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
@@ -22,13 +22,16 @@
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
+import java.util.LinkedHashSet;
+import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.ConfigurationWithLogging;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.JvmPauseMonitor;
@@ -95,6 +98,22 @@ public class KMSWebServer {
KMSConfiguration.HTTP_PORT_DEFAULT);
URI endpoint = new URI(scheme, null, host, port, null, null, null);
+ String configuredInitializers =
+ conf.get(HttpServer2.FILTER_INITIALIZER_PROPERTY);
+ if (configuredInitializers != null) {
+ Set<String> target = new LinkedHashSet<String>();
+ String[] initializers = configuredInitializers.split(",");
+ for (String init : initializers) {
+ if (!init.equals(AuthenticationFilterInitializer.class.getName()) &&
+ !init.equals(
+ ProxyUserAuthenticationFilterInitializer.class.getName())) {
+ target.add(init);
+ }
+ }
+ String actualInitializers = StringUtils.join(",", target);
+ conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, actualInitializers);
+ }
+
httpServer = new HttpServer2.Builder()
.setName(NAME)
.setConf(conf)
@@ -168,10 +187,8 @@ public URL getKMSUrl() {
public static void main(String[] args) throws Exception {
KMSConfiguration.initLogging();
StringUtils.startupShutdownMessage(KMSWebServer.class, args, LOG);
- Configuration conf = new ConfigurationWithLogging(
- KMSConfiguration.getKMSConf());
- Configuration sslConf = new ConfigurationWithLogging(
- SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.SERVER));
+ Configuration conf = KMSConfiguration.getKMSConf();
+ Configuration sslConf = SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.SERVER);
KMSWebServer kmsWebServer = new KMSWebServer(conf, sslConf);
kmsWebServer.start();
kmsWebServer.join();
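Design note on the initializer trimming above: a LinkedHashSet keeps the administrator-configured ordering while dropping the two authentication filter initializers that KMS configures itself. A tiny standalone sketch of that step; the org.example initializer names are made up:

import java.util.LinkedHashSet;
import java.util.Set;

public class TrimInitializersExample {
  public static void main(String[] args) {
    String configured = "org.example.FooFilterInitializer,"
        + "org.apache.hadoop.security.AuthenticationFilterInitializer,"
        + "org.example.BarFilterInitializer";
    String unwanted =
        "org.apache.hadoop.security.AuthenticationFilterInitializer";

    // LinkedHashSet preserves insertion order and removes duplicates.
    Set<String> target = new LinkedHashSet<>();
    for (String init : configured.split(",")) {
      if (!init.equals(unwanted)) {
        target.add(init);
      }
    }
    // Prints: org.example.FooFilterInitializer,org.example.BarFilterInitializer
    System.out.println(String.join(",", target));
  }
}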
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index e37f2753d1818..9190df27ccc2c 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -614,7 +615,18 @@ public Void run() throws Exception {
@Test
public void testStartStopHttpPseudo() throws Exception {
- testStartStop(false, false);
+ // Make sure bogus errors don't get emitted.
+ GenericTestUtils.LogCapturer logs =
+ GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger(
+ "com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator"));
+ try {
+ testStartStop(false, false);
+ } finally {
+ logs.stopCapturing();
+ }
+ assertFalse(logs.getOutput().contains(
+ "Couldn't find grammar element for class"));
+
}
@Test
@@ -3068,4 +3080,45 @@ public Void call() throws Exception {
}
});
}
+
+ @Test
+ public void testFilterInitializer() throws Exception {
+ Configuration conf = new Configuration();
+ File testDir = getTestDir();
+ conf = createBaseKMSConf(testDir, conf);
+ conf.set("hadoop.security.authentication", "kerberos");
+ conf.set("hadoop.kms.authentication.token.validity", "1");
+ conf.set("hadoop.kms.authentication.type", "kerberos");
+ conf.set("hadoop.kms.authentication.kerberos.keytab",
+ keytab.getAbsolutePath());
+ conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
+ conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+ conf.set("hadoop.http.filter.initializers",
+ AuthenticationFilterInitializer.class.getName());
+ conf.set("hadoop.http.authentication.type", "kerberos");
+ conf.set("hadoop.http.authentication.kerberos.principal", "HTTP/localhost");
+ conf.set("hadoop.http.authentication.kerberos.keytab",
+ keytab.getAbsolutePath());
+
+ writeConf(testDir, conf);
+
+ runServer(null, null, testDir, new KMSCallable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ final Configuration conf = new Configuration();
+ URL url = getKMSUrl();
+ final URI uri = createKMSUri(getKMSUrl());
+
+ doAs("client", new PrivilegedExceptionAction() {
+ @Override
+ public Void run() throws Exception {
+ final KeyProvider kp = createProvider(uri, conf);
+ Assert.assertTrue(kp.getKeys().isEmpty());
+ return null;
+ }
+ });
+ return null;
+ }
+ });
+ }
}
diff --git a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
index b8e6353b393f9..73c48534a0a01 100644
--- a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
+++ b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
@@ -26,6 +26,7 @@ log4j.rootLogger=INFO, stdout
log4j.logger.org.apache.hadoop.conf=ERROR
log4j.logger.org.apache.hadoop.crytpo.key.kms.server=ALL
log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
+log4j.logger.com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator=OFF
log4j.logger.org.apache.hadoop.security=OFF
log4j.logger.org.apache.directory.server.core=OFF
log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF
diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml
index adbd6e32bee58..c76abf750b78d 100644
--- a/hadoop-common-project/hadoop-minikdc/pom.xml
+++ b/hadoop-common-project/hadoop-minikdc/pom.xml
@@ -18,12 +18,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.4.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <modelVersion>4.0.0</modelVersion>
  <artifactId>hadoop-minikdc</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.4.0-SNAPSHOT</version>
  <description>Apache Hadoop MiniKDC</description>
  <name>Apache Hadoop MiniKDC</name>
  <packaging>jar</packaging>
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index e0fedaf1434e6..22c56722d1c42 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -20,11 +20,11 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.4.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <artifactId>hadoop-nfs</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.4.0-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>Apache Hadoop NFS</name>
diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml
index dc45309dca296..d5e0150bba94c 100644
--- a/hadoop-common-project/hadoop-registry/pom.xml
+++ b/hadoop-common-project/hadoop-registry/pom.xml
@@ -19,12 +19,12 @@
  <parent>
    <artifactId>hadoop-project</artifactId>
    <groupId>org.apache.hadoop</groupId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.4.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <modelVersion>4.0.0</modelVersion>
  <artifactId>hadoop-registry</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.4.0-SNAPSHOT</version>
  <name>Apache Hadoop Registry</name>
@@ -221,6 +221,7 @@
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-surefire-plugin</artifactId>
        <configuration>
+          <testFailureIgnore>${ignoreTestFailure}</testFailureIgnore>
          <reuseForks>false</reuseForks>
          <forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
          <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</argLine>
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index 8be2593c21ffd..b36dbf30610ff 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -20,11 +20,11 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.4.0-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <artifactId>hadoop-common-project</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.4.0-SNAPSHOT</version>
  <description>Apache Hadoop Common Project</description>
  <name>Apache Hadoop Common Project</name>
  <packaging>pom</packaging>
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 07aa7b10a8320..0a5db2565b8c5 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -20,11 +20,11 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
+    <version>3.4.0-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <artifactId>hadoop-dist</artifactId>
-  <version>3.3.0-SNAPSHOT</version>
+  <version>3.4.0-SNAPSHOT</version>
  <description>Apache Hadoop Distribution</description>
  <name>Apache Hadoop Distribution</name>
  <packaging>jar</packaging>
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
deleted file mode 100644
index 673af41aeef0d..0000000000000
--- a/hadoop-hdds/client/pom.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-
-
-
- 4.0.0
-
- org.apache.hadoop
- hadoop-hdds
- 0.5.0-SNAPSHOT
-
-
- hadoop-hdds-client
- 0.5.0-SNAPSHOT
- Apache Hadoop Distributed Data Store Client Library
- Apache Hadoop HDDS Client
- jar
-
-
-
- org.apache.hadoop
- hadoop-hdds-common
-
-
-
- io.netty
- netty-all
-
-
-
-
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java
deleted file mode 100644
index 7a15808b2ea72..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.ratis.thirdparty.io.grpc.CallOptions;
-import org.apache.ratis.thirdparty.io.grpc.Channel;
-import org.apache.ratis.thirdparty.io.grpc.ClientCall;
-import org.apache.ratis.thirdparty.io.grpc.ClientInterceptor;
-import org.apache.ratis.thirdparty.io.grpc.ForwardingClientCall;
-import org.apache.ratis.thirdparty.io.grpc.Metadata;
-import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OBT_METADATA_KEY;
-import static org.apache.hadoop.ozone.OzoneConsts.USER_METADATA_KEY;
-
-/**
- * GRPC client interceptor for ozone block token.
- */
-public class ClientCredentialInterceptor implements ClientInterceptor {
-
- private final String user;
- private final String token;
-
- public ClientCredentialInterceptor(String user, String token) {
- this.user = user;
- this.token = token;
- }
-
- @Override
- public ClientCall interceptCall(
- MethodDescriptor method,
- CallOptions callOptions,
- Channel next) {
-
- return new ForwardingClientCall.SimpleForwardingClientCall(
- next.newCall(method, callOptions)) {
- @Override
- public void start(Listener responseListener, Metadata headers) {
- if (token != null) {
- headers.put(OBT_METADATA_KEY, token);
- }
- if (user != null) {
- headers.put(USER_METADATA_KEY, user);
- }
- super.start(responseListener, headers);
- }
- };
- }
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
deleted file mode 100644
index 04a8a1aaa1db3..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc;
-import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.tracing.GrpcClientInterceptor;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
-
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
-import org.apache.ratis.thirdparty.io.grpc.Status;
-import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
-import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
-import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
-import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.security.cert.X509Certificate;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * A Client for the storageContainer protocol for read object data.
- */
-public class XceiverClientGrpc extends XceiverClientSpi {
- static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
- private static final String COMPONENT = "dn";
- private final Pipeline pipeline;
- private final Configuration config;
- private Map asyncStubs;
- private XceiverClientMetrics metrics;
- private Map channels;
- private final Semaphore semaphore;
- private boolean closed = false;
- private SecurityConfig secConfig;
- private final boolean topologyAwareRead;
- private X509Certificate caCert;
-
- /**
- * Constructs a client that can communicate with the Container framework on
- * data nodes.
- *
- * @param pipeline - Pipeline that defines the machines.
- * @param config -- Ozone Config
- * @param caCert - SCM ca certificate.
- */
- public XceiverClientGrpc(Pipeline pipeline, Configuration config,
- X509Certificate caCert) {
- super();
- Preconditions.checkNotNull(pipeline);
- Preconditions.checkNotNull(config);
- this.pipeline = pipeline;
- this.config = config;
- this.secConfig = new SecurityConfig(config);
- this.semaphore =
- new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
- this.metrics = XceiverClientManager.getXceiverClientMetrics();
- this.channels = new HashMap<>();
- this.asyncStubs = new HashMap<>();
- this.topologyAwareRead = config.getBoolean(
- OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY,
- OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT);
- this.caCert = caCert;
- }
-
- /**
- * Constructs a client that can communicate with the Container framework on
- * data nodes.
- *
- * @param pipeline - Pipeline that defines the machines.
- * @param config -- Ozone Config
- */
- public XceiverClientGrpc(Pipeline pipeline, Configuration config) {
- this(pipeline, config, null);
- }
-
- /**
- * To be used when grpc token is not enabled.
- */
- @Override
- public void connect() throws Exception {
- // connect to the closest node, if closest node doesn't exist, delegate to
- // first node, which is usually the leader in the pipeline.
- DatanodeDetails dn = topologyAwareRead ? this.pipeline.getClosestNode() :
- this.pipeline.getFirstNode();
- // just make a connection to the picked datanode at the beginning
- connectToDatanode(dn, null);
- }
-
- /**
- * Passed encoded token to GRPC header when security is enabled.
- */
- @Override
- public void connect(String encodedToken) throws Exception {
- // connect to the closest node, if closest node doesn't exist, delegate to
- // first node, which is usually the leader in the pipeline.
- DatanodeDetails dn = topologyAwareRead ? this.pipeline.getClosestNode() :
- this.pipeline.getFirstNode();
- // just make a connection to the picked datanode at the beginning
- connectToDatanode(dn, encodedToken);
- }
-
- private void connectToDatanode(DatanodeDetails dn, String encodedToken)
- throws IOException {
- // read port from the data node, on failure use default configured
- // port.
- int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
- if (port == 0) {
- port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
- }
-
- // Add credential context to the client call
- String userName = UserGroupInformation.getCurrentUser().getShortUserName();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Nodes in pipeline : {}", pipeline.getNodes().toString());
- LOG.debug("Connecting to server : {}", dn.getIpAddress());
- }
- NettyChannelBuilder channelBuilder =
- NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext()
- .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
- .intercept(new ClientCredentialInterceptor(userName, encodedToken),
- new GrpcClientInterceptor());
- if (secConfig.isGrpcTlsEnabled()) {
- SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient();
- if (caCert != null) {
- sslContextBuilder.trustManager(caCert);
- }
- if (secConfig.useTestCert()) {
- channelBuilder.overrideAuthority("localhost");
- }
- channelBuilder.useTransportSecurity().
- sslContext(sslContextBuilder.build());
- } else {
- channelBuilder.usePlaintext();
- }
- ManagedChannel channel = channelBuilder.build();
- XceiverClientProtocolServiceStub asyncStub =
- XceiverClientProtocolServiceGrpc.newStub(channel);
- asyncStubs.put(dn.getUuid(), asyncStub);
- channels.put(dn.getUuid(), channel);
- }
-
- /**
- * Returns if the xceiver client connects to all servers in the pipeline.
- *
- * @return True if the connection is alive, false otherwise.
- */
- @VisibleForTesting
- public boolean isConnected(DatanodeDetails details) {
- return isConnected(channels.get(details.getUuid()));
- }
-
- private boolean isConnected(ManagedChannel channel) {
- return channel != null && !channel.isTerminated() && !channel.isShutdown();
- }
-
- @Override
- public void close() {
- closed = true;
- for (ManagedChannel channel : channels.values()) {
- channel.shutdownNow();
- try {
- channel.awaitTermination(60, TimeUnit.MINUTES);
- } catch (Exception e) {
- LOG.error("Unexpected exception while waiting for channel termination",
- e);
- }
- }
- }
-
- @Override
- public Pipeline getPipeline() {
- return pipeline;
- }
-
- @Override
- public ContainerCommandResponseProto sendCommand(
- ContainerCommandRequestProto request) throws IOException {
- try {
- XceiverClientReply reply;
- reply = sendCommandWithTraceIDAndRetry(request, null);
- ContainerCommandResponseProto responseProto = reply.getResponse().get();
- return responseProto;
- } catch (ExecutionException | InterruptedException e) {
- throw new IOException("Failed to execute command " + request, e);
- }
- }
-
- @Override
- public ContainerCommandResponseProto sendCommand(
- ContainerCommandRequestProto request, List validators)
- throws IOException {
- try {
- XceiverClientReply reply;
- reply = sendCommandWithTraceIDAndRetry(request, validators);
- ContainerCommandResponseProto responseProto = reply.getResponse().get();
- return responseProto;
- } catch (ExecutionException | InterruptedException e) {
- throw new IOException("Failed to execute command " + request, e);
- }
- }
-
- private XceiverClientReply sendCommandWithTraceIDAndRetry(
- ContainerCommandRequestProto request, List validators)
- throws IOException {
- try (Scope scope = GlobalTracer.get()
- .buildSpan("XceiverClientGrpc." + request.getCmdType().name())
- .startActive(true)) {
- ContainerCommandRequestProto finalPayload =
- ContainerCommandRequestProto.newBuilder(request)
- .setTraceID(TracingUtil.exportCurrentSpan()).build();
- return sendCommandWithRetry(finalPayload, validators);
- }
- }
-
- private XceiverClientReply sendCommandWithRetry(
- ContainerCommandRequestProto request, List validators)
- throws IOException {
- ContainerCommandResponseProto responseProto = null;
- IOException ioException = null;
-
- // In case of an exception or an error, we will try to read from the
- // datanodes in the pipeline in a round robin fashion.
-
- // TODO: cache the correct leader info in here, so that any subsequent calls
- // should first go to leader
- XceiverClientReply reply = new XceiverClientReply(null);
- List datanodeList;
- if ((request.getCmdType() == ContainerProtos.Type.ReadChunk ||
- request.getCmdType() == ContainerProtos.Type.GetSmallFile) &&
- topologyAwareRead) {
- datanodeList = pipeline.getNodesInOrder();
- } else {
- datanodeList = pipeline.getNodes();
- // Shuffle datanode list so that clients do not read in the same order
- // every time.
- Collections.shuffle(datanodeList);
- }
- for (DatanodeDetails dn : datanodeList) {
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Executing command " + request + " on datanode " + dn);
- }
- // In case the command gets retried on a 2nd datanode,
- // sendCommandAsyncCall will create a new channel and async stub
- // in case these don't exist for the specific datanode.
- reply.addDatanode(dn);
- responseProto = sendCommandAsync(request, dn).getResponse().get();
- if (validators != null && !validators.isEmpty()) {
- for (CheckedBiFunction validator : validators) {
- validator.apply(request, responseProto);
- }
- }
- break;
- } catch (ExecutionException | InterruptedException | IOException e) {
- LOG.error("Failed to execute command " + request + " on datanode " + dn
- .getUuidString(), e);
- if (!(e instanceof IOException)) {
- if (Status.fromThrowable(e.getCause()).getCode()
- == Status.UNAUTHENTICATED.getCode()) {
- throw new SCMSecurityException("Failed to authenticate with "
- + "GRPC XceiverServer with Ozone block token.");
- }
- ioException = new IOException(e);
- } else {
- ioException = (IOException) e;
- }
- responseProto = null;
- }
- }
-
- if (responseProto != null) {
- reply.setResponse(CompletableFuture.completedFuture(responseProto));
- return reply;
- } else {
- Preconditions.checkNotNull(ioException);
- LOG.error("Failed to execute command {} on the pipeline {}.", request,
- pipeline);
- throw ioException;
- }
- }
-
- // TODO: for a true async API, once the waitable future while executing
- // the command on one channel fails, it should be retried asynchronously
- // on the future Task for all the remaining datanodes.
-
- // Note: this Async api is not used currently used in any active I/O path.
- // In case it gets used, the asynchronous retry logic needs to be plugged
- // in here.
- /**
- * Sends a given command to the server and gets a waitable future back.
- *
- * @param request Request
- * @return Response to the command
- * @throws IOException
- */
- @Override
- public XceiverClientReply sendCommandAsync(
- ContainerCommandRequestProto request)
- throws IOException, ExecutionException, InterruptedException {
- try (Scope scope = GlobalTracer.get()
- .buildSpan("XceiverClientGrpc." + request.getCmdType().name())
- .startActive(true)) {
-
- ContainerCommandRequestProto finalPayload =
- ContainerCommandRequestProto.newBuilder(request)
- .setTraceID(TracingUtil.exportCurrentSpan())
- .build();
- XceiverClientReply asyncReply =
- sendCommandAsync(finalPayload, pipeline.getFirstNode());
- // TODO : for now make this API sync in nature as async requests are
- // served out of order over XceiverClientGrpc. This needs to be fixed
- // if this API is to be used for I/O path. Currently, this is not
- // used for Read/Write Operation but for tests.
- if (!HddsUtils.isReadOnly(request)) {
- asyncReply.getResponse().get();
- }
- return asyncReply;
- }
- }
-
- private XceiverClientReply sendCommandAsync(
- ContainerCommandRequestProto request, DatanodeDetails dn)
- throws IOException, ExecutionException, InterruptedException {
- if (closed) {
- throw new IOException("This channel is not connected.");
- }
-
- UUID dnId = dn.getUuid();
- ManagedChannel channel = channels.get(dnId);
- // If the channel doesn't exist for this specific datanode or the channel
- // is closed, just reconnect
- String token = request.getEncodedToken();
- if (!isConnected(channel)) {
- reconnect(dn, token);
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Send command {} to datanode {}",
- request.getCmdType().toString(), dn.getNetworkFullPath());
- }
- final CompletableFuture<ContainerCommandResponseProto> replyFuture =
- new CompletableFuture<>();
- semaphore.acquire();
- long requestTime = Time.monotonicNowNanos();
- metrics.incrPendingContainerOpsMetrics(request.getCmdType());
- // create a new grpc stream for each non-async call.
-
- // TODO: for async calls, we should reuse StreamObserver resources.
- final StreamObserver<ContainerCommandRequestProto> requestObserver =
- asyncStubs.get(dnId)
- .send(new StreamObserver<ContainerCommandResponseProto>() {
- @Override
- public void onNext(ContainerCommandResponseProto value) {
- replyFuture.complete(value);
- metrics.decrPendingContainerOpsMetrics(request.getCmdType());
- metrics.addContainerOpsLatency(request.getCmdType(),
- Time.monotonicNowNanos() - requestTime);
- semaphore.release();
- }
-
- @Override
- public void onError(Throwable t) {
- replyFuture.completeExceptionally(t);
- metrics.decrPendingContainerOpsMetrics(request.getCmdType());
- metrics.addContainerOpsLatency(request.getCmdType(),
- Time.monotonicNowNanos() - requestTime);
- semaphore.release();
- }
-
- @Override
- public void onCompleted() {
- if (!replyFuture.isDone()) {
- replyFuture.completeExceptionally(new IOException(
- "Stream completed but no reply for request " + request));
- }
- }
- });
- requestObserver.onNext(request);
- requestObserver.onCompleted();
- return new XceiverClientReply(replyFuture);
- }
-
- private void reconnect(DatanodeDetails dn, String encodedToken)
- throws IOException {
- ManagedChannel channel;
- try {
- connectToDatanode(dn, encodedToken);
- channel = channels.get(dn.getUuid());
- } catch (Exception e) {
- LOG.error("Error while connecting: ", e);
- throw new IOException(e);
- }
-
- if (channel == null || !isConnected(channel)) {
- throw new IOException("This channel is not connected.");
- }
- }
-
- @Override
- public XceiverClientReply watchForCommit(long index, long timeout)
- throws InterruptedException, ExecutionException, TimeoutException,
- IOException {
- // there is no notion of watch for commit index in standalone pipeline
- return null;
- }
-
- public long getReplicatedMinCommitIndex() {
- return 0;
- }
- /**
- * Returns pipeline Type.
- *
- * @return - Stand Alone as the type.
- */
- @Override
- public HddsProtos.ReplicationType getPipelineType() {
- return HddsProtos.ReplicationType.STAND_ALONE;
- }
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
deleted file mode 100644
index b15828a153098..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.Config;
-import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
-import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
-
-/**
- * XceiverClientManager is responsible for the lifecycle of XceiverClient
- * instances. Callers use this class to acquire an XceiverClient instance
- * connected to the desired container pipeline. When done, the caller also uses
- * this class to release the previously acquired XceiverClient instance.
- *
- *
- * This class caches connections to containers for reuse, so that repeated
- * access to the same container goes through the same connection instead of
- * re-establishing it. A cached connection is closed if it has not been used
- * for a period of time.
- */
-public class XceiverClientManager implements Closeable {
- private static final Logger LOG =
- LoggerFactory.getLogger(XceiverClientManager.class);
- //TODO : change this to SCM configuration class
- private final Configuration conf;
- private final Cache<String, XceiverClientSpi> clientCache;
- private final boolean useRatis;
- private X509Certificate caCert;
-
- private static XceiverClientMetrics metrics;
- private boolean isSecurityEnabled;
- private final boolean topologyAwareRead;
- /**
- * Creates a new XceiverClientManager for a non-secured Ozone cluster.
- * For a security-enabled Ozone cluster, clients should use the other
- * constructor with a valid CA certificate in PEM string format.
- *
- * @param conf configuration
- */
- public XceiverClientManager(Configuration conf) throws IOException {
- this(conf, OzoneConfiguration.of(conf).getObject(ScmClientConfig.class),
- null);
- }
-
- public XceiverClientManager(Configuration conf, ScmClientConfig clientConf,
- String caCertPem) throws IOException {
- Preconditions.checkNotNull(clientConf);
- Preconditions.checkNotNull(conf);
- long staleThresholdMs = clientConf.getStaleThreshold(MILLISECONDS);
- this.useRatis = conf.getBoolean(
- ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
- ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
- this.conf = conf;
- this.isSecurityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf);
- if (isSecurityEnabled) {
- Preconditions.checkNotNull(caCertPem);
- try {
- this.caCert = CertificateCodec.getX509Cert(caCertPem);
- } catch (CertificateException ex) {
- throw new SCMSecurityException("Error: Fail to get SCM CA certificate",
- ex);
- }
- }
-
- this.clientCache = CacheBuilder.newBuilder()
- .expireAfterAccess(staleThresholdMs, MILLISECONDS)
- .maximumSize(clientConf.getMaxSize())
- .removalListener(
- new RemovalListener<String, XceiverClientSpi>() {
- @Override
- public void onRemoval(
- RemovalNotification<String, XceiverClientSpi>
- removalNotification) {
- synchronized (clientCache) {
- // Mark the entry as evicted
- XceiverClientSpi info = removalNotification.getValue();
- info.setEvicted();
- }
- }
- }).build();
- topologyAwareRead = conf.getBoolean(
- OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY,
- OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT);
- }
-
- @VisibleForTesting
- public Cache<String, XceiverClientSpi> getClientCache() {
- return clientCache;
- }
-
- /**
- * Acquires a XceiverClientSpi connected to a container capable of
- * storing the specified key.
- *
- * If there is already a cached XceiverClientSpi, the cached instance is
- * returned; otherwise a new one is created.
- *
- * @param pipeline the container pipeline for the client connection
- * @return XceiverClientSpi connected to a container
- * @throws IOException if a XceiverClientSpi cannot be acquired
- */
- public XceiverClientSpi acquireClient(Pipeline pipeline)
- throws IOException {
- return acquireClient(pipeline, false);
- }
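-
- // Illustrative usage sketch (editorial note, not part of the original
- // file): callers are expected to pair every acquire with a release, e.g.
- //
- //   XceiverClientSpi client = clientManager.acquireClient(pipeline);
- //   try {
- //     // issue container commands through the client
- //   } finally {
- //     clientManager.releaseClient(client, false);
- //   }
- //
- // Passing true to releaseClient() additionally invalidates the cached
- // entry, which is useful when the connection is known to be broken.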
-
- /**
- * Acquires a XceiverClientSpi connected to a container for read.
- *
- * If there is already a cached XceiverClientSpi, the cached instance is
- * returned; otherwise a new one is created.
- *
- * @param pipeline the container pipeline for the client connection
- * @return XceiverClientSpi connected to a container
- * @throws IOException if a XceiverClientSpi cannot be acquired
- */
- public XceiverClientSpi acquireClientForReadData(Pipeline pipeline)
- throws IOException {
- return acquireClient(pipeline, true);
- }
-
- private XceiverClientSpi acquireClient(Pipeline pipeline, boolean read)
- throws IOException {
- Preconditions.checkNotNull(pipeline);
- Preconditions.checkArgument(pipeline.getNodes() != null);
- Preconditions.checkArgument(!pipeline.getNodes().isEmpty());
-
- synchronized (clientCache) {
- XceiverClientSpi info = getClient(pipeline, read);
- info.incrementReference();
- return info;
- }
- }
-
- /**
- * Releases a XceiverClientSpi after use.
- *
- * @param client client to release
- * @param invalidateClient if true, invalidates the client in cache
- */
- public void releaseClient(XceiverClientSpi client, boolean invalidateClient) {
- releaseClient(client, invalidateClient, false);
- }
-
- /**
- * Releases a read XceiverClientSpi after use.
- *
- * @param client client to release
- * @param invalidateClient if true, invalidates the client in cache
- */
- public void releaseClientForReadData(XceiverClientSpi client,
- boolean invalidateClient) {
- releaseClient(client, invalidateClient, true);
- }
-
- private void releaseClient(XceiverClientSpi client, boolean invalidateClient,
- boolean read) {
- Preconditions.checkNotNull(client);
- synchronized (clientCache) {
- client.decrementReference();
- if (invalidateClient) {
- Pipeline pipeline = client.getPipeline();
- String key = getPipelineCacheKey(pipeline, read);
- XceiverClientSpi cachedClient = clientCache.getIfPresent(key);
- if (cachedClient == client) {
- clientCache.invalidate(key);
- }
- }
- }
- }
-
- private XceiverClientSpi getClient(Pipeline pipeline, boolean forRead)
- throws IOException {
- HddsProtos.ReplicationType type = pipeline.getType();
- try {
- // For reads, create a different client per pipeline node, chosen based
- // on network topology.
- String key = getPipelineCacheKey(pipeline, forRead);
- // Append the user short name to the key to prevent a different user
- // from using the same instance of xceiverClient.
- key = isSecurityEnabled ?
- key + UserGroupInformation.getCurrentUser().getShortUserName() : key;
- return clientCache.get(key, new Callable<XceiverClientSpi>() {
- @Override
- public XceiverClientSpi call() throws Exception {
- XceiverClientSpi client = null;
- switch (type) {
- case RATIS:
- client = XceiverClientRatis.newXceiverClientRatis(pipeline, conf,
- caCert);
- client.connect();
- break;
- case STAND_ALONE:
- client = new XceiverClientGrpc(pipeline, conf, caCert);
- break;
- case CHAINED:
- default:
- throw new IOException("not implemented" + pipeline.getType());
- }
- return client;
- }
- });
- } catch (Exception e) {
- throw new IOException(
- "Exception getting XceiverClient: " + e.toString(), e);
- }
- }
-
- private String getPipelineCacheKey(Pipeline pipeline, boolean forRead) {
- String key = pipeline.getId().getId().toString() + pipeline.getType();
- if (topologyAwareRead && forRead) {
- try {
- key += pipeline.getClosestNode().getHostName();
- } catch (IOException e) {
- LOG.error("Failed to get closest node to create pipeline cache key:" +
- e.getMessage());
- }
- }
- return key;
- }
-
- /**
- * Close and remove all the cached clients.
- */
- @Override
- public void close() {
- //closing is done through RemovalListener
- clientCache.invalidateAll();
- clientCache.cleanUp();
-
- if (metrics != null) {
- metrics.unRegister();
- }
- }
-
- /**
- * Tells us if Ratis is enabled for this cluster.
- * @return True if Ratis is enabled.
- */
- public boolean isUseRatis() {
- return useRatis;
- }
-
- /**
- * Returns the default replication factor: THREE when Ratis is enabled,
- * otherwise ONE.
- * @return the default replication factor
- */
- public HddsProtos.ReplicationFactor getFactor() {
- if (isUseRatis()) {
- return HddsProtos.ReplicationFactor.THREE;
- }
- return HddsProtos.ReplicationFactor.ONE;
- }
-
- /**
- * Returns the default replication type.
- * @return Ratis or Standalone
- */
- public HddsProtos.ReplicationType getType() {
- // TODO : Fix me and make Ratis default before release.
- // TODO: Remove this as replication factor and type are pipeline properties
- if (isUseRatis()) {
- return HddsProtos.ReplicationType.RATIS;
- }
- return HddsProtos.ReplicationType.STAND_ALONE;
- }
-
- public Function<ByteBuffer, ByteString> byteBufferToByteStringConversion() {
- return ByteStringConversion.createByteBufferConversion(conf);
- }
-
- /**
- * Get xceiver client metric.
- */
- public synchronized static XceiverClientMetrics getXceiverClientMetrics() {
- if (metrics == null) {
- metrics = XceiverClientMetrics.create();
- }
-
- return metrics;
- }
-
- /**
- * Configuration for HDDS client.
- */
- @ConfigGroup(prefix = "scm.container.client")
- public static class ScmClientConfig {
-
- private int maxSize;
- private long staleThreshold;
- private int maxOutstandingRequests;
-
- public long getStaleThreshold(TimeUnit unit) {
- return unit.convert(staleThreshold, MILLISECONDS);
- }
-
- @Config(key = "idle.threshold",
- type = ConfigType.TIME, timeUnit = MILLISECONDS,
- defaultValue = "10s",
- tags = { OZONE, PERFORMANCE },
- description =
- "In the standalone pipelines, the SCM clients use netty to "
- + " communicate with the container. It also uses connection pooling"
- + " to reduce client side overheads. This allows a connection to"
- + " stay idle for a while before the connection is closed."
- )
- public void setStaleThreshold(long staleThreshold) {
- this.staleThreshold = staleThreshold;
- }
-
- public int getMaxSize() {
- return maxSize;
- }
-
- @Config(key = "max.size",
- defaultValue = "256",
- tags = { OZONE, PERFORMANCE },
- description =
- "Controls the maximum number of connections that are cached via"
- + " client connection pooling. If the number of connections"
- + " exceed this count, then the oldest idle connection is evicted."
- )
- public void setMaxSize(int maxSize) {
- this.maxSize = maxSize;
- }
-
- public int getMaxOutstandingRequests() {
- return maxOutstandingRequests;
- }
-
- @Config(key = "max.outstanding.requests",
- defaultValue = "100",
- tags = { OZONE, PERFORMANCE },
- description =
- "Controls the maximum number of outstanding async requests that can"
- + " be handled by the Standalone as well as Ratis client."
- )
- public void setMaxOutstandingRequests(int maxOutstandingRequests) {
- this.maxOutstandingRequests = maxOutstandingRequests;
- }
- }
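-
- // Editorial note, not part of the original file: with the
- // @ConfigGroup(prefix = "scm.container.client") annotation above, the
- // settings in this class should resolve to configuration keys like
- //   scm.container.client.idle.threshold (default 10s)
- //   scm.container.client.max.size (default 256)
- //   scm.container.client.max.outstanding.requests (default 100)
- // which can be overridden in ozone-site.xml, for example:
- //
- //   <property>
- //     <name>scm.container.client.max.size</name>
- //     <value>512</value>
- //   </property>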
-
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
deleted file mode 100644
index 5d43c5ef22585..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-/**
- * The client metrics for the Storage Container protocol.
- */
-@InterfaceAudience.Private
-@Metrics(about = "Storage Container Client Metrics", context = "dfs")
-public class XceiverClientMetrics {
- public static final String SOURCE_NAME = XceiverClientMetrics.class
- .getSimpleName();
-
- private @Metric MutableCounterLong pendingOps;
- private @Metric MutableCounterLong totalOps;
- private MutableCounterLong[] pendingOpsArray;
- private MutableCounterLong[] opsArray;
- private MutableRate[] containerOpsLatency;
- private MetricsRegistry registry;
-
- public XceiverClientMetrics() {
- int numEnumEntries = ContainerProtos.Type.values().length;
- this.registry = new MetricsRegistry(SOURCE_NAME);
-
- this.pendingOpsArray = new MutableCounterLong[numEnumEntries];
- this.opsArray = new MutableCounterLong[numEnumEntries];
- this.containerOpsLatency = new MutableRate[numEnumEntries];
- for (int i = 0; i < numEnumEntries; i++) {
- pendingOpsArray[i] = registry.newCounter(
- "numPending" + ContainerProtos.Type.forNumber(i + 1),
- "number of pending" + ContainerProtos.Type.forNumber(i + 1) + " ops",
- (long) 0);
- opsArray[i] = registry
- .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1),
- "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops",
- (long) 0);
-
- containerOpsLatency[i] = registry.newRate(
- ContainerProtos.Type.forNumber(i + 1) + "Latency",
- "latency of " + ContainerProtos.Type.forNumber(i + 1)
- + " ops");
- }
- }
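-
- // Editorial note, not part of the original file: the loop above registers
- // three metrics per container command type, named after the protobuf enum
- // value. For ReadChunk, for instance, the resulting metrics would be
- //   numPendingReadChunk - currently outstanding ReadChunk calls
- //   opCountReadChunk - total ReadChunk calls issued
- //   ReadChunkLatency - latency samples (nanoseconds as fed by the clients)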
-
- public static XceiverClientMetrics create() {
- DefaultMetricsSystem.initialize(SOURCE_NAME);
- MetricsSystem ms = DefaultMetricsSystem.instance();
- return ms.register(SOURCE_NAME, "Storage Container Client Metrics",
- new XceiverClientMetrics());
- }
-
- public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) {
- pendingOps.incr();
- totalOps.incr();
- opsArray[type.ordinal()].incr();
- pendingOpsArray[type.ordinal()].incr();
- }
-
- public void decrPendingContainerOpsMetrics(ContainerProtos.Type type) {
- pendingOps.incr(-1);
- pendingOpsArray[type.ordinal()].incr(-1);
- }
-
- public void addContainerOpsLatency(ContainerProtos.Type type,
- long latencyNanos) {
- containerOpsLatency[type.ordinal()].add(latencyNanos);
- }
-
- public long getContainerOpsMetrics(ContainerProtos.Type type) {
- return pendingOpsArray[type.ordinal()].value();
- }
-
- @VisibleForTesting
- public long getTotalOpCount() {
- return totalOps.value();
- }
-
- @VisibleForTesting
- public long getContainerOpCountMetrics(ContainerProtos.Type type) {
- return opsArray[type.ordinal()].value();
- }
-
- public void unRegister() {
- MetricsSystem ms = DefaultMetricsSystem.instance();
- ms.unregisterSource(SOURCE_NAME);
- }
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
deleted file mode 100644
index 04fababf50447..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import java.io.IOException;
-import java.security.cert.X509Certificate;
-import java.util.Collection;
-import java.util.List;
-import java.util.Objects;
-import java.util.OptionalLong;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.proto.RaftProtos;
-import org.apache.ratis.protocol.GroupMismatchException;
-import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftException;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-
-/**
- * An implementation of {@link XceiverClientSpi} using Ratis.
- * The underlying RPC mechanism can be chosen via the constructor.
- */
-public final class XceiverClientRatis extends XceiverClientSpi {
- public static final Logger LOG =
- LoggerFactory.getLogger(XceiverClientRatis.class);
-
- public static XceiverClientRatis newXceiverClientRatis(
- org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
- Configuration ozoneConf) {
- return newXceiverClientRatis(pipeline, ozoneConf, null);
- }
-
- public static XceiverClientRatis newXceiverClientRatis(
- org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
- Configuration ozoneConf, X509Certificate caCert) {
- final String rpcType = ozoneConf
- .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
- ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
- final TimeDuration clientRequestTimeout =
- RatisHelper.getClientRequestTimeout(ozoneConf);
- final int maxOutstandingRequests =
- HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
- final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
- final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new
- SecurityConfig(ozoneConf), caCert);
- return new XceiverClientRatis(pipeline,
- SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
- retryPolicy, tlsConfig, clientRequestTimeout);
- }
-
- private final Pipeline pipeline;
- private final RpcType rpcType;
- private final AtomicReference<RaftClient> client = new AtomicReference<>();
- private final int maxOutstandingRequests;
- private final RetryPolicy retryPolicy;
- private final GrpcTlsConfig tlsConfig;
- private final TimeDuration clientRequestTimeout;
-
- // Map to track commit index at every server
- private final ConcurrentHashMap<UUID, Long> commitInfoMap;
-
- private XceiverClientMetrics metrics;
-
- /**
- * Constructs a client.
- */
- private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
- int maxOutStandingChunks, RetryPolicy retryPolicy,
- GrpcTlsConfig tlsConfig, TimeDuration timeout) {
- super();
- this.pipeline = pipeline;
- this.rpcType = rpcType;
- this.maxOutstandingRequests = maxOutStandingChunks;
- this.retryPolicy = retryPolicy;
- commitInfoMap = new ConcurrentHashMap<>();
- this.tlsConfig = tlsConfig;
- this.clientRequestTimeout = timeout;
- metrics = XceiverClientManager.getXceiverClientMetrics();
- }
-
- private void updateCommitInfosMap(
- Collection<RaftProtos.CommitInfoProto> commitInfoProtos) {
- // if the commitInfo map is empty, just update the commit indexes for each
- // of the servers
- if (commitInfoMap.isEmpty()) {
- commitInfoProtos.forEach(proto -> commitInfoMap
- .put(RatisHelper.toDatanodeId(proto.getServer()),
- proto.getCommitIndex()));
- // In case the commit is happening 2 way, just update the commitIndex
- // for the servers which have been successfully updating the commit
- // indexes. This is important because getReplicatedMinCommitIndex()
- // should always return the min commit index out of the nodes which have
- // been replicating data successfully.
- } else {
- commitInfoProtos.forEach(proto -> commitInfoMap
- .computeIfPresent(RatisHelper.toDatanodeId(proto.getServer()),
- (address, index) -> {
- index = proto.getCommitIndex();
- return index;
- }));
- }
- }
-
- /**
- * Returns Ratis as pipeline Type.
- *
- * @return - Ratis
- */
- @Override
- public HddsProtos.ReplicationType getPipelineType() {
- return HddsProtos.ReplicationType.RATIS;
- }
-
- @Override
- public Pipeline getPipeline() {
- return pipeline;
- }
-
- @Override
- public void connect() throws Exception {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Connecting to pipeline:{} datanode:{}", getPipeline().getId(),
- RatisHelper.toRaftPeerId(pipeline.getFirstNode()));
- }
- // TODO : XceiverClient ratis should pass the config value of
- // maxOutstandingRequests so as to set the upper bound on max no of async
- // requests to be handled by raft client
- if (!client.compareAndSet(null,
- RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
- maxOutstandingRequests, tlsConfig, clientRequestTimeout))) {
- throw new IllegalStateException("Client is already connected.");
- }
- }
-
- @Override
- public void connect(String encodedToken) throws Exception {
- throw new UnsupportedOperationException("Block tokens are not " +
- "implemented for Ratis clients.");
- }
-
- @Override
- public void close() {
- final RaftClient c = client.getAndSet(null);
- if (c != null) {
- closeRaftClient(c);
- }
- }
-
- private void closeRaftClient(RaftClient raftClient) {
- try {
- raftClient.close();
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
- }
-
- private RaftClient getClient() {
- return Objects.requireNonNull(client.get(), "client is null");
- }
-
-
- @VisibleForTesting
- public ConcurrentHashMap<UUID, Long> getCommitInfoMap() {
- return commitInfoMap;
- }
-
- private CompletableFuture<RaftClientReply> sendRequestAsync(
- ContainerCommandRequestProto request) {
- try (Scope scope = GlobalTracer.get()
- .buildSpan("XceiverClientRatis." + request.getCmdType().name())
- .startActive(true)) {
- final ContainerCommandRequestMessage message
- = ContainerCommandRequestMessage.toMessage(
- request, TracingUtil.exportCurrentSpan());
- if (HddsUtils.isReadOnly(request)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("sendCommandAsync ReadOnly {}", message);
- }
- return getClient().sendReadOnlyAsync(message);
- } else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("sendCommandAsync {}", message);
- }
- return getClient().sendAsync(message);
- }
- }
- }
-
- // gets the minimum log index replicated to all servers
- @Override
- public long getReplicatedMinCommitIndex() {
- OptionalLong minIndex =
- commitInfoMap.values().parallelStream().mapToLong(v -> v).min();
- return minIndex.isPresent() ? minIndex.getAsLong() : 0;
- }
-
- private void addDatanodetoReply(UUID address, XceiverClientReply reply) {
- DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
- builder.setUuid(address.toString());
- reply.addDatanode(builder.build());
- }
-
- @Override
- public XceiverClientReply watchForCommit(long index, long timeout)
- throws InterruptedException, ExecutionException, TimeoutException,
- IOException {
- long commitIndex = getReplicatedMinCommitIndex();
- XceiverClientReply clientReply = new XceiverClientReply(null);
- if (commitIndex >= index) {
- // return the min commit index till which the log has been replicated to
- // all servers
- clientReply.setLogIndex(commitIndex);
- return clientReply;
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("commit index : {} watch timeout : {}", index, timeout);
- }
- RaftClientReply reply;
- try {
- CompletableFuture<RaftClientReply> replyFuture = getClient()
- .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED);
- replyFuture.get(timeout, TimeUnit.MILLISECONDS);
- } catch (Exception e) {
- Throwable t = HddsClientUtils.checkForException(e);
- LOG.warn("3 way commit failed on pipeline {}", pipeline, e);
- if (t instanceof GroupMismatchException) {
- throw e;
- }
- reply = getClient()
- .sendWatchAsync(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED)
- .get(timeout, TimeUnit.MILLISECONDS);
- List<RaftProtos.CommitInfoProto> commitInfoProtoList =
- reply.getCommitInfos().stream()
- .filter(i -> i.getCommitIndex() < index)
- .collect(Collectors.toList());
- commitInfoProtoList.parallelStream().forEach(proto -> {
- UUID address = RatisHelper.toDatanodeId(proto.getServer());
- addDatanodetoReply(address, clientReply);
- // since 3 way commit has failed, the updated map from now on will
- // only store entries for those datanodes which have had successful
- // replication.
- commitInfoMap.remove(address);
- LOG.info(
- "Could not commit index {} on pipeline {} to all the nodes. " +
- "Server {} has failed. Committed by majority.",
- index, pipeline, address);
- });
- }
- clientReply.setLogIndex(index);
- return clientReply;
- }
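-
- // Illustrative usage sketch (editorial note, not part of the original
- // file): a writer typically calls watchForCommit() with the log index
- // returned for its write, e.g.
- //
- //   XceiverClientReply ack =
- //       ratisClient.watchForCommit(reply.getLogIndex(), watchTimeoutMs);
- //
- // where watchTimeoutMs is a caller-supplied timeout. If the ALL_COMMITTED
- // watch fails, the method above falls back to MAJORITY_COMMITTED and
- // records the lagging datanodes in the returned reply.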
-
- /**
- * Sends a given command to the server and gets a waitable future back.
- *
- * @param request Request
- * @return Response to the command
- */
- @Override
- public XceiverClientReply sendCommandAsync(
- ContainerCommandRequestProto request) {
- XceiverClientReply asyncReply = new XceiverClientReply(null);
- long requestTime = Time.monotonicNowNanos();
- CompletableFuture<RaftClientReply> raftClientReply =
- sendRequestAsync(request);
- metrics.incrPendingContainerOpsMetrics(request.getCmdType());
- CompletableFuture<ContainerCommandResponseProto> containerCommandResponse =
- raftClientReply.whenComplete((reply, e) -> {
- if (LOG.isDebugEnabled()) {
- LOG.debug("received reply {} for request: cmdType={} containerID={}"
- + " pipelineID={} traceID={} exception: {}", reply,
- request.getCmdType(), request.getContainerID(),
- request.getPipelineID(), request.getTraceID(), e);
- }
- metrics.decrPendingContainerOpsMetrics(request.getCmdType());
- metrics.addContainerOpsLatency(request.getCmdType(),
- Time.monotonicNowNanos() - requestTime);
- }).thenApply(reply -> {
- try {
- if (!reply.isSuccess()) {
- // In case of a raft retry failure, the raft client is not able to
- // connect to the leader, so the pipeline cannot be used; this
- // instance of RaftClient will be closed and refreshed again. If the
- // client cannot connect to the leader, the getClient call will fail.
-
- // No need to set the failed server ID here. The Ozone client
- // will directly exclude this pipeline in the next allocate-block
- // call to SCM, since in this case it is the raft client that is not
- // able to connect to the leader in the pipeline, though the
- // pipeline can still be functional.
- RaftException exception = reply.getException();
- Preconditions.checkNotNull(exception, "Raft reply failure but " +
- "no exception propagated.");
- throw new CompletionException(exception);
- }
- ContainerCommandResponseProto response =
- ContainerCommandResponseProto
- .parseFrom(reply.getMessage().getContent());
- UUID serverId = RatisHelper.toDatanodeId(reply.getReplierId());
- if (response.getResult() == ContainerProtos.Result.SUCCESS) {
- updateCommitInfosMap(reply.getCommitInfos());
- }
- asyncReply.setLogIndex(reply.getLogIndex());
- addDatanodetoReply(serverId, asyncReply);
- return response;
- } catch (InvalidProtocolBufferException e) {
- throw new CompletionException(e);
- }
- });
- asyncReply.setResponse(containerCommandResponse);
- return asyncReply;
- }
-
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
deleted file mode 100644
index 982fb8ea1eec6..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.client;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .ContainerDataProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .ReadContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * This class provides the client-facing APIs of container operations.
- */
-public class ContainerOperationClient implements ScmClient {
-
- private static final Logger LOG =
- LoggerFactory.getLogger(ContainerOperationClient.class);
- private static long containerSizeB = -1;
- private final StorageContainerLocationProtocol
- storageContainerLocationClient;
- private final XceiverClientManager xceiverClientManager;
-
- public ContainerOperationClient(
- StorageContainerLocationProtocol
- storageContainerLocationClient,
- XceiverClientManager xceiverClientManager) {
- this.storageContainerLocationClient = storageContainerLocationClient;
- this.xceiverClientManager = xceiverClientManager;
- }
-
- /**
- * Return the capacity of containers. The current assumption is that all
- * containers have the same capacity. Therefore one static is sufficient for
- * any container.
- * @return The capacity of one container in number of bytes.
- */
- public static long getContainerSizeB() {
- return containerSizeB;
- }
-
- /**
- * Set the capacity of a container. Should be called exactly once on
- * system start.
- * @param size Capacity of one container in number of bytes.
- */
- public static void setContainerSizeB(long size) {
- containerSizeB = size;
- }
-
-
- @Override
- public ContainerWithPipeline createContainer(String owner)
- throws IOException {
- XceiverClientSpi client = null;
- try {
- ContainerWithPipeline containerWithPipeline =
- storageContainerLocationClient.allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), owner);
- Pipeline pipeline = containerWithPipeline.getPipeline();
- client = xceiverClientManager.acquireClient(pipeline);
-
- Preconditions.checkState(pipeline.isOpen(), String
- .format("Unexpected state=%s for pipeline=%s, expected state=%s",
- pipeline.getPipelineState(), pipeline.getId(),
- Pipeline.PipelineState.OPEN));
- createContainer(client,
- containerWithPipeline.getContainerInfo().getContainerID());
- return containerWithPipeline;
- } finally {
- if (client != null) {
- xceiverClientManager.releaseClient(client, false);
- }
- }
- }
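-
- // Illustrative usage sketch (editorial note, not part of the original
- // file); the owner string "ozone" is chosen purely for illustration:
- //
- //   ScmClient scmClient = new ContainerOperationClient(
- //       storageContainerLocationClient, xceiverClientManager);
- //   ContainerWithPipeline container = scmClient.createContainer("ozone");
- //   long id = container.getContainerInfo().getContainerID();
- //   // ... write to / read from the container ...
- //   scmClient.closeContainer(id);
- //   scmClient.deleteContainer(id, true);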
-
- /**
- * Create a container over pipeline specified by the SCM.
- *
- * @param client - Client to communicate with Datanodes.
- * @param containerId - Container ID.
- * @throws IOException
- */
- public void createContainer(XceiverClientSpi client,
- long containerId) throws IOException {
- ContainerProtocolCalls.createContainer(client, containerId, null);
-
- // Let us log this info after we let SCM know that we have completed the
- // creation state.
- if (LOG.isDebugEnabled()) {
- LOG.debug("Created container " + containerId
- + " machines:" + client.getPipeline().getNodes());
- }
- }
-
- /**
- * Creates a pipeline over the machines chosen by the SCM.
- *
- * @param client - Client
- * @param pipeline - pipeline to be created on Datanodes.
- * @throws IOException
- */
- private void createPipeline(XceiverClientSpi client, Pipeline pipeline)
- throws IOException {
-
- Preconditions.checkNotNull(pipeline.getId(), "Pipeline " +
- "name cannot be null when client create flag is set.");
-
- // Pipeline creation is a three step process.
- //
- // 1. Notify SCM that this client is doing a create pipeline on
- // datanodes.
- //
- // 2. Talk to Datanodes to create the pipeline.
- //
- // 3. update SCM that pipeline creation was successful.
-
- // TODO: this has not been fully implemented on server side
- // SCMClientProtocolServer#notifyObjectStageChange
- // TODO: when implement the pipeline state machine, change
- // the pipeline name (string) to pipeline id (long)
- //storageContainerLocationClient.notifyObjectStageChange(
- // ObjectStageChangeRequestProto.Type.pipeline,
- // pipeline.getPipelineName(),
- // ObjectStageChangeRequestProto.Op.create,
- // ObjectStageChangeRequestProto.Stage.begin);
-
- // client.createPipeline();
- // TODO: Use PipelineManager to createPipeline
-
- //storageContainerLocationClient.notifyObjectStageChange(
- // ObjectStageChangeRequestProto.Type.pipeline,
- // pipeline.getPipelineName(),
- // ObjectStageChangeRequestProto.Op.create,
- // ObjectStageChangeRequestProto.Stage.complete);
-
- // TODO : Should we change the state on the client side ??
- // That makes sense, but it is not needed for the client to work.
- if (LOG.isDebugEnabled()) {
- LOG.debug("Pipeline creation successful. Pipeline: {}",
- pipeline.toString());
- }
- }
-
- @Override
- public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
- HddsProtos.ReplicationFactor factor, String owner) throws IOException {
- XceiverClientSpi client = null;
- try {
- // allocate container on SCM.
- ContainerWithPipeline containerWithPipeline =
- storageContainerLocationClient.allocateContainer(type, factor,
- owner);
- Pipeline pipeline = containerWithPipeline.getPipeline();
-
- // connect to pipeline leader and allocate container on leader datanode.
- client = xceiverClientManager.acquireClient(pipeline);
- createContainer(client,
- containerWithPipeline.getContainerInfo().getContainerID());
- return containerWithPipeline;
- } finally {
- if (client != null) {
- xceiverClientManager.releaseClient(client, false);
- }
- }
- }
-
- /**
- * Returns a set of Nodes that meet a query criteria.
- *
- * @param nodeStatuses - Criteria that we want the node to have.
- * @param queryScope - Query scope - Cluster or pool.
- * @param poolName - if it is pool, a pool name is required.
- * @return A set of nodes that meet the requested criteria.
- * @throws IOException
- */
- @Override
- public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
- nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
- throws IOException {
- return storageContainerLocationClient.queryNode(nodeStatuses, queryScope,
- poolName);
- }
-
- /**
- * Creates a specified replication pipeline.
- */
- @Override
- public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
- HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
- throws IOException {
- return storageContainerLocationClient.createReplicationPipeline(type,
- factor, nodePool);
- }
-
- @Override
- public List<Pipeline> listPipelines() throws IOException {
- return storageContainerLocationClient.listPipelines();
- }
-
- @Override
- public void activatePipeline(HddsProtos.PipelineID pipelineID)
- throws IOException {
- storageContainerLocationClient.activatePipeline(pipelineID);
- }
-
- @Override
- public void deactivatePipeline(HddsProtos.PipelineID pipelineID)
- throws IOException {
- storageContainerLocationClient.deactivatePipeline(pipelineID);
- }
-
- @Override
- public void closePipeline(HddsProtos.PipelineID pipelineID)
- throws IOException {
- storageContainerLocationClient.closePipeline(pipelineID);
- }
-
- @Override
- public void close() {
- try {
- xceiverClientManager.close();
- } catch (Exception ex) {
- LOG.error("Can't close " + this.getClass().getSimpleName(), ex);
- }
- }
-
- /**
- * Deletes an existing container.
- *
- * @param containerId - ID of the container.
- * @param pipeline - Pipeline that represents the container.
- * @param force - true to forcibly delete the container.
- * @throws IOException
- */
- @Override
- public void deleteContainer(long containerId, Pipeline pipeline,
- boolean force) throws IOException {
- XceiverClientSpi client = null;
- try {
- client = xceiverClientManager.acquireClient(pipeline);
- ContainerProtocolCalls
- .deleteContainer(client, containerId, force, null);
- storageContainerLocationClient
- .deleteContainer(containerId);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Deleted container {}, machines: {} ", containerId,
- pipeline.getNodes());
- }
- } finally {
- if (client != null) {
- xceiverClientManager.releaseClient(client, false);
- }
- }
- }
-
- /**
- * Delete the container, this will release any resource it uses.
- * @param containerID - containerID.
- * @param force - True to forcibly delete the container.
- * @throws IOException
- */
- @Override
- public void deleteContainer(long containerID, boolean force)
- throws IOException {
- ContainerWithPipeline info = getContainerWithPipeline(containerID);
- deleteContainer(containerID, info.getPipeline(), force);
- }
-
- @Override
- public List<ContainerInfo> listContainer(long startContainerID,
- int count) throws IOException {
- return storageContainerLocationClient.listContainer(
- startContainerID, count);
- }
-
- /**
- * Get meta data from an existing container.
- *
- * @param containerID - ID of the container.
- * @param pipeline - Pipeline where the container is located.
- * @return ContainerDataProto
- * @throws IOException
- */
- @Override
- public ContainerDataProto readContainer(long containerID,
- Pipeline pipeline) throws IOException {
- XceiverClientSpi client = null;
- try {
- client = xceiverClientManager.acquireClient(pipeline);
- ReadContainerResponseProto response =
- ContainerProtocolCalls.readContainer(client, containerID, null);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Read container {}, machines: {} ", containerID,
- pipeline.getNodes());
- }
- return response.getContainerData();
- } finally {
- if (client != null) {
- xceiverClientManager.releaseClient(client, false);
- }
- }
- }
-
- /**
- * Get meta data from an existing container.
- * @param containerID - ID of the container.
- * @return ContainerDataProto - a protobuf message with basic info
- * about the container.
- * @throws IOException
- */
- @Override
- public ContainerDataProto readContainer(long containerID) throws IOException {
- ContainerWithPipeline info = getContainerWithPipeline(containerID);
- return readContainer(containerID, info.getPipeline());
- }
-
- /**
- * Given an id, return the ContainerInfo associated with the container.
- * @param containerId - Container ID
- * @return ContainerInfo of the existing container, corresponding to the given id.
- * @throws IOException
- */
- @Override
- public ContainerInfo getContainer(long containerId) throws
- IOException {
- return storageContainerLocationClient.getContainer(containerId);
- }
-
- /**
- * Gets a container by Name -- Throws if the container does not exist.
- *
- * @param containerId - Container ID
- * @return ContainerWithPipeline
- * @throws IOException
- */
- @Override
- public ContainerWithPipeline getContainerWithPipeline(long containerId)
- throws IOException {
- return storageContainerLocationClient.getContainerWithPipeline(containerId);
- }
-
- /**
- * Close a container.
- *
- * @param containerId - ID of the container to be closed.
- * @param pipeline - Pipeline of the container.
- * @throws IOException
- */
- @Override
- public void closeContainer(long containerId, Pipeline pipeline)
- throws IOException {
- XceiverClientSpi client = null;
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Close container {}", pipeline);
- }
- /*
- TODO: two orders here, revisit this later:
- 1. close on SCM first, then on data node
- 2. close on data node first, then on SCM
-
- with 1: if client failed after closing on SCM, then there is a
- container SCM thinks as closed, but is actually open. Then SCM will no
- longer allocate block to it, which is fine. But SCM may later try to
- replicate this "closed" container, which I'm not sure is safe.
-
- with 2: if client failed after close on datanode, then there is a
- container SCM thinks as open, but is actually closed. Then SCM will still
- try to allocate block to it. Which will fail when actually doing the
- write. No more data can be written, but at least the correctness and
- consistency of existing data will be maintained.
-
- For now, take the #2 way.
- */
- // Actually close the container on Datanode
- client = xceiverClientManager.acquireClient(pipeline);
-
- storageContainerLocationClient.notifyObjectStageChange(
- ObjectStageChangeRequestProto.Type.container,
- containerId,
- ObjectStageChangeRequestProto.Op.close,
- ObjectStageChangeRequestProto.Stage.begin);
-
- ContainerProtocolCalls.closeContainer(client, containerId,
- null);
- // Notify SCM to close the container
- storageContainerLocationClient.notifyObjectStageChange(
- ObjectStageChangeRequestProto.Type.container,
- containerId,
- ObjectStageChangeRequestProto.Op.close,
- ObjectStageChangeRequestProto.Stage.complete);
- } finally {
- if (client != null) {
- xceiverClientManager.releaseClient(client, false);
- }
- }
- }
-
- /**
- * Close a container.
- *
- * @throws IOException
- */
- @Override
- public void closeContainer(long containerId)
- throws IOException {
- ContainerWithPipeline info = getContainerWithPipeline(containerId);
- Pipeline pipeline = info.getPipeline();
- closeContainer(containerId, pipeline);
- }
-
- /**
- * Get the current usage information.
- * @param containerID - ID of the container.
- * @return the size of the given container.
- * @throws IOException
- */
- @Override
- public long getContainerSize(long containerID) throws IOException {
- // TODO : Fix this, it currently returns the capacity
- // but not the current usage.
- long size = getContainerSizeB();
- if (size == -1) {
- throw new IOException("Container size unknown!");
- }
- return size;
- }
-
- /**
- * Check if SCM is in safe mode.
- *
- * @return Returns true if SCM is in safe mode else returns false.
- * @throws IOException
- */
- public boolean inSafeMode() throws IOException {
- return storageContainerLocationClient.inSafeMode();
- }
-
- /**
- * Force SCM out of safe mode.
- *
- * @return returns true if operation is successful.
- * @throws IOException
- */
- public boolean forceExitSafeMode() throws IOException {
- return storageContainerLocationClient.forceExitSafeMode();
- }
-
- @Override
- public void startReplicationManager() throws IOException {
- storageContainerLocationClient.startReplicationManager();
- }
-
- @Override
- public void stopReplicationManager() throws IOException {
- storageContainerLocationClient.stopReplicationManager();
- }
-
- @Override
- public boolean getReplicationManagerStatus() throws IOException {
- return storageContainerLocationClient.getReplicationManagerStatus();
- }
-
-
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
deleted file mode 100644
index d3bb31aa69878..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.ratis.protocol.AlreadyClosedException;
-import org.apache.ratis.protocol.GroupMismatchException;
-import org.apache.ratis.protocol.NotReplicatedException;
-import org.apache.ratis.protocol.RaftRetryFailureException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.text.ParseException;
-import java.time.Instant;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Utility methods for Ozone and Container Clients.
- *
- * The methods to retrieve SCM service endpoints assume there is a single
- * SCM service instance. This will change when we switch to replicated service
- * instances for redundancy.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class HddsClientUtils {
-
- private static final Logger LOG = LoggerFactory.getLogger(
- HddsClientUtils.class);
-
- private static final int NO_PORT = -1;
-
- private HddsClientUtils() {
- }
-
- private static final List<Class<? extends Exception>> EXCEPTION_LIST =
- new ArrayList<Class<? extends Exception>>() {{
- add(TimeoutException.class);
- add(StorageContainerException.class);
- add(RaftRetryFailureException.class);
- add(AlreadyClosedException.class);
- add(GroupMismatchException.class);
- // NotReplicatedException will be thrown if watchForCommit
- // does not succeed.
- add(NotReplicatedException.class);
- }};
-
- /**
- * Date format used in Ozone. The formatter here is thread-safe to use.
- */
- private static final ThreadLocal<DateTimeFormatter> DATE_FORMAT =
- ThreadLocal.withInitial(() -> {
- DateTimeFormatter format =
- DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT);
- return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE));
- });
-
-
- /**
- * Convert time in milliseconds to the human-readable date format used in Ozone.
- * @return a human readable string for the input time
- */
- public static String formatDateTime(long millis) {
- ZonedDateTime dateTime = ZonedDateTime.ofInstant(
- Instant.ofEpochMilli(millis), DATE_FORMAT.get().getZone());
- return DATE_FORMAT.get().format(dateTime);
- }
-
- /**
- * Convert time in ozone date format to millisecond.
- * @return time in milliseconds
- */
- public static long formatDateTime(String date) throws ParseException {
- Preconditions.checkNotNull(date, "Date string should not be null.");
- return ZonedDateTime.parse(date, DATE_FORMAT.get())
- .toInstant().toEpochMilli();
- }
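-
- // Editorial note, not part of the original file: the two formatDateTime
- // overloads are intended to round-trip, e.g.
- //
- //   String text = HddsClientUtils.formatDateTime(System.currentTimeMillis());
- //   long millis = HddsClientUtils.formatDateTime(text);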
-
- /**
- * verifies that bucket name / volume name is a valid DNS name.
- *
- * @param resName Bucket or volume Name to be validated
- *
- * @throws IllegalArgumentException
- */
- public static void verifyResourceName(String resName)
- throws IllegalArgumentException {
- if (resName == null) {
- throw new IllegalArgumentException("Bucket or Volume name is null");
- }
-
- if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH ||
- resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) {
- throw new IllegalArgumentException(
- "Bucket or Volume length is illegal, "
- + "valid length is 3-63 characters");
- }
-
- if (resName.charAt(0) == '.' || resName.charAt(0) == '-') {
- throw new IllegalArgumentException(
- "Bucket or Volume name cannot start with a period or dash");
- }
-
- if (resName.charAt(resName.length() - 1) == '.' ||
- resName.charAt(resName.length() - 1) == '-') {
- throw new IllegalArgumentException("Bucket or Volume name "
- + "cannot end with a period or dash");
- }
-
- boolean isIPv4 = true;
- char prev = (char) 0;
-
- for (int index = 0; index < resName.length(); index++) {
- char currChar = resName.charAt(index);
- if (currChar != '.') {
- isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4;
- }
- if (currChar > 'A' && currChar < 'Z') {
- throw new IllegalArgumentException(
- "Bucket or Volume name does not support uppercase characters");
- }
- if (currChar != '.' && currChar != '-') {
- if (currChar < '0' || (currChar > '9' && currChar < 'a') ||
- currChar > 'z') {
- throw new IllegalArgumentException("Bucket or Volume name has an " +
- "unsupported character : " +
- currChar);
- }
- }
- if (prev == '.' && currChar == '.') {
- throw new IllegalArgumentException("Bucket or Volume name should not " +
- "have two contiguous periods");
- }
- if (prev == '-' && currChar == '.') {
- throw new IllegalArgumentException(
- "Bucket or Volume name should not have period after dash");
- }
- if (prev == '.' && currChar == '-') {
- throw new IllegalArgumentException(
- "Bucket or Volume name should not have dash after period");
- }
- prev = currChar;
- }
-
- if (isIPv4) {
- throw new IllegalArgumentException(
- "Bucket or Volume name cannot be an IPv4 address or all numeric");
- }
- }
-
- /**
- * Verifies that each bucket / volume name is a valid DNS name.
- *
- * @param resourceNames Array of bucket / volume names to be verified.
- */
- public static void verifyResourceName(String... resourceNames) {
- for (String resourceName : resourceNames) {
- HddsClientUtils.verifyResourceName(resourceName);
- }
- }
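As a quick illustration of the rules enforced above, the hypothetical snippet below runs a few names through verifyResourceName; the accepted and rejected cases follow directly from the checks in the method. The import path for HddsClientUtils is assumed, as the file header for this class lies outside this excerpt.

```java
import org.apache.hadoop.hdds.scm.client.HddsClientUtils; // assumed package

public final class ResourceNameCheckSketch {
  public static void main(String[] args) {
    // Accepted: lowercase, DNS-style, 3-63 characters, not numeric/IPv4-like.
    HddsClientUtils.verifyResourceName("volume-1", "my.bucket");

    // Rejected: uppercase, IPv4-like, leading dash, contiguous periods.
    for (String bad : new String[] {"MyBucket", "10.0.0.1", "-dash-first", "a..b"}) {
      try {
        HddsClientUtils.verifyResourceName(bad);
      } catch (IllegalArgumentException e) {
        System.out.println(bad + " rejected: " + e.getMessage());
      }
    }
  }
}
```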
-
- /**
- * Checks that object parameters passed as reference is not null.
- *
- * @param references Array of object references to be checked.
- * @param <T> type of the references
- */
- public static <T> void checkNotNull(T... references) {
- for (T ref: references) {
- Preconditions.checkNotNull(ref);
- }
- }
-
- /**
- * Returns the cache value to be used for list calls.
- * @param conf Configuration object
- * @return list cache size
- */
- public static int getListCacheSize(Configuration conf) {
- return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
- OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
- }
-
- /**
- * @return a default instance of {@link CloseableHttpClient}.
- */
- public static CloseableHttpClient newHttpClient() {
- return HddsClientUtils.newHttpClient(new Configuration());
- }
-
- /**
- * Returns a {@link CloseableHttpClient} configured by given configuration.
- * If conf is null, returns a default instance.
- *
- * @param conf configuration
- * @return a {@link CloseableHttpClient} instance.
- */
- public static CloseableHttpClient newHttpClient(Configuration conf) {
- long socketTimeout = OzoneConfigKeys
- .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
- long connectionTimeout = OzoneConfigKeys
- .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
- if (conf != null) {
- socketTimeout = conf.getTimeDuration(
- OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
- OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
- TimeUnit.MILLISECONDS);
- connectionTimeout = conf.getTimeDuration(
- OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
- OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
- TimeUnit.MILLISECONDS);
- }
-
- CloseableHttpClient client = HttpClients.custom()
- .setDefaultRequestConfig(
- RequestConfig.custom()
- .setSocketTimeout(Math.toIntExact(socketTimeout))
- .setConnectTimeout(Math.toIntExact(connectionTimeout))
- .build())
- .build();
- return client;
- }
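Since the helper above hands back a CloseableHttpClient, callers own its lifecycle and should close it; a small usage sketch with try-with-resources follows (the endpoint URL is purely hypothetical, and the HddsClientUtils import path is assumed).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils; // assumed package
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;

public final class HttpClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Both the client and the response are closed automatically.
    try (CloseableHttpClient client = HddsClientUtils.newHttpClient(conf);
         CloseableHttpResponse response =
             client.execute(new HttpGet("http://localhost:9880/"))) {
      System.out.println(response.getStatusLine());
    }
  }
}
```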
-
- /**
- * Returns the maximum number of outstanding async requests to be handled by
- * the Standalone and Ratis clients.
- */
- public static int getMaxOutstandingRequests(Configuration config) {
- return OzoneConfiguration.of(config)
- .getObject(ScmClientConfig.class)
- .getMaxOutstandingRequests();
- }
-
- /**
- * Create an SCM security client for the given configuration and user.
- *
- * @return {@link SCMSecurityProtocol}
- * @throws IOException
- */
- public static SCMSecurityProtocol getScmSecurityClient(
- OzoneConfiguration conf, UserGroupInformation ugi) throws IOException {
- RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
- ProtobufRpcEngine.class);
- long scmVersion =
- RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
- InetSocketAddress scmSecurityProtoAdd =
- HddsUtils.getScmAddressForSecurityProtocol(conf);
- SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
- new SCMSecurityProtocolClientSideTranslatorPB(
- RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion,
- scmSecurityProtoAdd, ugi, conf,
- NetUtils.getDefaultSocketFactory(conf),
- Client.getRpcTimeout(conf)));
- return scmSecurityClient;
- }
-
- public static Throwable checkForException(Exception e) {
- Throwable t = e;
- while (t != null) {
- for (Class<? extends Exception> cls : getExceptionList()) {
- if (cls.isInstance(t)) {
- return t;
- }
- }
- t = t.getCause();
- }
- return t;
- }
-
- public static RetryPolicy createRetryPolicy(int maxRetryCount,
- long retryInterval) {
- // retry with fixed sleep between retries
- return RetryPolicies.retryUpToMaximumCountWithFixedSleep(
- maxRetryCount, retryInterval, TimeUnit.MILLISECONDS);
- }
-
- public static Map<Class<? extends Exception>,
- RetryPolicy> getRetryPolicyByException(int maxRetryCount,
- long retryInterval) {
- Map<Class<? extends Exception>, RetryPolicy> policyMap = new HashMap<>();
- for (Class<? extends Exception> ex : EXCEPTION_LIST) {
- if (ex == TimeoutException.class
- || ex == RaftRetryFailureException.class) {
- // retry without sleep
- policyMap.put(ex, createRetryPolicy(maxRetryCount, 0));
- } else {
- // retry with fixed sleep between retries
- policyMap.put(ex, createRetryPolicy(maxRetryCount, retryInterval));
- }
- }
- // Default retry policy
- policyMap
- .put(Exception.class, createRetryPolicy(maxRetryCount, retryInterval));
- return policyMap;
- }
-
- public static List<Class<? extends Exception>> getExceptionList() {
- return EXCEPTION_LIST;
- }
-}
\ No newline at end of file
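For context on how the retry helpers in the deleted HddsClientUtils are typically consumed, the sketch below combines the per-exception map from getRetryPolicyByException with Hadoop's RetryPolicies.retryByException into a single policy. This is an illustrative pairing under assumed imports, not a claim about the exact call sites removed elsewhere in this change.

```java
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.scm.client.HddsClientUtils; // assumed package
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public final class RetryPolicySketch {
  public static void main(String[] args) {
    int maxRetries = 5;
    long intervalMs = 200;

    // TimeoutException/RaftRetryFailureException retry without sleep,
    // the other listed exceptions retry with a fixed sleep.
    Map<Class<? extends Exception>, RetryPolicy> byException =
        HddsClientUtils.getRetryPolicyByException(maxRetries, intervalMs);

    // Fold the map into one policy: mapped exceptions use their own policy,
    // anything else falls back to the default fixed-sleep policy.
    RetryPolicy combined = RetryPolicies.retryByException(
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            maxRetries, intervalMs, TimeUnit.MILLISECONDS),
        byException);

    System.out.println("Combined policy over " + byException.size()
        + " exception classes: " + combined);
  }
}
```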
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
deleted file mode 100644
index 73ad78cd7872c..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-/**
- * Client facing classes for the container operations.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index 9390bc102034c..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * Classes for different type of container service client.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
deleted file mode 100644
index 40bbd93b16f1e..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * An {@link InputStream} called from KeyInputStream to read a block from the
- * container.
- * This class encapsulates all state management for iterating
- * through the sequence of chunks through {@link ChunkInputStream}.
- */
-public class BlockInputStream extends InputStream implements Seekable {
-
- private static final Logger LOG =
- LoggerFactory.getLogger(BlockInputStream.class);
-
- private static final int EOF = -1;
-
- private final BlockID blockID;
- private final long length;
- private Pipeline pipeline;
- private final Token<OzoneBlockTokenIdentifier> token;
- private final boolean verifyChecksum;
- private XceiverClientManager xceiverClientManager;
- private XceiverClientSpi xceiverClient;
- private boolean initialized = false;
-
- // List of ChunkInputStreams, one for each chunk in the block
- private List<ChunkInputStream> chunkStreams;
-
- // chunkOffsets[i] stores the index of the first data byte in
- // chunkStream i w.r.t the block data.
- // Let’s say we have chunk size as 40 bytes. And let's say the parent
- // block stores data from index 200 and has length 400.
- // The first 40 bytes of this block will be stored in chunk[0], next 40 in
- // chunk[1] and so on. But since the chunkOffsets are w.r.t the block only
- // and not the key, the values in chunkOffsets will be [0, 40, 80,....].
- private long[] chunkOffsets = null;
-
- // Index of the chunkStream corresponding to the current position of the
- // BlockInputStream i.e offset of the data to be read next from this block
- private int chunkIndex;
-
- // Position of the BlockInputStream is maintained by this variable till
- // the stream is initialized. This position is w.r.t. the block only and
- // not the key.
- // For the above example, if we seek to position 240 before the stream is
- // initialized, then value of blockPosition will be set to 40.
- // Once the stream is initialized, the position of the stream
- // will be determined by the current chunkStream and its position.
- private long blockPosition = 0;
-
- // Tracks the chunkIndex corresponding to the last blockPosition so that it
- // can be reset if a new position is seeked.
- private int chunkIndexOfPrevPosition;
-
- public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline,
- Token<OzoneBlockTokenIdentifier> token, boolean verifyChecksum,
- XceiverClientManager xceiverClientManager) {
- this.blockID = blockId;
- this.length = blockLen;
- this.pipeline = pipeline;
- this.token = token;
- this.verifyChecksum = verifyChecksum;
- this.xceiverClientManager = xceiverClientManager;
- }
-
- /**
- * Initialize the BlockInputStream. Get the BlockData (list of chunks) from
- * the Container and create the ChunkInputStreams for each Chunk in the Block.
- */
- public synchronized void initialize() throws IOException {
-
- // Pre-check that the stream has not been initialized already
- if (initialized) {
- return;
- }
-
- List<ChunkInfo> chunks = getChunkInfos();
- if (chunks != null && !chunks.isEmpty()) {
- // For each chunk in the block, create a ChunkInputStream and compute
- // its chunkOffset
- this.chunkOffsets = new long[chunks.size()];
- long tempOffset = 0;
-
- this.chunkStreams = new ArrayList<>(chunks.size());
- for (int i = 0; i < chunks.size(); i++) {
- addStream(chunks.get(i));
- chunkOffsets[i] = tempOffset;
- tempOffset += chunks.get(i).getLen();
- }
-
- initialized = true;
- this.chunkIndex = 0;
-
- if (blockPosition > 0) {
- // Stream was seeked to blockPosition before initialization. Seek to the
- // blockPosition now.
- seek(blockPosition);
- }
- }
- }
-
- /**
- * Send RPC call to get the block info from the container.
- * @return List of chunks in this block.
- */
- protected List<ChunkInfo> getChunkInfos() throws IOException {
- // irrespective of the container state, we will always read via Standalone
- // protocol.
- if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
- pipeline = Pipeline.newBuilder(pipeline)
- .setType(HddsProtos.ReplicationType.STAND_ALONE).build();
- }
- xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
- boolean success = false;
- List<ChunkInfo> chunks;
- try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Initializing BlockInputStream for get key to access {}",
- blockID.getContainerID());
- }
-
- if (token != null) {
- UserGroupInformation.getCurrentUser().addToken(token);
- }
- DatanodeBlockID datanodeBlockID = blockID
- .getDatanodeBlockIDProtobuf();
- GetBlockResponseProto response = ContainerProtocolCalls
- .getBlock(xceiverClient, datanodeBlockID);
-
- chunks = response.getBlockData().getChunksList();
- success = true;
- } finally {
- if (!success) {
- xceiverClientManager.releaseClientForReadData(xceiverClient, false);
- }
- }
-
- return chunks;
- }
-
- /**
- * Append another ChunkInputStream to the end of the list. Note that the
- * ChunkInputStream is only created here. The chunk will be read from the
- * Datanode only when a read operation is performed for that chunk.
- */
- protected synchronized void addStream(ChunkInfo chunkInfo) {
- chunkStreams.add(new ChunkInputStream(chunkInfo, blockID,
- xceiverClient, verifyChecksum));
- }
-
- public synchronized long getRemaining() throws IOException {
- return length - getPos();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public synchronized int read() throws IOException {
- byte[] buf = new byte[1];
- if (read(buf, 0, 1) == EOF) {
- return EOF;
- }
- return Byte.toUnsignedInt(buf[0]);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public synchronized int read(byte[] b, int off, int len) throws IOException {
- if (b == null) {
- throw new NullPointerException();
- }
- if (off < 0 || len < 0 || len > b.length - off) {
- throw new IndexOutOfBoundsException();
- }
- if (len == 0) {
- return 0;
- }
-
- if (!initialized) {
- initialize();
- }
-
- checkOpen();
- int totalReadLen = 0;
- while (len > 0) {
- // if we are at the last chunk and have read the entire chunk, return
- if (chunkStreams.size() == 0 ||
- (chunkStreams.size() - 1 <= chunkIndex &&
- chunkStreams.get(chunkIndex)
- .getRemaining() == 0)) {
- return totalReadLen == 0 ? EOF : totalReadLen;
- }
-
- // Get the current chunkStream and read data from it
- ChunkInputStream current = chunkStreams.get(chunkIndex);
- int numBytesToRead = Math.min(len, (int)current.getRemaining());
- int numBytesRead = current.read(b, off, numBytesToRead);
- if (numBytesRead != numBytesToRead) {
- // This implies that there is either data loss or corruption in the
- // chunk entries. Even EOF in the current stream would be covered in
- // this case.
- throw new IOException(String.format(
- "Inconsistent read for chunkName=%s length=%d numBytesRead=%d",
- current.getChunkName(), current.getLength(), numBytesRead));
- }
- totalReadLen += numBytesRead;
- off += numBytesRead;
- len -= numBytesRead;
- if (current.getRemaining() <= 0 &&
- ((chunkIndex + 1) < chunkStreams.size())) {
- chunkIndex += 1;
- }
- }
- return totalReadLen;
- }
-
- /**
- * Seeks the BlockInputStream to the specified position. If the stream is
- * not initialized, save the seeked position via blockPosition. Otherwise,
- * update the position in 2 steps:
- * 1. Updating the chunkIndex to the chunkStream corresponding to the
- * seeked position.
- * 2. Seek the corresponding chunkStream to the adjusted position.
- *
- * Let’s say we have chunk size as 40 bytes. And let's say the parent block
- * stores data from index 200 and has length 400. If the key was seeked to
- * position 90, then this block will be seeked to position 90.
- * When seek(90) is called on this blockStream, then
- * 1. chunkIndex will be set to 2 (as indices 80 - 120 reside in chunk[2]).
- * 2. chunkStream[2] will be seeked to position 10
- * (= 90 - chunkOffset[2] (= 80)).
- */
- @Override
- public synchronized void seek(long pos) throws IOException {
- if (!initialized) {
- // Stream has not been initialized yet. Save the position so that it
- // can be seeked when the stream is initialized.
- blockPosition = pos;
- return;
- }
-
- checkOpen();
- if (pos < 0 || pos >= length) {
- if (pos == 0) {
- // It is possible for length and pos to be zero in which case
- // seek should return instead of throwing exception
- return;
- }
- throw new EOFException(
- "EOF encountered at pos: " + pos + " for block: " + blockID);
- }
-
- if (chunkIndex >= chunkStreams.size()) {
- chunkIndex = Arrays.binarySearch(chunkOffsets, pos);
- } else if (pos < chunkOffsets[chunkIndex]) {
- chunkIndex =
- Arrays.binarySearch(chunkOffsets, 0, chunkIndex, pos);
- } else if (pos >= chunkOffsets[chunkIndex] + chunkStreams
- .get(chunkIndex).getLength()) {
- chunkIndex = Arrays.binarySearch(chunkOffsets,
- chunkIndex + 1, chunkStreams.size(), pos);
- }
- if (chunkIndex < 0) {
- // Binary search returns -insertionPoint - 1 if element is not present
- // in the array. insertionPoint is the point at which element would be
- // inserted in the sorted array. We need to adjust the chunkIndex
- // accordingly so that chunkIndex = insertionPoint - 1
- chunkIndex = -chunkIndex - 2;
- }
-
- // Reset the previous chunkStream's position
- chunkStreams.get(chunkIndexOfPrevPosition).resetPosition();
-
- // seek to the proper offset in the ChunkInputStream
- chunkStreams.get(chunkIndex).seek(pos - chunkOffsets[chunkIndex]);
- chunkIndexOfPrevPosition = chunkIndex;
- }
-
- @Override
- public synchronized long getPos() throws IOException {
- if (length == 0) {
- return 0;
- }
-
- if (!initialized) {
- // The stream is not initialized yet. Return the blockPosition
- return blockPosition;
- } else {
- return chunkOffsets[chunkIndex] + chunkStreams.get(chunkIndex).getPos();
- }
- }
-
- @Override
- public boolean seekToNewSource(long targetPos) throws IOException {
- return false;
- }
-
- @Override
- public synchronized void close() {
- if (xceiverClientManager != null && xceiverClient != null) {
- xceiverClientManager.releaseClient(xceiverClient, false);
- xceiverClientManager = null;
- xceiverClient = null;
- }
- }
-
- public synchronized void resetPosition() {
- this.blockPosition = 0;
- }
-
- /**
- * Checks if the stream is open. If not, throw an exception.
- *
- * @throws IOException if stream is closed
- */
- protected synchronized void checkOpen() throws IOException {
- if (xceiverClient == null) {
- throw new IOException("BlockInputStream has been closed.");
- }
- }
-
- public BlockID getBlockID() {
- return blockID;
- }
-
- public long getLength() {
- return length;
- }
-
- @VisibleForTesting
- synchronized int getChunkIndex() {
- return chunkIndex;
- }
-
- @VisibleForTesting
- synchronized long getBlockPosition() {
- return blockPosition;
- }
-
- @VisibleForTesting
- synchronized List<ChunkInputStream> getChunkStreams() {
- return chunkStreams;
- }
-}
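The seek() implementation above maps a block offset to a chunk index via Arrays.binarySearch over chunkOffsets, converting a negative result of the form -insertionPoint - 1 into insertionPoint - 1. The standalone sketch below reproduces just that index arithmetic, using the 40-byte chunk layout from the class comments.

```java
import java.util.Arrays;

public final class ChunkIndexSketch {
  public static void main(String[] args) {
    // Offsets of each chunk within the block (40-byte chunks, as in the
    // BlockInputStream comments).
    long[] chunkOffsets = {0, 40, 80, 120};

    for (long pos : new long[] {0, 39, 40, 90, 121}) {
      int idx = Arrays.binarySearch(chunkOffsets, pos);
      if (idx < 0) {
        // binarySearch returns -(insertionPoint) - 1 when pos is not an exact
        // chunk start; the containing chunk is insertionPoint - 1.
        idx = -idx - 2;
      }
      long offsetInChunk = pos - chunkOffsets[idx];
      System.out.println("pos " + pos + " -> chunk " + idx
          + ", offset " + offsetInChunk);
    }
  }
}
```

For pos = 90 this prints chunk 2 with offset 10, matching the worked example in the seek() Javadoc.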
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
deleted file mode 100644
index b15ca3f6c85fc..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ /dev/null
@@ -1,640 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
- .putBlockAsync;
-import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
- .writeChunkAsync;
-
-/**
- * An {@link OutputStream} used by the REST service in combination with the
- * SCMClient to write the value of a key to a sequence
- * of container chunks. Writes are buffered locally and periodically written to
- * the container as a new chunk. In order to preserve the semantics that
- * replacement of a pre-existing key is atomic, each instance of the stream has
- * an internal unique identifier. This unique identifier and a monotonically
- * increasing chunk index form a composite key that is used as the chunk name.
- * After all data is written, a putKey call creates or updates the corresponding
- * container key, and this call includes the full list of chunks that make up
- * the key data. The list of chunks is updated all at once. Therefore, a
- * concurrent reader never can see an intermediate state in which different
- * chunks of data from different versions of the key data are interleaved.
- * This class encapsulates all state management for buffering and writing
- * through to the container.
- */
-public class BlockOutputStream extends OutputStream {
- public static final Logger LOG =
- LoggerFactory.getLogger(BlockOutputStream.class);
-
- private volatile BlockID blockID;
-
- private final BlockData.Builder containerBlockData;
- private XceiverClientManager xceiverClientManager;
- private XceiverClientSpi xceiverClient;
- private final ContainerProtos.ChecksumType checksumType;
- private final int bytesPerChecksum;
- private int chunkIndex;
- private int chunkSize;
- private final long streamBufferFlushSize;
- private final long streamBufferMaxSize;
- private BufferPool bufferPool;
- // The IOException will be set by response handling thread in case there is an
- // exception received in the response. If the exception is set, the next
- // request will fail upfront.
- private AtomicReference<IOException> ioException;
- private ExecutorService responseExecutor;
-
- // the effective length of data flushed so far
- private long totalDataFlushedLength;
-
- // effective data write attempted so far for the block
- private long writtenDataLength;
-
- // List containing buffers for which the putBlock call will
- // update the length in the datanodes. This list will just maintain
- // references to the buffers in the BufferPool which will be cleared
- // when the watchForCommit acknowledges a putBlock logIndex has been
- // committed on all datanodes. This list will be a place holder for buffers
- // which got written between successive putBlock calls.
- private List<ByteBuffer> bufferList;
-
- // This object will maintain the commitIndexes and byteBufferList in order
- // Also, corresponding to the logIndex, the corresponding list of buffers will
- // be released from the buffer pool.
- private final CommitWatcher commitWatcher;
-
- private List<DatanodeDetails> failedServers;
-
- /**
- * Creates a new BlockOutputStream.
- *
- * @param blockID block ID
- * @param xceiverClientManager client manager that controls client
- * @param pipeline pipeline where block will be written
- * @param chunkSize chunk size
- * @param bufferPool pool of buffers
- * @param streamBufferFlushSize flush size
- * @param streamBufferMaxSize max size of the currentBuffer
- * @param watchTimeout watch timeout
- * @param checksumType checksum type
- * @param bytesPerChecksum Bytes per checksum
- */
- @SuppressWarnings("parameternumber")
- public BlockOutputStream(BlockID blockID,
- XceiverClientManager xceiverClientManager, Pipeline pipeline,
- int chunkSize, long streamBufferFlushSize, long streamBufferMaxSize,
- long watchTimeout, BufferPool bufferPool, ChecksumType checksumType,
- int bytesPerChecksum)
- throws IOException {
- this.blockID = blockID;
- this.chunkSize = chunkSize;
- KeyValue keyValue =
- KeyValue.newBuilder().setKey("TYPE").setValue("KEY").build();
- this.containerBlockData =
- BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
- .addMetadata(keyValue);
- this.xceiverClientManager = xceiverClientManager;
- this.xceiverClient = xceiverClientManager.acquireClient(pipeline);
- this.chunkIndex = 0;
- this.streamBufferFlushSize = streamBufferFlushSize;
- this.streamBufferMaxSize = streamBufferMaxSize;
- this.bufferPool = bufferPool;
- this.checksumType = checksumType;
- this.bytesPerChecksum = bytesPerChecksum;
-
- // A single-threaded executor handles the responses of async requests
- responseExecutor = Executors.newSingleThreadExecutor();
- commitWatcher = new CommitWatcher(bufferPool, xceiverClient, watchTimeout);
- bufferList = null;
- totalDataFlushedLength = 0;
- writtenDataLength = 0;
- failedServers = new ArrayList<>(0);
- ioException = new AtomicReference<>(null);
- }
-
-
- public BlockID getBlockID() {
- return blockID;
- }
-
- public long getTotalAckDataLength() {
- return commitWatcher.getTotalAckDataLength();
- }
-
- public long getWrittenDataLength() {
- return writtenDataLength;
- }
-
- public List<DatanodeDetails> getFailedServers() {
- return failedServers;
- }
-
- @VisibleForTesting
- public XceiverClientSpi getXceiverClient() {
- return xceiverClient;
- }
-
- @VisibleForTesting
- public long getTotalDataFlushedLength() {
- return totalDataFlushedLength;
- }
-
- @VisibleForTesting
- public BufferPool getBufferPool() {
- return bufferPool;
- }
-
- public IOException getIoException() {
- return ioException.get();
- }
-
- @VisibleForTesting
- public Map<Long, List<ByteBuffer>> getCommitIndex2flushedDataMap() {
- return commitWatcher.getCommitIndex2flushedDataMap();
- }
-
- @Override
- public void write(int b) throws IOException {
- checkOpen();
- byte[] buf = new byte[1];
- buf[0] = (byte) b;
- write(buf, 0, 1);
- }
-
- @Override
- public void write(byte[] b, int off, int len) throws IOException {
- checkOpen();
- if (b == null) {
- throw new NullPointerException();
- }
- if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length)
- || ((off + len) < 0)) {
- throw new IndexOutOfBoundsException();
- }
- if (len == 0) {
- return;
- }
-
- while (len > 0) {
- int writeLen;
- // Allocate a buffer if needed. The buffer will be allocated only
- // once as needed and will be reused again for multiple blockOutputStream
- // entries.
- ByteBuffer currentBuffer = bufferPool.allocateBufferIfNeeded();
- int pos = currentBuffer.position();
- writeLen =
- Math.min(chunkSize - pos % chunkSize, len);
- currentBuffer.put(b, off, writeLen);
- if (!currentBuffer.hasRemaining()) {
- writeChunk(currentBuffer);
- }
- off += writeLen;
- len -= writeLen;
- writtenDataLength += writeLen;
- if (shouldFlush()) {
- updateFlushLength();
- executePutBlock();
- }
- // Data in the bufferPool can not exceed streamBufferMaxSize
- if (isBufferPoolFull()) {
- handleFullBuffer();
- }
- }
- }
-
- private boolean shouldFlush() {
- return bufferPool.computeBufferData() % streamBufferFlushSize == 0;
- }
-
- private void updateFlushLength() {
- totalDataFlushedLength += writtenDataLength - totalDataFlushedLength;
- }
-
- private boolean isBufferPoolFull() {
- return bufferPool.computeBufferData() == streamBufferMaxSize;
- }
- /**
- * Will be called on the retry path in case of ClosedContainerException/
- * TimeoutException.
- * @param len length of data to write
- * @throws IOException if error occurred
- */
-
- // In this case, the data is already cached in the currentBuffer.
- public void writeOnRetry(long len) throws IOException {
- if (len == 0) {
- return;
- }
- int count = 0;
- Preconditions.checkArgument(len <= streamBufferMaxSize);
- while (len > 0) {
- long writeLen;
- writeLen = Math.min(chunkSize, len);
- if (writeLen == chunkSize) {
- writeChunk(bufferPool.getBuffer(count));
- }
- len -= writeLen;
- count++;
- writtenDataLength += writeLen;
- // we should not call isBufferFull/shouldFlush here.
- // The buffer might already be full as the whole data is already cached in
- // the buffer. We should just check whether we have written data of size
- // streamBufferMaxSize/streamBufferFlushSize and, if so, handle the
- // full buffer/flush buffer condition.
- if (writtenDataLength % streamBufferFlushSize == 0) {
- // reset the position to zero as now we will be reading the
- // next buffer in the list
- updateFlushLength();
- executePutBlock();
- }
- if (writtenDataLength == streamBufferMaxSize) {
- handleFullBuffer();
- }
- }
- }
-
- /**
- * This is a blocking call. It will wait for the flush till the commit index
- * at the head of the commitIndex2flushedDataMap gets replicated to all or
- * majority.
- * @throws IOException
- */
- private void handleFullBuffer() throws IOException {
- try {
- checkOpen();
- if (!commitWatcher.getFutureMap().isEmpty()) {
- waitOnFlushFutures();
- }
- } catch (InterruptedException | ExecutionException e) {
- setIoException(e);
- adjustBuffersOnException();
- throw getIoException();
- }
- watchForCommit(true);
- }
-
-
- // It may happen that once the exception is encountered, we still might
- // have successfully flushed up to a certain index. Make sure the buffers
- // only contain data that has not been sufficiently replicated.
- private void adjustBuffersOnException() {
- commitWatcher.releaseBuffersOnException();
- }
-
- /**
- * Calls the watchForCommit API of the Ratis client. For the Standalone
- * client, it is a no-op.
- * @param bufferFull flag indicating whether the bufferFull condition is hit
- * or whether it is called as part of flush/close
- * @return minimum commit index replicated to all nodes
- * @throws IOException IOException in case watch gets timed out
- */
- private void watchForCommit(boolean bufferFull) throws IOException {
- checkOpen();
- try {
- XceiverClientReply reply = bufferFull ?
- commitWatcher.watchOnFirstIndex() : commitWatcher.watchOnLastIndex();
- if (reply != null) {
- List<DatanodeDetails> dnList = reply.getDatanodes();
- if (!dnList.isEmpty()) {
- Pipeline pipe = xceiverClient.getPipeline();
-
- LOG.warn("Failed to commit BlockId {} on {}. Failed nodes: {}",
- blockID, pipe, dnList);
- failedServers.addAll(dnList);
- }
- }
- } catch (IOException ioe) {
- setIoException(ioe);
- throw getIoException();
- }
- }
-
- private CompletableFuture<ContainerProtos.ContainerCommandResponseProto> executePutBlock()
- throws IOException {
- checkOpen();
- long flushPos = totalDataFlushedLength;
- Preconditions.checkNotNull(bufferList);
- List<ByteBuffer> byteBufferList = bufferList;
- bufferList = null;
- Preconditions.checkNotNull(byteBufferList);
-
- CompletableFuture<ContainerProtos.ContainerCommandResponseProto> flushFuture;
- try {
- XceiverClientReply asyncReply =
- putBlockAsync(xceiverClient, containerBlockData.build());
- CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
- asyncReply.getResponse();
- flushFuture = future.thenApplyAsync(e -> {
- try {
- validateResponse(e);
- } catch (IOException sce) {
- throw new CompletionException(sce);
- }
- // if the ioException is not set, putBlock is successful
- if (getIoException() == null) {
- BlockID responseBlockID = BlockID.getFromProtobuf(
- e.getPutBlock().getCommittedBlockLength().getBlockID());
- Preconditions.checkState(blockID.getContainerBlockID()
- .equals(responseBlockID.getContainerBlockID()));
- // updates the bcsId of the block
- blockID = responseBlockID;
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "Adding index " + asyncReply.getLogIndex() + " commitMap size "
- + commitWatcher.getCommitInfoMapSize() + " flushLength "
- + flushPos + " numBuffers " + byteBufferList.size()
- + " blockID " + blockID + " bufferPool size" + bufferPool
- .getSize() + " currentBufferIndex " + bufferPool
- .getCurrentBufferIndex());
- }
- // for standalone protocol, logIndex will always be 0.
- commitWatcher
- .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList);
- }
- return e;
- }, responseExecutor).exceptionally(e -> {
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "putBlock failed for blockID " + blockID + " with exception " + e
- .getLocalizedMessage());
- }
- CompletionException ce = new CompletionException(e);
- setIoException(ce);
- throw ce;
- });
- } catch (IOException | InterruptedException | ExecutionException e) {
- throw new IOException(
- "Unexpected Storage Container Exception: " + e.toString(), e);
- }
- commitWatcher.getFutureMap().put(flushPos, flushFuture);
- return flushFuture;
- }
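executePutBlock() above validates each asynchronous reply on a dedicated single-threaded executor and surfaces failures through CompletionException. The sketch below shows the same thenApplyAsync/exceptionally shape with plain JDK types, independent of the Ozone protobuf classes.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class AsyncValidateSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService responseExecutor = Executors.newSingleThreadExecutor();

    // Stands in for the async RPC reply (asyncReply.getResponse() above).
    CompletableFuture<String> response = CompletableFuture.supplyAsync(() -> "OK");

    CompletableFuture<String> validated = response.thenApplyAsync(r -> {
      // Validation failures are wrapped in CompletionException, mirroring
      // the validateResponse() handling in the deleted code.
      if (!"OK".equals(r)) {
        throw new CompletionException(new IllegalStateException("bad reply: " + r));
      }
      return r;
    }, responseExecutor).exceptionally(t -> {
      // Record the failure; callers waiting on the future see it as well.
      System.err.println("request failed: " + t);
      throw new CompletionException(t);
    });

    System.out.println("validated reply: " + validated.get());
    responseExecutor.shutdown();
  }
}
```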
-
- @Override
- public void flush() throws IOException {
- if (xceiverClientManager != null && xceiverClient != null
- && bufferPool != null && bufferPool.getSize() > 0) {
- try {
- handleFlush();
- } catch (InterruptedException | ExecutionException e) {
- // just set the exception here as well in order to maintain sanctity of
- // ioException field
- setIoException(e);
- adjustBuffersOnException();
- throw getIoException();
- }
- }
- }
-
-
- private void writeChunk(ByteBuffer buffer)
- throws IOException {
- // This data in the buffer will be pushed to datanode and a reference will
- // be added to the bufferList. Once putBlock gets executed, this list will
- // be marked null. Hence, during first writeChunk call after every putBlock
- // call or during the first call to writeChunk here, the list will be null.
-
- if (bufferList == null) {
- bufferList = new ArrayList<>();
- }
- bufferList.add(buffer);
- // Please note: we are not flipping the slice when we write since
- // the slices point to the currentBuffer start and end as needed for
- // the chunk write. Also please note, Duplicate does not create a
- // copy of data, it only creates metadata that points to the data
- // stream.
- ByteBuffer chunk = buffer.duplicate();
- chunk.position(0);
- chunk.limit(buffer.position());
- writeChunkToContainer(chunk);
- }
-
- private void handleFlush()
- throws IOException, InterruptedException, ExecutionException {
- checkOpen();
- // flush the last chunk data residing on the currentBuffer
- if (totalDataFlushedLength < writtenDataLength) {
- ByteBuffer currentBuffer = bufferPool.getCurrentBuffer();
- Preconditions.checkArgument(currentBuffer.position() > 0);
- if (currentBuffer.position() != chunkSize) {
- writeChunk(currentBuffer);
- }
- // This can be a partially filled chunk. Since we are flushing the buffer
- // here, we just limit this buffer to the current position so that the
- // next write will happen in a new buffer.
- updateFlushLength();
- executePutBlock();
- }
- waitOnFlushFutures();
- watchForCommit(false);
- // just check again if the exception is hit while waiting for the
- // futures to ensure flush has indeed succeeded
-
- // irrespective of whether the commitIndex2flushedDataMap is empty
- // or not, ensure there is no exception set
- checkOpen();
- }
-
- @Override
- public void close() throws IOException {
- if (xceiverClientManager != null && xceiverClient != null
- && bufferPool != null && bufferPool.getSize() > 0) {
- try {
- handleFlush();
- } catch (InterruptedException | ExecutionException e) {
- setIoException(e);
- adjustBuffersOnException();
- throw getIoException();
- } finally {
- cleanup(false);
- }
- // TODO: Turn the below buffer empty check on when Standalone pipeline
- // is removed in the write path in tests
- // Preconditions.checkArgument(buffer.position() == 0);
- // bufferPool.checkBufferPoolEmpty();
-
- }
- }
-
- private void waitOnFlushFutures()
- throws InterruptedException, ExecutionException {
- CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
- commitWatcher.getFutureMap().values().toArray(
- new CompletableFuture[commitWatcher.getFutureMap().size()]));
- // wait for all the transactions to complete
- combinedFuture.get();
- }
-
- private void validateResponse(
- ContainerProtos.ContainerCommandResponseProto responseProto)
- throws IOException {
- try {
- // if the ioException is already set, it means a prev request has failed
- // just throw the exception. The current operation will fail with the
- // original error
- IOException exception = getIoException();
- if (exception != null) {
- throw exception;
- }
- ContainerProtocolCalls.validateContainerResponse(responseProto);
- } catch (StorageContainerException sce) {
- LOG.error("Unexpected Storage Container Exception: ", sce);
- setIoException(sce);
- throw sce;
- }
- }
-
-
- private void setIoException(Exception e) {
- if (getIoException() == null) {
- IOException exception = new IOException(
- "Unexpected Storage Container Exception: " + e.toString(), e);
- ioException.compareAndSet(null, exception);
- }
- }
-
- public void cleanup(boolean invalidateClient) {
- if (xceiverClientManager != null) {
- xceiverClientManager.releaseClient(xceiverClient, invalidateClient);
- }
- xceiverClientManager = null;
- xceiverClient = null;
- commitWatcher.cleanup();
- if (bufferList != null) {
- bufferList.clear();
- }
- bufferList = null;
- responseExecutor.shutdown();
- }
-
- /**
- * Checks if the stream is open or an exception has occurred.
- * If not, throws an exception.
- *
- * @throws IOException if stream is closed
- */
- private void checkOpen() throws IOException {
- if (isClosed()) {
- throw new IOException("BlockOutputStream has been closed.");
- } else if (getIoException() != null) {
- adjustBuffersOnException();
- throw getIoException();
- }
- }
-
- public boolean isClosed() {
- return xceiverClient == null;
- }
-
- /**
- * Writes buffered data as a new chunk to the container and saves chunk
- * information to be used later in putKey call.
- *
- * @throws IOException if there is an I/O error while performing the call
- * @throws OzoneChecksumException if there is an error while computing
- * checksum
- */
- private void writeChunkToContainer(ByteBuffer chunk) throws IOException {
- int effectiveChunkSize = chunk.remaining();
- ByteString data = bufferPool.byteStringConversion().apply(chunk);
- Checksum checksum = new Checksum(checksumType, bytesPerChecksum);
- ChecksumData checksumData = checksum.computeChecksum(chunk);
- ChunkInfo chunkInfo = ChunkInfo.newBuilder()
- .setChunkName(blockID.getLocalID() + "_chunk_" + ++chunkIndex)
- .setOffset(0)
- .setLen(effectiveChunkSize)
- .setChecksumData(checksumData.getProtoBufMessage())
- .build();
-
- try {
- XceiverClientReply asyncReply =
- writeChunkAsync(xceiverClient, chunkInfo, blockID, data);
- CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
- asyncReply.getResponse();
- future.thenApplyAsync(e -> {
- try {
- validateResponse(e);
- } catch (IOException sce) {
- future.completeExceptionally(sce);
- }
- return e;
- }, responseExecutor).exceptionally(e -> {
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "writing chunk failed " + chunkInfo.getChunkName() + " blockID "
- + blockID + " with exception " + e.getLocalizedMessage());
- }
- CompletionException ce = new CompletionException(e);
- setIoException(ce);
- throw ce;
- });
- } catch (IOException | InterruptedException | ExecutionException e) {
- throw new IOException(
- "Unexpected Storage Container Exception: " + e.toString(), e);
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "writing chunk " + chunkInfo.getChunkName() + " blockID " + blockID
- + " length " + effectiveChunkSize);
- }
- containerBlockData.addChunks(chunkInfo);
- }
-
- @VisibleForTesting
- public void setXceiverClient(XceiverClientSpi xceiverClient) {
- this.xceiverClient = xceiverClient;
- }
-}
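writeChunk() above builds the outgoing chunk as a duplicate() view of the current buffer limited to [0, position()), so no bytes are copied. A JDK-only sketch of that trick:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public final class DuplicateViewSketch {
  public static void main(String[] args) {
    ByteBuffer current = ByteBuffer.allocate(16);
    current.put("hello".getBytes(StandardCharsets.UTF_8)); // position() is now 5

    // duplicate() shares the backing data but has independent position/limit,
    // so the view can be bounded to [0, current.position()) without a copy.
    ByteBuffer chunk = current.duplicate();
    chunk.position(0);
    chunk.limit(current.position());

    byte[] out = new byte[chunk.remaining()];
    chunk.get(out);
    System.out.println(new String(out, StandardCharsets.UTF_8)); // hello

    // The original buffer's position is untouched; further writes append.
    current.put(" world".getBytes(StandardCharsets.UTF_8));
  }
}
```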
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
deleted file mode 100644
index 6d534579c8605..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.ByteStringConversion;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.function.Function;
-
-/**
- * This class creates and manages pool of n buffers.
- */
-public class BufferPool {
-
- private List<ByteBuffer> bufferList;
- private int currentBufferIndex;
- private final int bufferSize;
- private final int capacity;
- private final Function<ByteBuffer, ByteString> byteStringConversion;
-
- public BufferPool(int bufferSize, int capacity) {
- this(bufferSize, capacity,
- ByteStringConversion.createByteBufferConversion(null));
- }
-
- public BufferPool(int bufferSize, int capacity,
- Function<ByteBuffer, ByteString> byteStringConversion) {
- this.capacity = capacity;
- this.bufferSize = bufferSize;
- bufferList = new ArrayList<>(capacity);
- currentBufferIndex = -1;
- this.byteStringConversion = byteStringConversion;
- }
-
- public Function<ByteBuffer, ByteString> byteStringConversion() {
- return byteStringConversion;
- }
-
- public ByteBuffer getCurrentBuffer() {
- return currentBufferIndex == -1 ? null : bufferList.get(currentBufferIndex);
- }
-
- /**
- * If currentBufferIndex is less than the buffer list size - 1, the next
- * buffer in the list has already been freed up for rewriting; reuse that
- * next available buffer in such cases.
- *
- * If currentBufferIndex == bufferList.size() - 1 and the list size is still
- * less than the capacity to be allocated, just allocate a new buffer of the
- * configured buffer size.
- *
- */
- public ByteBuffer allocateBufferIfNeeded() {
- ByteBuffer buffer = getCurrentBuffer();
- if (buffer != null && buffer.hasRemaining()) {
- return buffer;
- }
- if (currentBufferIndex < bufferList.size() - 1) {
- buffer = getBuffer(currentBufferIndex + 1);
- } else {
- buffer = ByteBuffer.allocate(bufferSize);
- bufferList.add(buffer);
- }
- Preconditions.checkArgument(bufferList.size() <= capacity);
- currentBufferIndex++;
- // TODO: Turn the below precondition check on when Standalone pipeline
- // is removed in the write path in tests
- // Preconditions.checkArgument(buffer.position() == 0);
- return buffer;
- }
-
- public void releaseBuffer(ByteBuffer byteBuffer) {
- // always remove from head of the list and append at last
- ByteBuffer buffer = bufferList.remove(0);
- // Ensure the buffer to be removed is always at the head of the list.
- Preconditions.checkArgument(buffer.equals(byteBuffer));
- buffer.clear();
- bufferList.add(buffer);
- Preconditions.checkArgument(currentBufferIndex >= 0);
- currentBufferIndex--;
- }
-
- public void clearBufferPool() {
- bufferList.clear();
- currentBufferIndex = -1;
- }
-
- public void checkBufferPoolEmpty() {
- Preconditions.checkArgument(computeBufferData() == 0);
- }
-
- public long computeBufferData() {
- return bufferList.stream().mapToInt(value -> value.position())
- .sum();
- }
-
- public int getSize() {
- return bufferList.size();
- }
-
- public ByteBuffer getBuffer(int index) {
- return bufferList.get(index);
- }
-
- int getCurrentBufferIndex() {
- return currentBufferIndex;
- }
-
-}
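To make the pool's reuse semantics concrete, the hedged sketch below drives BufferPool (from the file deleted above) through an allocate-fill-release cycle: a fresh buffer is allocated only when the current one is full and no freed buffer is available, and releaseBuffer() returns the head of the list for reuse. In BlockOutputStream the release is triggered by the CommitWatcher once the data is acknowledged; here it is called directly for illustration.

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hdds.scm.storage.BufferPool;

public final class BufferPoolSketch {
  public static void main(String[] args) {
    // At most two 4 KB buffers.
    BufferPool pool = new BufferPool(4096, 2);

    ByteBuffer first = pool.allocateBufferIfNeeded();  // allocates buffer 0
    first.put(new byte[4096]);                         // fill it completely
    ByteBuffer second = pool.allocateBufferIfNeeded(); // buffer 0 full -> buffer 1
    second.put(new byte[4096]);

    System.out.println("buffered bytes: " + pool.computeBufferData()); // 8192

    // Release the oldest buffer (head of the list); it is cleared and moved
    // to the tail, ready to be handed out again.
    pool.releaseBuffer(first);
    ByteBuffer reused = pool.allocateBufferIfNeeded();
    System.out.println("same instance reused: " + (reused == first)); // true
  }
}
```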
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
deleted file mode 100644
index f94d2d87340be..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-/**
- * An {@link InputStream} called from BlockInputStream to read a chunk from the
- * container. Each chunk may contain multiple underlying {@link ByteBuffer}
- * instances.
- */
-public class ChunkInputStream extends InputStream implements Seekable {
-
- private ChunkInfo chunkInfo;
- private final long length;
- private final BlockID blockID;
- private XceiverClientSpi xceiverClient;
- private boolean verifyChecksum;
- private boolean allocated = false;
-
- // Buffer to store the chunk data read from the DN container
- private List<ByteBuffer> buffers;
-
- // Index of the buffers corresponding to the current position of the buffers
- private int bufferIndex;
-
- // The offset of the current data residing in the buffers w.r.t the start
- // of chunk data
- private long bufferOffset;
-
- // The number of bytes of chunk data residing in the buffers currently
- private long bufferLength;
-
- // Position of the ChunkInputStream is maintained by this variable if a
- // seek is performed. This position is w.r.t. the chunk only and not the
- // block or key. This variable is set only if either the buffers are not
- // yet allocated or the allocated buffers do not cover the seeked
- // position. Once the chunk is read, this variable is reset.
- private long chunkPosition = -1;
-
- private static final int EOF = -1;
-
- ChunkInputStream(ChunkInfo chunkInfo, BlockID blockId,
- XceiverClientSpi xceiverClient, boolean verifyChecksum) {
- this.chunkInfo = chunkInfo;
- this.length = chunkInfo.getLen();
- this.blockID = blockId;
- this.xceiverClient = xceiverClient;
- this.verifyChecksum = verifyChecksum;
- }
-
- public synchronized long getRemaining() throws IOException {
- return length - getPos();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public synchronized int read() throws IOException {
- checkOpen();
- int available = prepareRead(1);
- int dataout = EOF;
-
- if (available == EOF) {
- // There is no more data in the chunk stream. The buffers should have
- // been released by now
- Preconditions.checkState(buffers == null);
- } else {
- dataout = Byte.toUnsignedInt(buffers.get(bufferIndex).get());
- }
-
- if (chunkStreamEOF()) {
- // consumer might use getPos to determine EOF,
- // so release buffers when serving the last byte of data
- releaseBuffers();
- }
-
- return dataout;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public synchronized int read(byte[] b, int off, int len) throws IOException {
- // According to the JavaDocs for InputStream, it is recommended that
- // subclasses provide an override of bulk read if possible for performance
- // reasons. In addition to performance, we need to do it for correctness
- // reasons. The Ozone REST service uses PipedInputStream and
- // PipedOutputStream to relay HTTP response data between a Jersey thread and
- // a Netty thread. It turns out that PipedInputStream/PipedOutputStream
- // have a subtle dependency (bug?) on the wrapped stream providing separate
- // implementations of single-byte read and bulk read. Without this, get key
- // responses might close the connection before writing all of the bytes
- // advertised in the Content-Length.
- if (b == null) {
- throw new NullPointerException();
- }
- if (off < 0 || len < 0 || len > b.length - off) {
- throw new IndexOutOfBoundsException();
- }
- if (len == 0) {
- return 0;
- }
- checkOpen();
- int total = 0;
- while (len > 0) {
- int available = prepareRead(len);
- if (available == EOF) {
- // There is no more data in the chunk stream. The buffers should have
- // been released by now
- Preconditions.checkState(buffers == null);
- return total != 0 ? total : EOF;
- }
- buffers.get(bufferIndex).get(b, off + total, available);
- len -= available;
- total += available;
- }
-
- if (chunkStreamEOF()) {
- // smart consumers determine EOF by calling getPos()
- // so we release buffers when serving the final bytes of data
- releaseBuffers();
- }
-
- return total;
- }
-
- /**
- * Seeks the ChunkInputStream to the specified position. This is done by
- * updating the chunkPosition to the seeked position in case the buffers
- * are not allocated or buffers do not contain the data corresponding to
- * the seeked position (determined by buffersHavePosition()). Otherwise,
- * the buffers position is updated to the seeked position.
- */
- @Override
- public synchronized void seek(long pos) throws IOException {
- if (pos < 0 || pos >= length) {
- if (pos == 0) {
- // It is possible for length and pos to be zero in which case
- // seek should return instead of throwing exception
- return;
- }
- throw new EOFException("EOF encountered at pos: " + pos + " for chunk: "
- + chunkInfo.getChunkName());
- }
-
- if (buffersHavePosition(pos)) {
- // The bufferPosition is w.r.t the current chunk.
- // Adjust the bufferIndex and position to the seeked position.
- adjustBufferPosition(pos - bufferOffset);
- } else {
- chunkPosition = pos;
- }
- }
-
- @Override
- public synchronized long getPos() throws IOException {
- if (chunkPosition >= 0) {
- return chunkPosition;
- }
- if (chunkStreamEOF()) {
- return length;
- }
- if (buffersHaveData()) {
- return bufferOffset + buffers.get(bufferIndex).position();
- }
- if (buffersAllocated()) {
- return bufferOffset + bufferLength;
- }
- return 0;
- }
-
- @Override
- public boolean seekToNewSource(long targetPos) throws IOException {
- return false;
- }
-
- @Override
- public synchronized void close() {
- if (xceiverClient != null) {
- xceiverClient = null;
- }
- }
-
- /**
- * Checks if the stream is open. If not, throw an exception.
- *
- * @throws IOException if stream is closed
- */
- protected synchronized void checkOpen() throws IOException {
- if (xceiverClient == null) {
- throw new IOException("BlockInputStream has been closed.");
- }
- }
-
- /**
- * Prepares to read by advancing through buffers or allocating new buffers,
- * as needed until it finds data to return, or encounters EOF.
- * @param len desired length of data to read
- * @return length of data available to read, possibly less than desired length
- */
- private synchronized int prepareRead(int len) throws IOException {
- for (;;) {
- if (chunkPosition >= 0) {
- if (buffersHavePosition(chunkPosition)) {
- // The current buffers have the seeked position. Adjust the buffer
- // index and position to point to the chunkPosition.
- adjustBufferPosition(chunkPosition - bufferOffset);
- } else {
- // Read the required chunk data so that the buffers contain the
- // data for the seeked position
- readChunkFromContainer(len);
- }
- }
- if (buffersHaveData()) {
- // Data is available from buffers
- ByteBuffer bb = buffers.get(bufferIndex);
- return len > bb.remaining() ? bb.remaining() : len;
- } else if (dataRemainingInChunk()) {
- // There is more data in the chunk stream which has not
- // been read into the buffers yet.
- readChunkFromContainer(len);
- } else {
- // All available input from this chunk stream has been consumed.
- return EOF;
- }
- }
- }
-
- /**
- * Reads full or partial Chunk from DN Container based on the current
- * position of the ChunkInputStream, the number of bytes of data to read
- * and the checksum boundaries.
- * If successful, then the read data is saved in the buffers so that
- * subsequent read calls can utilize it.
- * @param len number of bytes of data to be read
- * @throws IOException if there is an I/O error while performing the call
- * to Datanode
- */
- private synchronized void readChunkFromContainer(int len) throws IOException {
-
- // index of first byte to be read from the chunk
- long startByteIndex;
- if (chunkPosition >= 0) {
- // If seek operation was called to advance the buffer position, the
- // chunk should be read from that position onwards.
- startByteIndex = chunkPosition;
- } else {
- // Start reading the chunk from the last chunkPosition onwards.
- startByteIndex = bufferOffset + bufferLength;
- }
-
- if (verifyChecksum) {
- // Update the bufferOffset and bufferLength as per the checksum
- // boundary requirement.
- computeChecksumBoundaries(startByteIndex, len);
- } else {
- // Read from the startByteIndex
- bufferOffset = startByteIndex;
- bufferLength = len;
- }
-
- // Adjust the chunkInfo so that only the required bytes are read from
- // the chunk.
- final ChunkInfo adjustedChunkInfo = ChunkInfo.newBuilder(chunkInfo)
- .setOffset(bufferOffset)
- .setLen(bufferLength)
- .build();
-
- ByteString byteString = readChunk(adjustedChunkInfo);
-
- buffers = byteString.asReadOnlyByteBufferList();
- bufferIndex = 0;
- allocated = true;
-
- // If the stream was seeked to position before, then the buffer
- // position should be adjusted as the reads happen at checksum boundaries.
- // The buffers position might need to be adjusted for the following
- // scenarios:
- // 1. Stream was seeked to a position before the chunk was read
- // 2. Chunk was read from index < the current position to account for
- // checksum boundaries.
- adjustBufferPosition(startByteIndex - bufferOffset);
- }
-
- /**
- * Send RPC call to get the chunk from the container.
- */
- @VisibleForTesting
- protected ByteString readChunk(ChunkInfo readChunkInfo) throws IOException {
- ReadChunkResponseProto readChunkResponse;
-
- try {
- List<CheckedBiFunction> validators =
- ContainerProtocolCalls.getValidatorList();
- validators.add(validator);
-
- readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient,
- readChunkInfo, blockID, validators);
-
- } catch (IOException e) {
- if (e instanceof StorageContainerException) {
- throw e;
- }
- throw new IOException("Unexpected OzoneException: " + e.toString(), e);
- }
-
- return readChunkResponse.getData();
- }
-
- private CheckedBiFunction<ContainerCommandRequestProto,
- ContainerCommandResponseProto, IOException> validator =
- (request, response) -> {
- final ChunkInfo reqChunkInfo =
- request.getReadChunk().getChunkData();
-
- ReadChunkResponseProto readChunkResponse = response.getReadChunk();
- ByteString byteString = readChunkResponse.getData();
-
- if (byteString.size() != reqChunkInfo.getLen()) {
- // Bytes read from chunk should be equal to chunk size.
- throw new OzoneChecksumException(String
- .format("Inconsistent read for chunk=%s len=%d bytesRead=%d",
- reqChunkInfo.getChunkName(), reqChunkInfo.getLen(),
- byteString.size()));
- }
-
- if (verifyChecksum) {
- ChecksumData checksumData = ChecksumData.getFromProtoBuf(
- chunkInfo.getChecksumData());
-
- // ChecksumData stores checksum for each 'numBytesPerChecksum'
- // number of bytes in a list. Compute the index of the first
- // checksum to match with the read data
-
- int checksumStartIndex = (int) (reqChunkInfo.getOffset() /
- checksumData.getBytesPerChecksum());
- Checksum.verifyChecksum(
- byteString, checksumData, checksumStartIndex);
- }
- };
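
The checksum index arithmetic used by the validator is plain integer division. A hypothetical standalone version, with a worked example, could look like this:

// Index of the first checksum entry covering a read that starts at readOffset,
// when one checksum is stored per bytesPerChecksum bytes of the chunk.
static int checksumStartIndex(long readOffset, int bytesPerChecksum) {
  return (int) (readOffset / bytesPerChecksum);
}

// Example: with 100 bytes per checksum, a read starting at chunk offset 250 is
// verified starting from checksum index 2, which covers bytes 200..299.
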
-
- /**
- * Compute the offset and length of the bytes that need to be read from the
- * chunk file so that the read covers whole checksum boundaries around the
- * requested start and end indices.
- * For example, let's say the client is reading from index 120 to 450 in the
- * chunk. And let's say checksum is stored for every 100 bytes in the chunk
- * i.e. the first checksum is for bytes from index 0 to 99, the next for
- * bytes from index 100 to 199 and so on. To verify bytes from 120 to 450,
- * we would need to read from bytes 100 to 499 so that checksum
- * verification can be done.
- *
- * @param startByteIndex the first byte index to be read by client
- * @param dataLen number of bytes to be read from the chunk
- */
- private void computeChecksumBoundaries(long startByteIndex, int dataLen) {
-
- int bytesPerChecksum = chunkInfo.getChecksumData().getBytesPerChecksum();
- // index of the last byte to be read from chunk, inclusively.
- final long endByteIndex = startByteIndex + dataLen - 1;
-
- bufferOffset = (startByteIndex / bytesPerChecksum)
- * bytesPerChecksum; // inclusive
- final long endIndex = ((endByteIndex / bytesPerChecksum) + 1)
- * bytesPerChecksum; // exclusive
- bufferLength = Math.min(endIndex, length) - bufferOffset;
- }
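
The worked example in the Javadoc above maps directly onto the alignment rule below. This is a self-contained restatement with made-up names, not the production method, but it produces the same offset/length pair:

// Align a requested read window outward to checksum boundaries, clamped to the
// chunk length. Returns {offset (inclusive), length}.
static long[] alignToChecksumBoundaries(long startByteIndex, int dataLen,
    int bytesPerChecksum, long chunkLength) {
  long endByteIndex = startByteIndex + dataLen - 1;                       // inclusive
  long offset = (startByteIndex / bytesPerChecksum) * bytesPerChecksum;   // round down
  long end = ((endByteIndex / bytesPerChecksum) + 1) * bytesPerChecksum;  // round up, exclusive
  return new long[] {offset, Math.min(end, chunkLength) - offset};
}

// Worked example from the Javadoc: startByteIndex = 120, dataLen = 331 (bytes
// 120..450), bytesPerChecksum = 100, chunkLength = 1000
//   -> offset = 100, length = 400, i.e. bytes 100..499 are read and verified.
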
-
- /**
- * Adjust the buffers' position to account for the seeked position and/or checksum
- * boundary reads.
- * @param bufferPosition the position to which the buffers must be advanced
- */
- private void adjustBufferPosition(long bufferPosition) {
- // The bufferPosition is w.r.t the current chunk.
- // Adjust the bufferIndex and position to the seeked chunkPosition.
- long tempOffset = 0;
- for (int i = 0; i < buffers.size(); i++) {
- if (bufferPosition - tempOffset >= buffers.get(i).capacity()) {
- tempOffset += buffers.get(i).capacity();
- } else {
- bufferIndex = i;
- break;
- }
- }
- buffers.get(bufferIndex).position((int) (bufferPosition - tempOffset));
-
- // Reset the chunkPosition as chunk stream has been initialized i.e. the
- // buffers have been allocated.
- resetPosition();
- }
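
For reference, the same walk can be written as a small standalone helper: given the list of read-only buffers and a position relative to bufferOffset, find which buffer holds that position and where the cursor should sit inside it. This is only an illustrative sketch under those assumptions, not the production code:

import java.nio.ByteBuffer;
import java.util.List;

final class BufferCursor {
  final int index;      // which buffer in the list
  final int position;   // position inside that buffer

  BufferCursor(int index, int position) {
    this.index = index;
    this.position = position;
  }

  static BufferCursor locate(List<ByteBuffer> buffers, long relativePos) {
    long skipped = 0;
    for (int i = 0; i < buffers.size(); i++) {
      int capacity = buffers.get(i).capacity();
      if (relativePos - skipped < capacity) {
        return new BufferCursor(i, (int) (relativePos - skipped));
      }
      skipped += capacity;
    }
    throw new IllegalArgumentException("Position beyond buffered data: " + relativePos);
  }
}
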
-
- /**
- * Return true if the buffers have been allocated data, false otherwise.
- */
- private boolean buffersAllocated() {
- return buffers != null && !buffers.isEmpty();
- }
-
- /**
- * Check if the buffers have any data remaining between the current
- * position and the limit.
- */
- private boolean buffersHaveData() {
- boolean hasData = false;
-
- if (buffersAllocated()) {
- while (bufferIndex < (buffers.size())) {
- if (buffers.get(bufferIndex).hasRemaining()) {
- // current buffer has data
- hasData = true;
- break;
- } else {
- if (buffersRemaining()) {
- // move to next available buffer
- ++bufferIndex;
- Preconditions.checkState(bufferIndex < buffers.size());
- } else {
- // no more buffers remaining
- break;
- }
- }
- }
- }
-
- return hasData;
- }
-
- private boolean buffersRemaining() {
- return (bufferIndex < (buffers.size() - 1));
- }
-
- /**
- * Check if current buffers have the data corresponding to the input position.
- */
- private boolean buffersHavePosition(long pos) {
- // Check if buffers have been allocated
- if (buffersAllocated()) {
- // Check if the current buffers cover the input position
- return pos >= bufferOffset &&
- pos < bufferOffset + bufferLength;
- }
- return false;
- }
-
- /**
- * Check if there is more data in the chunk which has not yet been read
- * into the buffers.
- */
- private boolean dataRemainingInChunk() {
- long bufferPos;
- if (chunkPosition >= 0) {
- bufferPos = chunkPosition;
- } else {
- bufferPos = bufferOffset + bufferLength;
- }
-
- return bufferPos < length;
- }
-
- /**
- * Check if end of chunkStream has been reached.
- */
- private boolean chunkStreamEOF() {
- if (!allocated) {
- // Chunk data has not been read yet
- return false;
- }
-
- if (buffersHaveData() || dataRemainingInChunk()) {
- return false;
- } else {
- Preconditions.checkState(bufferOffset + bufferLength == length,
- "EOF detected, but not at the last byte of the chunk");
- return true;
- }
- }
-
- /**
- * If EOF is reached, release the buffers.
- */
- private void releaseBuffers() {
- buffers = null;
- bufferIndex = 0;
- }
-
- /**
- * Reset the chunkPosition once the buffers are allocated.
- */
- void resetPosition() {
- this.chunkPosition = -1;
- }
-
- String getChunkName() {
- return chunkInfo.getChunkName();
- }
-
- protected long getLength() {
- return length;
- }
-
- @VisibleForTesting
- protected long getChunkPosition() {
- return chunkPosition;
- }
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
deleted file mode 100644
index 1d9d55bfbfbb6..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This class maintains the map of the commitIndexes to be watched for
- * successful replication in the datanodes in a given pipeline. It also releases
- * the buffers associated with the user data back to {@link BufferPool} once
- * the minimum replication criterion is achieved during an Ozone key write.
- */
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.ExecutionException;
-
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.stream.Collectors;
-
-/**
- * This class executes watchForCommit on the Ratis pipeline and releases
- * buffers once data successfully gets replicated.
- */
-public class CommitWatcher {
-
- private static final Logger LOG =
- LoggerFactory.getLogger(CommitWatcher.class);
-
- // A reference to the pool of buffers holding the data
- private BufferPool bufferPool;
-
- // The map should maintain the keys (logIndexes) in order so that while
- // removing entries we always update the flushed data length incrementally.
- // In addition, the list of buffers associated with each logIndex will be
- // released back to the buffer pool.
- private ConcurrentSkipListMap<Long, List<ByteBuffer>>
- commitIndex2flushedDataMap;
-
- // future Map to hold up all putBlock futures
- private ConcurrentHashMap<Long,
- CompletableFuture<ContainerProtos.ContainerCommandResponseProto>> futureMap;
-
- private XceiverClientSpi xceiverClient;
-
- private final long watchTimeout;
-
- // total data which has been successfully flushed and acknowledged
- // by all servers
- private long totalAckDataLength;
-
- public CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient,
- long watchTimeout) {
- this.bufferPool = bufferPool;
- this.xceiverClient = xceiverClient;
- this.watchTimeout = watchTimeout;
- commitIndex2flushedDataMap = new ConcurrentSkipListMap<>();
- totalAckDataLength = 0;
- futureMap = new ConcurrentHashMap<>();
- }
-
- /**
- * Releases the acknowledged buffers and updates totalAckDataLength. In case of failure,
- * we will read the data starting from totalAckDataLength.
- */
- private long releaseBuffers(List<Long> indexes) {
- Preconditions.checkArgument(!commitIndex2flushedDataMap.isEmpty());
- for (long index : indexes) {
- Preconditions.checkState(commitIndex2flushedDataMap.containsKey(index));
- List<ByteBuffer> buffers = commitIndex2flushedDataMap.remove(index);
- long length = buffers.stream().mapToLong(value -> {
- int pos = value.position();
- return pos;
- }).sum();
- totalAckDataLength += length;
- // clear the future object from the future Map
- Preconditions.checkNotNull(futureMap.remove(totalAckDataLength));
- for (ByteBuffer byteBuffer : buffers) {
- bufferPool.releaseBuffer(byteBuffer);
- }
- }
- return totalAckDataLength;
- }
-
- public void updateCommitInfoMap(long index, List<ByteBuffer> byteBufferList) {
- commitIndex2flushedDataMap
- .put(index, byteBufferList);
- }
-
- int getCommitInfoMapSize() {
- return commitIndex2flushedDataMap.size();
- }
-
- /**
- * Calls watch for commit for the first index in commitIndex2flushedDataMap to
- * the Ratis client.
- * @return reply from the raft client
- * @throws IOException in case watchForCommit fails
- */
- public XceiverClientReply watchOnFirstIndex() throws IOException {
- if (!commitIndex2flushedDataMap.isEmpty()) {
- // wait for the first commit index in the commitIndex2flushedDataMap
- // to get committed to all or majority of nodes in case timeout
- // happens.
- long index =
- commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).min()
- .getAsLong();
- if (LOG.isDebugEnabled()) {
- LOG.debug("waiting for first index " + index + " to catch up");
- }
- return watchForCommit(index);
- } else {
- return null;
- }
- }
-
- /**
- * Calls watch for commit for the last index in commitIndex2flushedDataMap to
- * the Ratis client.
- * @return reply from the raft client
- * @throws IOException in case watchForCommit fails
- */
- public XceiverClientReply watchOnLastIndex()
- throws IOException {
- if (!commitIndex2flushedDataMap.isEmpty()) {
- // wait for the commit index in the commitIndex2flushedDataMap
- // to get committed to all or majority of nodes in case timeout
- // happens.
- long index =
- commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).max()
- .getAsLong();
- if (LOG.isDebugEnabled()) {
- LOG.debug("waiting for last flush Index " + index + " to catch up");
- }
- return watchForCommit(index);
- } else {
- return null;
- }
- }
-
-
- private void adjustBuffers(long commitIndex) {
- List<Long> keyList = commitIndex2flushedDataMap.keySet().stream()
- .filter(p -> p <= commitIndex).collect(Collectors.toList());
- if (keyList.isEmpty()) {
- return;
- } else {
- releaseBuffers(keyList);
- }
- }
-
- // It may happen that, even though an exception was encountered, we still
- // managed to successfully flush up to a certain index. Make sure the buffers
- // only contain data which has not been sufficiently replicated.
- void releaseBuffersOnException() {
- adjustBuffers(xceiverClient.getReplicatedMinCommitIndex());
- }
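
The bookkeeping in adjustBuffers/releaseBuffers boils down to a sorted map keyed by Raft log index. A minimal sketch of the same idea with a plain TreeMap (class and method names are hypothetical) is shown below; the production class additionally completes the matching putBlock future and returns buffers to the BufferPool:

import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

class CommitBookkeeping {
  private final TreeMap<Long, List<ByteBuffer>> pending = new TreeMap<>();
  private long totalAckDataLength;

  void track(long logIndex, List<ByteBuffer> buffers) {
    pending.put(logIndex, buffers);
  }

  // Release everything replicated up to (and including) commitIndex and return
  // the total number of acknowledged bytes.
  long releaseUpTo(long commitIndex) {
    Map<Long, List<ByteBuffer>> acked = pending.headMap(commitIndex, true);
    for (List<ByteBuffer> buffers : acked.values()) {
      for (ByteBuffer buffer : buffers) {
        totalAckDataLength += buffer.position();  // bytes written into this buffer
      }
    }
    acked.clear();  // the view is backed by 'pending', so this removes the entries
    return totalAckDataLength;
  }
}
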
-
-
- /**
- * Calls the watchForCommit API of the Ratis client. For the standalone
- * client, it is a no-op.
- * @param commitIndex log index to watch for
- * @return minimum commit index replicated to all nodes
- * @throws IOException in case the watch times out
- */
- public XceiverClientReply watchForCommit(long commitIndex)
- throws IOException {
- long index;
- try {
- XceiverClientReply reply =
- xceiverClient.watchForCommit(commitIndex, watchTimeout);
- if (reply == null) {
- index = 0;
- } else {
- index = reply.getLogIndex();
- }
- adjustBuffers(index);
- return reply;
- } catch (TimeoutException | InterruptedException | ExecutionException e) {
- LOG.warn("watchForCommit failed for index " + commitIndex, e);
- IOException ioException = new IOException(
- "Unexpected Storage Container Exception: " + e.toString(), e);
- releaseBuffersOnException();
- throw ioException;
- }
- }
-
- @VisibleForTesting
- public ConcurrentSkipListMap<Long, List<ByteBuffer>> getCommitIndex2flushedDataMap() {
- return commitIndex2flushedDataMap;
- }
-
- public ConcurrentHashMap<Long,
- CompletableFuture<ContainerProtos.ContainerCommandResponseProto>> getFutureMap() {
- return futureMap;
- }
-
- public long getTotalAckDataLength() {
- return totalAckDataLength;
- }
-
- public void cleanup() {
- if (commitIndex2flushedDataMap != null) {
- commitIndex2flushedDataMap.clear();
- }
- if (futureMap != null) {
- futureMap.clear();
- }
- commitIndex2flushedDataMap = null;
- }
-}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
deleted file mode 100644
index 6e7ce948784d0..0000000000000
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-/**
- * Low level IO streams to upload/download chunks from container service.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
deleted file mode 100644
index 042bfd941743e..0000000000000
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import com.google.common.primitives.Bytes;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.security.token.Token;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-
-import static org.apache.hadoop.hdds.scm.storage.TestChunkInputStream.generateRandomData;
-
-/**
- * Tests for {@link BlockInputStream}'s functionality.
- */
-public class TestBlockInputStream {
-
- private static final int CHUNK_SIZE = 100;
- private static Checksum checksum;
-
- private BlockInputStream blockStream;
- private byte[] blockData;
- private int blockSize;
- private List<ChunkInfo> chunks;
- private Map<String, byte[]> chunkDataMap;
-
- @Before
- public void setup() throws Exception {
- BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
- checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE);
- createChunkList(5);
-
- blockStream = new DummyBlockInputStream(blockID, blockSize, null, null,
- false, null);
- }
-
- /**
- * Create a mock list of chunks. The first n-1 chunks have length CHUNK_SIZE
- * and the last chunk has length CHUNK_SIZE/2.
- */
- private void createChunkList(int numChunks)
- throws Exception {
-
- chunks = new ArrayList<>(numChunks);
- chunkDataMap = new HashMap<>();
- blockData = new byte[0];
- int i, chunkLen;
- byte[] byteData;
- String chunkName;
-
- for (i = 0; i < numChunks; i++) {
- chunkName = "chunk-" + i;
- chunkLen = CHUNK_SIZE;
- if (i == numChunks - 1) {
- chunkLen = CHUNK_SIZE / 2;
- }
- byteData = generateRandomData(chunkLen);
- ChunkInfo chunkInfo = ChunkInfo.newBuilder()
- .setChunkName(chunkName)
- .setOffset(0)
- .setLen(chunkLen)
- .setChecksumData(checksum.computeChecksum(
- byteData, 0, chunkLen).getProtoBufMessage())
- .build();
-
- chunkDataMap.put(chunkName, byteData);
- chunks.add(chunkInfo);
-
- blockSize += chunkLen;
- blockData = Bytes.concat(blockData, byteData);
- }
- }
-
- /**
- * A dummy BlockInputStream to mock read block call to DN.
- */
- private class DummyBlockInputStream extends BlockInputStream {
-
- DummyBlockInputStream(BlockID blockId,
- long blockLen,
- Pipeline pipeline,
- Token<OzoneBlockTokenIdentifier> token,
- boolean verifyChecksum,
- XceiverClientManager xceiverClientManager) {
- super(blockId, blockLen, pipeline, token, verifyChecksum,
- xceiverClientManager);
- }
-
- @Override
- protected List<ChunkInfo> getChunkInfos() {
- return chunks;
- }
-
- @Override
- protected void addStream(ChunkInfo chunkInfo) {
- TestChunkInputStream testChunkInputStream = new TestChunkInputStream();
- getChunkStreams().add(testChunkInputStream.new DummyChunkInputStream(
- chunkInfo, null, null, false,
- chunkDataMap.get(chunkInfo.getChunkName()).clone()));
- }
-
- @Override
- protected synchronized void checkOpen() throws IOException {
- // No action needed
- }
- }
-
- private void seekAndVerify(int pos) throws Exception {
- blockStream.seek(pos);
- Assert.assertEquals("Current position of buffer does not match with the " +
- "seeked position", pos, blockStream.getPos());
- }
-
- /**
- * Match readData with the chunkData byte-wise.
- * @param readData Data read through ChunkInputStream
- * @param inputDataStartIndex first index (inclusive) in chunkData to compare
- * with read data
- * @param length the number of bytes of data to match starting from
- * inputDataStartIndex
- */
- private void matchWithInputData(byte[] readData, int inputDataStartIndex,
- int length) {
- for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) {
- Assert.assertEquals(blockData[i], readData[i - inputDataStartIndex]);
- }
- }
-
- @Test
- public void testSeek() throws Exception {
- // Seek to position 0
- int pos = 0;
- seekAndVerify(pos);
- Assert.assertEquals("ChunkIndex is incorrect", 0,
- blockStream.getChunkIndex());
-
- // Before BlockInputStream is initialized (initialization happens during
- // read operation), seek should update the BlockInputStream#blockPosition
- pos = CHUNK_SIZE;
- seekAndVerify(pos);
- Assert.assertEquals("ChunkIndex is incorrect", 0,
- blockStream.getChunkIndex());
- Assert.assertEquals(pos, blockStream.getBlockPosition());
-
- // Initialize the BlockInputStream. After initialization, the chunkIndex
- // should be updated to correspond to the seeked position.
- blockStream.initialize();
- Assert.assertEquals("ChunkIndex is incorrect", 1,
- blockStream.getChunkIndex());
-
- pos = (CHUNK_SIZE * 4) + 5;
- seekAndVerify(pos);
- Assert.assertEquals("ChunkIndex is incorrect", 4,
- blockStream.getChunkIndex());
-
- try {
- // Try seeking beyond the blockSize.
- pos = blockSize + 10;
- seekAndVerify(pos);
- Assert.fail("Seek to position beyond block size should fail.");
- } catch (EOFException e) {
- System.out.println(e);
- }
-
- // Seek to random positions between 0 and the block size.
- Random random = new Random();
- for (int i = 0; i < 10; i++) {
- pos = random.nextInt(blockSize);
- seekAndVerify(pos);
- }
- }
-
- @Test
- public void testRead() throws Exception {
- // Read 200 bytes of data starting from position 50. Chunk0 contains
- // indices 0 to 99, chunk1 from 100 to 199 and chunk2 from 200 to 299. So
- // the read should result in 3 ChunkInputStream reads.
- seekAndVerify(50);
- byte[] b = new byte[200];
- blockStream.read(b, 0, 200);
- matchWithInputData(b, 50, 200);
-
- // The new position of the blockInputStream should be the last index read
- // + 1.
- Assert.assertEquals(250, blockStream.getPos());
- Assert.assertEquals(2, blockStream.getChunkIndex());
- }
-
- @Test
- public void testSeekAndRead() throws Exception {
- // Seek to a position and read data
- seekAndVerify(50);
- byte[] b1 = new byte[100];
- blockStream.read(b1, 0, 100);
- matchWithInputData(b1, 50, 100);
-
- // Next read should start from the position of the last read + 1 i.e. 100
- byte[] b2 = new byte[100];
- blockStream.read(b2, 0, 100);
- matchWithInputData(b2, 150, 100);
- }
-}
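
The seek assertions in this test rely on a simple mapping from a block offset to a chunk index. A hypothetical helper (not part of the test) capturing that arithmetic for chunks of arbitrary sizes:

import java.util.Arrays;

final class ChunkIndexMath {
  // chunkStartOffsets[i] is the block offset at which chunk i starts, in
  // ascending order (e.g. {0, 100, 200, 300, 400} for five 100-byte chunks).
  static int chunkIndexFor(long blockOffset, long[] chunkStartOffsets) {
    int idx = Arrays.binarySearch(chunkStartOffsets, blockOffset);
    return idx >= 0 ? idx : -idx - 2;  // insertion point minus one for a miss
  }
}

// With CHUNK_SIZE = 100, an offset of CHUNK_SIZE * 4 + 5 = 405 falls in chunk 4,
// matching the expected chunkIndex in testSeek above.
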
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
deleted file mode 100644
index a5fe26b5619ab..0000000000000
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.EOFException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-/**
- * Tests for {@link ChunkInputStream}'s functionality.
- */
-public class TestChunkInputStream {
-
- private static final int CHUNK_SIZE = 100;
- private static final int BYTES_PER_CHECKSUM = 20;
- private static final String CHUNK_NAME = "dummyChunk";
- private static final Random RANDOM = new Random();
- private static Checksum checksum;
-
- private DummyChunkInputStream chunkStream;
- private ChunkInfo chunkInfo;
- private byte[] chunkData;
-
- @Before
- public void setup() throws Exception {
- checksum = new Checksum(ChecksumType.valueOf(
- OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT),
- BYTES_PER_CHECKSUM);
-
- chunkData = generateRandomData(CHUNK_SIZE);
-
- chunkInfo = ChunkInfo.newBuilder()
- .setChunkName(CHUNK_NAME)
- .setOffset(0)
- .setLen(CHUNK_SIZE)
- .setChecksumData(checksum.computeChecksum(
- chunkData, 0, CHUNK_SIZE).getProtoBufMessage())
- .build();
-
- chunkStream = new DummyChunkInputStream(chunkInfo, null, null, true);
- }
-
- static byte[] generateRandomData(int length) {
- byte[] bytes = new byte[length];
- RANDOM.nextBytes(bytes);
- return bytes;
- }
-
- /**
- * A dummy ChunkInputStream to mock read chunk calls to DN.
- */
- public class DummyChunkInputStream extends ChunkInputStream {
-
- // Stores the read chunk data in each readChunk call
- private List<ByteString> readByteBuffers = new ArrayList<>();
-
- DummyChunkInputStream(ChunkInfo chunkInfo,
- BlockID blockId,
- XceiverClientSpi xceiverClient,
- boolean verifyChecksum) {
- super(chunkInfo, blockId, xceiverClient, verifyChecksum);
- }
-
- public DummyChunkInputStream(ChunkInfo chunkInfo,
- BlockID blockId,
- XceiverClientSpi xceiverClient,
- boolean verifyChecksum,
- byte[] data) {
- super(chunkInfo, blockId, xceiverClient, verifyChecksum);
- chunkData = data;
- }
-
- @Override
- protected ByteString readChunk(ChunkInfo readChunkInfo) {
- ByteString byteString = ByteString.copyFrom(chunkData,
- (int) readChunkInfo.getOffset(),
- (int) readChunkInfo.getLen());
- readByteBuffers.add(byteString);
- return byteString;
- }
-
- @Override
- protected void checkOpen() {
- // No action needed
- }
- }
-
- /**
- * Match readData with the chunkData byte-wise.
- * @param readData Data read through ChunkInputStream
- * @param inputDataStartIndex first index (inclusive) in chunkData to compare
- * with read data
- * @param length the number of bytes of data to match starting from
- * inputDataStartIndex
- */
- private void matchWithInputData(byte[] readData, int inputDataStartIndex,
- int length) {
- for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) {
- Assert.assertEquals(chunkData[i], readData[i - inputDataStartIndex]);
- }
- }
-
- /**
- * Seek to a position and verify through getPos().
- */
- private void seekAndVerify(int pos) throws Exception {
- chunkStream.seek(pos);
- Assert.assertEquals("Current position of buffer does not match with the " +
- "seeked position", pos, chunkStream.getPos());
- }
-
- @Test
- public void testFullChunkRead() throws Exception {
- byte[] b = new byte[CHUNK_SIZE];
- chunkStream.read(b, 0, CHUNK_SIZE);
-
- matchWithInputData(b, 0, CHUNK_SIZE);
- }
-
- @Test
- public void testPartialChunkRead() throws Exception {
- int len = CHUNK_SIZE / 2;
- byte[] b = new byte[len];
-
- chunkStream.read(b, 0, len);
-
- matchWithInputData(b, 0, len);
-
- // To read chunk data from index 0 to 49 (len = 50), we need to read the
- // chunk from offset 0 to 59 (60 bytes) as there is a checksum boundary at
- // every 20 bytes. Verify that 60 bytes of chunk data are read and stored
- // in the buffers.
- matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(),
- 0, 60);
-
- }
-
- @Test
- public void testSeek() throws Exception {
- seekAndVerify(0);
-
- try {
- seekAndVerify(CHUNK_SIZE);
- Assert.fail("Seeking to Chunk Length should fail.");
- } catch (EOFException e) {
- GenericTestUtils.assertExceptionContains("EOF encountered at pos: "
- + CHUNK_SIZE + " for chunk: " + CHUNK_NAME, e);
- }
-
- // Seek before read should update the ChunkInputStream#chunkPosition
- seekAndVerify(25);
- Assert.assertEquals(25, chunkStream.getChunkPosition());
-
- // Read from the seeked position.
- // Reading from index 25 to 54 should result in the ChunkInputStream
- // copying chunk data from index 20 to 59 into the buffers (checksum
- // boundaries).
- byte[] b = new byte[30];
- chunkStream.read(b, 0, 30);
- matchWithInputData(b, 25, 30);
- matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(),
- 20, 40);
-
- // After read, the position of the chunkStream is evaluated from the
- // buffers and the chunkPosition should be reset to -1.
- Assert.assertEquals(-1, chunkStream.getChunkPosition());
-
- // Seek to a position within the current buffers. Current buffers contain
- // data from index 20 to 59. ChunkPosition should still not be used to
- // set the position.
- seekAndVerify(35);
- Assert.assertEquals(-1, chunkStream.getChunkPosition());
-
- // Seek to a position outside the current buffers. In this case, the
- // chunkPosition should be updated to the seeked position.
- seekAndVerify(75);
- Assert.assertEquals(75, chunkStream.getChunkPosition());
- }
-
- @Test
- public void testSeekAndRead() throws Exception {
- // Seek to a position and read data
- seekAndVerify(50);
- byte[] b1 = new byte[20];
- chunkStream.read(b1, 0, 20);
- matchWithInputData(b1, 50, 20);
-
- // Next read should start from the position of the last read + 1 i.e. 70
- byte[] b2 = new byte[20];
- chunkStream.read(b2, 0, 20);
- matchWithInputData(b2, 70, 20);
- }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java
deleted file mode 100644
index abdd04ea967d8..0000000000000
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- * This package contains Ozone InputStream related tests.
- */
-package org.apache.hadoop.hdds.scm.storage;
\ No newline at end of file
diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 4441b69d8683e..0000000000000
--- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
deleted file mode 100644
index 9af807f8b9eb0..0000000000000
--- a/hadoop-hdds/common/pom.xml
+++ /dev/null
@@ -1,285 +0,0 @@
-
-
-
- 4.0.0
-
- org.apache.hadoop
- hadoop-hdds
- 0.5.0-SNAPSHOT
-
- hadoop-hdds-common
- 0.5.0-SNAPSHOT
- Apache Hadoop Distributed Data Store Common
- Apache Hadoop HDDS Common
- jar
-
-
- 0.5.0-SNAPSHOT
- 2.11.0
- 3.4.2
- ${hdds.version}
-
-
-
-
- org.apache.hadoop
- hadoop-hdds-config
-
-
-
- javax.annotation
- javax.annotation-api
- 1.2
-
-
-
- org.fusesource.leveldbjni
- leveldbjni-all
-
-
-
- ratis-server
- org.apache.ratis
-
-
- org.slf4j
- slf4j-log4j12
-
-
- io.dropwizard.metrics
- metrics-core
-
-
- org.bouncycastle
- bcprov-jdk15on
-
-
-
-
- ratis-netty
- org.apache.ratis
-
-
- ratis-grpc
- org.apache.ratis
-
-
- com.google.errorprone
- error_prone_annotations
- 2.2.0
- true
-
-
-
- org.rocksdb
- rocksdbjni
- 6.0.1
-
-
- org.apache.hadoop
- hadoop-common
- test
- test-jar
-
-
-
- org.apache.logging.log4j
- log4j-api
- ${log4j2.version}
-
-
- org.apache.logging.log4j
- log4j-core
- ${log4j2.version}
-
-
- com.lmax
- disruptor
- ${disruptor.version}
-
-
- org.apache.commons
- commons-pool2
- 2.6.0
-
-
- org.bouncycastle
- bcpkix-jdk15on
- ${bouncycastle.version}
-
-
-
- commons-validator
- commons-validator
- 1.6
-
-
- org.junit.jupiter
- junit-jupiter-api
-
-
- io.jaegertracing
- jaeger-client
- ${jaeger.version}
-
-
- io.opentracing
- opentracing-util
- 0.31.0
-
-
- org.yaml
- snakeyaml
- 1.16
-
-
-
-
-
-
- ${basedir}/src/main/resources
-
- hdds-version-info.properties
-
- false
-
-
- ${basedir}/src/main/resources
-
- hdds-version-info.properties
-
- true
-
-
-
-
- kr.motd.maven
- os-maven-plugin
- ${os-maven-plugin.version}
-
-
-
-
- org.xolstice.maven.plugins
- protobuf-maven-plugin
- ${protobuf-maven-plugin.version}
- true
-
-
- com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier}
-
- ${basedir}/src/main/proto/
-
- DatanodeContainerProtocol.proto
-
- target/generated-sources/java
- false
-
-
-
- compile-protoc
-
- compile
- test-compile
- compile-custom
- test-compile-custom
-
-
- grpc-java
-
- io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
-
-
-
-
-
-
- maven-antrun-plugin
-
-
- generate-sources
-
-
-
-
-
-
-
-
-
- run
-
-
-
-
-
- org.apache.hadoop
- hadoop-maven-plugins
-
-
- version-info
- generate-resources
-
- version-info
-
-
-
- ${basedir}/../
-
- */src/main/java/**/*.java
- */src/main/proto/*.proto
-
-
-
-
-
- compile-protoc
-
- protoc
-
-
- ${protobuf.version}
- ${protoc.path}
-
- ${basedir}/src/main/proto
-
-
- ${basedir}/src/main/proto
-
- StorageContainerLocationProtocol.proto
- hdds.proto
- ScmBlockLocationProtocol.proto
- SCMSecurityProtocol.proto
-
-
-
-
-
-
-
- com.github.spotbugs
- spotbugs-maven-plugin
-
- ${basedir}/dev-support/findbugsExcludeFile.xml
-
-
-
-
-
diff --git a/hadoop-hdds/common/src/main/bin/hadoop-config.cmd b/hadoop-hdds/common/src/main/bin/hadoop-config.cmd
deleted file mode 100644
index d77dc5346a1fc..0000000000000
--- a/hadoop-hdds/common/src/main/bin/hadoop-config.cmd
+++ /dev/null
@@ -1,317 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements. See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License. You may obtain a copy of the License at
-@rem
-@rem http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-@rem included in all the hadoop scripts with source command
-@rem should not be executable directly
-@rem also should not be passed any arguments, since we need original %*
-
-if not defined HADOOP_COMMON_DIR (
- set HADOOP_COMMON_DIR=share\hadoop\common
-)
-if not defined HADOOP_COMMON_LIB_JARS_DIR (
- set HADOOP_COMMON_LIB_JARS_DIR=share\hadoop\common\lib
-)
-if not defined HADOOP_COMMON_LIB_NATIVE_DIR (
- set HADOOP_COMMON_LIB_NATIVE_DIR=lib\native
-)
-if not defined HDFS_DIR (
- set HDFS_DIR=share\hadoop\hdfs
-)
-if not defined HDFS_LIB_JARS_DIR (
- set HDFS_LIB_JARS_DIR=share\hadoop\hdfs\lib
-)
-if not defined YARN_DIR (
- set YARN_DIR=share\hadoop\yarn
-)
-if not defined YARN_LIB_JARS_DIR (
- set YARN_LIB_JARS_DIR=share\hadoop\yarn\lib
-)
-if not defined MAPRED_DIR (
- set MAPRED_DIR=share\hadoop\mapreduce
-)
-if not defined MAPRED_LIB_JARS_DIR (
- set MAPRED_LIB_JARS_DIR=share\hadoop\mapreduce\lib
-)
-
-@rem the root of the Hadoop installation
-set HADOOP_HOME=%~dp0
-for %%i in (%HADOOP_HOME%.) do (
- set HADOOP_HOME=%%~dpi
-)
-if "%HADOOP_HOME:~-1%" == "\" (
- set HADOOP_HOME=%HADOOP_HOME:~0,-1%
-)
-
-if not exist %HADOOP_HOME%\share\hadoop\common\hadoop-common-*.jar (
- @echo +================================================================+
- @echo ^| Error: HADOOP_HOME is not set correctly ^|
- @echo +----------------------------------------------------------------+
- @echo ^| Please set your HADOOP_HOME variable to the absolute path of ^|
- @echo ^| the directory that contains the hadoop distribution ^|
- @echo +================================================================+
- exit /b 1
-)
-
-if not defined HADOOP_CONF_DIR (
- set HADOOP_CONF_DIR=%HADOOP_HOME%\etc\hadoop
-)
-
-@rem
-@rem Allow alternate conf dir location.
-@rem
-
-if "%1" == "--config" (
- set HADOOP_CONF_DIR=%2
- shift
- shift
-)
-
-@rem
-@rem check to see it is specified whether to use the workers or the
-@rem masters file
-@rem
-
-if "%1" == "--hosts" (
- set HADOOP_WORKERS=%HADOOP_CONF_DIR%\%2
- shift
- shift
-)
-
-@rem
-@rem Set log level. Default to INFO.
-@rem
-
-if "%1" == "--loglevel" (
- set HADOOP_LOGLEVEL=%2
- shift
- shift
-)
-
-if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
- call %HADOOP_CONF_DIR%\hadoop-env.cmd
-)
-
-@rem
-@rem setup java environment variables
-@rem
-
-if not defined JAVA_HOME (
- echo Error: JAVA_HOME is not set.
- goto :eof
-)
-
-if not exist %JAVA_HOME%\bin\java.exe (
- echo Error: JAVA_HOME is incorrectly set.
- echo Please update %HADOOP_CONF_DIR%\hadoop-env.cmd
- goto :eof
-)
-
-set JAVA=%JAVA_HOME%\bin\java
-@rem some Java parameters
-set JAVA_HEAP_MAX=-Xmx1000m
-
-@rem
-@rem check envvars which might override default args
-@rem
-
-if defined HADOOP_HEAPSIZE (
- set JAVA_HEAP_MAX=-Xmx%HADOOP_HEAPSIZE%m
-)
-
-@rem
-@rem CLASSPATH initially contains %HADOOP_CONF_DIR%
-@rem
-
-set CLASSPATH=%HADOOP_CONF_DIR%
-
-if not defined HADOOP_COMMON_HOME (
- if exist %HADOOP_HOME%\share\hadoop\common (
- set HADOOP_COMMON_HOME=%HADOOP_HOME%
- )
-)
-
-@rem
-@rem for releases, add core hadoop jar & webapps to CLASSPATH
-@rem
-
-if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\webapps (
- set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%
-)
-
-if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR% (
- set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR%\*
-)
-
-set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\*
-
-@rem
-@rem default log directory % file
-@rem
-
-if not defined HADOOP_LOG_DIR (
- set HADOOP_LOG_DIR=%HADOOP_HOME%\logs
-)
-
-if not defined HADOOP_LOGFILE (
- set HADOOP_LOGFILE=hadoop.log
-)
-
-if not defined HADOOP_LOGLEVEL (
- set HADOOP_LOGLEVEL=INFO
-)
-
-if not defined HADOOP_ROOT_LOGGER (
- set HADOOP_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
-)
-
-@rem
-@rem default policy file for service-level authorization
-@rem
-
-if not defined HADOOP_POLICYFILE (
- set HADOOP_POLICYFILE=hadoop-policy.xml
-)
-
-@rem
-@rem Determine the JAVA_PLATFORM
-@rem
-
-for /f "delims=" %%A in ('%JAVA% -Xmx32m %HADOOP_JAVA_PLATFORM_OPTS% -classpath "%CLASSPATH%" org.apache.hadoop.util.PlatformName') do set JAVA_PLATFORM=%%A
-@rem replace space with underscore
-set JAVA_PLATFORM=%JAVA_PLATFORM: =_%
-
-@rem
-@rem setup 'java.library.path' for native hadoop code if necessary
-@rem
-
-@rem Check if we're running hadoop directly from the build
-if exist %HADOOP_COMMON_HOME%\target\bin (
- if defined JAVA_LIBRARY_PATH (
- set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\target\bin
- ) else (
- set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\target\bin
- )
-)
-
-@rem For the distro case, check the bin folder
-if exist %HADOOP_COMMON_HOME%\bin (
- if defined JAVA_LIBRARY_PATH (
- set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\bin
- ) else (
- set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin
- )
-)
-
-@rem
-@rem setup a default TOOL_PATH
-@rem
-set TOOL_PATH=%HADOOP_HOME%\share\hadoop\tools\lib\*
-
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.dir=%HADOOP_LOG_DIR%
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.file=%HADOOP_LOGFILE%
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.home.dir=%HADOOP_HOME%
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.id.str=%HADOOP_IDENT_STRING%
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.root.logger=%HADOOP_ROOT_LOGGER%
-
-if defined JAVA_LIBRARY_PATH (
- set HADOOP_OPTS=%HADOOP_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
-)
-set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.policy.file=%HADOOP_POLICYFILE%
-
-@rem
-@rem Disable ipv6 as it can cause issues
-@rem
-
-set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
-
-@rem
-@rem put hdfs in classpath if present
-@rem
-
-if not defined HADOOP_HDFS_HOME (
- if exist %HADOOP_HOME%\%HDFS_DIR% (
- set HADOOP_HDFS_HOME=%HADOOP_HOME%
- )
-)
-
-if exist %HADOOP_HDFS_HOME%\%HDFS_DIR%\webapps (
- set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%
-)
-
-if exist %HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR% (
- set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR%\*
-)
-
-set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%\*
-
-@rem
-@rem put yarn in classpath if present
-@rem
-
-if not defined HADOOP_YARN_HOME (
- if exist %HADOOP_HOME%\%YARN_DIR% (
- set HADOOP_YARN_HOME=%HADOOP_HOME%
- )
-)
-
-if exist %HADOOP_YARN_HOME%\%YARN_DIR%\webapps (
- set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%
-)
-
-if exist %HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR% (
- set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
-)
-
-set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%\*
-
-@rem
-@rem put mapred in classpath if present AND different from YARN
-@rem
-
-if not defined HADOOP_MAPRED_HOME (
- if exist %HADOOP_HOME%\%MAPRED_DIR% (
- set HADOOP_MAPRED_HOME=%HADOOP_HOME%
- )
-)
-
-if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" (
-
- if exist %HADOOP_MAPRED_HOME%\%MAPRED_DIR%\webapps (
- set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%
- )
-
- if exist %HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR% (
- set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\*
- )
-
- set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%\*
-)
-
-@rem
-@rem add user-specified CLASSPATH last
-@rem
-
-if defined HADOOP_CLASSPATH (
- if not defined HADOOP_USE_CLIENT_CLASSLOADER (
- if defined HADOOP_USER_CLASSPATH_FIRST (
- set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
- ) else (
- set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
- )
- )
-)
-
-:eof
diff --git a/hadoop-hdds/common/src/main/bin/hadoop-config.sh b/hadoop-hdds/common/src/main/bin/hadoop-config.sh
deleted file mode 100755
index 444b79a362953..0000000000000
--- a/hadoop-hdds/common/src/main/bin/hadoop-config.sh
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-####
-# IMPORTANT
-####
-
-## The hadoop-config.sh tends to get executed by non-Hadoop scripts.
-## Those parts expect this script to parse/manipulate $@. In order
-## to maintain backward compatibility, this means a surprising
-## lack of functions for bits that would be much better off in
-## a function.
-##
-## In other words, yes, some bad things happen here and
-## unless we break the rest of the ecosystem, we can't change it. :(
-
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-#
-# after doing more config, caller should also exec finalize
-# function to finish last minute/default configs for
-# settings that might be different between daemons & interactive
-
-# you must be this high to ride the ride
-if [[ -z "${BASH_VERSINFO[0]}" ]] \
- || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \
- || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then
- echo "bash v3.2+ is required. Sorry."
- exit 1
-fi
-
-# In order to get partially bootstrapped, we need to figure out where
-# we are located. Chances are good that our caller has already done
-# this work for us, but just in case...
-
-if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
- _hadoop_common_this="${BASH_SOURCE-$0}"
- HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hadoop_common_this}")" >/dev/null && pwd -P)
-fi
-
-# get our functions defined for usage later
-if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
- [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then
- # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
- . "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh"
-elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
- # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
- . "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
-else
- echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2
- exit 1
-fi
-
-hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME
-
-# allow overrides of the above and pre-defines of the below
-if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
- [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
- # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
- . "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh"
-elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
- # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
- . "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh"
-fi
-
-#
-# IMPORTANT! We are not executing user provided code yet!
-#
-
-# Let's go! Base definitions so we can move forward
-hadoop_bootstrap
-
-# let's find our conf.
-#
-# first, check and process params passed to us
-# we process this in-line so that we can directly modify $@
-# if something downstream is processing that directly,
-# we need to make sure our params have been ripped out
-# note that we do many of them here for various utilities.
-# this provides consistency and forces a more consistent
-# user experience
-
-
-# save these off in case our caller needs them
-# shellcheck disable=SC2034
-HADOOP_USER_PARAMS=("$@")
-
-hadoop_parse_args "$@"
-shift "${HADOOP_PARSE_COUNTER}"
-
-#
-# Setup the base-line environment
-#
-hadoop_find_confdir
-hadoop_exec_hadoopenv
-hadoop_import_shellprofiles
-hadoop_exec_userfuncs
-
-#
-# IMPORTANT! User provided code is now available!
-#
-
-hadoop_exec_user_hadoopenv
-hadoop_verify_confdir
-
-hadoop_deprecate_envvar HADOOP_SLAVES HADOOP_WORKERS
-hadoop_deprecate_envvar HADOOP_SLAVE_NAMES HADOOP_WORKER_NAMES
-hadoop_deprecate_envvar HADOOP_SLAVE_SLEEP HADOOP_WORKER_SLEEP
-
-# do all the OS-specific startup bits here
-# this allows us to get a decent JAVA_HOME,
-# call crle for LD_LIBRARY_PATH, etc.
-hadoop_os_tricks
-
-hadoop_java_setup
-
-hadoop_basic_init
-
-# inject any sub-project overrides, defaults, etc.
-if declare -F hadoop_subproject_init >/dev/null ; then
- hadoop_subproject_init
-fi
-
-hadoop_shellprofiles_init
-
-# get the native libs in there pretty quick
-hadoop_add_javalibpath "${HADOOP_HOME}/build/native"
-hadoop_add_javalibpath "${HADOOP_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
-
-hadoop_shellprofiles_nativelib
-
-# get the basic java class path for these subprojects
-# in as quickly as possible since other stuff
-# will definitely depend upon it.
-
-hadoop_add_common_to_classpath
-hadoop_shellprofiles_classpath
-
-# user API commands can now be run since the runtime
-# environment has been configured
-hadoop_exec_hadooprc
-
-#
-# backwards compatibility. new stuff should
-# call this when they are ready
-#
-if [[ -z "${HADOOP_NEW_CONFIG}" ]]; then
- hadoop_finalize
-fi
diff --git a/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh b/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh
deleted file mode 100755
index 55304916ad1f7..0000000000000
--- a/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Run a Hadoop command on all slave hosts.
-
-function hadoop_usage
-{
- echo "Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] (start|stop|status) "
-}
-
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
-
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
- HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
- HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
- . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
-else
- echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
- exit 1
-fi
-
-if [[ $# = 0 ]]; then
- hadoop_exit_with_usage 1
-fi
-
-daemonmode=$1
-shift
-
-if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
- hdfsscript="${HADOOP_HOME}/bin/hdfs"
-else
- hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
-fi
-
-hadoop_error "WARNING: Use of this script to ${daemonmode} HDFS daemons is deprecated."
-hadoop_error "WARNING: Attempting to execute replacement \"hdfs --workers --daemon ${daemonmode}\" instead."
-
-#
-# Original input was usually:
-# hadoop-daemons.sh (shell options) (start|stop) (datanode|...) (daemon options)
-# we're going to turn this into
-# hdfs --workers --daemon (start|stop) (rest of options)
-#
-for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ ))
-do
- if [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^start$ ]] ||
- [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^stop$ ]] ||
- [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^status$ ]]; then
- unset HADOOP_USER_PARAMS[$i]
- fi
-done
-
-${hdfsscript} --workers --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}"
diff --git a/hadoop-hdds/common/src/main/bin/hadoop-functions.sh b/hadoop-hdds/common/src/main/bin/hadoop-functions.sh
deleted file mode 100755
index 484fe2302f9ba..0000000000000
--- a/hadoop-hdds/common/src/main/bin/hadoop-functions.sh
+++ /dev/null
@@ -1,2732 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# we need to declare this globally as an array, which can only
-# be done outside of a function
-declare -a HADOOP_SUBCMD_USAGE
-declare -a HADOOP_OPTION_USAGE
-declare -a HADOOP_SUBCMD_USAGE_TYPES
-
-## @description Print a message to stderr
-## @audience public
-## @stability stable
-## @replaceable no
-## @param string
-function hadoop_error
-{
- echo "$*" 1>&2
-}
-
-## @description Print a message to stderr if --debug is turned on
-## @audience public
-## @stability stable
-## @replaceable no
-## @param string
-function hadoop_debug
-{
- if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
- echo "DEBUG: $*" 1>&2
- fi
-}
-
-## @description Given a filename or dir, return the absolute version of it
-## @description This works as an alternative to readlink, which isn't
-## @description portable.
-## @audience public
-## @stability stable
-## @param fsobj
-## @replaceable no
-## @return 0 success
-## @return 1 failure
-## @return stdout abspath
-function hadoop_abs
-{
- declare obj=$1
- declare dir
- declare fn
- declare dirret
-
- if [[ ! -e ${obj} ]]; then
- return 1
- elif [[ -d ${obj} ]]; then
- dir=${obj}
- else
- dir=$(dirname -- "${obj}")
- fn=$(basename -- "${obj}")
- fn="/${fn}"
- fi
-
- dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
- dirret=$?
- if [[ ${dirret} = 0 ]]; then
- echo "${dir}${fn}"
- return 0
- fi
- return 1
-}
-
-## @description Given variable $1 delete $2 from it
-## @audience public
-## @stability stable
-## @replaceable no
-function hadoop_delete_entry
-{
- if [[ ${!1} =~ \ ${2}\ ]] ; then
- hadoop_debug "Removing ${2} from ${1}"
- eval "${1}"=\""${!1// ${2} }"\"
- fi
-}
-
-## @description Given variable $1 add $2 to it
-## @audience public
-## @stability stable
-## @replaceable no
-function hadoop_add_entry
-{
- if [[ ! ${!1} =~ \ ${2}\ ]] ; then
- hadoop_debug "Adding ${2} to ${1}"
- #shellcheck disable=SC2140
- eval "${1}"=\""${!1} ${2} "\"
- fi
-}
-
-## @description Given variable $1 determine if $2 is in it
-## @audience public
-## @stability stable
-## @replaceable no
-## @return 0 = yes, 1 = no
-function hadoop_verify_entry
-{
- # this unfortunately can't really be tested by bats. :(
- # so if this changes, be aware that unit tests effectively
- # do this function in them
- [[ ${!1} =~ \ ${2}\ ]]
-}
-
-## @description Check if an array has a given value
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param element
-## @param array
-## @returns 0 = yes
-## @returns 1 = no
-function hadoop_array_contains
-{
- declare element=$1
- shift
- declare val
-
- if [[ "$#" -eq 0 ]]; then
- return 1
- fi
-
- for val in "${@}"; do
- if [[ "${val}" == "${element}" ]]; then
- return 0
- fi
- done
- return 1
-}
-
-## @description Add the `appendstring` if it is not already
-## @description present in the given array
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param envvar
-## @param appendstring
-function hadoop_add_array_param
-{
- declare arrname=$1
- declare add=$2
-
- declare arrref="${arrname}[@]"
- declare array=("${!arrref}")
-
- if ! hadoop_array_contains "${add}" "${array[@]}"; then
- #shellcheck disable=SC1083,SC2086
- eval ${arrname}=\(\"\${array[@]}\" \"${add}\" \)
- hadoop_debug "$1 accepted $2"
- else
- hadoop_debug "$1 declined $2"
- fi
-}
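-
-# For example, registering a usage type only once; a repeated value is
-# silently declined (the "client" value is just an illustration):
-#
-#   hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "client"
-#   hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "client"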
-
-## @description Sort an array (must not contain regexps)
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param arrayvar
-function hadoop_sort_array
-{
- declare arrname=$1
- declare arrref="${arrname}[@]"
- declare array=("${!arrref}")
- declare oifs
-
- declare globstatus
- declare -a sa
-
- globstatus=$(set -o | grep noglob | awk '{print $NF}')
-
- set -f
- oifs=${IFS}
-
- # shellcheck disable=SC2034
- IFS=$'\n' sa=($(sort <<<"${array[*]}"))
-
- # shellcheck disable=SC1083
- eval "${arrname}"=\(\"\${sa[@]}\"\)
-
- IFS=${oifs}
- if [[ "${globstatus}" = off ]]; then
- set +f
- fi
-}
-
-## @description Check if we are running with priv
-## @description by default, this implementation looks for
-## @description EUID=0. For OSes that have true priv
-## @description separation, this should be something more complex
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @return 1 = no priv
-## @return 0 = priv
-function hadoop_privilege_check
-{
- [[ "${EUID}" = 0 ]]
-}
-
-## @description Execute a command via su when running as root:
-## @description if the given user resolves, run the command as
-## @description that user; otherwise exit with failure.
-## @description When not running as root, just run the command.
-## @description (This is intended to be used by the
-## @description start-*/stop-* scripts.)
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param user
-## @param commandstring
-## @return exitstatus
-function hadoop_su
-{
- declare user=$1
- shift
-
- if hadoop_privilege_check; then
- if hadoop_verify_user_resolves user; then
- su -l "${user}" -- "$@"
- else
- hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
- return 1
- fi
- else
- "$@"
- fi
-}
-
-## @description Execute a command via su when running as root
-## @description with extra support for commands that might
-## @description legitimately start as root (e.g., datanode)
-## @description (This is intended to
-## @description be used by the start-*/stop-* scripts.)
-## @audience private
-## @stability evolving
-## @replaceable no
-## @param user
-## @param commandstring
-## @return exitstatus
-function hadoop_uservar_su
-{
-
- ## startup matrix:
- #
- # if $EUID != 0, then exec
- # if $EUID =0 then
- # if hdfs_subcmd_user is defined, call hadoop_su to exec
- # if hdfs_subcmd_user is not defined, error
- #
- # For secure daemons, this means both the secure and insecure env vars need to be
- # defined. e.g., HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs
- # This function will pick up the "normal" var, switch to that user, then
- # execute the command which will then pick up the "secure" version.
- #
-
- declare program=$1
- declare command=$2
- shift 2
-
- declare uprogram
- declare ucommand
- declare uvar
- declare svar
-
- if hadoop_privilege_check; then
- uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
-
- svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)
-
- if [[ -n "${!uvar}" ]]; then
- hadoop_su "${!uvar}" "$@"
- elif [[ -n "${!svar}" ]]; then
- ## if we are here, then SECURE_USER with no USER defined
- ## we are already privileged, so just run the command and hope
- ## for the best
- "$@"
- else
- hadoop_error "ERROR: Attempting to operate on ${program} ${command} as root"
- hadoop_error "ERROR: but there is no ${uvar} defined. Aborting operation."
- return 1
- fi
- else
- "$@"
- fi
-}
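-
-# As a rough sketch, using the example variables from the comment above,
-# a privileged start of a secure datanode might look like:
-#
-#   HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs \
-#     hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" --daemon start datanode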
-
-## @description Add a subcommand to the usage output
-## @audience private
-## @stability evolving
-## @replaceable no
-## @param subcommand
-## @param subcommandtype
-## @param subcommanddesc
-function hadoop_add_subcommand
-{
- declare subcmd=$1
- declare subtype=$2
- declare text=$3
-
- hadoop_debug "${subcmd} as a ${subtype}"
-
- hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "${subtype}"
-
- # done in this order so that sort works later
- HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${subtype}@${text}"
- ((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1))
-}
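-
-# A typical registration might look something like this (text and type
-# are illustrative):
-#
-#   hadoop_add_subcommand "classpath" client "prints the class path"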
-
-## @description Add an option to the usage output
-## @audience private
-## @stability evolving
-## @replaceable no
-## @param subcommand
-## @param subcommanddesc
-function hadoop_add_option
-{
- local option=$1
- local text=$2
-
- HADOOP_OPTION_USAGE[${HADOOP_OPTION_USAGE_COUNTER}]="${option}@${text}"
- ((HADOOP_OPTION_USAGE_COUNTER=HADOOP_OPTION_USAGE_COUNTER+1))
-}
-
-## @description Reset the usage information to blank
-## @audience private
-## @stability evolving
-## @replaceable no
-function hadoop_reset_usage
-{
- HADOOP_SUBCMD_USAGE=()
- HADOOP_OPTION_USAGE=()
- HADOOP_SUBCMD_USAGE_TYPES=()
- HADOOP_SUBCMD_USAGE_COUNTER=0
- HADOOP_OPTION_USAGE_COUNTER=0
-}
-
-## @description Print a screen-size aware two-column output
-## @description if reqtype is not null, only print those requested
-## @audience private
-## @stability evolving
-## @replaceable no
-## @param reqtype
-## @param array
-function hadoop_generic_columnprinter
-{
- declare reqtype=$1
- shift
- declare -a input=("$@")
- declare -i i=0
- declare -i counter=0
- declare line
- declare text
- declare option
- declare giventext
- declare -i maxoptsize
- declare -i foldsize
- declare -a tmpa
- declare numcols
- declare brup
-
- if [[ -n "${COLUMNS}" ]]; then
- numcols=${COLUMNS}
- else
- numcols=$(tput cols) 2>/dev/null
- COLUMNS=${numcols}
- fi
-
- if [[ -z "${numcols}"
- || ! "${numcols}" =~ ^[0-9]+$ ]]; then
- numcols=75
- else
- ((numcols=numcols-5))
- fi
-
- while read -r line; do
- tmpa[${counter}]=${line}
- ((counter=counter+1))
- IFS='@' read -ra brup <<< "${line}"
- option="${brup[0]}"
- if [[ ${#option} -gt ${maxoptsize} ]]; then
- maxoptsize=${#option}
- fi
- done < <(for text in "${input[@]}"; do
- echo "${text}"
- done | sort)
-
- i=0
- ((foldsize=numcols-maxoptsize))
-
- until [[ $i -eq ${#tmpa[@]} ]]; do
- IFS='@' read -ra brup <<< "${tmpa[$i]}"
-
- option="${brup[0]}"
- cmdtype="${brup[1]}"
- giventext="${brup[2]}"
-
- if [[ -n "${reqtype}" ]]; then
- if [[ "${cmdtype}" != "${reqtype}" ]]; then
- ((i=i+1))
- continue
- fi
- fi
-
- if [[ -z "${giventext}" ]]; then
- giventext=${cmdtype}
- fi
-
- while read -r line; do
- printf "%-${maxoptsize}s %-s\n" "${option}" "${line}"
- option=" "
- done < <(echo "${giventext}"| fold -s -w ${foldsize})
- ((i=i+1))
- done
-}
-
-## @description generate standard usage output
-## @description and optionally takes a class
-## @audience private
-## @stability evolving
-## @replaceable no
-## @param execname
-## @param true|false
-## @param [text to use in place of SUBCOMMAND]
-function hadoop_generate_usage
-{
- declare cmd=$1
- declare takesclass=$2
- declare subcmdtext=${3:-"SUBCOMMAND"}
- declare haveoptions
- declare optstring
- declare havesubs
- declare subcmdstring
- declare cmdtype
-
- cmd=${cmd##*/}
-
- if [[ -n "${HADOOP_OPTION_USAGE_COUNTER}"
- && "${HADOOP_OPTION_USAGE_COUNTER}" -gt 0 ]]; then
- haveoptions=true
- optstring=" [OPTIONS]"
- fi
-
- if [[ -n "${HADOOP_SUBCMD_USAGE_COUNTER}"
- && "${HADOOP_SUBCMD_USAGE_COUNTER}" -gt 0 ]]; then
- havesubs=true
- subcmdstring=" ${subcmdtext} [${subcmdtext} OPTIONS]"
- fi
-
- echo "Usage: ${cmd}${optstring}${subcmdstring}"
- if [[ ${takesclass} = true ]]; then
- echo " or ${cmd}${optstring} CLASSNAME [CLASSNAME OPTIONS]"
- echo " where CLASSNAME is a user-provided Java class"
- fi
-
- if [[ "${haveoptions}" = true ]]; then
- echo ""
- echo " OPTIONS is none or any of:"
- echo ""
-
- hadoop_generic_columnprinter "" "${HADOOP_OPTION_USAGE[@]}"
- fi
-
- if [[ "${havesubs}" = true ]]; then
- echo ""
- echo " ${subcmdtext} is one of:"
- echo ""
-
- if [[ "${#HADOOP_SUBCMD_USAGE_TYPES[@]}" -gt 0 ]]; then
-
- hadoop_sort_array HADOOP_SUBCMD_USAGE_TYPES
- for subtype in "${HADOOP_SUBCMD_USAGE_TYPES[@]}"; do
- #shellcheck disable=SC2086
- cmdtype="$(tr '[:lower:]' '[:upper:]' <<< ${subtype:0:1})${subtype:1}"
- printf "\n %s Commands:\n\n" "${cmdtype}"
- hadoop_generic_columnprinter "${subtype}" "${HADOOP_SUBCMD_USAGE[@]}"
- done
- else
- hadoop_generic_columnprinter "" "${HADOOP_SUBCMD_USAGE[@]}"
- fi
- echo ""
- echo "${subcmdtext} may print help when invoked w/o parameters or with -h."
- fi
-}
-
-## @description Replace `oldvar` with `newvar` if `oldvar` exists.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param oldvar
-## @param newvar
-function hadoop_deprecate_envvar
-{
- local oldvar=$1
- local newvar=$2
- local oldval=${!oldvar}
- local newval=${!newvar}
-
- if [[ -n "${oldval}" ]]; then
- hadoop_error "WARNING: ${oldvar} has been replaced by ${newvar}. Using value of ${oldvar}."
- # shellcheck disable=SC2086
- eval ${newvar}=\"${oldval}\"
-
- # shellcheck disable=SC2086
- newval=${oldval}
-
- # shellcheck disable=SC2086
- eval ${newvar}=\"${newval}\"
- fi
-}
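-
-# For example, mapping a retired variable name onto its replacement
-# (names taken from the bootstrap warning below, purely as an illustration):
-#
-#   hadoop_deprecate_envvar DEFAULT_LIBEXEC_DIR HADOOP_DEFAULT_LIBEXEC_DIR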
-
-## @description Declare `var` being used and print its value.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param var
-function hadoop_using_envvar
-{
- local var=$1
- local val=${!var}
-
- if [[ -n "${val}" ]]; then
- hadoop_debug "${var} = ${val}"
- fi
-}
-
-## @description Create the directory 'dir'.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param dir
-function hadoop_mkdir
-{
- local dir=$1
-
- if [[ ! -w "${dir}" ]] && [[ ! -d "${dir}" ]]; then
- hadoop_error "WARNING: ${dir} does not exist. Creating."
- if ! mkdir -p "${dir}"; then
- hadoop_error "ERROR: Unable to create ${dir}. Aborting."
- exit 1
- fi
- fi
-}
-
-## @description Bootstraps the Hadoop shell environment
-## @audience private
-## @stability evolving
-## @replaceable no
-function hadoop_bootstrap
-{
- # the root of the Hadoop installation
- # See HADOOP-6255 for the expected directory structure layout
-
- if [[ -n "${DEFAULT_LIBEXEC_DIR}" ]]; then
- hadoop_error "WARNING: DEFAULT_LIBEXEC_DIR ignored. It has been replaced by HADOOP_DEFAULT_LIBEXEC_DIR."
- fi
-
- # By now, HADOOP_LIBEXEC_DIR should have been defined upstream
- # We can piggyback off of that to figure out where the default
- # HADOOP_HOME should be. This allows us to run without
- # HADOOP_HOME ever being defined by a human! As a consequence
- # HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful
- # env var within Hadoop.
- if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
- hadoop_error "HADOOP_LIBEXEC_DIR is not defined. Exiting."
- exit 1
- fi
- HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
- HADOOP_HOME=${HADOOP_HOME:-$HADOOP_DEFAULT_PREFIX}
- export HADOOP_HOME
-
- #
- # short-cuts. vendors may redefine these as well, preferably
- # in hadoop-layout.sh
- #
- HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"}
- HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"}
- HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"}
- HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}
- HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"}
- YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"}
- YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
- MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
- MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
- HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"}
- HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
- OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
- OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
- OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"}
-
- HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
- HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
- HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
-
- # by default, whatever we are about to run doesn't support
- # daemonization
- HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
-
- # by default, we have not been self-re-execed
- HADOOP_REEXECED_CMD=false
-
- HADOOP_SUBCMD_SECURESERVICE=false
-
- # This is the default we claim in hadoop-env.sh
- JSVC_HOME=${JSVC_HOME:-"/usr/bin"}
-
- # usage output set to zero
- hadoop_reset_usage
-
- export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
-
- # defaults
- export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
- hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
-}
-
-## @description Locate Hadoop's configuration directory
-## @audience private
-## @stability evolving
-## @replaceable no
-function hadoop_find_confdir
-{
- local conf_dir
-
- # An attempt at compatibility with some Hadoop 1.x
- # installs.
- if [[ -e "${HADOOP_HOME}/conf/hadoop-env.sh" ]]; then
- conf_dir="conf"
- else
- conf_dir="etc/hadoop"
- fi
- export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_HOME}/${conf_dir}}"
-
- hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
-}
-
-## @description Validate ${HADOOP_CONF_DIR}
-## @audience public
-## @stability stable
-## @replaceable yes
-## @return will exit on failure conditions
-function hadoop_verify_confdir
-{
- # Check only log4j.properties by default.
- # --loglevel does not work without logger settings in log4j.properties.
- if [[ ! -f "${HADOOP_CONF_DIR}/log4j.properties" ]]; then
- hadoop_error "WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete."
- fi
-}
-
-## @description Import the hadoop-env.sh settings
-## @audience private
-## @stability evolving
-## @replaceable no
-function hadoop_exec_hadoopenv
-{
- if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
- if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
- export HADOOP_ENV_PROCESSED=true
- # shellcheck source=./hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
- . "${HADOOP_CONF_DIR}/hadoop-env.sh"
- fi
- fi
-}
-
-## @description Import the replaced functions
-## @audience private
-## @stability evolving
-## @replaceable no
-function hadoop_exec_userfuncs
-{
- if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then
- # shellcheck disable=SC1090
- . "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
- fi
-}
-
-## @description Read the user's settings. This provides for users to
-## @description override and/or append hadoop-env.sh. It is not meant
-## @description as a complete system override.
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_exec_user_hadoopenv
-{
- if [[ -f "${HOME}/.hadoop-env" ]]; then
- hadoop_debug "Applying the user's .hadoop-env"
- # shellcheck disable=SC1090
- . "${HOME}/.hadoop-env"
- fi
-}
-
-## @description Read the user's settings. This provides for users to
-## @description run Hadoop Shell API after system bootstrap
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_exec_hadooprc
-{
- if [[ -f "${HOME}/.hadooprc" ]]; then
- hadoop_debug "Applying the user's .hadooprc"
- # shellcheck disable=SC1090
- . "${HOME}/.hadooprc"
- fi
-}
-
-## @description Import shellprofile.d content
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_import_shellprofiles
-{
- local i
- local files1
- local files2
-
- if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then
- files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh)
- hadoop_debug "shellprofiles: ${files1[*]}"
- else
- hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
- fi
-
- if [[ -d "${HADOOP_CONF_DIR}/shellprofile.d" ]]; then
- files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh)
- fi
-
- # enable bundled shellprofiles that come
- # from hadoop-tools. This converts the user-facing HADOOP_OPTIONAL_TOOLS
- # to the HADOOP_TOOLS_OPTIONS that the shell profiles expect.
- # See dist-tools-hooks-maker for how the example HADOOP_OPTIONAL_TOOLS
- # gets populated into hadoop-env.sh
-
- for i in ${HADOOP_OPTIONAL_TOOLS//,/ }; do
- hadoop_add_entry HADOOP_TOOLS_OPTIONS "${i}"
- done
-
- for i in "${files1[@]}" "${files2[@]}"
- do
- if [[ -n "${i}"
- && -f "${i}" ]]; then
- hadoop_debug "Profiles: importing ${i}"
- # shellcheck disable=SC1090
- . "${i}"
- fi
- done
-}
-
-## @description Initialize the registered shell profiles
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_shellprofiles_init
-{
- local i
-
- for i in ${HADOOP_SHELL_PROFILES}
- do
- if declare -F _${i}_hadoop_init >/dev/null ; then
- hadoop_debug "Profiles: ${i} init"
- # shellcheck disable=SC2086
- _${i}_hadoop_init
- fi
- done
-}
-
-## @description Apply the shell profile classpath additions
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_shellprofiles_classpath
-{
- local i
-
- for i in ${HADOOP_SHELL_PROFILES}
- do
- if declare -F _${i}_hadoop_classpath >/dev/null ; then
- hadoop_debug "Profiles: ${i} classpath"
- # shellcheck disable=SC2086
- _${i}_hadoop_classpath
- fi
- done
-}
-
-## @description Apply the shell profile native library additions
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_shellprofiles_nativelib
-{
- local i
-
- for i in ${HADOOP_SHELL_PROFILES}
- do
- if declare -F _${i}_hadoop_nativelib >/dev/null ; then
- hadoop_debug "Profiles: ${i} nativelib"
- # shellcheck disable=SC2086
- _${i}_hadoop_nativelib
- fi
- done
-}
-
-## @description Apply the shell profile final configuration
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_shellprofiles_finalize
-{
- local i
-
- for i in ${HADOOP_SHELL_PROFILES}
- do
- if declare -F _${i}_hadoop_finalize >/dev/null ; then
- hadoop_debug "Profiles: ${i} finalize"
- # shellcheck disable=SC2086
- _${i}_hadoop_finalize
- fi
- done
-}
-
-## @description Initialize the Hadoop shell environment, now that
-## @description user settings have been imported
-## @audience private
-## @stability evolving
-## @replaceable no
-function hadoop_basic_init
-{
- # Some of these are also set in hadoop-env.sh.
- # we still set them here just in case hadoop-env.sh is
- # broken in some way, set up defaults, etc.
- #
- # but it is important to note that if you update these
- # you also need to update hadoop-env.sh as well!!!
-
- CLASSPATH=""
- hadoop_debug "Initialize CLASSPATH"
-
- if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
- [[ -d "${HADOOP_HOME}/${HADOOP_COMMON_DIR}" ]]; then
- export HADOOP_COMMON_HOME="${HADOOP_HOME}"
- fi
-
- # default policy file for service-level authorization
- HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"}
-
- # define HADOOP_HDFS_HOME
- if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
- [[ -d "${HADOOP_HOME}/${HDFS_DIR}" ]]; then
- export HADOOP_HDFS_HOME="${HADOOP_HOME}"
- fi
-
- # define HADOOP_YARN_HOME
- if [[ -z "${HADOOP_YARN_HOME}" ]] &&
- [[ -d "${HADOOP_HOME}/${YARN_DIR}" ]]; then
- export HADOOP_YARN_HOME="${HADOOP_HOME}"
- fi
-
- # define HADOOP_MAPRED_HOME
- if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
- [[ -d "${HADOOP_HOME}/${MAPRED_DIR}" ]]; then
- export HADOOP_MAPRED_HOME="${HADOOP_HOME}"
- fi
-
- if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then
- hadoop_error "ERROR: Invalid HADOOP_COMMON_HOME"
- exit 1
- fi
-
- if [[ ! -d "${HADOOP_HDFS_HOME}" ]]; then
- hadoop_error "ERROR: Invalid HADOOP_HDFS_HOME"
- exit 1
- fi
-
- if [[ ! -d "${HADOOP_YARN_HOME}" ]]; then
- hadoop_error "ERROR: Invalid HADOOP_YARN_HOME"
- exit 1
- fi
-
- if [[ ! -d "${HADOOP_MAPRED_HOME}" ]]; then
- hadoop_error "ERROR: Invalid HADOOP_MAPRED_HOME"
- exit 1
- fi
-
- # if for some reason the shell doesn't have $USER defined
- # (e.g., ssh'd in to execute a command)
- # let's get the effective username and use that
- USER=${USER:-$(id -nu)}
- HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
- HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"}
- HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
- HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
- HADOOP_NICENESS=${HADOOP_NICENESS:-0}
- HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
- HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp}
- HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}
- HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA}
- HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
- HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
- HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
- HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
- HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}
-}
-
-## @description Set the worker support information to the contents
-## @description of `filename`
-## @audience public
-## @stability stable
-## @replaceable no
-## @param filename
-## @return will exit if file does not exist
-function hadoop_populate_workers_file
-{
- local workersfile=$1
- shift
- if [[ -f "${workersfile}" ]]; then
- HADOOP_WORKERS="${workersfile}"
- elif [[ -f "${HADOOP_CONF_DIR}/${workersfile}" ]]; then
- HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}"
- else
- hadoop_error "ERROR: Cannot find hosts file \"${workersfile}\""
- hadoop_exit_with_usage 1
- fi
-}
-
-## @description Rotates the given `file` until `number` of
-## @description files exist.
-## @audience public
-## @stability stable
-## @replaceable no
-## @param filename
-## @param [number]
-## @return $? will contain last mv's return value
-function hadoop_rotate_log
-{
- #
- # Users are likely to replace this one with something
- # that gzips or uses dates or who knows what.
- #
- # be aware that &1 and &2 might go through here
- # so don't do anything too crazy...
- #
- local log=$1;
- local num=${2:-5};
-
- if [[ -f "${log}" ]]; then # rotate logs
- while [[ ${num} -gt 1 ]]; do
- #shellcheck disable=SC2086
- let prev=${num}-1
- if [[ -f "${log}.${prev}" ]]; then
- mv "${log}.${prev}" "${log}.${num}"
- fi
- num=${prev}
- done
- mv "${log}" "${log}.${num}"
- fi
-}
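-
-# For example, with the default of 5 copies, an existing daemon .out file
-# gets shuffled .4 -> .5, ..., .1 -> .2, and the live file becomes .1
-# (the path is illustrative):
-#
-#   hadoop_rotate_log "${HADOOP_LOG_DIR}/hadoop-datanode.out"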
-
-## @description Via ssh, log into `hostname` and run `command`
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param hostname
-## @param command
-## @param [...]
-function hadoop_actual_ssh
-{
- # we are passing this function to xargs
- # should get hostname followed by rest of command line
- local worker=$1
- shift
-
- # shellcheck disable=SC2086
- ssh ${HADOOP_SSH_OPTS} ${worker} $"${@// /\\ }" 2>&1 | sed "s/^/$worker: /"
-}
-
-## @description Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES}
-## @description and execute command.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param command
-## @param [...]
-function hadoop_connect_to_hosts
-{
- # shellcheck disable=SC2124
- local params="$@"
- local worker_file
- local tmpslvnames
-
- #
- # ssh (or whatever) to a host
- #
- # User can specify hostnames or a file where the hostnames are (not both)
- if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then
- hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting."
- exit 1
- elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
- if [[ -n "${HADOOP_WORKERS}" ]]; then
- worker_file=${HADOOP_WORKERS}
- elif [[ -f "${HADOOP_CONF_DIR}/workers" ]]; then
- worker_file=${HADOOP_CONF_DIR}/workers
- elif [[ -f "${HADOOP_CONF_DIR}/slaves" ]]; then
- hadoop_error "WARNING: 'slaves' file has been deprecated. Please use 'workers' file instead."
- worker_file=${HADOOP_CONF_DIR}/slaves
- fi
- fi
-
- # if pdsh is available, let's use it. otherwise default
- # to a loop around ssh. (ugh)
- if [[ -e '/usr/bin/pdsh' ]]; then
- if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then
- # if we were given a file, just let pdsh deal with it.
- # shellcheck disable=SC2086
- PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
- -f "${HADOOP_SSH_PARALLEL}" -w ^"${worker_file}" $"${@// /\\ }" 2>&1
- else
- # no spaces allowed in the pdsh arg host list
- # shellcheck disable=SC2086
- tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,)
- PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
- -f "${HADOOP_SSH_PARALLEL}" \
- -w "${tmpslvnames}" $"${@// /\\ }" 2>&1
- fi
- else
- if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
- HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}")
- fi
- hadoop_connect_to_hosts_without_pdsh "${params}"
- fi
-}
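-
-# For example, callers either point HADOOP_WORKERS at a hosts file or set
-# HADOOP_WORKER_NAMES directly, but never both (values are illustrative):
-#
-#   HADOOP_WORKER_NAMES="host1 host2" hadoop_connect_to_hosts hostname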
-
-## @description Connect to ${HADOOP_WORKER_NAMES} and execute command
-## @description under the environment which does not support pdsh.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param command
-## @param [...]
-function hadoop_connect_to_hosts_without_pdsh
-{
- # shellcheck disable=SC2124
- local params="$@"
- local workers=(${HADOOP_WORKER_NAMES})
- for (( i = 0; i < ${#workers[@]}; i++ ))
- do
- if (( i != 0 && i % HADOOP_SSH_PARALLEL == 0 )); then
- wait
- fi
- # shellcheck disable=SC2086
- hadoop_actual_ssh "${workers[$i]}" ${params} &
- done
- wait
-}
-
-## @description Utility routine to handle --workers mode
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param commandarray
-function hadoop_common_worker_mode_execute
-{
- #
- # input should be the command line as given by the user
- # in the form of an array
- #
- local argv=("$@")
-
- # if --workers is still on the command line, remove it
- # to prevent loops
- # Also remove --hostnames and --hosts along with arg values
- local argsSize=${#argv[@]};
- for (( i = 0; i < argsSize; i++ ))
- do
- if [[ "${argv[$i]}" =~ ^--workers$ ]]; then
- unset argv[$i]
- elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] ||
- [[ "${argv[$i]}" =~ ^--hosts$ ]]; then
- unset argv[$i];
- let i++;
- unset argv[$i];
- fi
- done
- if [[ ${QATESTMODE} = true ]]; then
- echo "${argv[@]}"
- return
- fi
- hadoop_connect_to_hosts -- "${argv[@]}"
-}
-
-## @description Verify that a shell command was passed a valid
-## @description class name
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param classname
-## @return 0 = success
-## @return 1 = failure w/user message
-function hadoop_validate_classname
-{
- local class=$1
- shift 1
-
- if [[ ! ${class} =~ \. ]]; then
- # assuming the arg is a typo of a command if it does not contain ".".
- # a class belonging to no package is not allowed as a result.
- hadoop_error "ERROR: ${class} is not COMMAND nor fully qualified CLASSNAME."
- return 1
- fi
- return 0
-}
-
-## @description Append the `appendstring` if `checkstring` is not
-## @description present in the given `envvar`
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param envvar
-## @param checkstring
-## @param appendstring
-function hadoop_add_param
-{
- #
- # general param dedupe..
- # $1 is what we are adding to
- # $2 is the name of what we want to add (key)
- # $3 is the key+value of what we're adding
- #
- # doing it this way allows us to support all sorts of
- # different syntaxes, just so long as they are space
- # delimited
- #
- if [[ ! ${!1} =~ $2 ]] ; then
- #shellcheck disable=SC2140
- eval "$1"="'${!1} $3'"
- if [[ ${!1:0:1} = ' ' ]]; then
- #shellcheck disable=SC2140
- eval "$1"="'${!1# }'"
- fi
- hadoop_debug "$1 accepted $3"
- else
- hadoop_debug "$1 declined $3"
- fi
-}
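-
-# For example, appending a heap flag to HADOOP_OPTS only when no other
-# Xmx setting is already present (the size is just an illustration):
-#
-#   hadoop_add_param HADOOP_OPTS Xmx "-Xmx2048m"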
-
-## @description Register the given `shellprofile` to the Hadoop
-## @description shell subsystem
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param shellprofile
-function hadoop_add_profile
-{
- # shellcheck disable=SC2086
- hadoop_add_param HADOOP_SHELL_PROFILES $1 $1
-}
-
-## @description Add a file system object (directory, file,
-## @description wildcard, ...) to the classpath. Optionally provide
-## @description a hint as to where in the classpath it should go.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param object
-## @param [before|after]
-## @return 0 = success (added or duplicate)
-## @return 1 = failure (doesn't exist or some other reason)
-function hadoop_add_classpath
-{
- # However, with classpath (& JLP), we can do dedupe
- # along with some sanity checking (e.g., missing directories)
- # since we have a better idea of what is legal
- #
- # for wildcard at end, we can
- # at least check the dir exists
- if [[ $1 =~ ^.*\*$ ]]; then
- local mp
- mp=$(dirname "$1")
- if [[ ! -d "${mp}" ]]; then
- hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
- return 1
- fi
-
- # no wildcard in the middle, so check existence
- # (doesn't matter *what* it is)
- elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
- hadoop_debug "Rejected CLASSPATH: $1 (does not exist)"
- return 1
- fi
- if [[ -z "${CLASSPATH}" ]]; then
- CLASSPATH=$1
- hadoop_debug "Initial CLASSPATH=$1"
- elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then
- if [[ "$2" = "before" ]]; then
- CLASSPATH="$1:${CLASSPATH}"
- hadoop_debug "Prepend CLASSPATH: $1"
- else
- CLASSPATH+=:$1
- hadoop_debug "Append CLASSPATH: $1"
- fi
- else
- hadoop_debug "Dupe CLASSPATH: $1"
- fi
- return 0
-}
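-
-# For example, prepending the configuration directory and appending a jar
-# wildcard, as the finalize/common routines below do:
-#
-#   hadoop_add_classpath "${HADOOP_CONF_DIR}" before
-#   hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'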
-
-## @description Add a file system object (directory, file,
-## @description wildcard, ...) to the colonpath. Optionally provide
-## @description a hint as to where in the colonpath it should go.
-## @description Prior to adding, objects are checked for duplication
-## @description and for existence. Many other functions use
-## @description this function as their base implementation
-## @description including `hadoop_add_javalibpath` and `hadoop_add_ldlibpath`.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param envvar
-## @param object
-## @param [before|after]
-## @return 0 = success (added or duplicate)
-## @return 1 = failure (doesn't exist or some other reason)
-function hadoop_add_colonpath
-{
- # this is CLASSPATH, JLP, etc but with dedupe but no
- # other checking
- if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then
- if [[ -z "${!1}" ]]; then
- # shellcheck disable=SC2086
- eval $1="'$2'"
- hadoop_debug "Initial colonpath($1): $2"
- elif [[ "$3" = "before" ]]; then
- # shellcheck disable=SC2086
- eval $1="'$2:${!1}'"
- hadoop_debug "Prepend colonpath($1): $2"
- else
- # shellcheck disable=SC2086
- eval $1+=":'$2'"
- hadoop_debug "Append colonpath($1): $2"
- fi
- return 0
- fi
- hadoop_debug "Rejected colonpath($1): $2"
- return 1
-}
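-
-# For example, a direct call that the javalibpath helper below boils down
-# to (the directory is illustrative):
-#
-#   hadoop_add_colonpath JAVA_LIBRARY_PATH "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}" after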
-
-## @description Add a file system object (directory, file,
-## @description wildcard, ...) to the Java JNI path. Optionally
-## @description provide a hint as to where in the Java JNI path
-## @description it should go.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param object
-## @param [before|after]
-## @return 0 = success (added or duplicate)
-## @return 1 = failure (doesn't exist or some other reason)
-function hadoop_add_javalibpath
-{
- # specialized function for a common use case
- hadoop_add_colonpath JAVA_LIBRARY_PATH "$1" "$2"
-}
-
-## @description Add a file system object (directory, file,
-## @description wildcard, ...) to the LD_LIBRARY_PATH. Optionally
-## @description provide a hint as to where in the LD_LIBRARY_PATH
-## @description it should go.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param object
-## @param [before|after]
-## @return 0 = success (added or duplicate)
-## @return 1 = failure (doesn't exist or some other reason)
-function hadoop_add_ldlibpath
-{
- local status
- # specialized function for a common use case
- hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2"
- status=$?
-
- # note that we export this
- export LD_LIBRARY_PATH
- return ${status}
-}
-
-## @description Add the common/core Hadoop components to the
-## @description environment
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @returns 1 on failure, may exit
-## @returns 0 on success
-function hadoop_add_common_to_classpath
-{
- #
- # get all of the common jars+config in the path
- #
-
- if [[ -z "${HADOOP_COMMON_HOME}"
- || -z "${HADOOP_COMMON_DIR}"
- || -z "${HADOOP_COMMON_LIB_JARS_DIR}" ]]; then
- hadoop_debug "COMMON_HOME=${HADOOP_COMMON_HOME}"
- hadoop_debug "COMMON_DIR=${HADOOP_COMMON_DIR}"
- hadoop_debug "COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR}"
- hadoop_error "ERROR: HADOOP_COMMON_HOME or related vars are not configured."
- exit 1
- fi
-
- # developers
- if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
- hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes"
- fi
-
- hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'
- hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
-}
-
-## @description Run libexec/tools/module.sh to add to the classpath
-## @description environment
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param module
-function hadoop_add_to_classpath_tools
-{
- declare module=$1
-
- if [[ -f "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" ]]; then
- # shellcheck disable=SC1090
- . "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh"
- else
- hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found."
- fi
-
- if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then
- "hadoop_classpath_tools_${module}"
- fi
-}
-
-## @description Add the user's custom classpath settings to the
-## @description environment
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_add_to_classpath_userpath
-{
- # Add the user-specified HADOOP_CLASSPATH to the
- # official CLASSPATH env var if HADOOP_USE_CLIENT_CLASSLOADER
- # is not set.
- # Add it first or last depending on if user has
- # set env-var HADOOP_USER_CLASSPATH_FIRST
- # we'll also dedupe it, because we're cool like that.
- #
- declare -a array
- declare -i c=0
- declare -i j
- declare -i i
- declare idx
-
- if [[ -n "${HADOOP_CLASSPATH}" ]]; then
- # I wonder if Java runs on VMS.
- for idx in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
- array[${c}]=${idx}
- ((c=c+1))
- done
-
- # bats gets confused by j getting set to 0
- ((j=c-1)) || ${QATESTMODE}
-
- if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then
- if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
- for ((i=0; i<=j; i++)); do
- hadoop_add_classpath "${array[$i]}" after
- done
- else
- for ((i=j; i>=0; i--)); do
- hadoop_add_classpath "${array[$i]}" before
- done
- fi
- fi
- fi
-}
-
-## @description Routine to configure any OS-specific settings.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @return may exit on failure conditions
-function hadoop_os_tricks
-{
- local bindv6only
-
- HADOOP_IS_CYGWIN=false
- case ${HADOOP_OS_TYPE} in
- Darwin)
- if [[ -z "${JAVA_HOME}" ]]; then
- if [[ -x /usr/libexec/java_home ]]; then
- JAVA_HOME="$(/usr/libexec/java_home)"
- export JAVA_HOME
- else
- JAVA_HOME=/Library/Java/Home
- export JAVA_HOME
- fi
- fi
- ;;
- Linux)
-
- # Newer versions of glibc use an arena memory allocator that
- # causes virtual # memory usage to explode. This interacts badly
- # with the many threads that we use in Hadoop. Tune the variable
- # down to prevent vmem explosion.
- export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
- # we put this in QA test mode off so that non-Linux can test
- if [[ "${QATESTMODE}" = true ]]; then
- return
- fi
-
- # NOTE! HADOOP_ALLOW_IPV6 is a developer hook. We leave it
- # undocumented in hadoop-env.sh because we don't want users to
- # shoot themselves in the foot while devs make IPv6 work.
-
- bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
-
- if [[ -n "${bindv6only}" ]] &&
- [[ "${bindv6only}" -eq "1" ]] &&
- [[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then
- hadoop_error "ERROR: \"net.ipv6.bindv6only\" is set to 1 "
- hadoop_error "ERROR: Hadoop networking could be broken. Aborting."
- hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
- exit 1
- fi
- ;;
- CYGWIN*)
- # Flag that we're running on Cygwin to trigger path translation later.
- HADOOP_IS_CYGWIN=true
- ;;
- esac
-}
-
-## @description Configure/verify ${JAVA_HOME}
-## @audience public
-## @stability stable
-## @replaceable yes
-## @return may exit on failure conditions
-function hadoop_java_setup
-{
- # Bail if we did not detect it
- if [[ -z "${JAVA_HOME}" ]]; then
- hadoop_error "ERROR: JAVA_HOME is not set and could not be found."
- exit 1
- fi
-
- if [[ ! -d "${JAVA_HOME}" ]]; then
- hadoop_error "ERROR: JAVA_HOME ${JAVA_HOME} does not exist."
- exit 1
- fi
-
- JAVA="${JAVA_HOME}/bin/java"
-
- if [[ ! -x "$JAVA" ]]; then
- hadoop_error "ERROR: $JAVA is not executable."
- exit 1
- fi
-}
-
-## @description Finish Java JNI paths prior to execution
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_finalize_libpaths
-{
- if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
- hadoop_translate_cygwin_path JAVA_LIBRARY_PATH
- hadoop_add_param HADOOP_OPTS java.library.path \
- "-Djava.library.path=${JAVA_LIBRARY_PATH}"
- export LD_LIBRARY_PATH
- fi
-}
-
-## @description Finish Java heap parameters prior to execution
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_finalize_hadoop_heap
-{
- if [[ -n "${HADOOP_HEAPSIZE_MAX}" ]]; then
- if [[ "${HADOOP_HEAPSIZE_MAX}" =~ ^[0-9]+$ ]]; then
- HADOOP_HEAPSIZE_MAX="${HADOOP_HEAPSIZE_MAX}m"
- fi
- hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE_MAX}"
- fi
-
- # backwards compatibility
- if [[ -n "${HADOOP_HEAPSIZE}" ]]; then
- if [[ "${HADOOP_HEAPSIZE}" =~ ^[0-9]+$ ]]; then
- HADOOP_HEAPSIZE="${HADOOP_HEAPSIZE}m"
- fi
- hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE}"
- fi
-
- if [[ -n "${HADOOP_HEAPSIZE_MIN}" ]]; then
- if [[ "${HADOOP_HEAPSIZE_MIN}" =~ ^[0-9]+$ ]]; then
- HADOOP_HEAPSIZE_MIN="${HADOOP_HEAPSIZE_MIN}m"
- fi
- hadoop_add_param HADOOP_OPTS Xms "-Xms${HADOOP_HEAPSIZE_MIN}"
- fi
-}
-
-## @description Converts the contents of the variable name
-## @description `varnameref` into the equivalent Windows path.
-## @description If the second parameter is true, then `varnameref`
-## @description is treated as though it was a path list.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param varnameref
-## @param [true]
-function hadoop_translate_cygwin_path
-{
- if [[ "${HADOOP_IS_CYGWIN}" = "true" ]]; then
- if [[ "$2" = "true" ]]; then
- #shellcheck disable=SC2016
- eval "$1"='$(cygpath -p -w "${!1}" 2>/dev/null)'
- else
- #shellcheck disable=SC2016
- eval "$1"='$(cygpath -w "${!1}" 2>/dev/null)'
- fi
- fi
-}
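-
-# For example, converting a single path versus a colon-separated path list,
-# as the finalize routines below do:
-#
-#   hadoop_translate_cygwin_path HADOOP_LOG_DIR
-#   hadoop_translate_cygwin_path CLASSPATH true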
-
-## @description Adds the HADOOP_CLIENT_OPTS variable to
-## @description HADOOP_OPTS if HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false
-## @audience public
-## @stability stable
-## @replaceable yes
-function hadoop_add_client_opts
-{
- if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = false
- || -z "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" ]]; then
- hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
- HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
- fi
-}
-
-## @description Finish configuring Hadoop specific system properties
-## @description prior to executing Java
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_finalize_hadoop_opts
-{
- hadoop_translate_cygwin_path HADOOP_LOG_DIR
- hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
- hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
- hadoop_translate_cygwin_path HADOOP_HOME
- export HADOOP_HOME
- hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
- hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}"
- hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
- hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}"
- hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
-}
-
-## @description Finish Java classpath prior to execution
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_finalize_classpath
-{
- hadoop_add_classpath "${HADOOP_CONF_DIR}" before
-
- # user classpath gets added at the last minute. this allows
- # override of CONF dirs and more
- hadoop_add_to_classpath_userpath
- hadoop_translate_cygwin_path CLASSPATH true
-}
-
-## @description Finish all the remaining environment settings prior
-## @description to executing Java. This is a wrapper that calls
-## @description the other `finalize` routines.
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_finalize
-{
- hadoop_shellprofiles_finalize
-
- hadoop_finalize_classpath
- hadoop_finalize_libpaths
- hadoop_finalize_hadoop_heap
- hadoop_finalize_hadoop_opts
-
- hadoop_translate_cygwin_path HADOOP_HOME
- hadoop_translate_cygwin_path HADOOP_CONF_DIR
- hadoop_translate_cygwin_path HADOOP_COMMON_HOME
- hadoop_translate_cygwin_path HADOOP_HDFS_HOME
- hadoop_translate_cygwin_path HADOOP_YARN_HOME
- hadoop_translate_cygwin_path HADOOP_MAPRED_HOME
-}
-
-## @description Print usage information and exit with the passed
-## @description `exitcode`
-## @audience public
-## @stability stable
-## @replaceable no
-## @param exitcode
-## @return This function will always exit.
-function hadoop_exit_with_usage
-{
- local exitcode=$1
- if [[ -z $exitcode ]]; then
- exitcode=1
- fi
- # shellcheck disable=SC2034
- if declare -F hadoop_usage >/dev/null ; then
- hadoop_usage
- elif [[ -x /usr/bin/cowsay ]]; then
- /usr/bin/cowsay -f elephant "Sorry, no help available."
- else
- hadoop_error "Sorry, no help available."
- fi
- exit $exitcode
-}
-
-## @description Verify that prerequisites have been met prior to
-## @description executing a privileged program.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @return This routine may exit.
-function hadoop_verify_secure_prereq
-{
- # if you are on an OS like Illumos that has functional roles
- # and you are using pfexec, you'll probably want to change
- # this.
-
- if ! hadoop_privilege_check && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
- hadoop_error "ERROR: You must be a privileged user in order to run a secure service."
- exit 1
- else
- return 0
- fi
-}
-
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_setup_secure_service
-{
- # need a more complicated setup? replace me!
-
- HADOOP_PID_DIR=${HADOOP_SECURE_PID_DIR}
- HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
-}
-
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_verify_piddir
-{
- if [[ -z "${HADOOP_PID_DIR}" ]]; then
- hadoop_error "No pid directory defined."
- exit 1
- fi
- hadoop_mkdir "${HADOOP_PID_DIR}"
- touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting."
- exit 1
- fi
- rm "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
-}
-
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_verify_logdir
-{
- if [[ -z "${HADOOP_LOG_DIR}" ]]; then
- hadoop_error "No log directory defined."
- exit 1
- fi
- hadoop_mkdir "${HADOOP_LOG_DIR}"
- touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting."
- exit 1
- fi
- rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
-}
-
-## @description Determine the status of the daemon referenced
-## @description by `pidfile`
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param pidfile
-## @return (mostly) LSB 4.1.0 compatible status
-function hadoop_status_daemon
-{
- #
- # LSB 4.1.0 compatible status command (1)
- #
- # 0 = program is running
- # 1 = dead, but still a pid (2)
- # 2 = (not used by us)
- # 3 = not running
- #
- # 1 - this is not an endorsement of the LSB
- #
- # 2 - technically, the specification says /var/run/pid, so
- # we should never return this value, but we're giving
- # them the benefit of the doubt and returning 1 even if
- # our pid is not in /var/run.
- #
-
- local pidfile=$1
- shift
-
- local pid
- local pspid
-
- if [[ -f "${pidfile}" ]]; then
- pid=$(cat "${pidfile}")
- if pspid=$(ps -o args= -p"${pid}" 2>/dev/null); then
- # this is to check that the running process we found is actually the same
- # daemon that we're interested in
- if [[ ${pspid} =~ -Dproc_${daemonname} ]]; then
- return 0
- fi
- fi
- return 1
- fi
- return 3
-}
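-
-# For example, the daemon handlers below branch on the status code:
-#
-#   hadoop_status_daemon "${daemon_pidfile}"
-#   if [[ $? == 0 ]]; then
-#     hadoop_error "already running; stop it first"
-#   fi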
-
-## @description Execute the Java `class`, passing along any `options`.
-## @description Additionally, set the Java property -Dproc_`command`.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param command
-## @param class
-## @param [options]
-function hadoop_java_exec
-{
- # run a java command. this is used for
- # non-daemons
-
- local command=$1
- local class=$2
- shift 2
-
- hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
- hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
- hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}"
- hadoop_debug "java: ${JAVA}"
- hadoop_debug "Class name: ${class}"
- hadoop_debug "Command line options: $*"
-
- export CLASSPATH
- #shellcheck disable=SC2086
- exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
-}
-
-## @description Start a non-privileged daemon in the foreground.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param command
-## @param class
-## @param pidfile
-## @param [options]
-function hadoop_start_daemon
-{
- # this is our non-privileged daemon starter
- # that fires up a daemon in the *foreground*
- # so complex! so wow! much java!
- local command=$1
- local class=$2
- local pidfile=$3
- shift 3
-
- hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
- hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
- hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}"
- hadoop_debug "java: ${JAVA}"
- hadoop_debug "Class name: ${class}"
- hadoop_debug "Command line options: $*"
-
- # this is for the non-daemon pid creation
- #shellcheck disable=SC2086
- echo $$ > "${pidfile}" 2>/dev/null
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot write ${command} pid ${pidfile}."
- fi
-
- export CLASSPATH
- #shellcheck disable=SC2086
- exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
-}
-
-## @description Start a non-privileged daemon in the background.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param command
-## @param class
-## @param pidfile
-## @param outfile
-## @param [options]
-function hadoop_start_daemon_wrapper
-{
- local daemonname=$1
- local class=$2
- local pidfile=$3
- local outfile=$4
- shift 4
-
- local counter
-
- hadoop_rotate_log "${outfile}"
-
- hadoop_start_daemon "${daemonname}" \
- "$class" \
- "${pidfile}" \
- "$@" >> "${outfile}" 2>&1 < /dev/null &
-
- # we need to avoid a race condition here
- # so let's wait for the fork to finish
- # before overriding with the daemonized pid
- (( counter=0 ))
- while [[ ! -f ${pidfile} && ${counter} -le 5 ]]; do
- sleep 1
- (( counter++ ))
- done
-
- # this is for daemon pid creation
- #shellcheck disable=SC2086
- echo $! > "${pidfile}" 2>/dev/null
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot write ${daemonname} pid ${pidfile}."
- fi
-
- # shellcheck disable=SC2086
- renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
- fi
-
- # shellcheck disable=SC2086
- disown %+ >/dev/null 2>&1
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
- fi
- sleep 1
-
- # capture the ulimit output
- ulimit -a >> "${outfile}" 2>&1
-
- # shellcheck disable=SC2086
- if ! ps -p $! >/dev/null 2>&1; then
- return 1
- fi
- return 0
-}
-
-## @description Start a privileged daemon in the foreground.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param command
-## @param class
-## @param daemonpidfile
-## @param daemonoutfile
-## @param daemonerrfile
-## @param wrapperpidfile
-## @param [options]
-function hadoop_start_secure_daemon
-{
- # this is used to launch a secure daemon in the *foreground*
- #
- local daemonname=$1
- local class=$2
-
- # pid file to create for our daemon
- local daemonpidfile=$3
-
- # where to send stdout. jsvc has bad habits so this *may* be &1
- # which means you send it to stdout!
- local daemonoutfile=$4
-
- # where to send stderr. same thing, except &2 = stderr
- local daemonerrfile=$5
- local privpidfile=$6
- shift 6
-
- hadoop_rotate_log "${daemonoutfile}"
- hadoop_rotate_log "${daemonerrfile}"
-
- # shellcheck disable=SC2153
- jsvc="${JSVC_HOME}/jsvc"
- if [[ ! -f "${jsvc}" ]]; then
- hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure"
- hadoop_error "or privileged daemons. Please download and install jsvc from "
- hadoop_error "http://archive.apache.org/dist/commons/daemon/binaries/ "
- hadoop_error "and set JSVC_HOME to the directory containing the jsvc binary."
- exit 1
- fi
-
- # note that shellcheck will throw an SC2086 warning here
- # that is bogus for our use case, since it doesn't
- # properly support multi-line situations
-
- hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
- hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
- hadoop_debug "Final JSVC_HOME: ${JSVC_HOME}"
- hadoop_debug "jsvc: ${jsvc}"
- hadoop_debug "Final HADOOP_DAEMON_JSVC_EXTRA_OPTS: ${HADOOP_DAEMON_JSVC_EXTRA_OPTS}"
- hadoop_debug "Class name: ${class}"
- hadoop_debug "Command line options: $*"
-
- #shellcheck disable=SC2086
- echo $$ > "${privpidfile}" 2>/dev/null
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot write ${daemonname} pid ${privpidfile}."
- fi
-
- # shellcheck disable=SC2086
- exec "${jsvc}" \
- "-Dproc_${daemonname}" \
- ${HADOOP_DAEMON_JSVC_EXTRA_OPTS} \
- -outfile "${daemonoutfile}" \
- -errfile "${daemonerrfile}" \
- -pidfile "${daemonpidfile}" \
- -nodetach \
- -user "${HADOOP_SECURE_USER}" \
- -cp "${CLASSPATH}" \
- ${HADOOP_OPTS} \
- "${class}" "$@"
-}
-
-## @description Start a privileged daemon in the background.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param command
-## @param class
-## @param daemonpidfile
-## @param daemonoutfile
-## @param wrapperpidfile
-## @param wrapperoutfile
-## @param daemonerrfile
-## @param [options]
-function hadoop_start_secure_daemon_wrapper
-{
- # this wraps hadoop_start_secure_daemon to take care
- # of the dirty work to launch a daemon in the background!
- local daemonname=$1
- local class=$2
-
- # same rules as hadoop_start_secure_daemon except we
- # have some additional parameters
-
- local daemonpidfile=$3
-
- local daemonoutfile=$4
-
- # the pid file of the subprocess that spawned our
- # secure launcher
- local jsvcpidfile=$5
-
- # the output of the subprocess that spawned our secure
- # launcher
- local jsvcoutfile=$6
-
- local daemonerrfile=$7
- shift 7
-
- local counter
-
- hadoop_rotate_log "${jsvcoutfile}"
-
- hadoop_start_secure_daemon \
- "${daemonname}" \
- "${class}" \
- "${daemonpidfile}" \
- "${daemonoutfile}" \
- "${daemonerrfile}" \
- "${jsvcpidfile}" "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null &
-
- # we need to avoid a race condition here
- # so let's wait for the fork to finish
- # before overriding with the daemonized pid
- (( counter=0 ))
- while [[ ! -f ${daemonpidfile} && ${counter} -le 5 ]]; do
- sleep 1
- (( counter++ ))
- done
-
- #shellcheck disable=SC2086
- if ! echo $! > "${jsvcpidfile}"; then
- hadoop_error "ERROR: Cannot write ${daemonname} pid ${jsvcpidfile}."
- fi
-
- sleep 1
- #shellcheck disable=SC2086
- renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
- fi
- if [[ -f "${daemonpidfile}" ]]; then
- #shellcheck disable=SC2046
- renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}" 2>/dev/null) >/dev/null 2>&1
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot set priority of ${daemonname} process $(cat "${daemonpidfile}" 2>/dev/null)"
- fi
- fi
- #shellcheck disable=SC2046
- disown %+ >/dev/null 2>&1
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
- fi
- # capture the ulimit output
- su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1
- #shellcheck disable=SC2086
- if ! ps -p $! >/dev/null 2>&1; then
- return 1
- fi
- return 0
-}
-
-## @description Wait till process dies or till timeout
-## @audience private
-## @stability evolving
-## @param pid
-## @param timeout
-function wait_process_to_die_or_timeout
-{
- local pid=$1
- local timeout=$2
-
- # Normalize timeout
- # Round up or down
- timeout=$(printf "%.0f\n" "${timeout}")
- if [[ ${timeout} -lt 1 ]]; then
- # minimum 1 second
- timeout=1
- fi
-
- # Wait to see if it's still alive
- for (( i=0; i < "${timeout}"; i++ ))
- do
- if kill -0 "${pid}" > /dev/null 2>&1; then
- sleep 1
- else
- break
- fi
- done
-}
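-
-# For example, hadoop_stop_daemon below sends a TERM and then waits up to
-# HADOOP_STOP_TIMEOUT seconds before escalating to kill -9:
-#
-#   kill "${pid}"
-#   wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}"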
-
-## @description Stop the non-privileged `command` daemon
-## @description that is running at `pidfile`.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param command
-## @param pidfile
-function hadoop_stop_daemon
-{
- local cmd=$1
- local pidfile=$2
- shift 2
-
- local pid
- local cur_pid
-
- if [[ -f "${pidfile}" ]]; then
- pid=$(cat "$pidfile")
-
- kill "${pid}" >/dev/null 2>&1
-
- wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}"
-
- if kill -0 "${pid}" > /dev/null 2>&1; then
- hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9"
- kill -9 "${pid}" >/dev/null 2>&1
- fi
- wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}"
- if ps -p "${pid}" > /dev/null 2>&1; then
- hadoop_error "ERROR: Unable to kill ${pid}"
- else
- cur_pid=$(cat "$pidfile")
- if [[ "${pid}" = "${cur_pid}" ]]; then
- rm -f "${pidfile}" >/dev/null 2>&1
- else
- hadoop_error "WARNING: pid has changed for ${cmd}, skip deleting pid file"
- fi
- fi
- fi
-}
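-
-# For example, stopping a daemon tracked by a pid file (names are illustrative):
-#
-#   hadoop_stop_daemon datanode "${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-datanode.pid"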
-
-## @description Stop the privileged `command` daemon
-## @description that is running at `daemonpidfile` and launched with
-## @description the wrapper at `wrapperpidfile`.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param command
-## @param daemonpidfile
-## @param wrapperpidfile
-function hadoop_stop_secure_daemon
-{
- local command=$1
- local daemonpidfile=$2
- local privpidfile=$3
- shift 3
- local ret
-
- local daemon_pid
- local priv_pid
- local cur_daemon_pid
- local cur_priv_pid
-
- daemon_pid=$(cat "$daemonpidfile")
- priv_pid=$(cat "$privpidfile")
-
- hadoop_stop_daemon "${command}" "${daemonpidfile}"
- ret=$?
-
- cur_daemon_pid=$(cat "$daemonpidfile")
- cur_priv_pid=$(cat "$privpidfile")
-
- if [[ "${daemon_pid}" = "${cur_daemon_pid}" ]]; then
- rm -f "${daemonpidfile}" >/dev/null 2>&1
- else
- hadoop_error "WARNING: daemon pid has changed for ${command}, skip deleting daemon pid file"
- fi
-
- if [[ "${priv_pid}" = "${cur_priv_pid}" ]]; then
- rm -f "${privpidfile}" >/dev/null 2>&1
- else
- hadoop_error "WARNING: priv pid has changed for ${command}, skip deleting priv pid file"
- fi
- return ${ret}
-}
-
-## @description Manage a non-privileged daemon.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param [start|stop|status|default]
-## @param command
-## @param class
-## @param daemonpidfile
-## @param daemonoutfile
-## @param [options]
-function hadoop_daemon_handler
-{
- local daemonmode=$1
- local daemonname=$2
- local class=$3
- local daemon_pidfile=$4
- local daemon_outfile=$5
- shift 5
-
- case ${daemonmode} in
- status)
- hadoop_status_daemon "${daemon_pidfile}"
- exit $?
- ;;
-
- stop)
- hadoop_stop_daemon "${daemonname}" "${daemon_pidfile}"
- exit $?
- ;;
-
- ##COMPAT -- older hadoops would also start daemons by default
- start|default)
- hadoop_verify_piddir
- hadoop_verify_logdir
- hadoop_status_daemon "${daemon_pidfile}"
- if [[ $? == 0 ]]; then
- hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first."
- exit 1
- else
- # stale pid file, so just remove it and continue on
- rm -f "${daemon_pidfile}" >/dev/null 2>&1
- fi
- ##COMPAT - differentiate between --daemon start and nothing
- # "nothing" shouldn't detach
- if [[ "$daemonmode" = "default" ]]; then
- hadoop_start_daemon "${daemonname}" "${class}" "${daemon_pidfile}" "$@"
- else
- hadoop_start_daemon_wrapper "${daemonname}" \
- "${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@"
- fi
- ;;
- esac
-}
-
-## @description Manage a privileged daemon.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param [start|stop|status|default]
-## @param command
-## @param class
-## @param daemonpidfile
-## @param daemonoutfile
-## @param wrapperpidfile
-## @param wrapperoutfile
-## @param wrappererrfile
-## @param [options]
-function hadoop_secure_daemon_handler
-{
- local daemonmode=$1
- local daemonname=$2
- local classname=$3
- local daemon_pidfile=$4
- local daemon_outfile=$5
- local priv_pidfile=$6
- local priv_outfile=$7
- local priv_errfile=$8
- shift 8
-
- case ${daemonmode} in
- status)
- hadoop_status_daemon "${daemon_pidfile}"
- exit $?
- ;;
-
- stop)
- hadoop_stop_secure_daemon "${daemonname}" \
- "${daemon_pidfile}" "${priv_pidfile}"
- exit $?
- ;;
-
- ##COMPAT -- older hadoops would also start daemons by default
- start|default)
- hadoop_verify_piddir
- hadoop_verify_logdir
- hadoop_status_daemon "${daemon_pidfile}"
- if [[ $? == 0 ]]; then
- hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first."
- exit 1
- else
- # stale pid file, so just remove it and continue on
- rm -f "${daemon_pidfile}" >/dev/null 2>&1
- fi
-
- ##COMPAT - differentiate between --daemon start and nothing
- # "nothing" shouldn't detach
- if [[ "${daemonmode}" = "default" ]]; then
- hadoop_start_secure_daemon "${daemonname}" "${classname}" \
- "${daemon_pidfile}" "${daemon_outfile}" \
- "${priv_errfile}" "${priv_pidfile}" "$@"
- else
- hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \
- "${daemon_pidfile}" "${daemon_outfile}" \
- "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@"
- fi
- ;;
- esac
-}
-
-## @description Autodetect whether this is a privileged subcommand
-## @description by checking whether a privileged user var exists
-## @description and HADOOP_SECURE_CLASSNAME is defined
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param command
-## @param subcommand
-## @return 1 = not priv
-## @return 0 = priv
-function hadoop_detect_priv_subcmd
-{
- declare program=$1
- declare command=$2
-
- if [[ -z "${HADOOP_SECURE_CLASSNAME}" ]]; then
- hadoop_debug "No secure classname defined."
- return 1
- fi
-
- uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)
- if [[ -z "${!uvar}" ]]; then
- hadoop_debug "No secure user defined."
- return 1
- fi
- return 0
-}
-
-## @description Build custom subcommand var
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param command
-## @param subcommand
-## @param customid
-## @return string
-function hadoop_build_custom_subcmd_var
-{
- declare program=$1
- declare command=$2
- declare custom=$3
- declare uprogram
- declare ucommand
-
- if [[ -z "${BASH_VERSINFO[0]}" ]] \
- || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
- uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
- ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
- else
- uprogram=${program^^}
- ucommand=${command^^}
- fi
-
- echo "${uprogram}_${ucommand}_${custom}"
-}
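A quick usage sketch (assuming hadoop-functions.sh has been sourced; `hdfs` and `datanode` are only example values): the function upper-cases both parts and joins them with the custom suffix, and callers then read the result through bash indirect expansion.

  uvar=$(hadoop_build_custom_subcmd_var hdfs datanode SECURE_USER)
  echo "${uvar}"    # HDFS_DATANODE_SECURE_USER
  echo "${!uvar}"   # whatever the admin exported for that variable, if anything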
-
-## @description Verify that username in a var converts to user id
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param userstring
-## @return 0 for success
-## @return 1 for failure
-function hadoop_verify_user_resolves
-{
- declare userstr=$1
-
- if [[ -z ${userstr} || -z ${!userstr} ]] ; then
- return 1
- fi
-
- id -u "${!userstr}" >/dev/null 2>&1
-}
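For example (a sketch that assumes hadoop-functions.sh is sourced and an `hdfs` account exists on the host), the check succeeds only when the named variable is set and its value maps to a real user id:

  export HDFS_DATANODE_SECURE_USER=hdfs
  if hadoop_verify_user_resolves HDFS_DATANODE_SECURE_USER; then
    echo "secure user resolves"
  else
    echo "secure user missing or unresolvable"
  fi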
-
-## @description Verify that ${USER} is allowed to execute the
-## @description given subcommand.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param command
-## @param subcommand
-## @return return 0 on success
-## @return exit 1 on failure
-function hadoop_verify_user_perm
-{
- declare program=$1
- declare command=$2
- declare uvar
-
- if [[ ${command} =~ \. ]]; then
- return 1
- fi
-
- uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
-
- if [[ -n ${!uvar} ]]; then
- if [[ ${!uvar} != "${USER}" ]]; then
- hadoop_error "ERROR: ${command} can only be executed by ${!uvar}."
- exit 1
- fi
- fi
- return 0
-}
-
-## @description Determine whether the given subcommand should be
-## @description re-executed as the configured (command)_(subcommand)_USER.
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param command
-## @param subcommand
-## @return 1 on no re-exec needed
-## @return 0 on need to re-exec
-function hadoop_need_reexec
-{
- declare program=$1
- declare command=$2
- declare uvar
-
- # we've already been re-execed, bail
-
- if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
- return 1
- fi
-
- if [[ ${command} =~ \. ]]; then
- return 1
- fi
-
- # if we have privilege, and the _USER is defined, and _USER is
- # set to someone who isn't us, then yes, we should re-exec.
- # otherwise no, don't re-exec and let the system deal with it.
-
- if hadoop_privilege_check; then
- uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
- if [[ -n ${!uvar} ]]; then
- if [[ ${!uvar} != "${USER}" ]]; then
- return 0
- fi
- fi
- fi
- return 1
-}
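A minimal sketch of the intended flow, assuming the caller is root and HDFS_NAMENODE_USER is set in hadoop-env.sh; the wrapper scripts are what actually perform the re-exec, roughly along these lines:

  export HDFS_NAMENODE_USER=hdfs
  if hadoop_need_reexec hdfs namenode; then
    # the real wrappers re-exec the command as the configured user,
    # passing --reexec so this check is not repeated
    echo "would re-exec namenode as ${HDFS_NAMENODE_USER}"
  fi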
-
-## @description Add custom (program)_(command)_OPTS to HADOOP_OPTS.
-## @description Also handles the deprecated cases from pre-3.x.
-## @audience public
-## @stability evolving
-## @replaceable yes
-## @param program
-## @param subcommand
-## @return will exit on failure conditions
-function hadoop_subcommand_opts
-{
- declare program=$1
- declare command=$2
- declare uvar
- declare depvar
- declare uprogram
- declare ucommand
-
- if [[ -z "${program}" || -z "${command}" ]]; then
- return 1
- fi
-
- if [[ ${command} =~ \. ]]; then
- return 1
- fi
-
- # bash 4 and up have built-in ways to upper and lower
- # case the contents of vars. This is faster than
- # calling tr.
-
- ## We don't call hadoop_build_custom_subcmd_var here
- ## since we need to construct this for the deprecation
- ## cases. For Hadoop 4.x, this needs to get cleaned up.
-
- if [[ -z "${BASH_VERSINFO[0]}" ]] \
- || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
- uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
- ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
- else
- uprogram=${program^^}
- ucommand=${command^^}
- fi
-
- uvar="${uprogram}_${ucommand}_OPTS"
-
- # Let's handle all of the deprecation cases early
- # HADOOP_NAMENODE_OPTS -> HDFS_NAMENODE_OPTS
-
- depvar="HADOOP_${ucommand}_OPTS"
-
- if [[ "${depvar}" != "${uvar}" ]]; then
- if [[ -n "${!depvar}" ]]; then
- hadoop_deprecate_envvar "${depvar}" "${uvar}"
- fi
- fi
-
- if [[ -n ${!uvar} ]]; then
- hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
- HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
- return 0
- fi
-}
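An illustrative sketch (hadoop-functions.sh sourced; the -Xmx value is arbitrary): the deprecated pre-3.x variable is still honored, but it is mapped onto the new HDFS_NAMENODE_OPTS name before being appended to HADOOP_OPTS.

  export HADOOP_NAMENODE_OPTS="-Xmx4g"   # pre-3.x name, triggers a deprecation warning
  hadoop_subcommand_opts hdfs namenode
  echo "${HADOOP_OPTS}"                  # now ends with -Xmx4g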
-
-## @description Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS.
-## @description This *does not* handle the pre-3.x deprecated cases
-## @audience public
-## @stability stable
-## @replaceable yes
-## @param program
-## @param subcommand
-## @return will exit on failure conditions
-function hadoop_subcommand_secure_opts
-{
- declare program=$1
- declare command=$2
- declare uvar
- declare uprogram
- declare ucommand
-
- if [[ -z "${program}" || -z "${command}" ]]; then
- return 1
- fi
-
- # HDFS_DATANODE_SECURE_EXTRA_OPTS
- # HDFS_NFS3_SECURE_EXTRA_OPTS
- # ...
- uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_EXTRA_OPTS)
-
- if [[ -n ${!uvar} ]]; then
- hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
- HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
- return 0
- fi
-}
-
-## @description Perform the 'hadoop classpath', etc subcommand with the given
-## @description parameters
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param [parameters]
-## @return will print & exit with no params
-function hadoop_do_classpath_subcommand
-{
- if [[ "$#" -gt 1 ]]; then
- eval "$1"=org.apache.hadoop.util.Classpath
- else
- hadoop_finalize
- echo "${CLASSPATH}"
- exit 0
- fi
-}
-
-## @description Generic shell script option parser. Sets
-## @description HADOOP_PARSE_COUNTER to the number of arguments
-## @description the caller should shift.
-## @audience private
-## @stability evolving
-## @replaceable yes
-## @param [parameters, typically "$@"]
-function hadoop_parse_args
-{
- HADOOP_DAEMON_MODE="default"
- HADOOP_PARSE_COUNTER=0
-
- # not all of the options supported here are supported by all commands
- # however these are:
- hadoop_add_option "--config dir" "Hadoop config directory"
- hadoop_add_option "--debug" "turn on shell script debug mode"
- hadoop_add_option "--help" "usage information"
-
- while true; do
- hadoop_debug "hadoop_parse_args: processing $1"
- case $1 in
- --buildpaths)
- HADOOP_ENABLE_BUILD_PATHS=true
- shift
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
- ;;
- --config)
- shift
- confdir=$1
- shift
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
- if [[ -d "${confdir}" ]]; then
- HADOOP_CONF_DIR="${confdir}"
- elif [[ -z "${confdir}" ]]; then
- hadoop_error "ERROR: No parameter provided for --config "
- hadoop_exit_with_usage 1
- else
- hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
- hadoop_exit_with_usage 1
- fi
- ;;
- --daemon)
- shift
- HADOOP_DAEMON_MODE=$1
- shift
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
- if [[ -z "${HADOOP_DAEMON_MODE}" || \
- ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
- hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
- hadoop_exit_with_usage 1
- fi
- ;;
- --debug)
- shift
- HADOOP_SHELL_SCRIPT_DEBUG=true
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
- ;;
- --help|-help|-h|help|--h|--\?|-\?|\?)
- hadoop_exit_with_usage 0
- ;;
- --hostnames)
- shift
- HADOOP_WORKER_NAMES="$1"
- shift
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
- ;;
- --hosts)
- shift
- hadoop_populate_workers_file "$1"
- shift
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
- ;;
- --loglevel)
- shift
- # shellcheck disable=SC2034
- HADOOP_LOGLEVEL="$1"
- shift
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
- ;;
- --reexec)
- shift
- if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
- hadoop_error "ERROR: re-exec fork bomb prevention: --reexec already called"
- exit 1
- fi
- HADOOP_REEXECED_CMD=true
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
- ;;
- --workers)
- shift
- # shellcheck disable=SC2034
- HADOOP_WORKER_MODE=true
- ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
- ;;
- *)
- break
- ;;
- esac
- done
-
- hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
-}
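A sketch of how a calling script typically consumes the parser; HADOOP_PARSE_COUNTER tells the caller how many leading arguments were consumed:

  hadoop_parse_args "$@"
  shift "${HADOOP_PARSE_COUNTER}"
  # "$@" is now just the subcommand and its arguments, e.g. after
  #   hdfs --config /etc/hadoop --daemon start namenode
  # the remaining arguments start at "namenode"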
-
-## @description Handle subcommands from main program entries
-## @audience private
-## @stability evolving
-## @replaceable yes
-function hadoop_generic_java_subcmd_handler
-{
- declare priv_outfile
- declare priv_errfile
- declare priv_pidfile
- declare daemon_outfile
- declare daemon_pidfile
- declare secureuser
-
- # The default/expected way to determine if a daemon is going to run in secure
- # mode is defined by hadoop_detect_priv_subcmd. If this returns true
- # then setup the secure user var and tell the world we're in secure mode
-
- if hadoop_detect_priv_subcmd "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"; then
- HADOOP_SUBCMD_SECURESERVICE=true
- secureuser=$(hadoop_build_custom_subcmd_var "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" SECURE_USER)
-
- if ! hadoop_verify_user_resolves "${secureuser}"; then
- hadoop_error "ERROR: User defined in ${secureuser} (${!secureuser}) does not exist. Aborting."
- exit 1
- fi
-
- HADOOP_SECURE_USER="${!secureuser}"
- fi
-
- # check if we're running in secure mode.
- # breaking this up from the above lets 3rd parties
- # do things a bit different
- # secure services require some extra setup
- # if yes, then we need to define all of the priv and daemon stuff
- # if not, then we just need to define daemon stuff.
- # note the daemon vars are purposefully different between the two
-
- if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
-
- hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
-
- hadoop_verify_secure_prereq
- hadoop_setup_secure_service
- priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
- priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
- priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
- daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
- daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
- else
- daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
- daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
- fi
-
- # are we actually in daemon mode?
- # if yes, use the daemon logger and the appropriate log file.
- if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
- HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
- if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
- HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
- else
- HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
- fi
- fi
-
- # finish defining the environment: system properties, env vars, class paths, etc.
- hadoop_finalize
-
- # do the hard work of launching a daemon or just executing our interactive
- # java class
- if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then
- if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
- hadoop_secure_daemon_handler \
- "${HADOOP_DAEMON_MODE}" \
- "${HADOOP_SUBCMD}" \
- "${HADOOP_SECURE_CLASSNAME}" \
- "${daemon_pidfile}" \
- "${daemon_outfile}" \
- "${priv_pidfile}" \
- "${priv_outfile}" \
- "${priv_errfile}" \
- "${HADOOP_SUBCMD_ARGS[@]}"
- else
- hadoop_daemon_handler \
- "${HADOOP_DAEMON_MODE}" \
- "${HADOOP_SUBCMD}" \
- "${HADOOP_CLASSNAME}" \
- "${daemon_pidfile}" \
- "${daemon_outfile}" \
- "${HADOOP_SUBCMD_ARGS[@]}"
- fi
- exit $?
- else
- hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}"
- fi
-}
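For orientation, a simplified sketch of the state an entry script such as `hdfs` establishes before delegating here; the variable names mirror the ones referenced above, while the exact values are illustrative only:

  HADOOP_SHELL_EXECNAME=hdfs
  HADOOP_SUBCMD=namenode
  HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.namenode.NameNode
  HADOOP_SUBCMD_SUPPORTDAEMONIZATION=true
  HADOOP_SUBCMD_ARGS=("$@")
  hadoop_generic_java_subcmd_handler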
diff --git a/hadoop-hdds/common/src/main/bin/workers.sh b/hadoop-hdds/common/src/main/bin/workers.sh
deleted file mode 100755
index 05bc5fd8f0fe0..0000000000000
--- a/hadoop-hdds/common/src/main/bin/workers.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Run a shell command on all worker hosts.
-#
-# Environment Variables
-#
-# HADOOP_WORKERS File naming remote hosts.
-# Default is ${HADOOP_CONF_DIR}/workers.
-# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
-# HADOOP_WORKER_SLEEP Seconds to sleep between spawning remote commands.
-# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
-##
-
-function hadoop_usage
-{
- echo "Usage: workers.sh [--config confdir] command..."
-}
-
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
- HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
- this="${BASH_SOURCE-$0}"
- bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
- HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
- . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-else
- echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
- exit 1
-fi
-
-# if no args specified, show usage
-if [[ $# -le 0 ]]; then
- hadoop_exit_with_usage 1
-fi
-
-hadoop_connect_to_hosts "$@"
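A usage sketch, assuming passwordless ssh to the listed hosts and an installed layout where this script lands under ${HADOOP_HOME}/sbin (the paths below are illustrative):

  echo -e "worker1\nworker2" > "${HADOOP_CONF_DIR}/workers"
  "${HADOOP_HOME}/sbin/workers.sh" uptime
  # or point at an ad-hoc hosts file for a one-off command:
  HADOOP_WORKERS=/tmp/hosts "${HADOOP_HOME}/sbin/workers.sh" "jps"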
diff --git a/hadoop-hdds/common/src/main/conf/core-site.xml b/hadoop-hdds/common/src/main/conf/core-site.xml
deleted file mode 100644
index d2ddf893e49eb..0000000000000
--- a/hadoop-hdds/common/src/main/conf/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!-- Licensed under the Apache License, Version 2.0; see the NOTICE file
-     distributed with this work for additional information regarding
-     copyright ownership. -->
-<!-- Put site-specific property overrides in this file. -->
-<configuration>
-</configuration>
diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.cmd b/hadoop-hdds/common/src/main/conf/hadoop-env.cmd
deleted file mode 100644
index 971869597f529..0000000000000
--- a/hadoop-hdds/common/src/main/conf/hadoop-env.cmd
+++ /dev/null
@@ -1,90 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements. See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License. You may obtain a copy of the License at
-@rem
-@rem http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-@rem Set Hadoop-specific environment variables here.
-
-@rem The only required environment variable is JAVA_HOME. All others are
-@rem optional. When running a distributed configuration it is best to
-@rem set JAVA_HOME in this file, so that it is correctly defined on
-@rem remote nodes.
-
-@rem The java implementation to use. Required.
-set JAVA_HOME=%JAVA_HOME%
-
-@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
-@rem set JSVC_HOME=%JSVC_HOME%
-
-@rem set HADOOP_CONF_DIR=
-
-@rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
-if exist %HADOOP_HOME%\contrib\capacity-scheduler (
- if not defined HADOOP_CLASSPATH (
- set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
- ) else (
- set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
- )
-)
-
-@rem The maximum amount of heap to use, in MB. Default is 1000.
-@rem set HADOOP_HEAPSIZE=
-@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
-
-@rem Extra Java runtime options. Empty by default.
-@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
-
-@rem Command specific options appended to HADOOP_OPTS when specified
-if not defined HADOOP_SECURITY_LOGGER (
- set HADOOP_SECURITY_LOGGER=INFO,RFAS
-)
-if not defined HDFS_AUDIT_LOGGER (
- set HDFS_AUDIT_LOGGER=INFO,NullAppender
-)
-
-set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
-set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
-set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
-
-@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
-@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
-
-@rem On secure datanodes, user to run the datanode as after dropping privileges
-set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
-
-@rem Where log files are stored. %HADOOP_HOME%/logs by default.
-@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
-
-@rem Where log files are stored in the secure data environment.
-set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
-
-@rem
-@rem Router-based HDFS Federation specific parameters
-@rem Specify the JVM options to be used when starting the RBF Routers.
-@rem These options will be appended to the options specified as HADOOP_OPTS
-@rem and therefore may override any similar flags set in HADOOP_OPTS
-@rem
-@rem set HADOOP_DFSROUTER_OPTS=""
-@rem
-
-@rem The directory where pid files are stored. /tmp by default.
-@rem NOTE: this should be set to a directory that can only be written to by
-@rem the user that will run the hadoop daemons. Otherwise there is the
-@rem potential for a symlink attack.
-set HADOOP_PID_DIR=%HADOOP_PID_DIR%
-set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
-
-@rem A string representing this instance of hadoop. %USERNAME% by default.
-set HADOOP_IDENT_STRING=%USERNAME%
diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.sh b/hadoop-hdds/common/src/main/conf/hadoop-env.sh
deleted file mode 100644
index e43cd95b047ee..0000000000000
--- a/hadoop-hdds/common/src/main/conf/hadoop-env.sh
+++ /dev/null
@@ -1,439 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-
-##
-## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
-## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS. THEREFORE,
-## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
-## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
-##
-## Precedence rules:
-##
-## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
-##
-## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
-##
-
-# Many of the options here are built from the perspective that users
-# may want to provide OVERWRITING values on the command line.
-# For example:
-#
-# JAVA_HOME=/usr/java/testing hdfs dfs -ls
-#
-# Therefore, the vast majority (BUT NOT ALL!) of these defaults
-# are configured for substitution and not append. If append
-# is preferable, modify this file accordingly.
-
-###
-# Generic settings for HADOOP
-###
-
-# Technically, the only required environment variable is JAVA_HOME.
-# All others are optional. However, the defaults are probably not
-# preferred. Many sites configure these options outside of Hadoop,
-# such as in /etc/profile.d
-
-# The java implementation to use. By default, this environment
-# variable is REQUIRED on ALL platforms except OS X!
-# export JAVA_HOME=
-
-# Location of Hadoop. By default, Hadoop will attempt to determine
-# this location based upon its execution path.
-# export HADOOP_HOME=
-
-# Location of Hadoop's configuration information. i.e., where this
-# file is living. If this is not defined, Hadoop will attempt to
-# locate it based upon its execution path.
-#
-# NOTE: It is recommended that this variable not be set here but in
-# /etc/profile.d or equivalent. Some options (such as
-# --config) may react strangely otherwise.
-#
-# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
-
-# The maximum amount of heap to use (Java -Xmx). If no unit
-# is provided, it will be converted to MB. Daemons will
-# prefer any Xmx setting in their respective _OPT variable.
-# There is no default; the JVM will autoscale based upon machine
-# memory size.
-# export HADOOP_HEAPSIZE_MAX=
-
-# The minimum amount of heap to use (Java -Xms). If no unit
-# is provided, it will be converted to MB. Daemons will
-# prefer any Xms setting in their respective _OPT variable.
-# There is no default; the JVM will autoscale based upon machine
-# memory size.
-# export HADOOP_HEAPSIZE_MIN=
-
-# Enable extra debugging of Hadoop's JAAS binding, used to set up
-# Kerberos security.
-# export HADOOP_JAAS_DEBUG=true
-
-# Extra Java runtime options for all Hadoop commands. We don't support
-# IPv6 yet/still, so by default the preference is set to IPv4.
-# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-# For Kerberos debugging, an extended option set logs more information
-# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
-
-# Some parts of the shell code may do special things dependent upon
-# the operating system. We have to set this here. See the next
-# section as to why....
-export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
-
-# Extra Java runtime options for some Hadoop commands
-# and clients (i.e., hdfs dfs -blah). These get appended to HADOOP_OPTS for
-# such commands. In most cases, this should be left empty and
-# let users supply it on the command line.
-# export HADOOP_CLIENT_OPTS=""
-
-#
-# A note about classpaths.
-#
-# By default, Apache Hadoop overrides Java's CLASSPATH
-# environment variable. It is configured such
-# that it starts out blank with new entries added after passing
-# a series of checks (file/dir exists, not already listed aka
-# de-duplication). During de-duplication, wildcards and/or
-# directories are *NOT* expanded to keep it simple. Therefore,
-# if the computed classpath has two specific mentions of
-# awesome-methods-1.0.jar, only the first one added will be seen.
-# If two directories are in the classpath that both contain
-# awesome-methods-1.0.jar, then Java will pick up both versions.
-
-# An additional, custom CLASSPATH. Site-wide configs should be
-# handled via the shellprofile functionality, utilizing the
-# hadoop_add_classpath function for greater control and much
-# harder for apps/end-users to accidentally override.
-# Similarly, end users should utilize ${HOME}/.hadooprc .
-# This variable should ideally only be used as a short-cut,
-# interactive way for temporary additions on the command line.
-# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine"
-
-# Should HADOOP_CLASSPATH be first in the official CLASSPATH?
-# export HADOOP_USER_CLASSPATH_FIRST="yes"
-
-# If HADOOP_USE_CLIENT_CLASSLOADER is set, the classpath along
-# with the main jar are handled by a separate isolated
-# client classloader when 'hadoop jar', 'yarn jar', or 'mapred job'
-# is utilized. If it is set, HADOOP_CLASSPATH and
-# HADOOP_USER_CLASSPATH_FIRST are ignored.
-# export HADOOP_USE_CLIENT_CLASSLOADER=true
-
-# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
-# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER
-# is enabled. Names ending in '.' (period) are treated as package names, and
-# names starting with a '-' are treated as negative matches. For example,
-# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
-
-# Enable optional, bundled Hadoop features
-# This is a comma delimited list. It may NOT be overridden via .hadooprc
-# Entries may be added/removed as needed.
-# export HADOOP_OPTIONAL_TOOLS="@@@HADOOP_OPTIONAL_TOOLS@@@"
-
-###
-# Options for remote shell connectivity
-###
-
-# There are some optional components of hadoop that allow for
-# command and control of remote hosts. For example,
-# start-dfs.sh will attempt to bring up all NNs, DNS, etc.
-
-# Options to pass to SSH when one of the "log into a host and
-# start/stop daemons" scripts is executed
-# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"
-
-# The built-in ssh handler will limit itself to 10 simultaneous connections.
-# For pdsh users, this sets the fanout size ( -f )
-# Change this to increase/decrease as necessary.
-# export HADOOP_SSH_PARALLEL=10
-
-# Filename which contains all of the hosts for any remote execution
-# helper scripts such as workers.sh, start-dfs.sh, etc.
-# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
-
-###
-# Options for all daemons
-###
-#
-
-#
-# Many options may also be specified as Java properties. It is
-# very common, and in many cases, desirable, to hard-set these
-# in daemon _OPTS variables. Where applicable, the appropriate
-# Java property is also identified. Note that many are re-used
-# or set differently in certain contexts (e.g., secure vs
-# non-secure)
-#
-
-# Where (primarily) daemon log files are stored.
-# ${HADOOP_HOME}/logs by default.
-# Java property: hadoop.log.dir
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-
-# A string representing this instance of hadoop. $USER by default.
-# This is used in writing log and pid files, so keep that in mind!
-# Java property: hadoop.id.str
-# export HADOOP_IDENT_STRING=$USER
-
-# How many seconds to pause after stopping a daemon
-# export HADOOP_STOP_TIMEOUT=5
-
-# Where pid files are stored. /tmp by default.
-# export HADOOP_PID_DIR=/tmp
-
-# Default log4j setting for interactive commands
-# Java property: hadoop.root.logger
-# export HADOOP_ROOT_LOGGER=INFO,console
-
-# Default log4j setting for daemons spawned explicitly by
-# --daemon option of hadoop, hdfs, mapred and yarn command.
-# Java property: hadoop.root.logger
-# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
-
-# Default log level and output location for security-related messages.
-# You will almost certainly want to change this on a per-daemon basis via
-# the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the
-# defaults for the NN and 2NN override this.)
-# Java property: hadoop.security.logger
-# export HADOOP_SECURITY_LOGGER=INFO,NullAppender
-
-# Default process priority level
-# Note that sub-processes will also run at this level!
-# export HADOOP_NICENESS=0
-
-# Default name for the service level authorization file
-# Java property: hadoop.policy.file
-# export HADOOP_POLICYFILE="hadoop-policy.xml"
-
-#
-# NOTE: this is not used by default! <-----
-# You can define variables right here and then re-use them later on.
-# For example, it is common to use the same garbage collection settings
-# for all the daemons. So one could define:
-#
-# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
-#
-# .. and then use it as in option (b) under the NameNode section below.
-
-###
-# Secure/privileged execution
-###
-
-#
-# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons
-# on privileged ports. This functionality can be replaced by providing
-# custom functions. See hadoop-functions.sh for more information.
-#
-
-# The jsvc implementation to use. Jsvc is required to run secure datanodes
-# that bind to privileged ports to provide authentication of data transfer
-# protocol. Jsvc is not required if SASL is configured for authentication of
-# data transfer protocol using non-privileged ports.
-# export JSVC_HOME=/usr/bin
-
-#
-# This directory contains pids for secure and privileged processes.
-#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
-
-#
-# This directory contains the logs for secure and privileged processes.
-# Java property: hadoop.log.dir
-# export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
-
-#
-# When running a secure daemon, the default value of HADOOP_IDENT_STRING
-# ends up being a bit bogus. Therefore, by default, the code will
-# replace HADOOP_IDENT_STRING with HADOOP_xx_SECURE_USER. If one wants
-# to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
-# export HADOOP_SECURE_IDENT_PRESERVE="true"
-
-###
-# NameNode specific parameters
-###
-
-# Default log level and output location for file system related change
-# messages. For non-namenode daemons, the Java property must be set in
-# the appropriate _OPTS if one wants something other than INFO,NullAppender
-# Java property: hdfs.audit.logger
-# export HDFS_AUDIT_LOGGER=INFO,NullAppender
-
-# Specify the JVM options to be used when starting the NameNode.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# a) Set JMX options
-# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
-#
-# b) Set garbage collection logs
-# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
-#
-# c) ... or set them directly
-# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
-
-# this is the default:
-# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
-
-###
-# SecondaryNameNode specific parameters
-###
-# Specify the JVM options to be used when starting the SecondaryNameNode.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# This is the default:
-# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
-
-###
-# DataNode specific parameters
-###
-# Specify the JVM options to be used when starting the DataNode.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# This is the default:
-# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
-
-# On secure datanodes, user to run the datanode as after dropping privileges.
-# This **MUST** be uncommented to enable secure HDFS if using privileged ports
-# to provide authentication of data transfer protocol. This **MUST NOT** be
-# defined if SASL is configured for authentication of data transfer protocol
-# using non-privileged ports.
-# This will replace the hadoop.id.str Java property in secure mode.
-# export HDFS_DATANODE_SECURE_USER=hdfs
-
-# Supplemental options for secure datanodes
-# By default, Hadoop uses jsvc which needs to know to launch a
-# server jvm.
-# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
-
-###
-# NFS3 Gateway specific parameters
-###
-# Specify the JVM options to be used when starting the NFS3 Gateway.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_NFS3_OPTS=""
-
-# Specify the JVM options to be used when starting the Hadoop portmapper.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_PORTMAP_OPTS="-Xmx512m"
-
-# Supplemental options for privileged gateways
-# By default, Hadoop uses jsvc which needs to know to launch a
-# server jvm.
-# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"
-
-# On privileged gateways, user to run the gateway as after dropping privileges
-# This will replace the hadoop.id.str Java property in secure mode.
-# export HDFS_NFS3_SECURE_USER=nfsserver
-
-###
-# ZKFailoverController specific parameters
-###
-# Specify the JVM options to be used when starting the ZKFailoverController.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_ZKFC_OPTS=""
-
-###
-# QuorumJournalNode specific parameters
-###
-# Specify the JVM options to be used when starting the QuorumJournalNode.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_JOURNALNODE_OPTS=""
-
-###
-# HDFS Balancer specific parameters
-###
-# Specify the JVM options to be used when starting the HDFS Balancer.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_BALANCER_OPTS=""
-
-###
-# HDFS Mover specific parameters
-###
-# Specify the JVM options to be used when starting the HDFS Mover.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_MOVER_OPTS=""
-
-###
-# Router-based HDFS Federation specific parameters
-# Specify the JVM options to be used when starting the RBF Routers.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_DFSROUTER_OPTS=""
-
-###
-# Ozone Manager specific parameters
-###
-# Specify the JVM options to be used when starting the Ozone Manager.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_OM_OPTS=""
-
-###
-# HDFS StorageContainerManager specific parameters
-###
-# Specify the JVM options to be used when starting the HDFS Storage Container Manager.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HDFS_STORAGECONTAINERMANAGER_OPTS=""
-
-###
-# Advanced Users Only!
-###
-
-#
-# When building Hadoop, one can add the class paths to the commands
-# via this special env var:
-# export HADOOP_ENABLE_BUILD_PATHS="true"
-
-#
-# To prevent accidents, shell commands can be (superficially) locked
-# to only allow certain users to execute certain subcommands.
-# It uses the format of (command)_(subcommand)_USER.
-#
-# For example, to limit who can execute the namenode command,
-# export HDFS_NAMENODE_USER=hdfs
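For instance (account names are examples only), with the HDFS_NAMENODE_USER lock just shown in place, a mismatched caller is rejected by hadoop_verify_user_perm with an error of the form defined in hadoop-functions.sh:

  # in hadoop-env.sh:
  #   export HDFS_NAMENODE_USER=hdfs
  # then, run from some other unprivileged account:
  #   $ hdfs namenode
  #   ERROR: namenode can only be executed by hdfs.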
-
-
-###
-# Registry DNS specific parameters
-###
-# For privileged registry DNS, user to run as after dropping privileges
-# This will replace the hadoop.id.str Java property in secure mode.
-# export HADOOP_REGISTRYDNS_SECURE_USER=yarn
-
-# Supplemental options for privileged registry DNS
-# By default, Hadoop uses jsvc which needs to know to launch a
-# server jvm.
-# export HADOOP_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server"
diff --git a/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties b/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties
deleted file mode 100644
index f67bf8e4c5b1f..0000000000000
--- a/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties
+++ /dev/null
@@ -1,99 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink].[instance].[options]
-# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
-
-*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
-# default sampling period, in seconds
-*.period=10
-
-# The namenode-metrics.out will contain metrics from all context
-#namenode.sink.file.filename=namenode-metrics.out
-# Specifying a special sampling period for namenode:
-#namenode.sink.*.period=8
-
-#datanode.sink.file.filename=datanode-metrics.out
-
-#resourcemanager.sink.file.filename=resourcemanager-metrics.out
-
-#nodemanager.sink.file.filename=nodemanager-metrics.out
-
-#mrappmaster.sink.file.filename=mrappmaster-metrics.out
-
-#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
-
-# the following example split metrics of different
-# context to different sinks (in this case files)
-#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
-#nodemanager.sink.file_jvm.context=jvm
-#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
-#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
-#nodemanager.sink.file_mapred.context=mapred
-#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
-
-#
-# Below are for sending metrics to Ganglia
-#
-# for Ganglia 3.0 support
-# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
-#
-# for Ganglia 3.1 support
-# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-
-# *.sink.ganglia.period=10
-
-# default for supportsparse is false
-# *.sink.ganglia.supportsparse=true
-
-#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Tag values to use for the ganglia prefix. If not defined no tags are used.
-# If '*' all tags are used. If specifying multiple tags separate them with
-# commas. Note that the last segment of the property name is the context name.
-#
-# A typical use of tags is separating the metrics by the HDFS rpc port
-# and HDFS service rpc port.
-# For example:
-# With following HDFS configuration:
-# dfs.namenode.rpc-address is set as namenodeAddress:9110
-# dfs.namenode.servicerpc-address is set as namenodeAddress:9111
-# If no tags are used, following metric would be gathered:
-# rpc.rpc.NumOpenConnections
-# If using "*.sink.ganglia.tagsForPrefix.rpc=port",
-# following metrics would be gathered:
-# rpc.rpc.port=9110.NumOpenConnections
-# rpc.rpc.port=9111.NumOpenConnections
-#
-#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
-#*.sink.ganglia.tagsForPrefix.dfs=HAState,IsOutOfSync
-#*.sink.ganglia.tagsForPrefix.rpc=port
-#*.sink.ganglia.tagsForPrefix.rpcdetailed=port
-#*.sink.ganglia.tagsForPrefix.metricssystem=*
-#*.sink.ganglia.tagsForPrefix.ugi=*
-#*.sink.ganglia.tagsForPrefix.mapred=
-
-#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
diff --git a/hadoop-hdds/common/src/main/conf/hadoop-policy.xml b/hadoop-hdds/common/src/main/conf/hadoop-policy.xml
deleted file mode 100644
index 85e4975a78628..0000000000000
--- a/hadoop-hdds/common/src/main/conf/hadoop-policy.xml
+++ /dev/null
@@ -1,275 +0,0 @@
-
-
-
-
-
-
-
-
- security.client.protocol.acl
- *
- ACL for ClientProtocol, which is used by user code
- via the DistributedFileSystem.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.client.datanode.protocol.acl
- *
- ACL for ClientDatanodeProtocol, the client-to-datanode protocol
- for block recovery.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.datanode.protocol.acl
- *
- ACL for DatanodeProtocol, which is used by datanodes to
- communicate with the namenode.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.inter.datanode.protocol.acl
- *
- ACL for InterDatanodeProtocol, the inter-datanode protocol
- for updating generation timestamp.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.namenode.protocol.acl
- *
- ACL for NamenodeProtocol, the protocol used by the secondary
- namenode to communicate with the namenode.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.admin.operations.protocol.acl
- *
- ACL for AdminOperationsProtocol. Used for admin commands.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.refresh.user.mappings.protocol.acl
- *
- ACL for RefreshUserMappingsProtocol. Used to refresh
- users mappings. The ACL is a comma-separated list of user and
- group names. The user and group list is separated by a blank. For
- e.g. "alice,bob users,wheel". A special value of "*" means all
- users are allowed.
-
-
-
- security.refresh.policy.protocol.acl
- *
- ACL for RefreshAuthorizationPolicyProtocol, used by the
- dfsadmin and mradmin commands to refresh the security policy in-effect.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.ha.service.protocol.acl
- *
- ACL for HAService protocol used by HAAdmin to manage the
- active and stand-by states of namenode.
-
-
-
- security.router.admin.protocol.acl
- *
- ACL for RouterAdmin Protocol. The ACL is a comma-separated
- list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
-
- security.zkfc.protocol.acl
- *
- ACL for access to the ZK Failover Controller
-
-
-
-
- security.qjournal.service.protocol.acl
- *
- ACL for QJournalProtocol, used by the NN to communicate with
- JNs when using the QuorumJournalManager for edit logs.
-
-
-
- security.interqjournal.service.protocol.acl
- *
- ACL for InterQJournalProtocol, used by the JN to
- communicate with other JN
-
-
-
-
- security.mrhs.client.protocol.acl
- *
- ACL for HSClientProtocol, used by job clients to
- communicate with the MR History Server to query job status etc.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
-
-
- security.resourcetracker.protocol.acl
- *
- ACL for ResourceTrackerProtocol, used by the
- ResourceManager and NodeManager to communicate with each other.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.resourcemanager-administration.protocol.acl
- *
- ACL for ResourceManagerAdministrationProtocol, for admin commands.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.applicationclient.protocol.acl
- *
- ACL for ApplicationClientProtocol, used by the ResourceManager
- and applications submission clients to communicate with each other.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.applicationmaster.protocol.acl
- *
- ACL for ApplicationMasterProtocol, used by the ResourceManager
- and ApplicationMasters to communicate with each other.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.containermanagement.protocol.acl
- *
- ACL for ContainerManagementProtocol protocol, used by the NodeManager
- and ApplicationMasters to communicate with each other.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.resourcelocalizer.protocol.acl
- *
- ACL for ResourceLocalizer protocol, used by the NodeManager
- and ResourceLocalizer to communicate with each other.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.job.task.protocol.acl
- *
- ACL for TaskUmbilicalProtocol, used by the map and reduce
- tasks to communicate with the parent tasktracker.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.job.client.protocol.acl
- *
- ACL for MRClientProtocol, used by job clients to
- communicate with the MR ApplicationMaster to query job status etc.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.applicationhistory.protocol.acl
- *
- ACL for ApplicationHistoryProtocol, used by the timeline
- server and the generic history service client to communicate with each other.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.collector-nodemanager.protocol.acl
- *
- ACL for CollectorNodemanagerProtocol, used by nodemanager
- if timeline service v2 is enabled, for the timeline collector and nodemanager
- to communicate with each other.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.applicationmaster-nodemanager.applicationmaster.protocol.acl
- *
- ACL for ApplicationMasterProtocol, used by the Nodemanager
- and ApplicationMasters to communicate.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
-
- security.distributedscheduling.protocol.acl
- *
- ACL for DistributedSchedulingAMProtocol, used by the Nodemanager
- and Resourcemanager to communicate.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.
-
-
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
deleted file mode 100644
index 99972ae900389..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds;
-
-import org.apache.hadoop.hdds.utils.db.DBProfile;
-
-/**
- * This class contains constants for configuration keys and default values
- * used in hdds.
- */
-public final class HddsConfigKeys {
-
- public static final String HDDS_HEARTBEAT_INTERVAL =
- "hdds.heartbeat.interval";
- public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT =
- "30s";
- public static final String HDDS_NODE_REPORT_INTERVAL =
- "hdds.node.report.interval";
- public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT =
- "60s";
- public static final String HDDS_CONTAINER_REPORT_INTERVAL =
- "hdds.container.report.interval";
- public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT =
- "60s";
- public static final String HDDS_PIPELINE_REPORT_INTERVAL =
- "hdds.pipeline.report.interval";
- public static final String HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT =
- "60s";
- public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
- "hdds.command.status.report.interval";
- public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
- "60s";
- public static final String HDDS_CONTAINER_ACTION_MAX_LIMIT =
- "hdds.container.action.max.limit";
- public static final int HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT =
- 20;
- public static final String HDDS_PIPELINE_ACTION_MAX_LIMIT =
- "hdds.pipeline.action.max.limit";
- public static final int HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT =
- 20;
- // Configuration to allow volume choosing policy.
- public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY =
- "hdds.datanode.volume.choosing.policy";
- // DB profile used by RocksDB instances.
- public static final String HDDS_DB_PROFILE = "hdds.db.profile";
- public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.DISK;
- // Once a container usage crosses this threshold, it is eligible for
- // closing.
- public static final String HDDS_CONTAINER_CLOSE_THRESHOLD =
- "hdds.container.close.threshold";
- public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
- public static final String HDDS_SCM_SAFEMODE_ENABLED =
- "hdds.scm.safemode.enabled";
-
- public static final boolean HDDS_SCM_SAFEMODE_ENABLED_DEFAULT = true;
- public static final String HDDS_SCM_SAFEMODE_MIN_DATANODE =
- "hdds.scm.safemode.min.datanode";
- public static final int HDDS_SCM_SAFEMODE_MIN_DATANODE_DEFAULT = 1;
-
- public static final String
- HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT =
- "hdds.scm.wait.time.after.safemode.exit";
-
- public static final String
- HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT = "5m";
-
- public static final String HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK =
- "hdds.scm.safemode.pipeline-availability.check";
- public static final boolean
- HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT = false;
-
- // % of containers which should have at least one reported replica
- // before SCM comes out of safe mode.
- public static final String HDDS_SCM_SAFEMODE_THRESHOLD_PCT =
- "hdds.scm.safemode.threshold.pct";
- public static final double HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT = 0.99;
-
-
- // percentage of healthy pipelines, where all 3 datanodes are reported in the
- // pipeline.
- public static final String HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT =
- "hdds.scm.safemode.healthy.pipelie.pct";
- public static final double
- HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT_DEFAULT = 0.10;
-
- public static final String HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT =
- "hdds.scm.safemode.atleast.one.node.reported.pipeline.pct";
- public static final double
- HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT_DEFAULT = 0.90;
-
- public static final String HDDS_LOCK_MAX_CONCURRENCY =
- "hdds.lock.max.concurrency";
- public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100;
- // This configuration setting is used as a fallback location by all
- // Ozone/HDDS services for their metadata. It is useful as a single
- // config point for test/PoC clusters.
- //
- // In any real cluster where performance matters, the SCM, OM and DN
- // metadata locations must be configured explicitly.
- public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
-
- public static final String HDDS_PROMETHEUS_ENABLED =
- "hdds.prometheus.endpoint.enabled";
-
- public static final String HDDS_PROFILER_ENABLED =
- "hdds.profiler.endpoint.enabled";
-
- public static final String HDDS_KEY_LEN = "hdds.key.len";
- public static final int HDDS_DEFAULT_KEY_LEN = 2048;
- public static final String HDDS_KEY_ALGORITHM = "hdds.key.algo";
- public static final String HDDS_DEFAULT_KEY_ALGORITHM = "RSA";
- public static final String HDDS_SECURITY_PROVIDER = "hdds.security.provider";
- public static final String HDDS_DEFAULT_SECURITY_PROVIDER = "BC";
- public static final String HDDS_KEY_DIR_NAME = "hdds.key.dir.name";
- public static final String HDDS_KEY_DIR_NAME_DEFAULT = "keys";
- // TODO : Talk to StorageIO classes and see if they can return a secure
- // storage location for each node.
- public static final String HDDS_METADATA_DIR_NAME = "hdds.metadata.dir";
- public static final String HDDS_PRIVATE_KEY_FILE_NAME =
- "hdds.priv.key.file.name";
- public static final String HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT = "private.pem";
- public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file"
- + ".name";
- public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem";
-
- public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME =
- "hdds.block.token.expiry.time";
- public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT = "1d";
- /**
- * Maximum duration of certificates issued by SCM including Self-Signed Roots.
- * The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS
- * Default value is 5 years and written as P1865D.
- */
- public static final String HDDS_X509_MAX_DURATION = "hdds.x509.max.duration";
- // Limit Certificate duration to a max value of 5 years.
- public static final String HDDS_X509_MAX_DURATION_DEFAULT = "P1865D";
- public static final String HDDS_X509_SIGNATURE_ALGO =
- "hdds.x509.signature.algorithm";
- public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = "SHA256withRSA";
- public static final String HDDS_BLOCK_TOKEN_ENABLED =
- "hdds.block.token.enabled";
- public static final boolean HDDS_BLOCK_TOKEN_ENABLED_DEFAULT = false;
-
- public static final String HDDS_X509_DIR_NAME = "hdds.x509.dir.name";
- public static final String HDDS_X509_DIR_NAME_DEFAULT = "certs";
- public static final String HDDS_X509_FILE_NAME = "hdds.x509.file.name";
- public static final String HDDS_X509_FILE_NAME_DEFAULT = "certificate.crt";
-
- /**
- * Default duration of certificates issued by SCM CA.
- * The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS
- * Default value is 1 year and written as P365D.
- */
- public static final String HDDS_X509_DEFAULT_DURATION = "hdds.x509.default" +
- ".duration";
- // Default Certificate duration to one year.
- public static final String HDDS_X509_DEFAULT_DURATION_DEFAULT = "P365D";
-
- /**
- * Do not instantiate.
- */
- private HddsConfigKeys() {
- }
-
- // Enable TLS for GRPC clients/server in ozone.
- public static final String HDDS_GRPC_TLS_ENABLED = "hdds.grpc.tls.enabled";
- public static final boolean HDDS_GRPC_TLS_ENABLED_DEFAULT = false;
-
- // Choose the TLS provider; the default is OPENSSL for better performance.
- public static final String HDDS_GRPC_TLS_PROVIDER = "hdds.grpc.tls.provider";
- public static final String HDDS_GRPC_TLS_PROVIDER_DEFAULT = "OPENSSL";
-
- // Test-only setting for using a test signed certificate; the certificate
- // authority is assumed to be localhost.
- public static final String HDDS_GRPC_TLS_TEST_CERT = "hdds.grpc.tls" +
- ".test.cert";
- public static final boolean HDDS_GRPC_TLS_TEST_CERT_DEFAULT = false;
-
- // Comma-separated ACLs (users, groups) allowed to access the datanode
- // container protocol.
- // When hadoop.security.authorization is true, this needs to be set in
- // hadoop-policy.xml; "*" allows all users/groups to access.
- public static final String
- HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL =
- "hdds.security.client.datanode.container.protocol.acl";
-
- // Comma-separated ACLs (users, groups) allowed to access the SCM
- // container protocol.
- // When hadoop.security.authorization is true, this needs to be set in
- // hadoop-policy.xml; "*" allows all users/groups to access.
- public static final String HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL =
- "hdds.security.client.scm.container.protocol.acl";
-
- // Comma-separated ACLs (users, groups) allowed to access the SCM
- // block protocol.
- // When hadoop.security.authorization is true, this needs to be set in
- // hadoop-policy.xml; "*" allows all users/groups to access.
- public static final String HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL =
- "hdds.security.client.scm.block.protocol.acl";
-
- // Comma-separated ACLs (users, groups) allowed to access the SCM
- // certificate protocol.
- // When hadoop.security.authorization is true, this needs to be set in
- // hadoop-policy.xml; "*" allows all users/groups to access.
- public static final String HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL =
- "hdds.security.client.scm.certificate.protocol.acl";
-
- // Determines if the Container Chunk Manager will write user data to disk
- // Set to false only for specific performance tests
- public static final String HDDS_CONTAINER_PERSISTDATA =
- "hdds.container.chunk.persistdata";
- public static final boolean HDDS_CONTAINER_PERSISTDATA_DEFAULT = true;
-
- public static final String HDDS_CONTAINER_SCRUB_ENABLED =
- "hdds.container.scrub.enabled";
- public static final boolean HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT = false;
-
- public static final String HDDS_DATANODE_HTTP_ENABLED_KEY =
- "hdds.datanode.http.enabled";
- public static final String HDDS_DATANODE_HTTP_BIND_HOST_KEY =
- "hdds.datanode.http-bind-host";
- public static final String HDDS_DATANODE_HTTPS_BIND_HOST_KEY =
- "hdds.datanode.https-bind-host";
- public static final String HDDS_DATANODE_HTTP_ADDRESS_KEY =
- "hdds.datanode.http-address";
- public static final String HDDS_DATANODE_HTTPS_ADDRESS_KEY =
- "hdds.datanode.https-address";
-
- public static final String HDDS_DATANODE_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
- public static final int HDDS_DATANODE_HTTP_BIND_PORT_DEFAULT = 9882;
- public static final int HDDS_DATANODE_HTTPS_BIND_PORT_DEFAULT = 9883;
- public static final String
- HDDS_DATANODE_HTTP_KERBEROS_PRINCIPAL_KEY =
- "hdds.datanode.http.kerberos.principal";
- public static final String
- HDDS_DATANODE_HTTP_KERBEROS_KEYTAB_FILE_KEY =
- "hdds.datanode.http.kerberos.keytab";
-}
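As a quick illustration of how these keys are consumed -- a minimal sketch, not part of this patch, assuming HddsConfigKeys is still on the classpath -- duration-style defaults such as "30s" are read with Configuration#getTimeDuration and boolean keys with the matching typed getter:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;

public class HddsConfigKeysExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "hdds.heartbeat.interval" defaults to "30s"; read it as milliseconds.
    long heartbeatMs = conf.getTimeDuration(
        HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 30_000, TimeUnit.MILLISECONDS);
    // Boolean keys pair a *_DEFAULT constant with the key constant.
    boolean safeModeEnabled = conf.getBoolean(
        HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED,
        HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT);
    System.out.println(heartbeatMs + " " + safeModeEnabled);
  }
}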
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
deleted file mode 100644
index b244b8cf75d74..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds;
-
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * HDDS Id generator.
- */
-public final class HddsIdFactory {
- private HddsIdFactory() {
- }
-
- private static final AtomicLong LONG_COUNTER = new AtomicLong(
- System.currentTimeMillis());
-
- /**
- * Returns an incrementing long. This class does not persist the counter's
- * initial value, so IDs generated after a restart may collide with
- * previously generated IDs.
- *
- * @return long
- */
- public static long getLongId() {
- return LONG_COUNTER.incrementAndGet();
- }
-
- /**
- * Returns a uuid.
- *
- * @return UUID.
- */
- public static UUID getUUId() {
- return UUID.randomUUID();
- }
-
-}
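A minimal usage sketch (not part of this patch): the long IDs are only monotonic within a single process, since the counter is seeded from the wall clock and never persisted.

import java.util.UUID;

import org.apache.hadoop.hdds.HddsIdFactory;

public class HddsIdFactoryExample {
  public static void main(String[] args) {
    long first = HddsIdFactory.getLongId();
    long second = HddsIdFactory.getLongId();   // first + 1 within this JVM
    UUID uuid = HddsIdFactory.getUUId();       // standard random UUID
    System.out.println(first + " " + second + " " + uuid);
  }
}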
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
deleted file mode 100644
index d7b20fdd9172c..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ /dev/null
@@ -1,505 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.util.Calendar;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Optional;
-import java.util.TimeZone;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.source.JvmMetrics;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.net.NetUtils;
-
-import com.google.common.net.HostAndPort;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
-
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * HDDS specific stateless utility functions.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public final class HddsUtils {
-
-
- private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);
-
- /**
- * The service ID of the solitary Ozone SCM service.
- */
- public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService";
- public static final String OZONE_SCM_SERVICE_INSTANCE_ID =
- "OzoneScmServiceInstance";
- private static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC");
-
-
- private static final int NO_PORT = -1;
-
- private HddsUtils() {
- }
-
- /**
- * Retrieve the socket address that should be used by clients to connect
- * to the SCM.
- *
- * @param conf
- * @return Target InetSocketAddress for the SCM client endpoint.
- */
- public static InetSocketAddress getScmAddressForClients(Configuration conf) {
- Optional<String> host = getHostNameFromConfigKeys(conf,
- ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
- if (!host.isPresent()) {
- // Fallback to Ozone SCM names.
- Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
- if (scmAddresses.size() > 1) {
- throw new IllegalArgumentException(
- ScmConfigKeys.OZONE_SCM_NAMES +
- " must contain a single hostname. Multiple SCM hosts are " +
- "currently unsupported");
- }
- host = Optional.of(scmAddresses.iterator().next().getHostName());
- }
-
- if (!host.isPresent()) {
- throw new IllegalArgumentException(
- ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See"
- + " https://wiki.apache.org/hadoop/Ozone#Configuration for "
- + "details"
- + " on configuring Ozone.");
- }
-
- final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
- ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
- return NetUtils.createSocketAddr(host.get() + ":" + port
- .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
- }
-
- /**
- * Retrieve the socket address that should be used by clients to connect
- * to the SCM for block service. If
- * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined
- * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither
- * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used.
- *
- * @param conf
- * @return Target InetSocketAddress for the SCM block client endpoint.
- * @throws IllegalArgumentException if configuration is not defined.
- */
- public static InetSocketAddress getScmAddressForBlockClients(
- Configuration conf) {
- Optional<String> host = getHostNameFromConfigKeys(conf,
- ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
- if (!host.isPresent()) {
- host = getHostNameFromConfigKeys(conf,
- ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
- }
-
- if (!host.isPresent()) {
- // Fallback to Ozone SCM names.
- Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
- if (scmAddresses.size() > 1) {
- throw new IllegalArgumentException(
- ScmConfigKeys.OZONE_SCM_NAMES +
- " must contain a single hostname. Multiple SCM hosts are " +
- "currently unsupported");
- }
- host = Optional.of(scmAddresses.iterator().next().getHostName());
- }
-
- if (!host.isPresent()) {
- throw new IllegalArgumentException(
- ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
- + " must be defined. See"
- + " https://wiki.apache.org/hadoop/Ozone#Configuration"
- + " for details on configuring Ozone.");
- }
-
- final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
- ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
- return NetUtils.createSocketAddr(host.get() + ":" + port
- .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
- }
-
- /**
- * Create a scm security client.
- * @param conf - Ozone configuration.
- *
- * @return {@link SCMSecurityProtocol}
- * @throws IOException
- */
- public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient(
- OzoneConfiguration conf) throws IOException {
- RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
- ProtobufRpcEngine.class);
- long scmVersion =
- RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
- InetSocketAddress address =
- getScmAddressForSecurityProtocol(conf);
- RetryPolicy retryPolicy =
- RetryPolicies.retryForeverWithFixedSleep(
- 1000, TimeUnit.MILLISECONDS);
- SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
- new SCMSecurityProtocolClientSideTranslatorPB(
- RPC.getProtocolProxy(SCMSecurityProtocolPB.class, scmVersion,
- address, UserGroupInformation.getCurrentUser(),
- conf, NetUtils.getDefaultSocketFactory(conf),
- Client.getRpcTimeout(conf), retryPolicy).getProxy());
- return scmSecurityClient;
- }
-
- /**
- * Retrieve the hostname, trying the supplied config keys in order.
- * Each config value may be absent, or present in host:port format
- * (the :port part is optional).
- *
- * @param conf - Conf
- * @param keys a list of configuration key names.
- *
- * @return first hostname component found from the given keys, or absent.
- * @throws IllegalArgumentException if any values are not in the 'host'
- * or host:port format.
- */
- public static Optional<String> getHostNameFromConfigKeys(Configuration conf,
- String... keys) {
- for (final String key : keys) {
- final String value = conf.getTrimmed(key);
- final Optional<String> hostName = getHostName(value);
- if (hostName.isPresent()) {
- return hostName;
- }
- }
- return Optional.empty();
- }
-
- /**
- * Gets the hostname, or indicates that it is absent.
- * @param value host or host:port
- * @return hostname
- */
- public static Optional<String> getHostName(String value) {
- if ((value == null) || value.isEmpty()) {
- return Optional.empty();
- }
- String hostname = value.replaceAll("\\:[0-9]+$", "");
- if (hostname.length() == 0) {
- return Optional.empty();
- } else {
- return Optional.of(hostname);
- }
- }
-
- /**
- * Gets the port if there is one, or an empty Optional otherwise.
- * @param value String in host:port format.
- * @return Port
- */
- public static Optional<Integer> getHostPort(String value) {
- if ((value == null) || value.isEmpty()) {
- return Optional.empty();
- }
- int port = HostAndPort.fromString(value).getPortOrDefault(NO_PORT);
- if (port == NO_PORT) {
- return Optional.empty();
- } else {
- return Optional.of(port);
- }
- }
-
- /**
- * Retrieve the port number, trying the supplied config keys in order.
- * Each config value may be absent, or present in host:port format
- * (the :port part is optional).
- *
- * @param conf Conf
- * @param keys a list of configuration key names.
- *
- * @return first port number component found from the given keys, or absent.
- * @throws IllegalArgumentException if any values are not in the 'host'
- * or host:port format.
- */
- public static Optional<Integer> getPortNumberFromConfigKeys(
- Configuration conf, String... keys) {
- for (final String key : keys) {
- final String value = conf.getTrimmed(key);
- final Optional<Integer> hostPort = getHostPort(value);
- if (hostPort.isPresent()) {
- return hostPort;
- }
- }
- return Optional.empty();
- }
-
- /**
- * Retrieve the socket addresses of all storage container managers.
- *
- * @param conf
- * @return A collection of SCM addresses
- * @throws IllegalArgumentException If the configuration is invalid
- */
- public static Collection<InetSocketAddress> getSCMAddresses(
- Configuration conf) throws IllegalArgumentException {
- Collection<InetSocketAddress> addresses =
- new HashSet<>();
- Collection<String> names =
- conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
- if (names == null || names.isEmpty()) {
- throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES
- + " need to be a set of valid DNS names or IP addresses."
- + " Null or empty address list found.");
- }
-
- final Optional<Integer> defaultPort = Optional
- .of(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT);
- for (String address : names) {
- Optional<String> hostname = getHostName(address);
- if (!hostname.isPresent()) {
- throw new IllegalArgumentException("Invalid hostname for SCM: "
- + hostname);
- }
- Optional<Integer> port = getHostPort(address);
- InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(),
- port.orElse(defaultPort.get()));
- addresses.add(addr);
- }
- return addresses;
- }
-
- public static boolean isHddsEnabled(Configuration conf) {
- return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
- }
-
-
- /**
- * Returns the hostname for this datanode. If the hostname is not
- * explicitly configured in the given config, then it is determined
- * via the DNS class.
- *
- * @param conf Configuration
- *
- * @return the hostname (NB: may not be a FQDN)
- * @throws UnknownHostException if the dfs.datanode.dns.interface
- * option is used and the hostname can not be determined
- */
- public static String getHostName(Configuration conf)
- throws UnknownHostException {
- String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
- if (name == null) {
- String dnsInterface = conf.get(
- CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY);
- String nameServer = conf.get(
- CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
- boolean fallbackToHosts = false;
-
- if (dnsInterface == null) {
- // Try the legacy configuration keys.
- dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
- nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
- } else {
- // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
- // resolution if DNS fails. We will not use hosts file resolution
- // by default to avoid breaking existing clusters.
- fallbackToHosts = true;
- }
-
- name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
- }
- return name;
- }
-
- /**
- * Checks if the container command is read only or not.
- * @param proto ContainerCommand Request proto
- * @return true if it is read-only, false otherwise.
- */
- public static boolean isReadOnly(
- ContainerProtos.ContainerCommandRequestProto proto) {
- switch (proto.getCmdType()) {
- case ReadContainer:
- case ReadChunk:
- case ListBlock:
- case GetBlock:
- case GetSmallFile:
- case ListContainer:
- case ListChunk:
- case GetCommittedBlockLength:
- return true;
- case CloseContainer:
- case WriteChunk:
- case UpdateContainer:
- case CompactChunk:
- case CreateContainer:
- case DeleteChunk:
- case DeleteContainer:
- case DeleteBlock:
- case PutBlock:
- case PutSmallFile:
- default:
- return false;
- }
- }
-
- /**
- * Register the provided MBean with additional JMX ObjectName properties.
- * If additional properties are not supported then fallback to registering
- * without properties.
- *
- * @param serviceName - see {@link MBeans#register}
- * @param mBeanName - see {@link MBeans#register}
- * @param jmxProperties - additional JMX ObjectName properties.
- * @param mBean - the MBean to register.
- * @return the name used to register the MBean.
- */
- public static ObjectName registerWithJmxProperties(
- String serviceName, String mBeanName, Map<String, String> jmxProperties,
- Object mBean) {
- try {
-
- // Check support for registering with additional properties.
- final Method registerMethod = MBeans.class.getMethod(
- "register", String.class, String.class,
- Map.class, Object.class);
-
- return (ObjectName) registerMethod.invoke(
- null, serviceName, mBeanName, jmxProperties, mBean);
-
- } catch (NoSuchMethodException | IllegalAccessException |
- InvocationTargetException e) {
-
- // Fallback
- if (LOG.isTraceEnabled()) {
- LOG.trace("Registering MBean {} without additional properties {}",
- mBeanName, jmxProperties);
- }
- return MBeans.register(serviceName, mBeanName, mBean);
- }
- }
-
- /**
- * Get the current UTC time in milliseconds.
- * @return the current UTC time in milliseconds.
- */
- public static long getUtcTime() {
- return Calendar.getInstance(UTC_ZONE).getTimeInMillis();
- }
-
- /**
- * Retrieve the socket address that should be used by clients to connect
- * to the SCM for
- * {@link org.apache.hadoop.hdds.protocol.SCMSecurityProtocol}. If
- * {@link ScmConfigKeys#OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY} is not defined
- * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither
- * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used.
- *
- * @param conf
- * @return Target InetSocketAddress for the SCM security service endpoint.
- * @throws IllegalArgumentException if configuration is not defined.
- */
- public static InetSocketAddress getScmAddressForSecurityProtocol(
- Configuration conf) {
- Optional<String> host = getHostNameFromConfigKeys(conf,
- ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY);
-
- if (!host.isPresent()) {
- host = getHostNameFromConfigKeys(conf,
- ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
- }
-
- if (!host.isPresent()) {
- // Fallback to Ozone SCM names.
- Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
- if (scmAddresses.size() > 1) {
- throw new IllegalArgumentException(
- ScmConfigKeys.OZONE_SCM_NAMES +
- " must contain a single hostname. Multiple SCM hosts are " +
- "currently unsupported");
- }
- host = Optional.of(scmAddresses.iterator().next().getHostName());
- }
-
- if (!host.isPresent()) {
- throw new IllegalArgumentException(
- ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY
- + " must be defined. See"
- + " https://wiki.apache.org/hadoop/Ozone#Configuration"
- + " for details on configuring Ozone.");
- }
-
- final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
- ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY);
-
- return NetUtils.createSocketAddr(host.get() + ":" + port
- .orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT));
- }
-
- /**
- * Initialize hadoop metrics system for Ozone servers.
- * @param configuration OzoneConfiguration to use.
- * @param serverName The logical name of the server components.
- * @return the initialized MetricsSystem instance.
- */
- public static MetricsSystem initializeMetrics(
- OzoneConfiguration configuration, String serverName) {
- MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serverName);
- JvmMetrics.create(serverName,
- configuration.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
- DefaultMetricsSystem.instance());
- return metricsSystem;
- }
-}
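A minimal sketch (not part of this patch) of the host/port helpers above; the hostname and port values are hypothetical, and ScmConfigKeys is the class already imported by HddsUtils:

import java.net.InetSocketAddress;
import java.util.Optional;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class HddsUtilsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical SCM endpoint; host and port are parsed independently.
    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "scm.example.com:9860");
    Optional<String> host = HddsUtils.getHostNameFromConfigKeys(
        conf, ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
    Optional<Integer> port = HddsUtils.getPortNumberFromConfigKeys(
        conf, ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
    // getScmAddressForClients applies the same lookup plus the fallbacks
    // described in its javadoc (ozone.scm.names, default client port).
    InetSocketAddress addr = HddsUtils.getScmAddressForClients(conf);
    System.out.println(host.orElse("?") + ":" + port.orElse(-1) + " -> " + addr);
  }
}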
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
deleted file mode 100644
index 372828b95cefc..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.fs.Path;
-
-import com.google.common.annotations.VisibleForTesting;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.RunLast;
-
-/**
- * This is a generic parent class for all the ozone related cli tools.
- */
- public class GenericCli implements Callable<Void>, GenericParentCommand {
-
- @Option(names = {"--verbose"},
- description = "More verbose output. Show the stack trace of the errors.")
- private boolean verbose;
-
- @Option(names = {"-D", "--set"})
- private Map<String, String> configurationOverrides = new HashMap<>();
-
- @Option(names = {"-conf"})
- private String configurationPath;
-
- private final CommandLine cmd;
-
- public GenericCli() {
- cmd = new CommandLine(this);
- }
-
- public void run(String[] argv) {
- try {
- execute(argv);
- } catch (ExecutionException ex) {
- printError(ex.getCause() == null ? ex : ex.getCause());
- System.exit(-1);
- }
- }
-
- @VisibleForTesting
- public void execute(String[] argv) {
- cmd.parseWithHandler(new RunLast(), argv);
- }
-
- protected void printError(Throwable error) {
- // The message could be null (for example, for an NPE). This is unexpected,
- // so print out the stack trace.
- if (verbose || error.getMessage() == null
- || error.getMessage().length() == 0) {
- error.printStackTrace(System.err);
- } else {
- System.err.println(error.getMessage().split("\n")[0]);
- }
- }
-
- @Override
- public Void call() throws Exception {
- throw new MissingSubcommandException(cmd);
- }
-
- @Override
- public OzoneConfiguration createOzoneConfiguration() {
- OzoneConfiguration ozoneConf = new OzoneConfiguration();
- if (configurationPath != null) {
- ozoneConf.addResource(new Path(configurationPath));
- }
- if (configurationOverrides != null) {
- for (Entry<String, String> entry : configurationOverrides.entrySet()) {
- ozoneConf.set(entry.getKey(), entry.getValue());
- }
- }
- return ozoneConf;
- }
-
- @VisibleForTesting
- public picocli.CommandLine getCmd() {
- return cmd;
- }
-
- @Override
- public boolean isVerbose() {
- return verbose;
- }
-}
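As a hedged sketch of how a tool plugs into this base class (not part of this patch; the command name and printed key are illustrative), a subclass annotates itself with picocli's @Command, overrides call(), and obtains its configuration through createOzoneConfiguration():

import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import picocli.CommandLine.Command;

@Command(name = "example-tool",                        // hypothetical name
    description = "Demonstrates the GenericCli plumbing.")
public class ExampleTool extends GenericCli {

  public static void main(String[] args) {
    new ExampleTool().run(args);                       // parses args, invokes call()
  }

  @Override
  public Void call() throws Exception {
    // Folds in the -conf file and any -D/--set overrides handled by the parent.
    OzoneConfiguration conf = createOzoneConfiguration();
    System.out.println("verbose=" + isVerbose()
        + ", metadata dirs=" + conf.get("ozone.metadata.dirs"));
    return null;
  }
}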
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
deleted file mode 100644
index 6abad3e32b8d0..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-/**
- * Interface to access the higher level parameters.
- */
-public interface GenericParentCommand {
-
- boolean isVerbose();
-
- OzoneConfiguration createOzoneConfiguration();
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
deleted file mode 100644
index 2f4ac4f170a83..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import org.apache.hadoop.hdds.utils.HddsVersionInfo;
-
-import picocli.CommandLine.IVersionProvider;
-
-/**
- * Version provider for the CLI interface.
- */
-public class HddsVersionProvider implements IVersionProvider {
- @Override
- public String[] getVersion() throws Exception {
- String[] result = new String[] {
- HddsVersionInfo.HDDS_VERSION_INFO.getBuildVersion()
- };
- return result;
- }
-}
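A short usage sketch (not part of this patch): picocli consumes the provider through the versionProvider attribute, so --version prints the HDDS build version. The command name below is hypothetical, and mixinStandardHelpOptions supplies the --version and --help flags.

import org.apache.hadoop.hdds.cli.HddsVersionProvider;

import picocli.CommandLine;
import picocli.CommandLine.Command;

@Command(name = "version-demo",
    versionProvider = HddsVersionProvider.class,
    mixinStandardHelpOptions = true)
public class VersionDemo implements Runnable {

  @Override
  public void run() {
    System.out.println("Run with --version to print the HDDS build version.");
  }

  public static void main(String[] args) {
    new CommandLine(new VersionDemo())
        .parseWithHandler(new CommandLine.RunLast(), args);
  }
}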
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
deleted file mode 100644
index 759476579e93a..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import picocli.CommandLine;
-
-/**
- * Exception to throw if subcommand is not selected but required.
- */
-public class MissingSubcommandException extends CommandLine.ParameterException {
-
- public MissingSubcommandException(CommandLine cmd) {
- super(cmd, "Incomplete command");
- }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
deleted file mode 100644
index 8dcc1d1a3c91a..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Generic helper classes to instantiate picocli-based CLI tools.
- */
-package org.apache.hadoop.hdds.cli;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
deleted file mode 100644
index 07aa536c4e513..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.Objects;
-
-/**
- * Container-scoped block ID (containerID + localID) returned by SCM during block allocation.
- */
-public class ContainerBlockID {
- private long containerID;
- private long localID;
-
- public ContainerBlockID(long containerID, long localID) {
- this.containerID = containerID;
- this.localID = localID;
- }
-
- public long getContainerID() {
- return containerID;
- }
-
- public long getLocalID() {
- return localID;
- }
-
- @Override
- public String toString() {
- return new StringBuffer()
- .append("conID: ")
- .append(containerID)
- .append(" locID: ")
- .append(localID).toString();
- }
-
- public HddsProtos.ContainerBlockID getProtobuf() {
- return HddsProtos.ContainerBlockID.newBuilder().
- setContainerID(containerID).setLocalID(localID).build();
- }
-
- public static ContainerBlockID getFromProtobuf(
- HddsProtos.ContainerBlockID containerBlockID) {
- return new ContainerBlockID(containerBlockID.getContainerID(),
- containerBlockID.getLocalID());
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- ContainerBlockID blockID = (ContainerBlockID) o;
- return containerID == blockID.containerID && localID == blockID.localID;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(containerID, localID);
- }
-}
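A minimal round-trip sketch (not part of this patch): the value object converts to and from its protobuf form, and the equals/hashCode overrides make it safe to use as a map key.

import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public class ContainerBlockIDExample {
  public static void main(String[] args) {
    ContainerBlockID id = new ContainerBlockID(42L, 7L);
    // Serialize to protobuf and back; both fields survive the round trip.
    HddsProtos.ContainerBlockID proto = id.getProtobuf();
    ContainerBlockID copy = ContainerBlockID.getFromProtobuf(proto);
    System.out.println(id.equals(copy) + " " + id);   // true conID: 42 locID: 7
  }
}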
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
deleted file mode 100644
index 59708a956b908..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-
-
-/**
- * Represents an Ozone quota that can be applied to
- * a storage volume.
- */
-public class OzoneQuota {
-
- public static final String OZONE_QUOTA_BYTES = "BYTES";
- public static final String OZONE_QUOTA_MB = "MB";
- public static final String OZONE_QUOTA_GB = "GB";
- public static final String OZONE_QUOTA_TB = "TB";
-
- private Units unit;
- private long size;
-
- /** Quota Units.*/
- public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB}
-
- /**
- * Returns size.
- *
- * @return long
- */
- public long getSize() {
- return size;
- }
-
- /**
- * Returns Units.
- *
- * @return Unit in MB, GB or TB
- */
- public Units getUnit() {
- return unit;
- }
-
- /**
- * Constructs a default Quota object.
- */
- public OzoneQuota() {
- this.size = 0;
- this.unit = Units.UNDEFINED;
- }
-
- /**
- * Constructor for Ozone Quota.
- *
- * @param size Long Size
- * @param unit MB, GB or TB
- */
- public OzoneQuota(long size, Units unit) {
- this.size = size;
- this.unit = unit;
- }
-
- /**
- * Formats a quota as a string.
- *
- * @param quota the quota to format
- * @return string representation of quota
- */
- public static String formatQuota(OzoneQuota quota) {
- return String.valueOf(quota.size) + quota.unit;
- }
-
- /**
- * Parses a user provided string and returns the
- * Quota Object.
- *
- * @param quotaString Quota String
- *
- * @return OzoneQuota object
- *
- * @throws IllegalArgumentException
- */
- public static OzoneQuota parseQuota(String quotaString)
- throws IllegalArgumentException {
-
- if ((quotaString == null) || (quotaString.isEmpty())) {
- throw new IllegalArgumentException(
- "Quota string cannot be null or empty.");
- }
-
- String uppercase = quotaString.toUpperCase().replaceAll("\\s+", "");
- String size = "";
- int nSize;
- Units currUnit = Units.MB;
- boolean found = false;
- if (uppercase.endsWith(OZONE_QUOTA_MB)) {
- size = uppercase
- .substring(0, uppercase.length() - OZONE_QUOTA_MB.length());
- currUnit = Units.MB;
- found = true;
- }
-
- if (uppercase.endsWith(OZONE_QUOTA_GB)) {
- size = uppercase
- .substring(0, uppercase.length() - OZONE_QUOTA_GB.length());
- currUnit = Units.GB;
- found = true;
- }
-
- if (uppercase.endsWith(OZONE_QUOTA_TB)) {
- size = uppercase
- .substring(0, uppercase.length() - OZONE_QUOTA_TB.length());
- currUnit = Units.TB;
- found = true;
- }
-
- if (uppercase.endsWith(OZONE_QUOTA_BYTES)) {
- size = uppercase
- .substring(0, uppercase.length() - OZONE_QUOTA_BYTES.length());
- currUnit = Units.BYTES;
- found = true;
- }
-
- if (!found) {
- throw new IllegalArgumentException(
- "Quota unit not recognized. Supported values are BYTES, MB, GB and " +
- "TB.");
- }
-
- nSize = Integer.parseInt(size);
- if (nSize < 0) {
- throw new IllegalArgumentException("Quota cannot be negative.");
- }
-
- return new OzoneQuota(nSize, currUnit);
- }
-
-
- /**
- * Returns size in Bytes or -1 if there is no Quota.
- */
- public long sizeInBytes() {
- switch (this.unit) {
- case BYTES:
- return this.getSize();
- case MB:
- return this.getSize() * OzoneConsts.MB;
- case GB:
- return this.getSize() * OzoneConsts.GB;
- case TB:
- return this.getSize() * OzoneConsts.TB;
- case UNDEFINED:
- default:
- return -1;
- }
- }
-
- /**
- * Returns OzoneQuota corresponding to size in bytes.
- *
- * @param sizeInBytes size in bytes to be converted
- *
- * @return OzoneQuota object
- */
- public static OzoneQuota getOzoneQuota(long sizeInBytes) {
- long size;
- Units unit;
- if (sizeInBytes % OzoneConsts.TB == 0) {
- size = sizeInBytes / OzoneConsts.TB;
- unit = Units.TB;
- } else if (sizeInBytes % OzoneConsts.GB == 0) {
- size = sizeInBytes / OzoneConsts.GB;
- unit = Units.GB;
- } else if (sizeInBytes % OzoneConsts.MB == 0) {
- size = sizeInBytes / OzoneConsts.MB;
- unit = Units.MB;
- } else {
- size = sizeInBytes;
- unit = Units.BYTES;
- }
- return new OzoneQuota((int)size, unit);
- }
-
- @Override
- public String toString() {
- return size + " " + unit;
- }
-}
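A usage sketch (not part of this patch) of the parsing and formatting helpers above; "10 GB" is an arbitrary example value.

import org.apache.hadoop.hdds.client.OzoneQuota;

public class OzoneQuotaExample {
  public static void main(String[] args) {
    OzoneQuota quota = OzoneQuota.parseQuota("10 GB");  // whitespace is stripped
    System.out.println(quota.sizeInBytes());            // 10 * OzoneConsts.GB
    System.out.println(OzoneQuota.formatQuota(quota));  // "10GB"
    // Converting raw bytes back picks the largest unit that divides evenly.
    System.out.println(OzoneQuota.getOzoneQuota(quota.sizeInBytes()));  // "10 GB"
  }
}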
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
deleted file mode 100644
index 044bd6f8334cd..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-/**
- * The replication factor to be used while writing key into ozone.
- */
-public enum ReplicationFactor {
- ONE(1),
- THREE(3);
-
- /**
- * Integer representation of replication.
- */
- private int value;
-
- /**
- * Initializes ReplicationFactor with value.
- * @param value replication value
- */
- ReplicationFactor(int value) {
- this.value = value;
- }
-
- /**
- * Returns enum value corresponding to the int value.
- * @param value replication value
- * @return ReplicationFactor
- */
- public static ReplicationFactor valueOf(int value) {
- if(value == 1) {
- return ONE;
- }
- if (value == 3) {
- return THREE;
- }
- throw new IllegalArgumentException("Unsupported value: " + value);
- }
-
- public static ReplicationFactor fromProto(
- HddsProtos.ReplicationFactor replicationFactor) {
- if (replicationFactor == null) {
- return null;
- }
- switch (replicationFactor) {
- case ONE:
- return ReplicationFactor.ONE;
- case THREE:
- return ReplicationFactor.THREE;
- default:
- throw new IllegalArgumentException(
- "Unsupported ProtoBuf replication factor: " + replicationFactor);
- }
- }
-
- /**
- * Returns integer representation of ReplicationFactor.
- * @return replication value
- */
- public int getValue() {
- return value;
- }
-}
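A short sketch (not part of this patch) of both lookup paths defined above: the integer overload and the protobuf conversion.

import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public class ReplicationFactorExample {
  public static void main(String[] args) {
    ReplicationFactor three = ReplicationFactor.valueOf(3);              // THREE
    ReplicationFactor one =
        ReplicationFactor.fromProto(HddsProtos.ReplicationFactor.ONE);   // ONE
    System.out.println(three.getValue() + " " + one.getValue());         // 3 1
  }
}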
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
deleted file mode 100644
index c63896e9e1d13..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-/**
- * The replication type to be used while writing key into ozone.
- */
-public enum ReplicationType {
- RATIS,
- STAND_ALONE,
- CHAINED;
-
- public static ReplicationType fromProto(
- HddsProtos.ReplicationType replicationType) {
- if (replicationType == null) {
- return null;
- }
- switch (replicationType) {
- case RATIS:
- return ReplicationType.RATIS;
- case STAND_ALONE:
- return ReplicationType.STAND_ALONE;
- case CHAINED:
- return ReplicationType.CHAINED;
- default:
- throw new IllegalArgumentException(
- "Unsupported ProtoBuf replication type: " + replicationType);
- }
- }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
deleted file mode 100644
index e81f134b259fe..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.client;
-
-/**
- * Base property types for HDDS containers and replications.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
deleted file mode 100644
index 8beac1663b2b7..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
-
-import com.google.gson.Gson;
-import java.io.IOException;
-import java.io.Writer;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.HttpHeaders;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer2;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY;
-
-/**
- * A servlet to print out the running configuration data.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Unstable
-public class HddsConfServlet extends HttpServlet {
-
- private static final long serialVersionUID = 1L;
-
- protected static final String FORMAT_JSON = "json";
- protected static final String FORMAT_XML = "xml";
- private static final String COMMAND = "cmd";
- private static final OzoneConfiguration OZONE_CONFIG =
- new OzoneConfiguration();
- private static final transient Logger LOG =
- LoggerFactory.getLogger(HddsConfServlet.class);
-
-
- /**
- * Return the Configuration of the daemon hosting this servlet.
- * This is populated when the HttpServer starts.
- */
- private Configuration getConfFromContext() {
- Configuration conf = (Configuration) getServletContext().getAttribute(
- HttpServer2.CONF_CONTEXT_ATTRIBUTE);
- assert conf != null;
- return conf;
- }
-
- @Override
- public void doGet(HttpServletRequest request, HttpServletResponse response)
- throws ServletException, IOException {
-
- if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
- request, response)) {
- return;
- }
-
- String format = parseAcceptHeader(request);
- if (FORMAT_XML.equals(format)) {
- response.setContentType("text/xml; charset=utf-8");
- } else if (FORMAT_JSON.equals(format)) {
- response.setContentType("application/json; charset=utf-8");
- }
-
- String name = request.getParameter("name");
- Writer out = response.getWriter();
- String cmd = request.getParameter(COMMAND);
-
- processCommand(cmd, format, request, response, out, name);
- out.close();
- }
-
- private void processCommand(String cmd, String format,
- HttpServletRequest request, HttpServletResponse response, Writer out,
- String name)
- throws IOException {
- try {
- if (cmd == null) {
- if (FORMAT_XML.equals(format)) {
- response.setContentType("text/xml; charset=utf-8");
- } else if (FORMAT_JSON.equals(format)) {
- response.setContentType("application/json; charset=utf-8");
- }
-
- writeResponse(getConfFromContext(), out, format, name);
- } else {
- processConfigTagRequest(request, out);
- }
- } catch (BadFormatException bfe) {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
- } catch (IllegalArgumentException iae) {
- response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage());
- }
- }
-
- @VisibleForTesting
- static String parseAcceptHeader(HttpServletRequest request) {
- String format = request.getHeader(HttpHeaders.ACCEPT);
- return format != null && format.contains(FORMAT_JSON) ?
- FORMAT_JSON : FORMAT_XML;
- }
-
- /**
- * Guts of the servlet - extracted for easy testing.
- */
- static void writeResponse(Configuration conf,
- Writer out, String format, String propertyName)
- throws IOException, IllegalArgumentException, BadFormatException {
- if (FORMAT_JSON.equals(format)) {
- Configuration.dumpConfiguration(conf, propertyName, out);
- } else if (FORMAT_XML.equals(format)) {
- conf.writeXml(propertyName, out);
- } else {
- throw new BadFormatException("Bad format: " + format);
- }
- }
-
- /**
- * Exception to signal a bad content type.
- */
- public static class BadFormatException extends Exception {
-
- private static final long serialVersionUID = 1L;
-
- public BadFormatException(String msg) {
- super(msg);
- }
- }
-
- private void processConfigTagRequest(HttpServletRequest request,
- Writer out) throws IOException {
- String cmd = request.getParameter(COMMAND);
- Gson gson = new Gson();
- Configuration config = getOzoneConfig();
-
- switch (cmd) {
- case "getOzoneTags":
- out.write(gson.toJson(config.get(OZONE_TAGS_SYSTEM_KEY)
- .split(",")));
- break;
- case "getPropertyByTag":
- String tags = request.getParameter("tags");
- Map<String, Properties> propMap = new HashMap<>();
-
- for (String tag : tags.split(",")) {
- if (config.isPropertyTag(tag)) {
- Properties properties = config.getAllPropertiesByTag(tag);
- propMap.put(tag, properties);
- } else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Not a valid tag" + tag);
- }
- }
- }
- out.write(gson.toJsonTree(propMap).toString());
- break;
- default:
- throw new IllegalArgumentException(cmd + " is not a valid command.");
- }
-
- }
-
- private static Configuration getOzoneConfig() {
- return OZONE_CONFIG;
- }
-}
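A hedged client-side sketch (not part of this patch): the servlet keys its output format off the Accept header and dispatches on the cmd parameter. The host, port, mount path (/conf) and tag name below are assumptions about how and where the servlet is registered, not something this file defines.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class HddsConfServletClient {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; cmd=getPropertyByTag is handled by the servlet above.
    URL url = new URL(
        "http://scm.example.com:9876/conf?cmd=getPropertyByTag&tags=OZONE");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // A JSON Accept header selects the JSON response format.
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}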
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
deleted file mode 100644
index c0486335cdd2a..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.conf;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.List;
-import java.util.Properties;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Configuration for ozone.
- */
-@InterfaceAudience.Private
-public class OzoneConfiguration extends Configuration {
- static {
- activate();
- }
-
- public static OzoneConfiguration of(Configuration conf) {
- Preconditions.checkNotNull(conf);
-
- return conf instanceof OzoneConfiguration
- ? (OzoneConfiguration) conf
- : new OzoneConfiguration(conf);
- }
-
- public OzoneConfiguration() {
- OzoneConfiguration.activate();
- loadDefaults();
- }
-
- public OzoneConfiguration(Configuration conf) {
- super(conf);
- //load the configuration from the classloader of the original conf.
- setClassLoader(conf.getClassLoader());
- if (!(conf instanceof OzoneConfiguration)) {
- loadDefaults();
- }
- }
-
- private void loadDefaults() {
- try {
- //there could be multiple ozone-default-generated.xml files on the
- // classpath, which are generated by the annotation processor.
- // Here we add all of them to the list of the available configuration.
- Enumeration<URL> generatedDefaults =
- OzoneConfiguration.class.getClassLoader().getResources(
- "ozone-default-generated.xml");
- while (generatedDefaults.hasMoreElements()) {
- addResource(generatedDefaults.nextElement());
- }
- } catch (IOException e) {
- e.printStackTrace();
- }
- addResource("ozone-site.xml");
- }
-
- public List<Property> readPropertyFromXml(URL url) throws JAXBException {
- JAXBContext context = JAXBContext.newInstance(XMLConfiguration.class);
- Unmarshaller um = context.createUnmarshaller();
-
- XMLConfiguration config = (XMLConfiguration) um.unmarshal(url);
- return config.getProperties();
- }
-
- /**
- * Create a Configuration object and inject the required configuration values.
- *
- * @param configurationClass The class where the fields are annotated with
- * the configuration.
- * @return Instantiated Java object with the config fields injected.
- */
- public <T> T getObject(Class<T> configurationClass) {
-
- T configuration;
-
- try {
- configuration = configurationClass.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new ConfigurationException(
- "Configuration class can't be created: " + configurationClass, e);
- }
- ConfigGroup configGroup =
- configurationClass.getAnnotation(ConfigGroup.class);
- String prefix = configGroup.prefix();
-
- for (Method setterMethod : configurationClass.getMethods()) {
- if (setterMethod.isAnnotationPresent(Config.class)) {
-
- String methodLocation =
- configurationClass + "." + setterMethod.getName();
-
- Config configAnnotation = setterMethod.getAnnotation(Config.class);
-
- String key = prefix + "." + configAnnotation.key();
-
- Class<?>[] parameterTypes = setterMethod.getParameterTypes();
- if (parameterTypes.length != 1) {
- throw new ConfigurationException(
- "@Config annotation should be used on simple setter: "
- + methodLocation);
- }
-
- ConfigType type = configAnnotation.type();
-
- if (type == ConfigType.AUTO) {
- type = detectConfigType(parameterTypes[0], methodLocation);
- }
-
- //Note: default value is handled by ozone-default.xml. Here we can
- //use any default.
- try {
- switch (type) {
- case STRING:
- setterMethod.invoke(configuration, get(key));
- break;
- case INT:
- setterMethod.invoke(configuration,
- getInt(key, 0));
- break;
- case BOOLEAN:
- setterMethod.invoke(configuration,
- getBoolean(key, false));
- break;
- case LONG:
- setterMethod.invoke(configuration,
- getLong(key, 0));
- break;
- case TIME:
- setterMethod.invoke(configuration,
- getTimeDuration(key, 0, configAnnotation.timeUnit()));
- break;
- default:
- throw new ConfigurationException(
- "Unsupported ConfigType " + type + " on " + methodLocation);
- }
- } catch (InvocationTargetException | IllegalAccessException e) {
- throw new ConfigurationException(
- "Can't inject configuration to " + methodLocation, e);
- }
-
- }
- }
- return configuration;
-
- }
-
- private ConfigType detectConfigType(Class<?> parameterType,
- String methodLocation) {
- ConfigType type;
- if (parameterType == String.class) {
- type = ConfigType.STRING;
- } else if (parameterType == Integer.class || parameterType == int.class) {
- type = ConfigType.INT;
- } else if (parameterType == Long.class || parameterType == long.class) {
- type = ConfigType.LONG;
- } else if (parameterType == Boolean.class
- || parameterType == boolean.class) {
- type = ConfigType.BOOLEAN;
- } else {
- throw new ConfigurationException(
- "Unsupported configuration type " + parameterType + " in "
- + methodLocation);
- }
- return type;
- }
-
- /**
- * Class to marshall/un-marshall configuration from xml files.
- */
- @XmlAccessorType(XmlAccessType.FIELD)
- @XmlRootElement(name = "configuration")
- public static class XMLConfiguration {
-
- @XmlElement(name = "property", type = Property.class)
- private List<Property> properties = new ArrayList<>();
-
- public XMLConfiguration() {
- }
-
- public XMLConfiguration(List<Property> properties) {
- this.properties = properties;
- }
-
- public List<Property> getProperties() {
- return properties;
- }
-
- public void setProperties(List<Property> properties) {
- this.properties = properties;
- }
- }
-
- /**
- * Class to marshall/un-marshall configuration properties from xml files.
- */
- @XmlAccessorType(XmlAccessType.FIELD)
- @XmlRootElement(name = "property")
- public static class Property implements Comparable<Property> {
-
- private String name;
- private String value;
- private String tag;
- private String description;
-
- public String getName() {
- return name;
- }
-
- public void setName(String name) {
- this.name = name;
- }
-
- public String getValue() {
- return value;
- }
-
- public void setValue(String value) {
- this.value = value;
- }
-
- public String getTag() {
- return tag;
- }
-
- public void setTag(String tag) {
- this.tag = tag;
- }
-
- public String getDescription() {
- return description;
- }
-
- public void setDescription(String description) {
- this.description = description;
- }
-
- @Override
- public int compareTo(Property o) {
- if (this == o) {
- return 0;
- }
- return this.getName().compareTo(o.getName());
- }
-
- @Override
- public String toString() {
- return this.getName() + " " + this.getValue() + " " + this.getTag();
- }
-
- @Override
- public int hashCode() {
- return this.getName().hashCode();
- }
-
- @Override
- public boolean equals(Object obj) {
- return (obj instanceof Property) && (((Property) obj).getName())
- .equals(this.getName());
- }
- }
-
- public static void activate() {
- // adds the default resources
- Configuration.addDefaultResource("hdfs-default.xml");
- Configuration.addDefaultResource("hdfs-site.xml");
- Configuration.addDefaultResource("ozone-default.xml");
- }
-
- /**
- * The superclass implementation of getAllPropertiesByTag does not
- * refresh property values when newly added resources carry no tag
- * information. This override loads the newly added resources first
- * and returns the current value of each property that belongs to
- * the given tag.
- *
- * @param tag tag to look up
- * @return Properties that belong to the tag
- */
- @Override
- public Properties getAllPropertiesByTag(String tag) {
- // Call getProps first to load the newly added resources
- // before calling super.getAllPropertiesByTag
- Properties updatedProps = getProps();
- Properties propertiesByTag = super.getAllPropertiesByTag(tag);
- Properties props = new Properties();
- Enumeration<?> properties = propertiesByTag.propertyNames();
- while (properties.hasMoreElements()) {
- Object propertyName = properties.nextElement();
- // get the current value of the property
- Object value = updatedProps.getProperty(propertyName.toString());
- if (value != null) {
- props.put(propertyName, value);
- }
- }
- return props;
- }
-}
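The getObject method above injects configuration values into a plain Java object through setters annotated with @Config inside a class annotated with @ConfigGroup. A hedged sketch of such a class is shown below; the class name and key are hypothetical, and only the key attribute of @Config is used because any further required attributes are not visible in this hunk:

    // Hypothetical configuration holder for illustration only.
    @ConfigGroup(prefix = "ozone.example.client")
    public class ExampleClientConfig {

      private int retryCount;

      @Config(key = "retry.count")  // resolved as "ozone.example.client.retry.count"
      public void setRetryCount(int retryCount) {
        this.retryCount = retryCount;
      }

      public int getRetryCount() {
        return retryCount;
      }
    }

    // Usage sketch: values come from ozone-default.xml / ozone-site.xml or
    // anything set programmatically on the configuration.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt("ozone.example.client.retry.count", 5);
    ExampleClientConfig clientConf = conf.getObject(ExampleClientConfig.class);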
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
deleted file mode 100644
index 948057ebba70b..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.conf;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
deleted file mode 100644
index b9d7bceb48f95..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.function;
-
-import com.google.protobuf.ServiceException;
-
-/**
- * Functional interface like java.util.function.Function but with
- * checked exception.
- */
-@FunctionalInterface
-public interface FunctionWithServiceException<T, R> {
-
- /**
- * Applies this function to the given argument.
- *
- * @param t the function argument
- * @return the function result
- */
- R apply(T t) throws ServiceException;
-}
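Because apply declares the checked ServiceException, the interface can be used as a lambda wherever a protobuf-style call may fail. A small self-contained sketch with a hypothetical parsing lambda:

    import com.google.protobuf.ServiceException;

    public final class FunctionSketch {
      public static void main(String[] args) throws ServiceException {
        // Wraps an unchecked parse failure into the checked ServiceException
        // declared by the functional interface.
        FunctionWithServiceException<String, Integer> parse = s -> {
          try {
            return Integer.valueOf(s);
          } catch (NumberFormatException e) {
            throw new ServiceException("not a number: " + s, e);
          }
        };
        System.out.println(parse.apply("42"));
      }
    }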
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
deleted file mode 100644
index 915fe3557e2ce..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional interfaces for ozone, similar to java.util.function.
- */
-package org.apache.hadoop.hdds.function;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
deleted file mode 100644
index f8894e6a7e8e6..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds;
-
-/**
- * Generic HDDS specific configurator and helper classes.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
deleted file mode 100644
index 698a443fc6b44..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ /dev/null
@@ -1,493 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.protocol;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.net.NetConstants;
-import org.apache.hadoop.hdds.scm.net.NodeImpl;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * DatanodeDetails class contains details about DataNode like:
- * - UUID of the DataNode.
- * - IP and Hostname details.
- * - Port details to which the DataNode will be listening.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DatanodeDetails extends NodeImpl implements
- Comparable<DatanodeDetails> {
-/**
- * DataNode's unique identifier in the cluster.
- */
- private final UUID uuid;
-
- private String ipAddress;
- private String hostName;
- private List<Port> ports;
- private String certSerialId;
-
- /**
- * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used
- * for instantiating DatanodeDetails.
- * @param uuid DataNode's UUID
- * @param ipAddress IP Address of this DataNode
- * @param hostName DataNode's hostname
- * @param networkLocation DataNode's network location path
- * @param ports Ports used by the DataNode
- * @param certSerialId serial id from SCM issued certificate.
- */
- private DatanodeDetails(String uuid, String ipAddress, String hostName,
- String networkLocation, List<Port> ports, String certSerialId) {
- super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT);
- this.uuid = UUID.fromString(uuid);
- this.ipAddress = ipAddress;
- this.hostName = hostName;
- this.ports = ports;
- this.certSerialId = certSerialId;
- }
-
- protected DatanodeDetails(DatanodeDetails datanodeDetails) {
- super(datanodeDetails.getHostName(), datanodeDetails.getNetworkLocation(),
- datanodeDetails.getCost());
- this.uuid = datanodeDetails.uuid;
- this.ipAddress = datanodeDetails.ipAddress;
- this.hostName = datanodeDetails.hostName;
- this.ports = datanodeDetails.ports;
- this.setNetworkName(datanodeDetails.getNetworkName());
- }
-
- /**
- * Returns the DataNode UUID.
- *
- * @return UUID of DataNode
- */
- public UUID getUuid() {
- return uuid;
- }
-
- /**
- * Returns the string representation of DataNode UUID.
- *
- * @return UUID of DataNode
- */
- public String getUuidString() {
- return uuid.toString();
- }
-
- /**
- * Sets the IP address of Datanode.
- *
- * @param ip IP Address
- */
- public void setIpAddress(String ip) {
- this.ipAddress = ip;
- }
-
- /**
- * Returns IP address of DataNode.
- *
- * @return IP address
- */
- public String getIpAddress() {
- return ipAddress;
- }
-
- /**
- * Sets the Datanode hostname.
- *
- * @param host hostname
- */
- public void setHostName(String host) {
- this.hostName = host;
- }
-
- /**
- * Returns Hostname of DataNode.
- *
- * @return Hostname
- */
- public String getHostName() {
- return hostName;
- }
-
- /**
- * Sets a DataNode Port.
- *
- * @param port DataNode port
- */
- public void setPort(Port port) {
- // If the port is already in the list remove it first and add the
- // new/updated port value.
- ports.remove(port);
- ports.add(port);
- }
-
- /**
- * Returns all the Ports used by DataNode.
- *
- * @return DataNode Ports
- */
- public List<Port> getPorts() {
- return ports;
- }
-
- /**
- * Given the name returns port number, null if the asked port is not found.
- *
- * @param name Name of the port
- *
- * @return Port
- */
- public Port getPort(Port.Name name) {
- for (Port port : ports) {
- if (port.getName().equals(name)) {
- return port;
- }
- }
- return null;
- }
-
- /**
- * Returns a DatanodeDetails from the protocol buffers.
- *
- * @param datanodeDetailsProto - protoBuf Message
- * @return DatanodeDetails
- */
- public static DatanodeDetails getFromProtoBuf(
- HddsProtos.DatanodeDetailsProto datanodeDetailsProto) {
- DatanodeDetails.Builder builder = newBuilder();
- builder.setUuid(datanodeDetailsProto.getUuid());
- if (datanodeDetailsProto.hasIpAddress()) {
- builder.setIpAddress(datanodeDetailsProto.getIpAddress());
- }
- if (datanodeDetailsProto.hasHostName()) {
- builder.setHostName(datanodeDetailsProto.getHostName());
- }
- if (datanodeDetailsProto.hasCertSerialId()) {
- builder.setCertSerialId(datanodeDetailsProto.getCertSerialId());
- }
- for (HddsProtos.Port port : datanodeDetailsProto.getPortsList()) {
- builder.addPort(newPort(
- Port.Name.valueOf(port.getName().toUpperCase()), port.getValue()));
- }
- if (datanodeDetailsProto.hasNetworkName()) {
- builder.setNetworkName(datanodeDetailsProto.getNetworkName());
- }
- if (datanodeDetailsProto.hasNetworkLocation()) {
- builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation());
- }
- return builder.build();
- }
-
- /**
- * Returns a DatanodeDetails protobuf message from a datanode ID.
- * @return HddsProtos.DatanodeDetailsProto
- */
- public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
- HddsProtos.DatanodeDetailsProto.Builder builder =
- HddsProtos.DatanodeDetailsProto.newBuilder()
- .setUuid(getUuidString());
- if (ipAddress != null) {
- builder.setIpAddress(ipAddress);
- }
- if (hostName != null) {
- builder.setHostName(hostName);
- }
- if (certSerialId != null) {
- builder.setCertSerialId(certSerialId);
- }
- if (!Strings.isNullOrEmpty(getNetworkName())) {
- builder.setNetworkName(getNetworkName());
- }
- if (!Strings.isNullOrEmpty(getNetworkLocation())) {
- builder.setNetworkLocation(getNetworkLocation());
- }
-
- for (Port port : ports) {
- builder.addPorts(HddsProtos.Port.newBuilder()
- .setName(port.getName().toString())
- .setValue(port.getValue())
- .build());
- }
- return builder.build();
- }
-
- @Override
- public String toString() {
- return uuid.toString() + "{" +
- "ip: " +
- ipAddress +
- ", host: " +
- hostName +
- ", networkLocation: " +
- getNetworkLocation() +
- ", certSerialId: " + certSerialId +
- "}";
- }
-
- @Override
- public int compareTo(DatanodeDetails that) {
- return this.getUuid().compareTo(that.getUuid());
- }
-
- @Override
- public boolean equals(Object obj) {
- return obj instanceof DatanodeDetails &&
- uuid.equals(((DatanodeDetails) obj).uuid);
- }
-
- @Override
- public int hashCode() {
- return uuid.hashCode();
- }
-
- /**
- * Returns DatanodeDetails.Builder instance.
- *
- * @return DatanodeDetails.Builder
- */
- public static Builder newBuilder() {
- return new Builder();
- }
-
- /**
- * Builder class for building DatanodeDetails.
- */
- public static final class Builder {
- private String id;
- private String ipAddress;
- private String hostName;
- private String networkName;
- private String networkLocation;
- private List<Port> ports;
- private String certSerialId;
-
- /**
- * Default private constructor. To create Builder instance use
- * DatanodeDetails#newBuilder.
- */
- private Builder() {
- ports = new ArrayList<>();
- }
-
- /**
- * Sets the DatanodeUuid.
- *
- * @param uuid DatanodeUuid
- * @return DatanodeDetails.Builder
- */
- public Builder setUuid(String uuid) {
- this.id = uuid;
- return this;
- }
-
- /**
- * Sets the IP address of DataNode.
- *
- * @param ip address
- * @return DatanodeDetails.Builder
- */
- public Builder setIpAddress(String ip) {
- this.ipAddress = ip;
- return this;
- }
-
- /**
- * Sets the hostname of DataNode.
- *
- * @param host hostname
- * @return DatanodeDetails.Builder
- */
- public Builder setHostName(String host) {
- this.hostName = host;
- return this;
- }
-
- /**
- * Sets the network name of DataNode.
- *
- * @param name network name
- * @return DatanodeDetails.Builder
- */
- public Builder setNetworkName(String name) {
- this.networkName = name;
- return this;
- }
-
- /**
- * Sets the network location of DataNode.
- *
- * @param loc location
- * @return DatanodeDetails.Builder
- */
- public Builder setNetworkLocation(String loc) {
- this.networkLocation = loc;
- return this;
- }
-
- /**
- * Adds a DataNode Port.
- *
- * @param port DataNode port
- *
- * @return DatanodeDetails.Builder
- */
- public Builder addPort(Port port) {
- this.ports.add(port);
- return this;
- }
-
- /**
- * Adds certificate serial id.
- *
- * @param certId Serial id of SCM issued certificate.
- *
- * @return DatanodeDetails.Builder
- */
- public Builder setCertSerialId(String certId) {
- this.certSerialId = certId;
- return this;
- }
-
- /**
- * Builds and returns DatanodeDetails instance.
- *
- * @return DatanodeDetails
- */
- public DatanodeDetails build() {
- Preconditions.checkNotNull(id);
- if (networkLocation == null) {
- networkLocation = NetConstants.DEFAULT_RACK;
- }
- DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName,
- networkLocation, ports, certSerialId);
- if (networkName != null) {
- dn.setNetworkName(networkName);
- }
- return dn;
- }
- }
-
- /**
- * Constructs a new Port with name and value.
- *
- * @param name Name of the port
- * @param value Port number
- *
- * @return {@code Port} instance
- */
- public static Port newPort(Port.Name name, Integer value) {
- return new Port(name, value);
- }
-
- /**
- * Container to hold DataNode Port details.
- */
- public static final class Port {
-
- /**
- * Ports that are supported in DataNode.
- */
- public enum Name {
- STANDALONE, RATIS, REST
- }
-
- private Name name;
- private Integer value;
-
- /**
- * Private constructor for constructing Port object. Use
- * DatanodeDetails#newPort to create a new Port object.
- *
- * @param name
- * @param value
- */
- private Port(Name name, Integer value) {
- this.name = name;
- this.value = value;
- }
-
- /**
- * Returns the name of the port.
- *
- * @return Port name
- */
- public Name getName() {
- return name;
- }
-
- /**
- * Returns the port number.
- *
- * @return Port number
- */
- public Integer getValue() {
- return value;
- }
-
- @Override
- public int hashCode() {
- return name.hashCode();
- }
-
- /**
- * Ports are considered equal if they have the same name.
- *
- * @param anObject
- * The object to compare this {@code Port} against
- * @return {@code true} if the given object represents a {@code Port}
- * and has the same name, {@code false} otherwise
- */
- @Override
- public boolean equals(Object anObject) {
- if (this == anObject) {
- return true;
- }
- if (anObject instanceof Port) {
- return name.equals(((Port) anObject).name);
- }
- return false;
- }
- }
-
- /**
- * Returns serial id of SCM issued certificate.
- *
- * @return certificate serial id
- */
- public String getCertSerialId() {
- return certSerialId;
- }
-
- /**
- * Set certificate serial id of SCM issued certificate.
- *
- */
- public void setCertSerialId(String certSerialId) {
- this.certSerialId = certSerialId;
- }
-}
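The Builder above is the only way to construct a DatanodeDetails, and getProtoBufMessage/getFromProtoBuf round-trip it through protobuf. A short usage sketch, assuming the usual java.util.UUID and HddsProtos imports and purely illustrative addresses and port numbers:

    DatanodeDetails dn = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID().toString())
        .setIpAddress("10.0.0.1")                 // hypothetical address
        .setHostName("dn1.example.com")           // hypothetical hostname
        .setNetworkLocation("/rack1")
        .addPort(DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, 9858))
        .build();

    HddsProtos.DatanodeDetailsProto proto = dn.getProtoBufMessage();
    DatanodeDetails roundTripped = DatanodeDetails.getFromProtoBuf(proto);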
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
deleted file mode 100644
index 4036cb17b8477..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.protocol;
-
-import java.io.IOException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * The protocol used to perform security related operations with SCM.
- */
-@KerberosInfo(
- serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-@InterfaceAudience.Private
-public interface SCMSecurityProtocol {
-
- @SuppressWarnings("checkstyle:ConstantName")
- /**
- * Version 1: Initial version.
- */
- long versionID = 1L;
-
- /**
- * Get SCM signed certificate for DataNode.
- *
- * @param dataNodeDetails - DataNode Details.
- * @param certSignReq - Certificate signing request.
- * @return byte[] - SCM signed certificate.
- */
- String getDataNodeCertificate(
- DatanodeDetailsProto dataNodeDetails,
- String certSignReq) throws IOException;
-
- /**
- * Get SCM signed certificate for OM.
- *
- * @param omDetails - DataNode Details.
- * @param certSignReq - Certificate signing request.
- * @return String - pem encoded SCM signed
- * certificate.
- */
- String getOMCertificate(OzoneManagerDetailsProto omDetails,
- String certSignReq) throws IOException;
-
- /**
- * Get SCM signed certificate for given certificate serial id if it exists.
- * Throws exception if it's not found.
- *
- * @param certSerialId - Certificate serial id.
- * @return String - pem encoded SCM signed
- * certificate with given cert id if it
- * exists.
- */
- String getCertificate(String certSerialId) throws IOException;
-
- /**
- * Get CA certificate.
- *
- * @return String - pem encoded CA certificate.
- */
- String getCACertificate() throws IOException;
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
deleted file mode 100644
index 7dae0fce02cca..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains HDDS protocol related classes.
- */
-package org.apache.hadoop.hdds.protocol;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
deleted file mode 100644
index efe79a76f31dd..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.protocolPB;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.function.Consumer;
-
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest.Builder;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Type;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import static org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
-
-/**
- * This class is the client-side translator that forwards requests for
- * {@link SCMSecurityProtocol} to the {@link SCMSecurityProtocolPB} proxy.
- */
-public class SCMSecurityProtocolClientSideTranslatorPB implements
- SCMSecurityProtocol, ProtocolTranslator, Closeable {
-
- /**
- * RpcController is not used and hence is set to null.
- */
- private static final RpcController NULL_RPC_CONTROLLER = null;
- private final SCMSecurityProtocolPB rpcProxy;
-
- public SCMSecurityProtocolClientSideTranslatorPB(
- SCMSecurityProtocolPB rpcProxy) {
- this.rpcProxy = rpcProxy;
- }
-
- /**
- * Helper method to wrap the request and send the message.
- */
- private SCMSecurityResponse submitRequest(
- SCMSecurityProtocolProtos.Type type,
- Consumer<Builder> builderConsumer) throws IOException {
- final SCMSecurityResponse response;
- try {
-
- Builder builder = SCMSecurityRequest.newBuilder()
- .setCmdType(type)
- .setTraceID(TracingUtil.exportCurrentSpan());
- builderConsumer.accept(builder);
- SCMSecurityRequest wrapper = builder.build();
-
- response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
- } catch (ServiceException ex) {
- throw ProtobufHelper.getRemoteException(ex);
- }
- return response;
- }
-
- /**
- * Closes this stream and releases any system resources associated
- * with it. If the stream is already closed then invoking this
- * method has no effect.
- *
- *
- * <p> As noted in {@link AutoCloseable#close()}, cases where the
- * close may fail require careful attention. It is strongly advised
- * to relinquish the underlying resources and to internally
- * mark the {@code Closeable} as closed, prior to throwing
- * the {@code IOException}.
- *
- * @throws IOException if an I/O error occurs
- */
- @Override
- public void close() throws IOException {
- RPC.stopProxy(rpcProxy);
- }
-
- /**
- * Get SCM signed certificate for DataNode.
- *
- * @param dataNodeDetails - DataNode Details.
- * @param certSignReq - Certificate signing request.
- * @return byte[] - SCM signed certificate.
- */
- @Override
- public String getDataNodeCertificate(DatanodeDetailsProto dataNodeDetails,
- String certSignReq) throws IOException {
- return getDataNodeCertificateChain(dataNodeDetails, certSignReq)
- .getX509Certificate();
- }
-
- /**
- * Get SCM signed certificate for OM.
- *
- * @param omDetails - OzoneManager Details.
- * @param certSignReq - Certificate signing request.
- * @return byte[] - SCM signed certificate.
- */
- @Override
- public String getOMCertificate(OzoneManagerDetailsProto omDetails,
- String certSignReq) throws IOException {
- return getOMCertChain(omDetails, certSignReq).getX509Certificate();
- }
-
- /**
- * Get SCM signed certificate for OM.
- *
- * @param omDetails - OzoneManager Details.
- * @param certSignReq - Certificate signing request.
- * @return byte[] - SCM signed certificate.
- */
- public SCMGetCertResponseProto getOMCertChain(
- OzoneManagerDetailsProto omDetails, String certSignReq)
- throws IOException {
- SCMGetOMCertRequestProto request = SCMGetOMCertRequestProto
- .newBuilder()
- .setCSR(certSignReq)
- .setOmDetails(omDetails)
- .build();
- return submitRequest(Type.GetOMCertificate,
- builder -> builder.setGetOMCertRequest(request))
- .getGetCertResponseProto();
- }
-
- /**
- * Get SCM signed certificate with given serial id. Throws exception if
- * certificate is not found.
- *
- * @param certSerialId - Certificate serial id.
- * @return string - pem encoded certificate.
- */
- @Override
- public String getCertificate(String certSerialId) throws IOException {
- SCMGetCertificateRequestProto request = SCMGetCertificateRequestProto
- .newBuilder()
- .setCertSerialId(certSerialId)
- .build();
- return submitRequest(Type.GetCertificate,
- builder -> builder.setGetCertificateRequest(request))
- .getGetCertResponseProto()
- .getX509Certificate();
- }
-
- /**
- * Get SCM signed certificate for Datanode.
- *
- * @param dnDetails - Datanode Details.
- * @param certSignReq - Certificate signing request.
- * @return byte[] - SCM signed certificate.
- */
- public SCMGetCertResponseProto getDataNodeCertificateChain(
- DatanodeDetailsProto dnDetails, String certSignReq)
- throws IOException {
-
- SCMGetDataNodeCertRequestProto request =
- SCMGetDataNodeCertRequestProto.newBuilder()
- .setCSR(certSignReq)
- .setDatanodeDetails(dnDetails)
- .build();
- return submitRequest(Type.GetDataNodeCertificate,
- builder -> builder.setGetDataNodeCertRequest(request))
- .getGetCertResponseProto();
- }
-
- /**
- * Get CA certificate.
- *
- * @return String - pem encoded CA certificate.
- */
- @Override
- public String getCACertificate() throws IOException {
- SCMGetCACertificateRequestProto protoIns = SCMGetCACertificateRequestProto
- .getDefaultInstance();
- return submitRequest(Type.GetCACertificate,
- builder -> builder.setGetCACertificateRequest(protoIns))
- .getGetCertResponseProto().getX509Certificate();
-
- }
-
- /**
- * Return the proxy object underlying this protocol translator.
- *
- * @return the proxy object underlying this protocol translator.
- */
- @Override
- public Object getUnderlyingProxyObject() {
- return rpcProxy;
- }
-}
\ No newline at end of file
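Since the translator implements Closeable, callers can manage it with try-with-resources. A usage sketch, assuming an SCMSecurityProtocolPB proxy already obtained through Hadoop RPC (not shown in this hunk) and a caller that may throw IOException:

    try (SCMSecurityProtocolClientSideTranslatorPB client =
             new SCMSecurityProtocolClientSideTranslatorPB(proxy)) {
      String caCertPem = client.getCACertificate();
      System.out.println(caCertPem);
    }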
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java
deleted file mode 100644
index 41b0332d6d3cd..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.protocolPB;
-
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityProtocolService;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.security.KerberosInfo;
-
-/**
- * Protocol for security related operations on SCM.
- */
-
-@ProtocolInfo(protocolName =
- "org.apache.hadoop.hdds.protocol.SCMSecurityProtocol",
- protocolVersion = 1)
-@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
-public interface SCMSecurityProtocolPB extends
- SCMSecurityProtocolService.BlockingInterface {
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java
deleted file mode 100644
index 44960194f075a..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.protocolPB;
-/**
- * This package contains classes for wiring HDDS protobuf calls to rpc.
- */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java
deleted file mode 100644
index 07a886a0f9c0d..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.ratis;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.ratis.util.JavaUtils;
-
-import java.util.Objects;
-import java.util.function.Supplier;
-
-/**
- * Implementing the {@link Message} interface
- * for {@link ContainerCommandRequestProto}.
- */
-public final class ContainerCommandRequestMessage implements Message {
- public static ContainerCommandRequestMessage toMessage(
- ContainerCommandRequestProto request, String traceId) {
- final ContainerCommandRequestProto.Builder b
- = ContainerCommandRequestProto.newBuilder(request);
- if (traceId != null) {
- b.setTraceID(traceId);
- }
-
- ByteString data = ByteString.EMPTY;
- if (request.getCmdType() == Type.WriteChunk) {
- final WriteChunkRequestProto w = request.getWriteChunk();
- data = w.getData();
- b.setWriteChunk(w.toBuilder().clearData());
- } else if (request.getCmdType() == Type.PutSmallFile) {
- final PutSmallFileRequestProto p = request.getPutSmallFile();
- data = p.getData();
- b.setPutSmallFile(p.toBuilder().setData(ByteString.EMPTY));
- }
- return new ContainerCommandRequestMessage(b.build(), data);
- }
-
- public static ContainerCommandRequestProto toProto(
- ByteString bytes, RaftGroupId groupId)
- throws InvalidProtocolBufferException {
- final int i = 4 + bytes.asReadOnlyByteBuffer().getInt();
- final ContainerCommandRequestProto header
- = ContainerCommandRequestProto.parseFrom(bytes.substring(4, i));
- // TODO: setting pipeline id can be avoided if the client is sending it.
- // In such case, just have to validate the pipeline id.
- final ContainerCommandRequestProto.Builder b = header.toBuilder();
- if (groupId != null) {
- b.setPipelineID(groupId.getUuid().toString());
- }
- final ByteString data = bytes.substring(i);
- if (header.getCmdType() == Type.WriteChunk) {
- b.setWriteChunk(b.getWriteChunkBuilder().setData(data));
- } else if (header.getCmdType() == Type.PutSmallFile) {
- b.setPutSmallFile(b.getPutSmallFileBuilder().setData(data));
- }
- return b.build();
- }
-
- private final ContainerCommandRequestProto header;
- private final ByteString data;
- private final Supplier<ByteString> contentSupplier
- = JavaUtils.memoize(this::buildContent);
-
- private ContainerCommandRequestMessage(
- ContainerCommandRequestProto header, ByteString data) {
- this.header = Objects.requireNonNull(header, "header == null");
- this.data = Objects.requireNonNull(data, "data == null");
- }
-
- private ByteString buildContent() {
- final ByteString headerBytes = header.toByteString();
- return RatisHelper.int2ByteString(headerBytes.size())
- .concat(headerBytes)
- .concat(data);
- }
-
- @Override
- public ByteString getContent() {
- return contentSupplier.get();
- }
-
- @Override
- public String toString() {
- return header + ", data.size=" + data.size();
- }
-}
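The message content produced by buildContent is a 4-byte length of the serialized header, followed by the header bytes, followed by the raw data; toProto reads it back with the same layout. A self-contained framing sketch using stand-in byte strings instead of real protobuf messages:

    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    public final class FramingSketch {
      public static void main(String[] args) throws IOException {
        ByteString header = ByteString.copyFromUtf8("header-bytes"); // stand-in for the proto header
        ByteString data = ByteString.copyFromUtf8("chunk-data");     // stand-in for the chunk payload

        // Frame: 4-byte length prefix + header + data, as in buildContent().
        ByteString.Output out = ByteString.newOutput();
        try (DataOutputStream dataOut = new DataOutputStream(out)) {
          dataOut.writeInt(header.size());
        }
        ByteString framed = out.toByteString().concat(header).concat(data);

        // Parse it back the way toProto() does.
        int end = 4 + framed.asReadOnlyByteBuffer().getInt();
        ByteString parsedHeader = framed.substring(4, end);
        ByteString parsedData = framed.substring(end);
        System.out.println(parsedHeader.toStringUtf8() + " / " + parsedData.toStringUtf8());
      }
    }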
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
deleted file mode 100644
index 081b4fb766be8..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.ratis;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import org.apache.ratis.RaftConfigKeys;
-import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.client.RaftClientConfigKeys;
-import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.grpc.GrpcConfigKeys;
-import org.apache.ratis.grpc.GrpcFactory;
-import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.proto.RaftProtos;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.protocol.RaftPeer;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.retry.RetryPolicies;
-import org.apache.ratis.retry.RetryPolicy;
-import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.util.SizeInBytes;
-import org.apache.ratis.util.TimeDuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Ratis helper methods.
- */
-public interface RatisHelper {
- Logger LOG = LoggerFactory.getLogger(RatisHelper.class);
-
- static String toRaftPeerIdString(DatanodeDetails id) {
- return id.getUuidString();
- }
-
- static UUID toDatanodeId(String peerIdString) {
- return UUID.fromString(peerIdString);
- }
-
- static UUID toDatanodeId(RaftPeerId peerId) {
- return toDatanodeId(peerId.toString());
- }
-
- static UUID toDatanodeId(RaftProtos.RaftPeerProto peerId) {
- return toDatanodeId(RaftPeerId.valueOf(peerId.getId()));
- }
-
- static String toRaftPeerAddressString(DatanodeDetails id) {
- return id.getIpAddress() + ":" +
- id.getPort(DatanodeDetails.Port.Name.RATIS).getValue();
- }
-
- static RaftPeerId toRaftPeerId(DatanodeDetails id) {
- return RaftPeerId.valueOf(toRaftPeerIdString(id));
- }
-
- static RaftPeer toRaftPeer(DatanodeDetails id) {
- return new RaftPeer(toRaftPeerId(id), toRaftPeerAddressString(id));
- }
-
- static List<RaftPeer> toRaftPeers(Pipeline pipeline) {
- return toRaftPeers(pipeline.getNodes());
- }
-
- static List<RaftPeer> toRaftPeers(
- List<DatanodeDetails> datanodes) {
- return datanodes.stream().map(RatisHelper::toRaftPeer)
- .collect(Collectors.toList());
- }
-
- /* TODO: use a dummy id for all groups for the moment.
- * It should be changed to a unique id for each group.
- */
- RaftGroupId DUMMY_GROUP_ID =
- RaftGroupId.valueOf(ByteString.copyFromUtf8("AOzoneRatisGroup"));
-
- RaftGroup EMPTY_GROUP = RaftGroup.valueOf(DUMMY_GROUP_ID,
- Collections.emptyList());
-
- static RaftGroup emptyRaftGroup() {
- return EMPTY_GROUP;
- }
-
- static RaftGroup newRaftGroup(Collection<RaftPeer> peers) {
- return peers.isEmpty()? emptyRaftGroup()
- : RaftGroup.valueOf(DUMMY_GROUP_ID, peers);
- }
-
- static RaftGroup newRaftGroup(RaftGroupId groupId,
- Collection<DatanodeDetails> peers) {
- final List<RaftPeer> newPeers = peers.stream()
- .map(RatisHelper::toRaftPeer)
- .collect(Collectors.toList());
- return peers.isEmpty() ? RaftGroup.valueOf(groupId, Collections.emptyList())
- : RaftGroup.valueOf(groupId, newPeers);
- }
-
- static RaftGroup newRaftGroup(Pipeline pipeline) {
- return RaftGroup.valueOf(RaftGroupId.valueOf(pipeline.getId().getId()),
- toRaftPeers(pipeline));
- }
-
- static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline,
- RetryPolicy retryPolicy, int maxOutStandingRequest,
- GrpcTlsConfig tlsConfig, TimeDuration timeout) throws IOException {
- return newRaftClient(rpcType, toRaftPeerId(pipeline.getFirstNode()),
- newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()),
- pipeline.getNodes()), retryPolicy, maxOutStandingRequest, tlsConfig,
- timeout);
- }
-
- static TimeDuration getClientRequestTimeout(Configuration conf) {
- // Set the client requestTimeout
- final TimeUnit timeUnit =
- OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
- .getUnit();
- final long duration = conf.getTimeDuration(
- OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
- OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
- .getDuration(), timeUnit);
- final TimeDuration clientRequestTimeout =
- TimeDuration.valueOf(duration, timeUnit);
- return clientRequestTimeout;
- }
-
- static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
- RetryPolicy retryPolicy, int maxOutstandingRequests,
- GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) {
- return newRaftClient(rpcType, leader.getId(),
- newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy,
- maxOutstandingRequests, tlsConfig, clientRequestTimeout);
- }
-
- static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
- RetryPolicy retryPolicy, int maxOutstandingRequests,
- TimeDuration clientRequestTimeout) {
- return newRaftClient(rpcType, leader.getId(),
- newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy,
- maxOutstandingRequests, null, clientRequestTimeout);
- }
-
- static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader,
- RaftGroup group, RetryPolicy retryPolicy, int maxOutStandingRequest,
- GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) {
- if (LOG.isTraceEnabled()) {
- LOG.trace("newRaftClient: {}, leader={}, group={}",
- rpcType, leader, group);
- }
- final RaftProperties properties = new RaftProperties();
- RaftConfigKeys.Rpc.setType(properties, rpcType);
- RaftClientConfigKeys.Rpc
- .setRequestTimeout(properties, clientRequestTimeout);
-
- GrpcConfigKeys.setMessageSizeMax(properties,
- SizeInBytes.valueOf(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE));
- GrpcConfigKeys.OutputStream.setOutstandingAppendsMax(properties,
- maxOutStandingRequest);
-
- RaftClient.Builder builder = RaftClient.newBuilder()
- .setRaftGroup(group)
- .setLeaderId(leader)
- .setProperties(properties)
- .setRetryPolicy(retryPolicy);
-
- // TODO: GRPC TLS only for now, netty/hadoop RPC TLS support later.
- if (tlsConfig != null && rpcType == SupportedRpcType.GRPC) {
- builder.setParameters(GrpcFactory.newRaftParameters(tlsConfig));
- }
- return builder.build();
- }
-
- // For external gRPC client to server with gRPC TLS.
- // No mTLS for external clients, as the SCM CA does not issue certificates for them.
- static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf,
- X509Certificate caCert) {
- GrpcTlsConfig tlsConfig = null;
- if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) {
- tlsConfig = new GrpcTlsConfig(null, null,
- caCert, false);
- }
- return tlsConfig;
- }
-
- // For Internal gRPC client from SCM to DN with gRPC TLS
- static GrpcTlsConfig createTlsClientConfigForSCM(SecurityConfig conf,
- CertificateServer certificateServer) throws IOException {
- if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) {
- try {
- X509Certificate caCert =
- CertificateCodec.getX509Certificate(
- certificateServer.getCACertificate());
- return new GrpcTlsConfig(null, null,
- caCert, false);
- } catch (CertificateException ex) {
- throw new SCMSecurityException("Fail to find SCM CA certificate.", ex);
- }
- }
- return null;
- }
-
- // For the gRPC server running the DN container service with gRPC TLS.
- // No mTLS, as the channel is shared with external clients, which
- // do not have SCM CA issued certificates.
- // In summary:
- // authentication from server to client is via TLS.
- // authentication from client to server is via block token (or container token).
- static GrpcTlsConfig createTlsServerConfigForDN(SecurityConfig conf,
- CertificateClient caClient) {
- if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) {
- return new GrpcTlsConfig(
- caClient.getPrivateKey(), caClient.getCertificate(),
- null, false);
- }
- return null;
- }
-
- static RetryPolicy createRetryPolicy(Configuration conf) {
- int maxRetryCount =
- conf.getInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY,
- OzoneConfigKeys.
- DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT);
- long retryInterval = conf.getTimeDuration(OzoneConfigKeys.
- DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, OzoneConfigKeys.
- DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT
- .toIntExact(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
- TimeDuration sleepDuration =
- TimeDuration.valueOf(retryInterval, TimeUnit.MILLISECONDS);
- RetryPolicy retryPolicy = RetryPolicies
- .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration);
- return retryPolicy;
- }
-
- static Long getMinReplicatedIndex(
- Collection<RaftProtos.CommitInfoProto> commitInfos) {
- return commitInfos.stream().map(RaftProtos.CommitInfoProto::getCommitIndex)
- .min(Long::compareTo).orElse(null);
- }
-
- static ByteString int2ByteString(int n) {
- final ByteString.Output out = ByteString.newOutput();
- try(DataOutputStream dataOut = new DataOutputStream(out)) {
- dataOut.writeInt(n);
- } catch (IOException e) {
- throw new IllegalStateException(
- "Failed to write integer n = " + n + " to a ByteString.", e);
- }
- return out.toByteString();
- }
-}
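
The int2ByteString helper above has no decoder in this file. Purely as an illustration, the sketch below pairs it with a hypothetical byteString2Int and round-trips a value; it assumes the shaded Ratis protobuf ByteString that RatisHelper already uses, and the class name is made up.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

public final class Int2ByteStringSketch {
  private Int2ByteStringSketch() { }

  // Same encoding as RatisHelper#int2ByteString: 4 bytes, big-endian.
  static ByteString int2ByteString(int n) {
    final ByteString.Output out = ByteString.newOutput();
    try (DataOutputStream dataOut = new DataOutputStream(out)) {
      dataOut.writeInt(n);
    } catch (IOException e) {
      throw new IllegalStateException("Failed to write " + n, e);
    }
    return out.toByteString();
  }

  // Hypothetical inverse, not part of RatisHelper.
  static int byteString2Int(ByteString bytes) throws IOException {
    try (DataInputStream in = new DataInputStream(bytes.newInput())) {
      return in.readInt();
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(byteString2Int(int2ByteString(42))); // prints 42
  }
}
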
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java
deleted file mode 100644
index e52dc7ffc70bb..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.ratis;
-
-/**
- * This package contains classes related to Apache Ratis.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
deleted file mode 100644
index 4608df7612287..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations;
-
-import java.nio.ByteBuffer;
-import java.util.function.Function;
-
-/**
- * Helper class to create a conversion function from ByteBuffer to ByteString
- * based on the property
- * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED} in the
- * Ozone configuration.
- */
-public final class ByteStringConversion {
- private ByteStringConversion(){} // no instantiation.
-
- /**
- * Creates the conversion function to be used to convert ByteBuffers to
- * ByteString instances to be used in protobuf messages.
- *
- * @param config the Ozone configuration
- * @return the conversion function defined by
- * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED}
- * @see java.nio.ByteBuffer
- */
- public static Function<ByteBuffer, ByteString> createByteBufferConversion(
- Configuration config) {
- boolean unsafeEnabled =
- config != null && config.getBoolean(
- OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
- OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT);
- if (unsafeEnabled) {
- return buffer -> UnsafeByteOperations.unsafeWrap(buffer);
- } else {
- return buffer -> {
- ByteString retval = ByteString.copyFrom(buffer);
- buffer.flip();
- return retval;
- };
- }
- }
-}
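
For context, a minimal usage sketch of the factory above (not part of the deleted file). It assumes hadoop-common and the shaded Ratis protobuf are on the classpath and toggles the behaviour through the OzoneConfigKeys constant referenced in the code; the class name is made up.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.function.Function;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ByteStringConversion;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

public final class ByteStringConversionSketch {
  private ByteStringConversionSketch() { }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // false -> defensive copy; true -> zero-copy unsafe wrap.
    conf.setBoolean(OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, false);

    Function<ByteBuffer, ByteString> convert =
        ByteStringConversion.createByteBufferConversion(conf);

    ByteBuffer buffer =
        ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
    ByteString bytes = convert.apply(buffer);
    System.out.println(bytes.size()); // 5
  }
}
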
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
deleted file mode 100644
index 161780668ab0c..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.ratis.proto.RaftProtos.ReplicationLevel;
-import org.apache.ratis.util.TimeDuration;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * This class contains constants for configuration keys used in SCM.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class ScmConfigKeys {
-
- // Location of SCM DB files. For now we just support a single
- // metadata dir, but in the future we may support multiple dirs for
- // redundancy or performance.
- public static final String OZONE_SCM_DB_DIRS = "ozone.scm.db.dirs";
-
- public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
- = "dfs.container.ratis.enabled";
- public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
- = false;
- public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
- = "dfs.container.ratis.rpc.type";
- public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
- = "GRPC";
- public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
- = "dfs.container.ratis.num.write.chunk.threads";
- public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
- = 60;
- public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
- = "dfs.container.ratis.replication.level";
- public static final ReplicationLevel
- DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY;
- public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
- = "dfs.container.ratis.num.container.op.executors";
- public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
- = 10;
- public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
- "dfs.container.ratis.segment.size";
- public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
- "1MB";
- public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
- "dfs.container.ratis.segment.preallocated.size";
- public static final String
- DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "16KB";
- public static final String
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
- "dfs.container.ratis.statemachinedata.sync.timeout";
- public static final TimeDuration
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
- TimeDuration.valueOf(10, TimeUnit.SECONDS);
- public static final String
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
- "dfs.container.ratis.statemachinedata.sync.retries";
- public static final int
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1;
- public static final String
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS =
- "dfs.container.ratis.statemachine.max.pending.apply-transactions";
- // The default value of the maximum number of pending state machine apply
- // transactions is kept the same as the default snapshot threshold.
- public static final int
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT =
- 100000;
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
- "dfs.container.ratis.log.queue.num-elements";
- public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
- 1024;
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
- "dfs.container.ratis.log.queue.byte-limit";
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT =
- "4GB";
- public static final String
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
- "dfs.container.ratis.log.appender.queue.num-elements";
- public static final int
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1;
- public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT =
- "dfs.container.ratis.log.appender.queue.byte-limit";
- public static final String
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
- public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP =
- "dfs.container.ratis.log.purge.gap";
- // TODO: Set to 1024 once RATIS issue around purge is fixed.
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
- 1000000;
-
- public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS =
- "dfs.container.ratis.leader.num.pending.requests";
- public static final int
- DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT = 4096;
- // expiry interval for stateMachineData cache entries inside ContainerStateMachine
- public static final String
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =
- "dfs.container.ratis.statemachine.cache.expiry.interval";
- public static final String
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT =
- "10s";
- public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
- "dfs.ratis.client.request.timeout.duration";
- public static final TimeDuration
- DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
- TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
- public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
- "dfs.ratis.client.request.max.retries";
- public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = 180;
- public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY =
- "dfs.ratis.client.request.retry.interval";
- public static final TimeDuration
- DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT =
- TimeDuration.valueOf(1000, TimeUnit.MILLISECONDS);
- public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
- "dfs.ratis.server.retry-cache.timeout.duration";
- public static final TimeDuration
- DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
- TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
- public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
- "dfs.ratis.server.request.timeout.duration";
- public static final TimeDuration
- DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
- TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
- public static final String
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
- "dfs.ratis.leader.election.minimum.timeout.duration";
- public static final TimeDuration
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
- TimeDuration.valueOf(5, TimeUnit.SECONDS);
-
- public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
- "dfs.ratis.snapshot.threshold";
- public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
-
- public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
- "dfs.ratis.server.failure.duration";
- public static final TimeDuration
- DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
- TimeDuration.valueOf(120, TimeUnit.SECONDS);
-
- // TODO : this is copied from OzoneConsts, may need to move to a better place
- public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
- // 16 MB by default
- public static final String OZONE_SCM_CHUNK_SIZE_DEFAULT = "16MB";
-
- public static final String OZONE_SCM_CLIENT_PORT_KEY =
- "ozone.scm.client.port";
- public static final int OZONE_SCM_CLIENT_PORT_DEFAULT = 9860;
-
- public static final String OZONE_SCM_DATANODE_PORT_KEY =
- "ozone.scm.datanode.port";
- public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861;
-
- // OZONE_OM_PORT_DEFAULT = 9862
- public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY =
- "ozone.scm.block.client.port";
- public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863;
-
- public static final String OZONE_SCM_SECURITY_SERVICE_PORT_KEY =
- "ozone.scm.security.service.port";
- public static final int OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT = 9961;
-
- // Container service client
- public static final String OZONE_SCM_CLIENT_ADDRESS_KEY =
- "ozone.scm.client.address";
- public static final String OZONE_SCM_CLIENT_BIND_HOST_KEY =
- "ozone.scm.client.bind.host";
- public static final String OZONE_SCM_CLIENT_BIND_HOST_DEFAULT =
- "0.0.0.0";
-
- // Block service client
- public static final String OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY =
- "ozone.scm.block.client.address";
- public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY =
- "ozone.scm.block.client.bind.host";
- public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT =
- "0.0.0.0";
-
- // SCM Security service address.
- public static final String OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY =
- "ozone.scm.security.service.address";
- public static final String OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY =
- "ozone.scm.security.service.bind.host";
- public static final String OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT =
- "0.0.0.0";
-
- public static final String OZONE_SCM_DATANODE_ADDRESS_KEY =
- "ozone.scm.datanode.address";
- public static final String OZONE_SCM_DATANODE_BIND_HOST_KEY =
- "ozone.scm.datanode.bind.host";
- public static final String OZONE_SCM_DATANODE_BIND_HOST_DEFAULT =
- "0.0.0.0";
-
- public static final String OZONE_SCM_HTTP_ENABLED_KEY =
- "ozone.scm.http.enabled";
- public static final String OZONE_SCM_HTTP_BIND_HOST_KEY =
- "ozone.scm.http-bind-host";
- public static final String OZONE_SCM_HTTPS_BIND_HOST_KEY =
- "ozone.scm.https-bind-host";
- public static final String OZONE_SCM_HTTP_ADDRESS_KEY =
- "ozone.scm.http-address";
- public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
- "ozone.scm.https-address";
- public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
- "hdds.scm.kerberos.keytab.file";
- public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY =
- "hdds.scm.kerberos.principal";
- public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
- public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
- public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
-
- public static final String HDDS_REST_HTTP_ADDRESS_KEY =
- "hdds.rest.http-address";
- public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
- public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir";
- public static final String HDDS_REST_CSRF_ENABLED_KEY =
- "hdds.rest.rest-csrf.enabled";
- public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false;
- public static final String HDDS_REST_NETTY_HIGH_WATERMARK =
- "hdds.rest.netty.high.watermark";
- public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536;
- public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768;
- public static final String HDDS_REST_NETTY_LOW_WATERMARK =
- "hdds.rest.netty.low.watermark";
-
- public static final String OZONE_SCM_HANDLER_COUNT_KEY =
- "ozone.scm.handler.count.key";
- public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
-
- public static final String OZONE_SCM_SECURITY_HANDLER_COUNT_KEY =
- "ozone.scm.security.handler.count.key";
- public static final int OZONE_SCM_SECURITY_HANDLER_COUNT_DEFAULT = 2;
-
- public static final String OZONE_SCM_DEADNODE_INTERVAL =
- "ozone.scm.dead.node.interval";
- public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
- "10m";
-
- public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
- "ozone.scm.heartbeat.thread.interval";
- public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =
- "3s";
-
- public static final String OZONE_SCM_STALENODE_INTERVAL =
- "ozone.scm.stale.node.interval";
- public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
- "5m";
-
- public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
- "ozone.scm.heartbeat.rpc-timeout";
- public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT =
- "1s";
-
- /**
- * Defines how frequently we log a missed heartbeat to a specific SCM.
- * By default, a warning message is written for every 10 consecutive
- * heartbeats missed to a specific SCM. This avoids overrunning the log
- * with a large number of missed-heartbeat statements.
- */
- public static final String OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT =
- "ozone.scm.heartbeat.log.warn.interval.count";
- public static final int OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT =
- 10;
-
- // The ozone.scm.names key is a set of DNS | DNS:PORT | IP Address | IP:PORT
- // entries, written as a comma-separated string, e.g. scm1, scm2:8020, 7.7.7.7:7777.
- //
- // If this key is not specified, datanodes will not be able to find
- // SCM. The SCM membership can be dynamic, so this key should contain
- // all possible SCM names. Once the SCM leader is discovered, datanodes will
- // get the right list of SCMs to heartbeat to from the leader.
- // While it is good for the datanodes to know the names of all SCM nodes,
- // it is sufficient to know the name of one working SCM. That SCM
- // will be able to return the information about the other SCMs that are
- // part of the SCM replicated log.
- //
- // In case of a membership change, any one of the SCM machines will be
- // able to send back a new list to the datanodes.
- public static final String OZONE_SCM_NAMES = "ozone.scm.names";
-
- public static final int OZONE_SCM_DEFAULT_PORT =
- OZONE_SCM_DATANODE_PORT_DEFAULT;
- // The path where the datanode ID is to be written.
- // If this value is not set, container startup will fail.
- public static final String OZONE_SCM_DATANODE_ID_DIR =
- "ozone.scm.datanode.id.dir";
-
- public static final String OZONE_SCM_DB_CACHE_SIZE_MB =
- "ozone.scm.db.cache.size.mb";
- public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;
-
- public static final String OZONE_SCM_CONTAINER_SIZE =
- "ozone.scm.container.size";
- public static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB";
-
- public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
- "ozone.scm.container.placement.impl";
-
- public static final String OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT =
- "ozone.scm.pipeline.owner.container.count";
- public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3;
-
- public static final String
- OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY =
- "ozone.scm.keyvalue.container.deletion-choosing.policy";
-
- public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT =
- "ozone.scm.container.creation.lease.timeout";
-
- public static final String
- OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
-
- public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT =
- "ozone.scm.pipeline.destroy.timeout";
-
- public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT =
- "66s";
-
- public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL =
- "ozone.scm.pipeline.creation.interval";
- public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL_DEFAULT =
- "120s";
-
- public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
- "ozone.scm.block.deletion.max.retry";
- public static final int OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT = 4096;
-
- public static final String HDDS_SCM_WATCHER_TIMEOUT =
- "hdds.scm.watcher.timeout";
-
- public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
- "10m";
-
- public static final String
- HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY =
- "hdds.scm.http.kerberos.principal";
- public static final String
- HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY =
- "hdds.scm.http.kerberos.keytab";
-
- // Network topology
- public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE =
- "ozone.scm.network.topology.schema.file";
- public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT =
- "network-topology-default.xml";
-
- public static final String HDDS_TRACING_ENABLED = "hdds.tracing.enabled";
- public static final boolean HDDS_TRACING_ENABLED_DEFAULT = true;
-
- /**
- * Never constructed.
- */
- private ScmConfigKeys() {
-
- }
-}
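
These constants are only key names and defaults. The short, illustrative sketch below shows how callers typically resolve them against a Hadoop Configuration; the Configuration#getTimeDuration overload taking a String default is assumed to be available, as it is used elsewhere in this code base, and the class name is made up.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class ScmConfigKeysSketch {
  private ScmConfigKeysSketch() { }

  public static void main(String[] args) {
    Configuration conf = new Configuration();

    boolean ratisEnabled = conf.getBoolean(
        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);

    // Time-valued keys keep a human-readable default ("5m") and are read
    // back in whatever unit the caller needs.
    long staleNodeIntervalMs = conf.getTimeDuration(
        ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL,
        ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL_DEFAULT,
        TimeUnit.MILLISECONDS);

    System.out.println("ratis enabled: " + ratisEnabled
        + ", stale node interval ms: " + staleNodeIntervalMs);
  }
}
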
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
deleted file mode 100644
index 6236febb7b120..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-/**
- * ScmInfo wraps the result returned from SCM#getScmInfo which
- * contains clusterId and the SCM Id.
- */
-public final class ScmInfo {
- private String clusterId;
- private String scmId;
-
- /**
- * Builder for ScmInfo.
- */
- public static class Builder {
- private String clusterId;
- private String scmId;
-
- /**
- * sets the cluster id.
- * @param cid clusterId to be set
- * @return Builder for ScmInfo
- */
- public Builder setClusterId(String cid) {
- this.clusterId = cid;
- return this;
- }
-
- /**
- * sets the scmId.
- * @param id scmId
- * @return Builder for scmInfo
- */
- public Builder setScmId(String id) {
- this.scmId = id;
- return this;
- }
-
- public ScmInfo build() {
- return new ScmInfo(clusterId, scmId);
- }
- }
-
- private ScmInfo(String clusterId, String scmId) {
- this.clusterId = clusterId;
- this.scmId = scmId;
- }
-
- /**
- * Gets the clusterId from the Version file.
- * @return ClusterId
- */
- public String getClusterId() {
- return clusterId;
- }
-
- /**
- * Gets the SCM Id from the Version file.
- * @return SCM Id
- */
- public String getScmId() {
- return scmId;
- }
-}
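
A small usage sketch of the Builder above; the cluster and SCM ids are made-up placeholders and the class name is hypothetical.

import org.apache.hadoop.hdds.scm.ScmInfo;

public final class ScmInfoSketch {
  private ScmInfoSketch() { }

  public static void main(String[] args) {
    ScmInfo info = new ScmInfo.Builder()
        .setClusterId("CID-example-cluster")   // placeholder cluster id
        .setScmId("SCM-example-node")          // placeholder SCM id
        .build();
    System.out.println(info.getClusterId() + " / " + info.getScmId());
  }
}
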
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java
deleted file mode 100644
index bae0758fddb8b..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .ContainerCommandResponseProto;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * This class represents the reply from XceiverClient.
- */
-public class XceiverClientReply {
-
- private CompletableFuture<ContainerCommandResponseProto> response;
- private Long logIndex;
-
- /**
- * List of datanodes where the command was executed and a reply was received.
- * If the reply contains an exception, this list identifies the servers
- * where the failure occurred.
- */
- private List<DatanodeDetails> datanodes;
-
- public XceiverClientReply(
- CompletableFuture<ContainerCommandResponseProto> response) {
- this(response, null);
- }
-
- public XceiverClientReply(
- CompletableFuture<ContainerCommandResponseProto> response,
- List<DatanodeDetails> datanodes) {
- this.logIndex = 0L;
- this.response = response;
- this.datanodes = datanodes == null ? new ArrayList<>() : datanodes;
- }
-
- public CompletableFuture<ContainerCommandResponseProto> getResponse() {
- return response;
- }
-
- public long getLogIndex() {
- return logIndex;
- }
-
- public void setLogIndex(Long logIndex) {
- this.logIndex = logIndex;
- }
-
- public List<DatanodeDetails> getDatanodes() {
- return datanodes;
- }
-
- public void addDatanode(DatanodeDetails dn) {
- datanodes.add(dn);
- }
-
- public void setResponse(
- CompletableFuture<ContainerCommandResponseProto> response) {
- this.response = response;
- }
-}
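
As a rough illustration of how a transport implementation might hand one of these back, the sketch below wires a reply to a future the transport would complete later; the log index value and class name are arbitrary.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.scm.XceiverClientReply;

public final class XceiverClientReplySketch {
  private XceiverClientReplySketch() { }

  public static void main(String[] args) {
    // The transport would complete this future when the datanode answers.
    CompletableFuture<ContainerCommandResponseProto> future =
        new CompletableFuture<>();

    XceiverClientReply reply = new XceiverClientReply(future);
    reply.setLogIndex(42L); // arbitrary Raft log index for the request

    // Callers block (or compose) on the same future via getResponse().
    System.out.println("completed yet? " + reply.getResponse().isDone());
  }
}
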
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
deleted file mode 100644
index 5631badf44c93..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
-
-/**
- * A Client for the storageContainer protocol.
- */
-public abstract class XceiverClientSpi implements Closeable {
-
- private final AtomicInteger referenceCount;
- private boolean isEvicted;
-
- XceiverClientSpi() {
- this.referenceCount = new AtomicInteger(0);
- this.isEvicted = false;
- }
-
- void incrementReference() {
- this.referenceCount.incrementAndGet();
- }
-
- void decrementReference() {
- this.referenceCount.decrementAndGet();
- cleanup();
- }
-
- void setEvicted() {
- isEvicted = true;
- cleanup();
- }
-
- // Close the xceiverClient only if:
- // 1) there is no refcount on the client, and
- // 2) it has been evicted from the cache.
- private void cleanup() {
- if (referenceCount.get() == 0 && isEvicted) {
- close();
- }
- }
-
- @VisibleForTesting
- public int getRefcount() {
- return referenceCount.get();
- }
-
- /**
- * Connects to the leader in the pipeline.
- */
- public abstract void connect() throws Exception;
-
- /**
- * Connects to the leader in the pipeline using encoded token. To be used
- * in a secure cluster.
- */
- public abstract void connect(String encodedToken) throws Exception;
-
- @Override
- public abstract void close();
-
- /**
- * Returns the pipeline of machines that host the container used by this
- * client.
- *
- * @return pipeline of machines that host the container
- */
- public abstract Pipeline getPipeline();
-
- /**
- * Sends a given command to the server and gets the reply back.
- * @param request Request
- * @return Response to the command
- * @throws IOException
- */
- public ContainerCommandResponseProto sendCommand(
- ContainerCommandRequestProto request) throws IOException {
- try {
- XceiverClientReply reply;
- reply = sendCommandAsync(request);
- ContainerCommandResponseProto responseProto = reply.getResponse().get();
- return responseProto;
- } catch (ExecutionException | InterruptedException e) {
- throw new IOException("Failed to execute command " + request, e);
- }
- }
-
- /**
- * Sends a given command to the server and gets the reply back along with
- * the associated server info.
- * @param request Request
- * @param validators functions to validate the response
- * @return Response to the command
- * @throws IOException
- */
- public ContainerCommandResponseProto sendCommand(
- ContainerCommandRequestProto request, List<CheckedBiFunction> validators)
- throws IOException {
- try {
- XceiverClientReply reply;
- reply = sendCommandAsync(request);
- ContainerCommandResponseProto responseProto = reply.getResponse().get();
- for (CheckedBiFunction function : validators) {
- function.apply(request, responseProto);
- }
- return responseProto;
- } catch (ExecutionException | InterruptedException e) {
- throw new IOException("Failed to execute command " + request, e);
- }
- }
-
- /**
- * Sends a given command to the server and gets a waitable future back.
- *
- * @param request Request
- * @return Response to the command
- * @throws IOException
- */
- public abstract XceiverClientReply
- sendCommandAsync(ContainerCommandRequestProto request)
- throws IOException, ExecutionException, InterruptedException;
-
- /**
- * Returns pipeline Type.
- *
- * @return - {Stand_Alone, Ratis or Chained}
- */
- public abstract HddsProtos.ReplicationType getPipelineType();
-
- /**
- * Checks if a specific commitIndex is replicated to majority/all servers.
- * @param index index to watch for
- * @param timeout timeout provided for the watch operation to complete
- * @return reply containing the min commit index replicated to all or majority
- * servers in case of a failure
- * @throws InterruptedException
- * @throws ExecutionException
- * @throws TimeoutException
- * @throws IOException
- */
- public abstract XceiverClientReply watchForCommit(long index, long timeout)
- throws InterruptedException, ExecutionException, TimeoutException,
- IOException;
-
- /**
- * returns the min commit index replicated to all servers.
- * @return min commit index replicated to all servers.
- */
- public abstract long getReplicatedMinCommitIndex();
-}
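
Since XceiverClientSpi is abstract, a hypothetical test stub may help show the contract: it acknowledges every request with SUCCESS and elides pipeline handling entirely. This is not how the real gRPC/Ratis clients behave, and the class name is made up; the package declaration is needed because the base constructor is package-private.

package org.apache.hadoop.hdds.scm;

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

/** Hypothetical stub that acknowledges every request with SUCCESS. */
public class EchoXceiverClient extends XceiverClientSpi {

  @Override
  public void connect() {
    // nothing to connect to in the stub
  }

  @Override
  public void connect(String encodedToken) {
    // token ignored in the stub
  }

  @Override
  public void close() {
  }

  @Override
  public Pipeline getPipeline() {
    return null; // pipeline handling elided in the stub
  }

  @Override
  public XceiverClientReply sendCommandAsync(
      ContainerCommandRequestProto request) {
    ContainerCommandResponseProto response =
        ContainerCommandResponseProto.newBuilder()
            .setCmdType(request.getCmdType())
            .setTraceID(request.getTraceID())
            .setResult(Result.SUCCESS)
            .build();
    return new XceiverClientReply(
        CompletableFuture.completedFuture(response));
  }

  @Override
  public HddsProtos.ReplicationType getPipelineType() {
    return HddsProtos.ReplicationType.STAND_ALONE;
  }

  @Override
  public XceiverClientReply watchForCommit(long index, long timeout) {
    return new XceiverClientReply(CompletableFuture.completedFuture(null));
  }

  @Override
  public long getReplicatedMinCommitIndex() {
    return 0;
  }
}
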
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
deleted file mode 100644
index 226ceda9255ad..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.client;
-
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .ContainerDataProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * The interface to call into underlying container layer.
- *
- * Written as an interface to allow easy testing: implement a mock container
- * layer for standalone testing of the CBlock API without actually calling into
- * remote containers. The actual container layer can simply re-implement this.
- *
- * NOTE: this is a temporarily needed class. When SCM containers are full-fledged,
- * this interface will likely be removed.
- */
-@InterfaceStability.Unstable
-public interface ScmClient extends Closeable {
- /**
- * Creates a container on SCM and returns it together with its pipeline.
- * @return ContainerWithPipeline
- * @throws IOException
- */
- ContainerWithPipeline createContainer(String owner) throws IOException;
-
- /**
- * Gets a container by ID -- throws if the container does not exist.
- * @param containerId - Container ID
- * @return ContainerInfo
- * @throws IOException
- */
- ContainerInfo getContainer(long containerId) throws IOException;
-
- /**
- * Gets a container by ID -- throws if the container does not exist.
- * @param containerId - Container ID
- * @return ContainerWithPipeline
- * @throws IOException
- */
- ContainerWithPipeline getContainerWithPipeline(long containerId)
- throws IOException;
-
- /**
- * Close a container.
- *
- * @param containerId - ID of the container.
- * @param pipeline - Pipeline where the container is located.
- * @throws IOException
- */
- void closeContainer(long containerId, Pipeline pipeline) throws IOException;
-
- /**
- * Close a container.
- *
- * @param containerId - ID of the container.
- * @throws IOException
- */
- void closeContainer(long containerId) throws IOException;
-
- /**
- * Deletes an existing container.
- * @param containerId - ID of the container.
- * @param pipeline - Pipeline that represents the container.
- * @param force - true to forcibly delete the container.
- * @throws IOException
- */
- void deleteContainer(long containerId, Pipeline pipeline, boolean force)
- throws IOException;
-
- /**
- * Deletes an existing container.
- * @param containerId - ID of the container.
- * @param force - true to forcibly delete the container.
- * @throws IOException
- */
- void deleteContainer(long containerId, boolean force) throws IOException;
-
- /**
- * Lists a range of containers and get their info.
- *
- * @param startContainerID start containerID.
- * @param count count must be {@literal >} 0.
- *
- * @return a list of container info.
- * @throws IOException
- */
- List<ContainerInfo> listContainer(long startContainerID,
- int count) throws IOException;
-
- /**
- * Read meta data from an existing container.
- * @param containerID - ID of the container.
- * @param pipeline - Pipeline where the container is located.
- * @return ContainerDataProto
- * @throws IOException
- */
- ContainerDataProto readContainer(long containerID, Pipeline pipeline)
- throws IOException;
-
- /**
- * Read meta data from an existing container.
- * @param containerID - ID of the container.
- * @return ContainerDataProto
- * @throws IOException
- */
- ContainerDataProto readContainer(long containerID)
- throws IOException;
-
- /**
- * Gets the container size -- Computed by SCM from Container Reports.
- * @param containerID - ID of the container.
- * @return number of bytes used by this container.
- * @throws IOException
- */
- long getContainerSize(long containerID) throws IOException;
-
- /**
- * Creates a container on SCM and returns it together with its pipeline.
- * @param type - Replication Type.
- * @param replicationFactor - Replication Factor
- * @return ContainerWithPipeline
- * @throws IOException - in case of error.
- */
- ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
- HddsProtos.ReplicationFactor replicationFactor,
- String owner) throws IOException;
-
- /**
- * Returns a set of Nodes that meet a query criteria.
- * @param nodeStatuses - Criteria that we want the node to have.
- * @param queryScope - Query scope - Cluster or pool.
- * @param poolName - if the query scope is pool, a pool name is required.
- * @return A set of nodes that meet the requested criteria.
- * @throws IOException
- */
- List<HddsProtos.Node> queryNode(HddsProtos.NodeState nodeStatuses,
- HddsProtos.QueryScope queryScope, String poolName) throws IOException;
-
- /**
- * Creates a specified replication pipeline.
- * @param type - Type
- * @param factor - Replication factor
- * @param nodePool - Set of machines.
- * @throws IOException
- */
- Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
- HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
- throws IOException;
-
- /**
- * Returns the list of active Pipelines.
- *
- * @return list of Pipeline
- * @throws IOException in case of any exception
- */
- List<Pipeline> listPipelines() throws IOException;
-
- /**
- * Activates the pipeline given a pipeline ID.
- *
- * @param pipelineID PipelineID to activate.
- * @throws IOException In case of exception while activating the pipeline
- */
- void activatePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
- /**
- * Deactivates the pipeline given a pipeline ID.
- *
- * @param pipelineID PipelineID to deactivate.
- * @throws IOException In case of exception while deactivating the pipeline
- */
- void deactivatePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
- /**
- * Closes the pipeline given a pipeline ID.
- *
- * @param pipelineID PipelineID to close.
- * @throws IOException In case of exception while closing the pipeline
- */
- void closePipeline(HddsProtos.PipelineID pipelineID) throws IOException;
-
- /**
- * Check if SCM is in safe mode.
- *
- * @return true if SCM is in safe mode, false otherwise.
- * @throws IOException
- */
- boolean inSafeMode() throws IOException;
-
- /**
- * Force SCM out of safe mode.
- *
- * @return true if the operation is successful.
- * @throws IOException
- */
- boolean forceExitSafeMode() throws IOException;
-
- /**
- * Start ReplicationManager.
- */
- void startReplicationManager() throws IOException;
-
- /**
- * Stop ReplicationManager.
- */
- void stopReplicationManager() throws IOException;
-
- /**
- * Returns ReplicationManager status.
- *
- * @return True if ReplicationManager is running, false otherwise.
- */
- boolean getReplicationManagerStatus() throws IOException;
-
-
-}
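
A usage sketch against the interface only; how the concrete client is obtained (for example the ContainerOperationClient used by the CLI) is out of scope here, and the start id, count, and class name are arbitrary.

import java.io.IOException;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;

public final class ScmClientUsageSketch {
  private ScmClientUsageSketch() { }

  // Prints the first few containers known to SCM.
  static void printContainers(ScmClient scm) throws IOException {
    for (ContainerInfo info : scm.listContainer(0L, 10)) {
      System.out.println(info.getContainerID() + " -> " + info.getState());
    }
  }
}
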
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
deleted file mode 100644
index e2f7033d7fa61..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.client;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java
deleted file mode 100644
index 9d37dfb1f3350..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import java.io.IOException;
-
-/**
- * Signals that a container exception of some sort has occurred. This is the
- * parent of all exceptions thrown by ContainerManager.
- */
-public class ContainerException extends IOException {
-
- /**
- * Constructs a {@code ContainerException} with {@code null}
- * as its error detail message.
- */
- public ContainerException() {
- super();
- }
-
- /**
- * Constructs a {@code ContainerException} with the specified detail message.
- *
- * @param message
- * The detail message (which is saved for later retrieval
- * by the {@link #getMessage()} method)
- */
- public ContainerException(String message) {
- super(message);
- }
-}
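
ContainerException is the common parent; the hypothetical subclass below simply mirrors the pattern used by the SCM-specific exceptions (such as ContainerNotFoundException in the next file) and is not part of the HDDS code base.

import org.apache.hadoop.hdds.scm.container.ContainerException;

/** Hypothetical example subclass, for illustration only. */
public class ContainerQuotaExceededException extends ContainerException {
  public ContainerQuotaExceededException(String message) {
    super(message);
  }
}
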
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
deleted file mode 100644
index bb44da4e78e58..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.commons.lang3.builder.CompareToBuilder;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-
-/**
- * Container ID is a numeric value in the range 1..MAX_CONTAINER_ID.
- *
- * We are creating a specific type for this to avoid mixing this with
- * normal integers in code.
- */
-public final class ContainerID implements Comparable<ContainerID> {
-
- private final long id;
-
- // TODO: make this private.
- /**
- * Constructs ContainerID.
- *
- * @param id long
- */
- public ContainerID(long id) {
- this.id = id;
- }
-
- /**
- * Factory method for creation of ContainerID.
- * @param containerID long
- * @return ContainerID.
- */
- public static ContainerID valueof(final long containerID) {
- Preconditions.checkState(containerID > 0,
- "Container ID should be a positive long. "+ containerID);
- return new ContainerID(containerID);
- }
-
- /**
- * Returns the long value of the ID.
- *
- * @return long
- */
- public long getId() {
- return id;
- }
-
- public byte[] getBytes() {
- return Longs.toByteArray(id);
- }
-
- @Override
- public boolean equals(final Object o) {
- if (this == o) {
- return true;
- }
-
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- final ContainerID that = (ContainerID) o;
-
- return new EqualsBuilder()
- .append(getId(), that.getId())
- .isEquals();
- }
-
- @Override
- public int hashCode() {
- return new HashCodeBuilder(61, 71)
- .append(getId())
- .toHashCode();
- }
-
- @Override
- public int compareTo(final ContainerID that) {
- Preconditions.checkNotNull(that);
- return new CompareToBuilder()
- .append(this.getId(), that.getId())
- .build();
- }
-
- @Override
- public String toString() {
- return "#" + id;
- }
-}
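
A small sketch of ContainerID as a value type: with the equals/hashCode/compareTo implementations above it works directly as a sorted-map key. The owner strings and class name are arbitrary.

import java.util.TreeMap;
import org.apache.hadoop.hdds.scm.container.ContainerID;

public final class ContainerIdSketch {
  private ContainerIdSketch() { }

  public static void main(String[] args) {
    TreeMap<ContainerID, String> owners = new TreeMap<>();  // ordered by id
    owners.put(ContainerID.valueof(2L), "spark");
    owners.put(ContainerID.valueof(1L), "hive");

    // Equal ids are equal keys, regardless of which instance looks them up.
    System.out.println(owners.get(ContainerID.valueof(1L))); // hive
    System.out.println(owners.firstKey());                   // #1
  }
}
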
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
deleted file mode 100644
index 5c58e92d3c5d9..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import static java.lang.Math.max;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.base.Preconditions;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Arrays;
-import java.util.Comparator;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.util.Time;
-
-/**
- * Class wraps ozone container info.
- */
-public class ContainerInfo implements Comparator<ContainerInfo>,
- Comparable<ContainerInfo>, Externalizable {
-
- private static final ObjectWriter WRITER;
- private static final String SERIALIZATION_ERROR_MSG = "Java serialization not"
- + " supported. Use protobuf instead.";
-
- static {
- ObjectMapper mapper = new ObjectMapper();
- mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
- mapper
- .setVisibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE);
- WRITER = mapper.writerWithDefaultPrettyPrinter();
- }
-
- private HddsProtos.LifeCycleState state;
- @JsonIgnore
- private PipelineID pipelineID;
- private ReplicationFactor replicationFactor;
- private ReplicationType replicationType;
- private long usedBytes;
- private long numberOfKeys;
- private long lastUsed;
- // The wall-clock time in ms since the epoch at which the container entered the current state.
- private long stateEnterTime;
- private String owner;
- private long containerID;
- private long deleteTransactionId;
- // The sequenceId of a closed container cannot change, and all the
- // container replicas should have the same sequenceId.
- private long sequenceId;
-
- /**
- * Allows private data to be maintained on ContainerInfo. This is not
- * serialized via protobuf; it just lets callers attach some private data.
- */
- @JsonIgnore
- private byte[] data;
-
- @SuppressWarnings("parameternumber")
- ContainerInfo(
- long containerID,
- HddsProtos.LifeCycleState state,
- PipelineID pipelineID,
- long usedBytes,
- long numberOfKeys,
- long stateEnterTime,
- String owner,
- long deleteTransactionId,
- long sequenceId,
- ReplicationFactor replicationFactor,
- ReplicationType repType) {
- this.containerID = containerID;
- this.pipelineID = pipelineID;
- this.usedBytes = usedBytes;
- this.numberOfKeys = numberOfKeys;
- this.lastUsed = Time.monotonicNow();
- this.state = state;
- this.stateEnterTime = stateEnterTime;
- this.owner = owner;
- this.deleteTransactionId = deleteTransactionId;
- this.sequenceId = sequenceId;
- this.replicationFactor = replicationFactor;
- this.replicationType = repType;
- }
-
- /**
- * No-arg constructor needed for serialization and findbugs.
- */
- public ContainerInfo() {
- }
-
- public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) {
- ContainerInfo.Builder builder = new ContainerInfo.Builder();
- return builder.setPipelineID(
- PipelineID.getFromProtobuf(info.getPipelineID()))
- .setUsedBytes(info.getUsedBytes())
- .setNumberOfKeys(info.getNumberOfKeys())
- .setState(info.getState())
- .setStateEnterTime(info.getStateEnterTime())
- .setOwner(info.getOwner())
- .setContainerID(info.getContainerID())
- .setDeleteTransactionId(info.getDeleteTransactionId())
- .setReplicationFactor(info.getReplicationFactor())
- .setReplicationType(info.getReplicationType())
- .build();
- }
-
- public long getContainerID() {
- return containerID;
- }
-
- public HddsProtos.LifeCycleState getState() {
- return state;
- }
-
- public void setState(HddsProtos.LifeCycleState state) {
- this.state = state;
- }
-
- public long getStateEnterTime() {
- return stateEnterTime;
- }
-
- public ReplicationFactor getReplicationFactor() {
- return replicationFactor;
- }
-
- public PipelineID getPipelineID() {
- return pipelineID;
- }
-
- public long getUsedBytes() {
- return usedBytes;
- }
-
- public void setUsedBytes(long value) {
- usedBytes = value;
- }
-
- public long getNumberOfKeys() {
- return numberOfKeys;
- }
-
- public void setNumberOfKeys(long value) {
- numberOfKeys = value;
- }
-
- public long getDeleteTransactionId() {
- return deleteTransactionId;
- }
-
- public long getSequenceId() {
- return sequenceId;
- }
-
- public void updateDeleteTransactionId(long transactionId) {
- deleteTransactionId = max(transactionId, deleteTransactionId);
- }
-
- public void updateSequenceId(long sequenceID) {
- assert (isOpen() || state == HddsProtos.LifeCycleState.QUASI_CLOSED);
- sequenceId = max(sequenceID, sequenceId);
- }
-
- public ContainerID containerID() {
- return new ContainerID(getContainerID());
- }
-
- /**
- * Gets the last used time from SCM's perspective.
- *
- * @return time in milliseconds.
- */
- public long getLastUsed() {
- return lastUsed;
- }
-
- public ReplicationType getReplicationType() {
- return replicationType;
- }
-
- public void updateLastUsedTime() {
- lastUsed = Time.monotonicNow();
- }
-
- public HddsProtos.ContainerInfoProto getProtobuf() {
- HddsProtos.ContainerInfoProto.Builder builder =
- HddsProtos.ContainerInfoProto.newBuilder();
- Preconditions.checkState(containerID > 0);
- return builder.setContainerID(getContainerID())
- .setUsedBytes(getUsedBytes())
- .setNumberOfKeys(getNumberOfKeys()).setState(getState())
- .setStateEnterTime(getStateEnterTime()).setContainerID(getContainerID())
- .setDeleteTransactionId(getDeleteTransactionId())
- .setPipelineID(getPipelineID().getProtobuf())
- .setReplicationFactor(getReplicationFactor())
- .setReplicationType(getReplicationType())
- .setOwner(getOwner())
- .build();
- }
-
- public String getOwner() {
- return owner;
- }
-
- public void setOwner(String owner) {
- this.owner = owner;
- }
-
- @Override
- public String toString() {
- return "ContainerInfo{"
- + "id=" + containerID
- + ", state=" + state
- + ", pipelineID=" + pipelineID
- + ", stateEnterTime=" + stateEnterTime
- + ", owner=" + owner
- + '}';
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
-
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- ContainerInfo that = (ContainerInfo) o;
-
- return new EqualsBuilder()
- .append(getContainerID(), that.getContainerID())
-
- // TODO : Fix this later. If we add these factors some tests fail.
- // So commenting this out to continue for now; it will be enforced with
- // changes in Pipeline where Container Name moves from Pipeline to
- // SCMContainerInfo.
- // .append(pipeline.getFactor(), that.pipeline.getFactor())
- // .append(pipeline.getType(), that.pipeline.getType())
- .append(owner, that.owner)
- .isEquals();
- }
-
- @Override
- public int hashCode() {
- return new HashCodeBuilder(11, 811)
- .append(getContainerID())
- .append(getOwner())
- .toHashCode();
- }
-
- /**
- * Compares its two arguments for order. Returns a negative integer, zero, or
- * a positive integer as the first argument is less than, equal to, or greater
- * than the second.
- *
- * @param o1 the first object to be compared.
- * @param o2 the second object to be compared.
- * @return a negative integer, zero, or a positive integer as the first
- * argument is less than, equal to, or greater than the second.
- * @throws NullPointerException if an argument is null and this comparator
- * does not permit null arguments
- * @throws ClassCastException if the arguments' types prevent them from
- * being compared by this comparator.
- */
- @Override
- public int compare(ContainerInfo o1, ContainerInfo o2) {
- return Long.compare(o1.getLastUsed(), o2.getLastUsed());
- }
-
- /**
- * Compares this object with the specified object for order. Returns a
- * negative integer, zero, or a positive integer as this object is less than,
- * equal to, or greater than the specified object.
- *
- * @param o the object to be compared.
- * @return a negative integer, zero, or a positive integer as this object is
- * less than, equal to, or greater than the specified object.
- * @throws NullPointerException if the specified object is null
- * @throws ClassCastException if the specified object's type prevents it
- * from being compared to this object.
- */
- @Override
- public int compareTo(ContainerInfo o) {
- return this.compare(this, o);
- }
-
- /**
- * Returns a JSON string of this object.
- *
- * @return String - json string
- * @throws IOException
- */
- public String toJsonString() throws IOException {
- return WRITER.writeValueAsString(this);
- }
-
- /**
- * Returns private data that is set on this containerInfo.
- *
- * @return blob, the user can interpret it any way they like.
- */
- public byte[] getData() {
- if (this.data != null) {
- return Arrays.copyOf(this.data, this.data.length);
- } else {
- return null;
- }
- }
-
- /**
- * Set private data on ContainerInfo object.
- *
- * @param data -- private data.
- */
- public void setData(byte[] data) {
- if (data != null) {
- this.data = Arrays.copyOf(data, data.length);
- }
- }
-
- /**
- * Throws IOException as default java serialization is not supported. Use
- * serialization via protobuf instead.
- *
- * @param out the stream to write the object to
- * @throws IOException Includes any I/O exceptions that may occur
- * @serialData Overriding methods should use this tag to describe
- * the data layout of this Externalizable object.
- * List the sequence of element types and, if possible,
- * relate the element to a public/protected field and/or
- * method of this Externalizable class.
- */
- @Override
- public void writeExternal(ObjectOutput out) throws IOException {
- throw new IOException(SERIALIZATION_ERROR_MSG);
- }
-
- /**
- * Throws IOException as default java serialization is not supported. Use
- * serialization via protobuf instead.
- *
- * @param in the stream to read data from in order to restore the object
- * @throws IOException if I/O errors occur
- * @throws ClassNotFoundException If the class for an object being
- * restored cannot be found.
- */
- @Override
- public void readExternal(ObjectInput in)
- throws IOException, ClassNotFoundException {
- throw new IOException(SERIALIZATION_ERROR_MSG);
- }
-
- /**
- * Builder class for ContainerInfo.
- */
- public static class Builder {
- private HddsProtos.LifeCycleState state;
- private long used;
- private long keys;
- private long stateEnterTime;
- private String owner;
- private long containerID;
- private long deleteTransactionId;
- private long sequenceId;
- private PipelineID pipelineID;
- private ReplicationFactor replicationFactor;
- private ReplicationType replicationType;
-
- public Builder setReplicationType(
- ReplicationType repType) {
- this.replicationType = repType;
- return this;
- }
-
- public Builder setPipelineID(PipelineID pipelineId) {
- this.pipelineID = pipelineId;
- return this;
- }
-
- public Builder setReplicationFactor(ReplicationFactor repFactor) {
- this.replicationFactor = repFactor;
- return this;
- }
-
- public Builder setContainerID(long id) {
- Preconditions.checkState(id >= 0);
- this.containerID = id;
- return this;
- }
-
- public Builder setState(HddsProtos.LifeCycleState lifeCycleState) {
- this.state = lifeCycleState;
- return this;
- }
-
- public Builder setUsedBytes(long bytesUsed) {
- this.used = bytesUsed;
- return this;
- }
-
- public Builder setNumberOfKeys(long keyCount) {
- this.keys = keyCount;
- return this;
- }
-
- public Builder setStateEnterTime(long time) {
- this.stateEnterTime = time;
- return this;
- }
-
- public Builder setOwner(String containerOwner) {
- this.owner = containerOwner;
- return this;
- }
-
- public Builder setDeleteTransactionId(long deleteTransactionID) {
- this.deleteTransactionId = deleteTransactionID;
- return this;
- }
-
- public Builder setSequenceId(long sequenceID) {
- this.sequenceId = sequenceID;
- return this;
- }
-
- public ContainerInfo build() {
- return new ContainerInfo(containerID, state, pipelineID,
- used, keys, stateEnterTime, owner, deleteTransactionId,
- sequenceId, replicationFactor, replicationType);
- }
- }
-
- /**
- * Checks if a container is in an open state, that is, either the OPEN or
- * the CLOSING state. Any container in these states is managed as an open
- * container by SCM.
- */
- public boolean isOpen() {
- return state == HddsProtos.LifeCycleState.OPEN
- || state == HddsProtos.LifeCycleState.CLOSING;
- }
-
-}
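For reference, a minimal sketch of how the ContainerInfo.Builder removed above was typically assembled. The container id, pipeline id and owner are caller-supplied placeholders, and the replication constants are illustrative HddsProtos values assumed for the example rather than anything taken from this patch:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

    final class ContainerInfoSketch {
      // Hypothetical helper: builds an OPEN container using only the setters
      // declared on the deleted Builder above.
      static ContainerInfo newOpenContainer(long containerId,
          PipelineID pipelineId, String owner) {
        return new ContainerInfo.Builder()
            .setContainerID(containerId)   // Builder checks id >= 0
            .setState(HddsProtos.LifeCycleState.OPEN)
            .setPipelineID(pipelineId)
            .setReplicationType(HddsProtos.ReplicationType.RATIS)
            .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
            .setUsedBytes(0)
            .setNumberOfKeys(0)
            .setStateEnterTime(System.currentTimeMillis())
            .setOwner(owner)
            .setDeleteTransactionId(0)
            .setSequenceId(0)
            .build();
      }
    }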
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java
deleted file mode 100644
index 3eebcce8403ce..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-/**
- * Signals that a container is missing from ContainerManager.
- */
-public class ContainerNotFoundException extends ContainerException {
-
- /**
- * Constructs a {@code ContainerNotFoundException} with {@code null}
- * as its error detail message.
- */
- public ContainerNotFoundException() {
- super();
- }
-
- /**
- * Constructs a {@code ContainerNotFoundException} with the specified
- * detail message.
- *
- * @param message
- * The detail message (which is saved for later retrieval
- * by the {@link #getMessage()} method)
- */
- public ContainerNotFoundException(String message) {
- super(message);
- }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java
deleted file mode 100644
index fdbc18b1191e8..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-/**
- * Signals that a ContainerReplica is missing from the Container in
- * ContainerManager.
- */
-public class ContainerReplicaNotFoundException extends ContainerException {
-
- /**
- * Constructs a {@code ContainerReplicaNotFoundException} with {@code null}
- * as its error detail message.
- */
- public ContainerReplicaNotFoundException() {
- super();
- }
-
- /**
- * Constructs a {@code ContainerReplicaNotFoundException} with the
- * specified detail message.
- *
- * @param message
- * The detail message (which is saved for later retrieval
- * by the {@link #getMessage()} method)
- */
- public ContainerReplicaNotFoundException(String message) {
- super(message);
- }
-}
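A hedged sketch of the usual caller pattern for the two not-found exceptions above; the Map used for the lookup is a stand-in for whatever structure ContainerManager actually keeps, not the real API:

    import java.util.Map;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;

    final class ContainerLookupSketch {
      // Throws the deleted exception type when the id is unknown, so callers
      // can react (for example by refreshing a cache) instead of seeing null.
      static ContainerInfo lookup(Map<Long, ContainerInfo> containers, long id)
          throws ContainerNotFoundException {
        ContainerInfo info = containers.get(id);
        if (info == null) {
          throw new ContainerNotFoundException("Container " + id + " not found");
        }
        return info;
      }
    }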
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
deleted file mode 100644
index 7ac0401af1174..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.client.ContainerBlockID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-
-/**
- * Allocated block wraps the result returned from SCM#allocateBlock which
- * contains a Pipeline and the key.
- */
-public final class AllocatedBlock {
- private Pipeline pipeline;
- private ContainerBlockID containerBlockID;
-
- /**
- * Builder for AllocatedBlock.
- */
- public static class Builder {
- private Pipeline pipeline;
- private ContainerBlockID containerBlockID;
-
- public Builder setPipeline(Pipeline p) {
- this.pipeline = p;
- return this;
- }
-
- public Builder setContainerBlockID(ContainerBlockID blockId) {
- this.containerBlockID = blockId;
- return this;
- }
-
- public AllocatedBlock build() {
- return new AllocatedBlock(pipeline, containerBlockID);
- }
- }
-
- private AllocatedBlock(Pipeline pipeline, ContainerBlockID containerBlockID) {
- this.pipeline = pipeline;
- this.containerBlockID = containerBlockID;
- }
-
- public Pipeline getPipeline() {
- return pipeline;
- }
-
- public ContainerBlockID getBlockID() {
- return containerBlockID;
- }
-}
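A minimal sketch of how the Builder above is driven; both arguments are assumed to be handed in by SCM's block allocation path rather than constructed here:

    import org.apache.hadoop.hdds.client.ContainerBlockID;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

    final class AllocatedBlockSketch {
      // Wraps an allocation result using only the two setters defined above.
      static AllocatedBlock wrap(Pipeline pipeline, ContainerBlockID blockId) {
        return new AllocatedBlock.Builder()
            .setPipeline(pipeline)
            .setContainerBlockID(blockId)
            .build();
      }
    }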
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
deleted file mode 100644
index 86f5a66cf4ca3..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/**
- * Exception thrown when a block is yet to be committed on the datanode.
- */
-public class BlockNotCommittedException extends StorageContainerException {
-
- /**
- * Constructs an {@code IOException} with the specified detail message.
- *
- * @param message The detail message (which is saved for later retrieval by
- * the {@link #getMessage()} method)
- */
- public BlockNotCommittedException(String message) {
- super(message, ContainerProtos.Result.BLOCK_NOT_COMMITTED);
- }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
deleted file mode 100644
index 4e406e6e97f45..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/**
- * Exception thrown when a write/update operation is attempted on a
- * container that is not open.
- */
-public class ContainerNotOpenException extends StorageContainerException {
-
- /**
- * Constructs an {@code IOException} with the specified detail message.
- *
- * @param message The detail message (which is saved for later retrieval by
- * the {@link #getMessage()} method)
- */
- public ContainerNotOpenException(String message) {
- super(message, ContainerProtos.Result.CONTAINER_NOT_OPEN);
- }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
deleted file mode 100644
index 5b01bd2c652bc..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import java.util.Comparator;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;
-
-/**
- * Class that wraps ozone container info together with its pipeline.
- */
-public class ContainerWithPipeline implements Comparator<ContainerWithPipeline>,
-    Comparable<ContainerWithPipeline> {
-
- private final ContainerInfo containerInfo;
- private final Pipeline pipeline;
-
- public ContainerWithPipeline(ContainerInfo containerInfo, Pipeline pipeline) {
- this.containerInfo = containerInfo;
- this.pipeline = pipeline;
- }
-
- public ContainerInfo getContainerInfo() {
- return containerInfo;
- }
-
- public Pipeline getPipeline() {
- return pipeline;
- }
-
- public static ContainerWithPipeline fromProtobuf(
- HddsProtos.ContainerWithPipeline allocatedContainer)
- throws UnknownPipelineStateException {
- return new ContainerWithPipeline(
- ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
- Pipeline.getFromProtobuf(allocatedContainer.getPipeline()));
- }
-
- public HddsProtos.ContainerWithPipeline getProtobuf()
- throws UnknownPipelineStateException {
- HddsProtos.ContainerWithPipeline.Builder builder =
- HddsProtos.ContainerWithPipeline.newBuilder();
- builder.setContainerInfo(getContainerInfo().getProtobuf())
- .setPipeline(getPipeline().getProtobufMessage());
-
- return builder.build();
- }
-
-
- @Override
- public String toString() {
- return containerInfo.toString() + " | " + pipeline.toString();
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
-
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- ContainerWithPipeline that = (ContainerWithPipeline) o;
-
- return new EqualsBuilder()
- .append(getContainerInfo(), that.getContainerInfo())
- .append(getPipeline(), that.getPipeline())
- .isEquals();
- }
-
- @Override
- public int hashCode() {
- return new HashCodeBuilder(11, 811)
- .append(getContainerInfo())
- .append(getPipeline())
- .toHashCode();
- }
-
- /**
- * Compares its two arguments for order. Returns a negative integer, zero, or
- * a positive integer as the first argument is less than, equal to, or greater
- * than the second.
- *
- * @param o1 the first object to be compared.
- * @param o2 the second object to be compared.
- * @return a negative integer, zero, or a positive integer as the first
- * argument is less than, equal to, or greater than the second.
- * @throws NullPointerException if an argument is null and this comparator
- * does not permit null arguments
- * @throws ClassCastException if the arguments' types prevent them from
- * being compared by this comparator.
- */
- @Override
- public int compare(ContainerWithPipeline o1, ContainerWithPipeline o2) {
- return o1.getContainerInfo().compareTo(o2.getContainerInfo());
- }
-
- /**
- * Compares this object with the specified object for order. Returns a
- * negative integer, zero, or a positive integer as this object is less than,
- * equal to, or greater than the specified object.
- *
- * @param o the object to be compared.
- * @return a negative integer, zero, or a positive integer as this object is
- * less than, equal to, or greater than the specified object.
- * @throws NullPointerException if the specified object is null
- * @throws ClassCastException if the specified object's type prevents it
- * from being compared to this object.
- */
- @Override
- public int compareTo(ContainerWithPipeline o) {
- return this.compare(this, o);
- }
-
-}
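A sketch of the protobuf round trip this class exists for, useful when the pair crosses an RPC boundary; it uses only the conversion methods defined above and the helper name is hypothetical:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
    import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;

    final class ContainerWithPipelineSketch {
      // Serialize to the wire form and back; equals() above compares both the
      // ContainerInfo and the Pipeline, so the decoded copy should match.
      static ContainerWithPipeline roundTrip(ContainerWithPipeline original)
          throws UnknownPipelineStateException {
        HddsProtos.ContainerWithPipeline wire = original.getProtobuf();
        return ContainerWithPipeline.fromProtobuf(wire);
      }
    }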
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
deleted file mode 100644
index 5f5aaceb16a21..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.client.BlockID;
-
-import static org.apache.hadoop.hdds.protocol.proto
- .ScmBlockLocationProtocolProtos.DeleteScmBlockResult;
-
-/**
- * Class wraps storage container manager block deletion results.
- */
-public class DeleteBlockResult {
- private BlockID blockID;
- private DeleteScmBlockResult.Result result;
-
- public DeleteBlockResult(final BlockID blockID,
- final DeleteScmBlockResult.Result result) {
- this.blockID = blockID;
- this.result = result;
- }
-
- /**
- * Get the block id that was deleted.
- * @return block id.
- */
- public BlockID getBlockID() {
- return blockID;
- }
-
- /**
- * Get key deletion result.
- * @return key deletion result.
- */
- public DeleteScmBlockResult.Result getResult() {
- return result;
- }
-}
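A small usage sketch; the logger and the result list are assumed to come from the calling service, and no concrete values of the DeleteScmBlockResult.Result enum are relied on:

    import java.util.List;
    import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
    import org.slf4j.Logger;

    final class DeleteBlockResultSketch {
      // Logs one line per block, pairing the block id with the per-block
      // deletion result code returned by SCM.
      static void logResults(Logger log, List<DeleteBlockResult> results) {
        for (DeleteBlockResult r : results) {
          log.info("block {} -> {}", r.getBlockID(), r.getResult());
        }
      }
    }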
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
deleted file mode 100644
index eb215d63a4694..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Collection;
-
-/**
- * This class contains the set of datanodes, containers and pipelines which
- * the ozone client provides to be handed over to SCM when a block
- * allocation request comes.
- */
-public class ExcludeList {
-
- private final List<DatanodeDetails> datanodes;
- private final List<ContainerID> containerIds;
- private final List<PipelineID> pipelineIds;
-
-
- public ExcludeList() {
- datanodes = new ArrayList<>();
- containerIds = new ArrayList<>();
- pipelineIds = new ArrayList<>();
- }
-
- public List<ContainerID> getContainerIds() {
- return containerIds;
- }
-
- public List<DatanodeDetails> getDatanodes() {
- return datanodes;
- }
-
- public void addDatanodes(Collection<DatanodeDetails> dns) {
- datanodes.addAll(dns);
- }
-
- public void addDatanode(DatanodeDetails dn) {
- datanodes.add(dn);
- }
-
- public void addConatinerId(ContainerID containerId) {
- containerIds.add(containerId);
- }
-
- public void addPipeline(PipelineID pipelineId) {
- pipelineIds.add(pipelineId);
- }
-
- public List<PipelineID> getPipelineIds() {
- return pipelineIds;
- }
-
- public HddsProtos.ExcludeListProto getProtoBuf() {
- HddsProtos.ExcludeListProto.Builder builder =
- HddsProtos.ExcludeListProto.newBuilder();
- containerIds
- .forEach(id -> builder.addContainerIds(id.getId()));
- datanodes.forEach(dn -> {
- builder.addDatanodes(dn.getUuidString());
- });
- pipelineIds.forEach(pipelineID -> {
- builder.addPipelineIds(pipelineID.getProtobuf());
- });
- return builder.build();
- }
-
- public static ExcludeList getFromProtoBuf(
- HddsProtos.ExcludeListProto excludeListProto) {
- ExcludeList excludeList = new ExcludeList();
- excludeListProto.getContainerIdsList().forEach(id -> {
- excludeList.addConatinerId(ContainerID.valueof(id));
- });
- DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
- excludeListProto.getDatanodesList().forEach(dn -> {
- builder.setUuid(dn);
- excludeList.addDatanode(builder.build());
- });
- excludeListProto.getPipelineIdsList().forEach(pipelineID -> {
- excludeList.addPipeline(PipelineID.getFromProtobuf(pipelineID));
- });
- return excludeList;
- }
-
- public void clear() {
- datanodes.clear();
- containerIds.clear();
- pipelineIds.clear();
- }
-}
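A sketch of the intended client-side use: after a failed allocation the client records what to avoid and ships the protobuf form back to SCM on retry. The datanode and pipeline id are assumed to come from that failed attempt, and the helper name is hypothetical:

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

    final class ExcludeListSketch {
      // Builds the exclusion payload using only methods defined above.
      static HddsProtos.ExcludeListProto excludeFor(DatanodeDetails badNode,
          PipelineID badPipeline) {
        ExcludeList exclude = new ExcludeList();
        exclude.addDatanode(badNode);
        exclude.addPipeline(badPipeline);
        return exclude.getProtoBuf();
      }
    }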
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
deleted file mode 100644
index 1378d1ab70ad0..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-/**
- * Exception thrown when a container is in an invalid state while doing I/O.
- */
-public class InvalidContainerStateException extends StorageContainerException {
-
- /**
- * Constructs an {@code IOException} with the specified detail message.
- *
- * @param message The detail message (which is saved for later retrieval by
- * the {@link #getMessage()} method)
- */
- public InvalidContainerStateException(String message) {
- super(message, ContainerProtos.Result.INVALID_CONTAINER_STATE);
- }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
deleted file mode 100644
index f1405fff94617..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
-import java.io.IOException;
-
-/**
- * Exceptions thrown from the Storage Container.
- */
-public class StorageContainerException extends IOException {
- private ContainerProtos.Result result;
-
- /**
- * Constructs an {@code IOException} with {@code null}
- * as its error detail message.
- */
- public StorageContainerException(ContainerProtos.Result result) {
- this.result = result;
- }
-
- /**
- * Constructs an {@code IOException} with the specified detail message.
- *
- * @param message The detail message (which is saved for later retrieval by
- * the {@link #getMessage()} method)
- * @param result - The result code
- */
- public StorageContainerException(String message,
- ContainerProtos.Result result) {
- super(message);
- this.result = result;
- }
-
- /**
- * Constructs an {@code IOException} with the specified detail message
- * and cause.
- *
- * <p>Note that the detail message associated with {@code cause} is
- * not automatically incorporated into this exception's detail
- * message.
- *
- * @param message The detail message (which is saved for later retrieval by
- * the {@link #getMessage()} method)
- *
- * @param cause The cause (which is saved for later retrieval by the {@link
- * #getCause()} method). (A null value is permitted, and indicates that the
- * cause is nonexistent or unknown.)
- *
- * @param result - The result code
- * @since 1.6
- */
- public StorageContainerException(String message, Throwable cause,
- ContainerProtos.Result result) {
- super(message, cause);
- this.result = result;
- }
-
- /**
- * Constructs an {@code IOException} with the specified cause and a
- * detail message of {@code (cause==null ? null : cause.toString())}
- * (which typically contains the class and detail message of {@code cause}).
- * This constructor is useful for IO exceptions that are little more
- * than wrappers for other throwables.
- *
- * @param cause The cause (which is saved for later retrieval by the {@link
- * #getCause()} method). (A null value is permitted, and indicates that the
- * cause is nonexistent or unknown.)
- * @param result - The result code
- * @since 1.6
- */
- public StorageContainerException(Throwable cause, ContainerProtos.Result
- result) {
- super(cause);
- this.result = result;
- }
-
- /**
- * Returns Result.
- *
- * @return Result.
- */
- public ContainerProtos.Result getResult() {
- return result;
- }
-
-
-}
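The result code is what makes this exception hierarchy useful to callers. A hedged sketch of a client-side filter, using only the two codes raised by the subclasses deleted earlier in this patch; whether to retry or re-allocate on these conditions is the caller's policy, not something this patch prescribes:

    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;

    final class RetrySketch {
      // CONTAINER_NOT_OPEN and BLOCK_NOT_COMMITTED (see the subclasses above)
      // are conditions a client may want to handle differently from hard
      // failures, for example by retrying or allocating elsewhere.
      static boolean isRetriable(StorageContainerException e) {
        switch (e.getResult()) {
        case CONTAINER_NOT_OPEN:
        case BLOCK_NOT_COMMITTED:
          return true;
        default:
          return false;
        }
      }
    }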
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
deleted file mode 100644
index ffe0d3d4d99ab..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-/**
- Contains protocol buffer helper classes and utilities used in
- the implementation.
- **/
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
deleted file mode 100644
index d13dcb1f6c40f..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java
deleted file mode 100644
index 52ce7964b6769..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * A ContainerPlacementPolicy supports choosing datanodes to build a
- * replication pipeline with specified constraints.
- */
-public interface ContainerPlacementPolicy {
-
- /**
- * Given the replication factor and size required, return a set of
- * datanodes that satisfies the node count and size requirement.
- *
- * @param excludedNodes - list of nodes to be excluded.
- * @param favoredNodes - list of nodes preferred.
- * @param nodesRequired - number of datanodes required.
- * @param sizeRequired - size required for the container or block.
- * @return list of datanodes chosen.
- * @throws IOException
- */
- List<DatanodeDetails> chooseDatanodes(List<DatanodeDetails> excludedNodes,
- List<DatanodeDetails> favoredNodes, int nodesRequired, long sizeRequired)
- throws IOException;
-}
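A deliberately naive implementation sketch of the interface above, included only to illustrate the contract: it ignores sizeRequired, simply prefers favored nodes, and is not one of the real SCM placement policies, which are considerably more involved.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;

    public class FavoredNodesFirstPolicy implements ContainerPlacementPolicy {
      @Override
      public List<DatanodeDetails> chooseDatanodes(
          List<DatanodeDetails> excludedNodes,
          List<DatanodeDetails> favoredNodes,
          int nodesRequired, long sizeRequired) throws IOException {
        List<DatanodeDetails> chosen = new ArrayList<>();
        if (favoredNodes != null) {
          for (DatanodeDetails dn : favoredNodes) {
            if (chosen.size() == nodesRequired) {
              break;
            }
            // Skip nodes the caller has explicitly excluded.
            if (excludedNodes == null || !excludedNodes.contains(dn)) {
              chosen.add(dn);
            }
          }
        }
        if (chosen.size() < nodesRequired) {
          throw new IOException("Unable to find " + nodesRequired
              + " datanodes for placement");
        }
        return chosen;
      }
    }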
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
deleted file mode 100644
index dac4752fe66fa..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-/**
- Contains container placement policy interface definition.
- **/
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
deleted file mode 100644
index db1f82ae411d0..0000000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.exceptions;
-
-import java.io.IOException;
-
-/**
- * Exception thrown by SCM.
- */
-public class SCMException extends IOException {
- private final ResultCodes result;
-
- /**
- * Constructs an {@code IOException} with {@code null}
- * as its error detail message.
- */
- public SCMException(ResultCodes result) {
- this.result = result;
- }
-
- /**
- * Constructs an {@code IOException} with the specified detail message.
- *
- * @param message The detail message (which is saved for later retrieval by
- * the
- * {@link #getMessage()} method)
- */
- public SCMException(String message, ResultCodes result) {
- super(message);
- this.result = result;
- }
-
- /**
- * Constructs an {@code IOException} with the specified detail message
- * and cause.
- *
- * <p>Note that the detail message associated with {@code cause} is
- *