* This method is invoked by the {@link AuthenticationFilter#init} method.
*
* @param config configuration properties to initialize the handler.
@@ -103,7 +103,7 @@ protected boolean getAcceptAnonymous() {
/**
* Releases any resources initialized by the authentication handler.
- *
* This implementation does a NOP.
*/
@Override
@@ -112,7 +112,6 @@ public void destroy() {
/**
* Returns the authentication type of the authentication handler, 'simple'.
- *
* It extracts the {@link PseudoAuthenticator#USER_NAME} parameter from the query string and creates
* an {@link AuthenticationToken} with it.
- *
* If the HTTP client request does not contain the {@link PseudoAuthenticator#USER_NAME} parameter and
* the handler is configured to allow anonymous users it returns the {@link AuthenticationToken#ANONYMOUS}
* token.
- *
* If the HTTP client request does not contain the {@link PseudoAuthenticator#USER_NAME} parameter and
* the handler is configured to disallow anonymous users it throws an {@link AuthenticationException}.
*
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
index 62bb00acab424..7ae8ab2672e9d 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
@@ -92,7 +92,7 @@ public class KerberosName {
/**
* Create a name from the full Kerberos principal name.
- * @param name
+ * @param name full Kerberos principal name.
*/
public KerberosName(String name) {
Matcher match = nameParser.matcher(name);
@@ -367,7 +367,7 @@ public static class NoMatchingRule extends IOException {
* Get the translation of the principal name into an operating system
* user name.
* @return the short name
- * @throws IOException
+ * @throws IOException if something is wrong with the translation rules
*/
public String getShortName() throws IOException {
String[] params;
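A minimal sketch of how these translation rules are exercised (not part of any patch in this series; the rule string, realm and principal are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class KerberosNameSketch {
      public static void main(String[] args) throws IOException {
        // Map any two-component principal in EXAMPLE.COM to its first component,
        // then fall back to the default rule.
        KerberosName.setRules("RULE:[2:$1@$0](.*@EXAMPLE\\.COM)s/@.*//\nDEFAULT");
        KerberosName kn = new KerberosName("hdfs/namenode.example.com@EXAMPLE.COM");
        // getShortName() throws IOException (NoMatchingRule) when no rule applies.
        System.out.println(kn.getShortName());   // prints "hdfs"
      }
    }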
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index ca0fce2251ea0..0e8d8db8ea520 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -135,12 +135,10 @@ static final String[] getPrincipalNames(String keytabFileName) throws IOExceptio
/**
* Get all the unique principals from keytabfile which matches a pattern.
*
- * @param keytab
- * Name of the keytab file to be read.
- * @param pattern
- * pattern to be matched.
+ * @param keytab Name of the keytab file to be read.
+ * @param pattern pattern to be matched.
* @return list of unique principals which matches the pattern.
- * @throws IOException
+ * @throws IOException if the principal names cannot be read from the keytab
*/
public static final String[] getPrincipalNames(String keytab,
Pattern pattern) throws IOException {
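A minimal usage sketch of the method above (not part of the patch; the keytab path and pattern are illustrative):

    import java.io.IOException;
    import java.util.regex.Pattern;
    import org.apache.hadoop.security.authentication.util.KerberosUtil;

    public class KeytabPrincipalsSketch {
      public static void main(String[] args) throws IOException {
        // List every HTTP/* principal stored in the keytab.
        String[] principals = KerberosUtil.getPrincipalNames(
            "/etc/security/keytabs/spnego.service.keytab",
            Pattern.compile("HTTP/.*"));
        for (String p : principals) {
          System.out.println(p);
        }
      }
    }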
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
index e29301bc4ba05..f639503bd6f10 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
@@ -41,8 +41,6 @@ public Signer(SignerSecretProvider secretProvider) {
/**
* Returns a signed string.
- *
* It works by storing the secrets and next rollover time in a ZooKeeper znode.
* All ZKSignerSecretProviders looking at that znode will use those
* secrets and next rollover time to ensure they are synchronized. There is no
@@ -55,7 +55,7 @@
* your own Curator client, you can pass it to ZKSignerSecretProvider; see
* {@link org.apache.hadoop.security.authentication.server.AuthenticationFilter}
* for more details.
- *
* MiniKdc sets 2 System properties when started and un-sets them when stopped:
*
* - java.security.krb5.conf: set to the MiniKDC realm/host/port
@@ -92,7 +92,7 @@
* For example, running testcases in parallel that start a KDC each. To
* accomplish this a single MiniKdc should be used for all testcases running
* in parallel.
- *
+ *
* MiniKdc default configuration values are:
*
* - org.name=EXAMPLE (used to create the REALM)
@@ -106,7 +106,6 @@
* - debug=false
*
* The generated krb5.conf forces TCP connections.
- *
*/
public class MiniKdc {
@@ -218,7 +217,7 @@ public void run() {
/**
* Convenience method that returns MiniKdc default configuration.
- *
+ *
* The returned configuration is a copy, it can be customized before using
* it to create a MiniKdc.
* @return a MiniKdc default configuration.
@@ -484,7 +483,6 @@ private void initKDCServer() throws Exception {
/**
* Stops the MiniKdc
- * @throws Exception
*/
public synchronized void stop() {
if (kdc != null) {
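A minimal sketch of the MiniKdc lifecycle described in the javadoc above (not part of the patch; the work directory, keytab name and principals are illustrative):

    import java.io.File;
    import java.util.Properties;
    import org.apache.hadoop.minikdc.MiniKdc;

    public class MiniKdcSketch {
      public static void main(String[] args) throws Exception {
        Properties conf = MiniKdc.createConf();     // copy of the defaults listed above
        File workDir = new File("target/minikdc");  // illustrative work directory
        workDir.mkdirs();
        MiniKdc kdc = new MiniKdc(conf, workDir);
        kdc.start();                                // sets java.security.krb5.conf etc.
        try {
          File keytab = new File(workDir, "test.keytab");
          kdc.createPrincipal(keytab, "client", "HTTP/localhost");
          System.out.println("Realm: " + kdc.getRealm());
        } finally {
          kdc.stop();                               // un-sets the System properties
        }
      }
    }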
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java
index 45b40c21266ee..141ab5afc3fb4 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java
@@ -42,8 +42,8 @@ public Exec(Mojo mojo) {
* Runs the specified command and saves each line of the command's output to
* the given list.
*
- * @param command List containing command and all arguments
- * @param output List in/out parameter to receive command output
+ * @param command List containing command and all arguments
+ * @param output List in/out parameter to receive command output
* @return int exit code of command
*/
public int run(List command, List output) {
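A sketch of how a plugin mojo might call Exec#run (not part of the patch; the mojo class and the git command are illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.maven.plugin.util.Exec;
    import org.apache.maven.plugin.AbstractMojo;

    public class ExecSketchMojo extends AbstractMojo {
      @Override
      public void execute() {
        Exec exec = new Exec(this);                 // failures are logged through this mojo
        List<String> output = new ArrayList<String>();
        int rc = exec.run(Arrays.asList("git", "rev-parse", "HEAD"), output);
        getLog().info("exit code " + rc + ", captured " + output.size() + " line(s)");
      }
    }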
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java
index 6661f676dba74..8bd66cc4d2352 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java
@@ -47,7 +47,7 @@ private static String getCommaSeparatedList(List list) {
* Converts a Maven FileSet to a list of File objects.
*
* @param source FileSet to convert
- * @return List containing every element of the FileSet as a File
+ * @return List containing every element of the FileSet as a File
* @throws IOException if an I/O error occurs while trying to find the files
*/
@SuppressWarnings("unchecked")
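A usage sketch of the converter documented above (the method name convertFileSetToFiles and the include pattern are assumptions, not taken from this hunk):

    import java.io.File;
    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.maven.plugin.util.FileSetUtils;
    import org.apache.maven.model.FileSet;

    public class FileSetSketch {
      public static void main(String[] args) throws IOException {
        FileSet fileSet = new FileSet();
        fileSet.setDirectory("src/main/proto");     // illustrative source directory
        fileSet.addInclude("**/*.proto");
        // Assumed converter name; throws IOException if the files cannot be listed.
        List<File> files = FileSetUtils.convertFileSetToFiles(fileSet);
        System.out.println(files.size() + " file(s) matched");
      }
    }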
From 1c797b16734c2a5db43ca30c8dd080c15c982bc7 Mon Sep 17 00:00:00 2001
From: Steve Loughran
Date: Tue, 9 Dec 2014 11:32:51 +0000
Subject: [PATCH 013/432] HADOOP-11352 Clean up test-patch.sh to disable "+1
contrib tests"
---
dev-support/test-patch.sh | 71 -------------------
.../hadoop-common/CHANGES.txt | 3 +
2 files changed, 3 insertions(+), 71 deletions(-)
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index e6512abf81809..ece3ddf6f6703 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -857,74 +857,6 @@ findModules () {
rm $TMP_MODULES
echo $CHANGED_MODULES
}
-###############################################################################
-### Run the test-contrib target
-runContribTests () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Running contrib tests."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
-
- if [[ `$GREP -c 'test-contrib' build.xml` == 0 ]] ; then
- echo "No contrib tests in this project."
- return 0
- fi
-
- ### Kill any rogue build processes from the last attempt
- $PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
-
- #echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib"
- #$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib
- echo "NOP"
- if [[ $? != 0 ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- {color:red}-1 contrib tests{color}. The patch failed contrib unit tests."
- return 1
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- {color:green}+1 contrib tests{color}. The patch passed contrib unit tests."
- return 0
-}
-
-###############################################################################
-### Run the inject-system-faults target
-checkInjectSystemFaults () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Checking the integrity of system test framework code."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
-
- ### Kill any rogue build processes from the last attempt
- $PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
-
- #echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults"
- #$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults
- echo "NOP"
- return 0
- if [[ $? != 0 ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- {color:red}-1 system test framework{color}. The patch failed system test framework compile."
- return 1
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- {color:green}+1 system test framework{color}. The patch passed system test framework compile."
- return 0
-}
-
###############################################################################
### Submit a comment to the defect's Jira
submitJiraComment () {
@@ -1059,10 +991,7 @@ checkReleaseAuditWarnings
if [[ $JENKINS == "true" || $RUN_TESTS == "true" ]] ; then
runTests
(( RESULT = RESULT + $? ))
- runContribTests
- (( RESULT = RESULT + $? ))
fi
-checkInjectSystemFaults
(( RESULT = RESULT + $? ))
JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
$JIRA_COMMENT_FOOTER"
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b5178c3b572f5..cf3e531ade58a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -148,6 +148,9 @@ Trunk (Unreleased)
HADOOP-11081. Document hadoop properties expected to be set by the shell
code in *-env.sh (aw)
+ HADOOP-11352 Clean up test-patch.sh to disable "+1 contrib tests"
+ (Akira AJISAKA via stevel)
+
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.
From 7cf80aff212e6e69fa2eec23d44000229fe70cde Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 10:38:24 -0800
Subject: [PATCH 014/432] HADOOP-10476. Bumping the findbugs version to 3.0.0.
Contributed by Haohui Mai.
---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
hadoop-project/pom.xml | 5 +++--
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index cf3e531ade58a..a0a10b819a152 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -424,6 +424,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11287. Simplify UGI#reloginFromKeytab for Java 7+.
(Li Lu via wheat9)
+ HADOOP-10476. Bumping the findbugs version to 3.0.0. (wheat9)
+
OPTIMIZATIONS
HADOOP-11323. WritableComparator#compare keeps reference to byte array.
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 7c492c8a0083b..c3881e8736bc1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -71,6 +71,7 @@
<protoc.path>${env.HADOOP_PROTOC_PATH}</protoc.path>
<zookeeper.version>3.4.6</zookeeper.version>
+ <findbugs.version>3.0.0</findbugs.version>
<tomcat.version>6.0.41</tomcat.version>
@@ -843,7 +844,7 @@
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
- <version>1.3.9</version>
+ <version>${findbugs.version}</version>
javax.xml.bind
@@ -981,7 +982,7 @@
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
- <version>2.3.2</version>
+ <version>${findbugs.version}</version>
<groupId>org.apache.maven.plugins</groupId>
From e1079fc67f096097e28cb4567f624a439f4c0502 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 10:41:35 -0800
Subject: [PATCH 015/432] HADOOP-11367. Fix warnings from findbugs 3.0 in
hadoop-streaming. Contributed by Li Lu.
---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../main/java/org/apache/hadoop/streaming/Environment.java | 4 +++-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a0a10b819a152..425cab7e71b7f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -534,6 +534,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-10134 [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc
comments. (apurtell via stevel)
+ HADOOP-11367. Fix warnings from findbugs 3.0 in hadoop-streaming. (Li Lu via wheat9)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java
index bd76c31998256..98d8aa0306438 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java
@@ -20,6 +20,7 @@
import java.io.*;
import java.net.InetAddress;
+import java.nio.charset.Charset;
import java.util.*;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -62,7 +63,8 @@ public Environment() throws IOException {
// Read the environment variables
Process pid = Runtime.getRuntime().exec(command);
- BufferedReader in = new BufferedReader(new InputStreamReader(pid.getInputStream()));
+ BufferedReader in = new BufferedReader(
+ new InputStreamReader(pid.getInputStream(), Charset.forName("UTF-8")));
try {
while (true) {
String line = in.readLine();
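The same findbugs-friendly pattern in isolation: always name the charset when wrapping a process stream. A sketch only (the "env" command assumes a POSIX system):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.Charset;

    public class EnvReaderSketch {
      public static void main(String[] args) throws IOException {
        Process p = Runtime.getRuntime().exec(new String[] {"env"});
        BufferedReader in = new BufferedReader(
            new InputStreamReader(p.getInputStream(), Charset.forName("UTF-8")));
        try {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);
          }
        } finally {
          in.close();
        }
      }
    }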
From f956e3ddde3eab39980fce8ae4618d144627c65a Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 10:46:13 -0800
Subject: [PATCH 016/432] HADOOP-11369. Fix new findbugs warnings in
hadoop-mapreduce-client, non-core directories. Contributed by Li Lu.
---
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java | 3 +--
.../org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java | 1 -
.../org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java | 2 +-
4 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 425cab7e71b7f..5e2ff8d737322 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -536,6 +536,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11367. Fix warnings from findbugs 3.0 in hadoop-streaming. (Li Lu via wheat9)
+ HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client,
+ non-core directories. (Li Lu via wheat9)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 45ddb9eb60a4b..97de8fae2ebb0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -870,8 +870,7 @@ private void processEventForTimelineServer(HistoryEvent event, JobId jobId,
TaskAttemptStartedEvent tase = (TaskAttemptStartedEvent) event;
tEvent.addEventInfo("TASK_TYPE", tase.getTaskType().toString());
tEvent.addEventInfo("TASK_ATTEMPT_ID",
- tase.getTaskAttemptId().toString() == null ?
- "" : tase.getTaskAttemptId().toString());
+ tase.getTaskAttemptId().toString());
tEvent.addEventInfo("START_TIME", tase.getStartTime());
tEvent.addEventInfo("HTTP_PORT", tase.getHttpPort());
tEvent.addEventInfo("TRACKER_NAME", tase.getTrackerName());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index 6c58a683d1f27..cd4e272e60f38 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -76,7 +76,6 @@ public abstract class RMCommunicator extends AbstractService
protected EventHandler eventHandler;
protected ApplicationMasterProtocol scheduler;
private final ClientService clientService;
- protected int lastResponseID;
private Resource maxContainerCapability;
protected Map applicationACLs;
private volatile long lastHeartbeatTime;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index f53f18896d94d..40844df5f17de 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -848,7 +848,7 @@ public void run() {
}
});
}
- } else if (old != null && !old.isMovePending()) {
+ } else if (!old.isMovePending()) {
//This is a duplicate so just delete it
if (LOG.isDebugEnabled()) {
LOG.debug("Duplicate: deleting");
From 243e86f55354f0f37e5dc9ed30bdd8f7918f4456 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Tue, 9 Dec 2014 10:46:50 -0800
Subject: [PATCH 017/432] HADOOP-11368. Fix SSLFactory truststore reloader
thread leak in KMSClientProvider. Contributed by Arun Suresh.
---
.../hadoop-common/CHANGES.txt | 3 +++
.../crypto/key/kms/KMSClientProvider.java | 4 +++
.../hadoop/crypto/key/kms/server/TestKMS.java | 26 +++++++++++++++++++
3 files changed, 33 insertions(+)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5e2ff8d737322..2051698f3f72b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -539,6 +539,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client,
non-core directories. (Li Lu via wheat9)
+ HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
+ KMSClientProvider. (Arun Suresh via wang)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index cb03683f5300c..50dd1ad239ccb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -827,6 +827,10 @@ public void close() throws IOException {
encKeyVersionQueue.shutdown();
} catch (Exception e) {
throw new IOException(e);
+ } finally {
+ if (sslFactory != null) {
+ sslFactory.destroy();
+ }
}
}
}
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 61ce8072be864..f487e986658a5 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -303,6 +303,32 @@ public Void call() throws Exception {
url.getProtocol().equals("https"));
final URI uri = createKMSUri(getKMSUrl());
+ if (ssl) {
+ KeyProvider testKp = new KMSClientProvider(uri, conf);
+ ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
+ while (threadGroup.getParent() != null) {
+ threadGroup = threadGroup.getParent();
+ }
+ Thread[] threads = new Thread[threadGroup.activeCount()];
+ threadGroup.enumerate(threads);
+ Thread reloaderThread = null;
+ for (Thread thread : threads) {
+ if ((thread.getName() != null)
+ && (thread.getName().contains("Truststore reloader thread"))) {
+ reloaderThread = thread;
+ }
+ }
+ Assert.assertTrue("Reloader is not alive", reloaderThread.isAlive());
+ testKp.close();
+ boolean reloaderStillAlive = true;
+ for (int i = 0; i < 10; i++) {
+ reloaderStillAlive = reloaderThread.isAlive();
+ if (!reloaderStillAlive) break;
+ Thread.sleep(1000);
+ }
+ Assert.assertFalse("Reloader is still alive", reloaderStillAlive);
+ }
+
if (kerberos) {
for (String user : new String[]{"client", "client/host"}) {
doAs(user, new PrivilegedExceptionAction() {
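The leak check added above boils down to finding a live thread by name from the root ThreadGroup. A standalone restatement of that technique (illustrative helper, not part of the patch):

    public final class ThreadFinder {
      private ThreadFinder() {}

      /** Returns a live thread whose name contains the fragment, or null. */
      public static Thread findByNameFragment(String fragment) {
        ThreadGroup group = Thread.currentThread().getThreadGroup();
        while (group.getParent() != null) {   // climb to the root thread group
          group = group.getParent();
        }
        Thread[] threads = new Thread[group.activeCount()];
        group.enumerate(threads);             // best-effort snapshot of live threads
        for (Thread t : threads) {
          if (t != null && t.getName() != null && t.getName().contains(fragment)) {
            return t;
          }
        }
        return null;
      }
    }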
From d3d95b4a6886c1aaa270b4f67c19a89ad74350ab Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 10:48:35 -0800
Subject: [PATCH 018/432] HADOOP-11372. Fix new findbugs warnings in
mapreduce-examples. Contributed by Li Lu.
---
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../main/java/org/apache/hadoop/examples/pi/Parser.java | 5 ++---
.../java/org/apache/hadoop/examples/pi/math/Bellard.java | 9 ++++++++-
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2051698f3f72b..4b23471bcebe7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -542,6 +542,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
KMSClientProvider. (Arun Suresh via wang)
+ HADOOP-11372. Fix new findbugs warnings in mapreduce-examples.
+ (Li Lu via wheat9)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
index 187520a399181..a2db9d1bac69d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
@@ -151,11 +151,10 @@ Map<Parameter, List<TaskResult>> parse(String inputpath, String outputdir
static <T extends Combinable<T>> Map<Parameter, T> combine(Map<Parameter, List<T>> m) {
final Map<Parameter, T> combined = new TreeMap<Parameter, T>();
for(Parameter p : Parameter.values()) {
+ //note: results would never be null due to the design of Util.combine
final List<T> results = Util.combine(m.get(p));
Util.out.format("%-6s => ", p);
- if (results == null)
- Util.out.println("null");
- else if (results.size() != 1)
+ if (results.size() != 1)
Util.out.println(results.toString().replace(", ", ",\n "));
else {
final T r = results.get(0);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java
index 90b608fd24cde..d909d92945151 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java
@@ -25,6 +25,7 @@
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+import java.util.NoSuchElementException;
import org.apache.hadoop.examples.pi.Container;
import org.apache.hadoop.examples.pi.Util;
@@ -255,7 +256,13 @@ public Iterator iterator() {
public boolean hasNext() {return i < parts.length;}
/** {@inheritDoc} */
@Override
- public Summation next() {return parts[i++];}
+ public Summation next() throws NoSuchElementException {
+ if (hasNext()) {
+ return parts[i++];
+ } else {
+ throw new NoSuchElementException("Sum's iterator does not have next!");
+ }
+ }
/** Unsupported */
@Override
public void remove() {throw new UnsupportedOperationException();}
From 9c4cdd4e7ea038c68e6e36cb4bb475d717ff03f7 Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Tue, 9 Dec 2014 10:55:17 -0800
Subject: [PATCH 019/432] Incorrect locking in FsVolumeList#checkDirs can hang
datanodes (Noah Lorang via Colin P. McCabe)
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../datanode/fsdataset/impl/FsVolumeList.java | 56 +++++++++----------
2 files changed, 31 insertions(+), 28 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 55026a2679cc6..626d90ad0fabf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -574,6 +574,9 @@ Release 2.6.1 - UNRELEASED
HDFS-4882. Prevent the Namenode's LeaseManager from looping forever in
checkLeases (Ravi Prakash via Colin P. McCabe)
+ HDFS-7489. Incorrect locking in FsVolumeList#checkDirs can hang datanodes
+ (Noah Lorang via Colin P. McCabe)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 837ddf720afb0..55329aea7d99b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -36,6 +36,7 @@ class FsVolumeList {
* This list is replaced on modification holding "this" lock.
*/
volatile List volumes = null;
+ private Object checkDirsMutex = new Object();
private final VolumeChoosingPolicy blockChooser;
private volatile int numFailedVolumes;
@@ -167,40 +168,39 @@ public void run() {
* Calls {@link FsVolumeImpl#checkDirs()} on each volume, removing any
* volumes from the active list that result in a DiskErrorException.
*
- * This method is synchronized to allow only one instance of checkDirs()
- * call
+ * Use checkDirsMutex to allow only one instance of checkDirs() call
+ *
* @return list of all the removed volumes.
*/
- synchronized List checkDirs() {
- ArrayList removedVols = null;
-
- // Make a copy of volumes for performing modification
- final List volumeList = new ArrayList(volumes);
+ List checkDirs() {
+ synchronized(checkDirsMutex) {
+ ArrayList removedVols = null;
+
+ // Make a copy of volumes for performing modification
+ final List volumeList = new ArrayList(volumes);
- for(Iterator i = volumeList.iterator(); i.hasNext(); ) {
- final FsVolumeImpl fsv = i.next();
- try {
- fsv.checkDirs();
- } catch (DiskErrorException e) {
- FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ",e);
- if (removedVols == null) {
- removedVols = new ArrayList(1);
+ for(Iterator i = volumeList.iterator(); i.hasNext(); ) {
+ final FsVolumeImpl fsv = i.next();
+ try {
+ fsv.checkDirs();
+ } catch (DiskErrorException e) {
+ FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ",e);
+ if (removedVols == null) {
+ removedVols = new ArrayList(1);
+ }
+ removedVols.add(fsv);
+ removeVolume(fsv.getBasePath());
+ numFailedVolumes++;
}
- removedVols.add(fsv);
- fsv.shutdown();
- i.remove(); // Remove the volume
- numFailedVolumes++;
}
- }
-
- if (removedVols != null && removedVols.size() > 0) {
- // Replace volume list
- volumes = Collections.unmodifiableList(volumeList);
- FsDatasetImpl.LOG.warn("Completed checkDirs. Removed " + removedVols.size()
- + " volumes. Current volumes: " + this);
- }
+
+ if (removedVols != null && removedVols.size() > 0) {
+ FsDatasetImpl.LOG.warn("Completed checkDirs. Removed " + removedVols.size()
+ + " volumes. Current volumes: " + this);
+ }
- return removedVols;
+ return removedVols;
+ }
}
@Override
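The fix above replaces a synchronized method with a dedicated mutex, so a slow disk check no longer blocks every other synchronized method on FsVolumeList. A generic sketch of that pattern (class and fields are illustrative, not the HDFS code):

    import java.util.concurrent.TimeUnit;

    public class VolumeRegistrySketch {
      private final Object checkMutex = new Object();
      private volatile int healthyVolumes = 10;

      // Fast accessor: synchronized on "this", never waits for a slow check.
      public synchronized int getHealthyVolumes() {
        return healthyVolumes;
      }

      // Slow maintenance: serialized only against itself via the dedicated mutex.
      public void checkVolumes() throws InterruptedException {
        synchronized (checkMutex) {
          TimeUnit.SECONDS.sleep(5);   // stands in for slow disk I/O
          healthyVolumes--;            // volatile write stays visible to readers
        }
      }
    }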
From 168c179bc1348ab0dc19d10dd4fd4c2739a18810 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 10:57:32 -0800
Subject: [PATCH 020/432] HADOOP-11273. TestMiniKdc failure: login options not
compatible with IBM JDK. Contributed by Gao Zhong Liang.
---
.../hadoop-common/CHANGES.txt | 3 +++
.../apache/hadoop/minikdc/TestMiniKdc.java | 22 ++++++++++++-------
2 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4b23471bcebe7..b030bf7d5fe55 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -545,6 +545,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11372. Fix new findbugs warnings in mapreduce-examples.
(Li Lu via wheat9)
+ HADOOP-11273. TestMiniKdc failure: login options not compatible with IBM
+ JDK. (Gao Zhong Liang via wheat9)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
index c052bb1425afa..fac7f0fbd0d7e 100644
--- a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
+++ b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
@@ -37,7 +37,8 @@
import java.util.Arrays;
public class TestMiniKdc extends KerberosSecurityTestcase {
-
+ private static final boolean IBM_JAVA = System.getProperty("java.vendor")
+ .contains("IBM");
@Test
public void testMiniKdcStart() {
MiniKdc kdc = getKdc();
@@ -94,15 +95,20 @@ private static String getKrb5LoginModuleName() {
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map options = new HashMap();
- options.put("keyTab", keytab);
options.put("principal", principal);
- options.put("useKeyTab", "true");
- options.put("storeKey", "true");
- options.put("doNotPrompt", "true");
- options.put("useTicketCache", "true");
- options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
- options.put("isInitiator", Boolean.toString(isInitiator));
+ if (IBM_JAVA) {
+ options.put("useKeytab", keytab);
+ options.put("credsType", "both");
+ } else {
+ options.put("keyTab", keytab);
+ options.put("useKeyTab", "true");
+ options.put("storeKey", "true");
+ options.put("doNotPrompt", "true");
+ options.put("useTicketCache", "true");
+ options.put("renewTGT", "true");
+ options.put("isInitiator", Boolean.toString(isInitiator));
+ }
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
From 1f72e2d75c47ca266f8a5df9791a6b3c8735d4d3 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Tue, 9 Dec 2014 11:37:39 -0800
Subject: [PATCH 021/432] HDFS-7498. Simplify the logic in INodesInPath.
Contributed by Jing Zhao.
---
.../main/java/org/apache/hadoop/fs/Path.java | 1 -
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../java/org/apache/hadoop/hdfs/DFSUtil.java | 15 +-
.../namenode/EncryptionZoneManager.java | 10 +-
.../hdfs/server/namenode/FSDirConcatOp.java | 5 +-
.../hdfs/server/namenode/FSDirMkdirOp.java | 42 +--
.../hdfs/server/namenode/FSDirRenameOp.java | 51 ++--
.../hdfs/server/namenode/FSDirSnapshotOp.java | 2 +-
.../namenode/FSDirStatAndListingOp.java | 10 +-
.../hdfs/server/namenode/FSDirectory.java | 95 ++++---
.../hdfs/server/namenode/FSEditLogLoader.java | 4 +-
.../hdfs/server/namenode/FSNamesystem.java | 8 +-
.../server/namenode/FSPermissionChecker.java | 51 ++--
.../hdfs/server/namenode/INodesInPath.java | 240 ++++++++----------
.../namenode/TestSnapshotPathINodes.java | 134 +++++-----
15 files changed, 320 insertions(+), 350 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 54ddedaff1dc4..caeb7a1b799fa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -60,7 +60,6 @@ public class Path implements Comparable {
/**
* Pathnames with scheme and relative path are illegal.
- * @param path to be checked
*/
void checkNotSchemeWithRelative() {
if (toUri().isAbsolute() && !isUriPathAbsolute()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 626d90ad0fabf..9398429fd808d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -447,6 +447,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7486. Consolidate XAttr-related implementation into a single class.
(wheat9)
+ HDFS-7498. Simplify the logic in INodesInPath. (jing9)
+
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index f1bfcb4fafa75..8b3f5121bf037 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -341,15 +341,20 @@ public static byte[] string2Bytes(String str) {
/**
* Given a list of path components returns a path as a UTF8 String
*/
- public static String byteArray2PathString(byte[][] pathComponents) {
+ public static String byteArray2PathString(byte[][] pathComponents,
+ int offset, int length) {
if (pathComponents.length == 0) {
return "";
- } else if (pathComponents.length == 1
+ }
+ Preconditions.checkArgument(offset >= 0 && offset < pathComponents.length);
+ Preconditions.checkArgument(length >= 0 && offset + length <=
+ pathComponents.length);
+ if (pathComponents.length == 1
&& (pathComponents[0] == null || pathComponents[0].length == 0)) {
return Path.SEPARATOR;
}
StringBuilder result = new StringBuilder();
- for (int i = 0; i < pathComponents.length; i++) {
+ for (int i = offset; i < offset + length; i++) {
result.append(new String(pathComponents[i], Charsets.UTF_8));
if (i < pathComponents.length - 1) {
result.append(Path.SEPARATOR_CHAR);
@@ -358,6 +363,10 @@ public static String byteArray2PathString(byte[][] pathComponents) {
return result.toString();
}
+ public static String byteArray2PathString(byte[][] pathComponents) {
+ return byteArray2PathString(pathComponents, 0, pathComponents.length);
+ }
+
/**
* Converts a list of path components into a path using Path.SEPARATOR.
*
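A usage sketch of the new offset/length overload added above (component values are illustrative):

    import org.apache.hadoop.hdfs.DFSUtil;

    public class PathStringSketch {
      public static void main(String[] args) {
        byte[][] components = new byte[][] {
            DFSUtil.string2Bytes(""),        // leading empty component for the root
            DFSUtil.string2Bytes("user"),
            DFSUtil.string2Bytes("alice"),
            DFSUtil.string2Bytes("data.txt")
        };
        // Existing behaviour: join every component -> "/user/alice/data.txt"
        System.out.println(DFSUtil.byteArray2PathString(components));
        // New overload: the three components starting at offset 1 -> "user/alice/data.txt"
        System.out.println(
            DFSUtil.byteArray2PathString(components, 1, components.length - 1));
      }
    }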
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index faab1f0b08a17..5c4f39d12331a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -199,9 +199,9 @@ String getKeyName(final INodesInPath iip) {
private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
assert dir.hasReadLock();
Preconditions.checkNotNull(iip);
- final INode[] inodes = iip.getINodes();
- for (int i = inodes.length - 1; i >= 0; i--) {
- final INode inode = inodes[i];
+ List inodes = iip.getReadOnlyINodes();
+ for (int i = inodes.size() - 1; i >= 0; i--) {
+ final INode inode = inodes.get(i);
if (inode != null) {
final EncryptionZoneInt ezi = encryptionZones.get(inode.getId());
if (ezi != null) {
@@ -259,9 +259,7 @@ void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
}
}
- if (srcInEZ || dstInEZ) {
- Preconditions.checkState(srcEZI != null, "couldn't find src EZ?");
- Preconditions.checkState(dstEZI != null, "couldn't find dst EZ?");
+ if (srcInEZ) {
if (srcEZI != dstEZI) {
final String srcEZPath = getFullPathName(srcEZI);
final String dstEZPath = getFullPathName(dstEZI);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index c2e0f088207a4..f7e57beeddeb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -187,9 +187,8 @@ static void unprotectedConcat(
// do the move
final INodesInPath trgIIP = fsd.getINodesInPath4Write(target, true);
- final INode[] trgINodes = trgIIP.getINodes();
final INodeFile trgInode = trgIIP.getLastINode().asFile();
- INodeDirectory trgParent = trgINodes[trgINodes.length-2].asDirectory();
+ INodeDirectory trgParent = trgIIP.getINode(-2).asDirectory();
final int trgLatestSnapshot = trgIIP.getLatestSnapshotId();
final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
@@ -229,6 +228,6 @@ static void unprotectedConcat(
trgInode.setModificationTime(timestamp, trgLatestSnapshot);
trgParent.updateModificationTime(timestamp, trgLatestSnapshot);
// update quota on the parent directory ('count' files removed, 0 space)
- FSDirectory.unprotectedUpdateCount(trgIIP, trgINodes.length - 1, -count, 0);
+ FSDirectory.unprotectedUpdateCount(trgIIP, trgIIP.length() - 1, -count, 0);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index af9e925e08232..c8c5cb2961af3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -85,12 +85,11 @@ static INode unprotectedMkdir(
throws QuotaExceededException, UnresolvedLinkException, AclException {
assert fsd.hasWriteLock();
byte[][] components = INode.getPathComponents(src);
- INodesInPath iip = fsd.getExistingPathINodes(components);
- INode[] inodes = iip.getINodes();
- final int pos = inodes.length - 1;
- unprotectedMkdir(fsd, inodeId, iip, pos, components[pos], permissions,
- aclEntries, timestamp);
- return inodes[pos];
+ final INodesInPath iip = fsd.getExistingPathINodes(components);
+ final int pos = iip.length() - 1;
+ final INodesInPath newiip = unprotectedMkdir(fsd, inodeId, iip, pos,
+ components[pos], permissions, aclEntries, timestamp);
+ return newiip.getINode(pos);
}
/**
@@ -129,17 +128,17 @@ static boolean mkdirsRecursively(
throw new SnapshotAccessControlException(
"Modification on RO snapshot is disallowed");
}
- INode[] inodes = iip.getINodes();
-
+ final int length = iip.length();
// find the index of the first null in inodes[]
StringBuilder pathbuilder = new StringBuilder();
int i = 1;
- for(; i < inodes.length && inodes[i] != null; i++) {
+ INode curNode;
+ for(; i < length && (curNode = iip.getINode(i)) != null; i++) {
pathbuilder.append(Path.SEPARATOR).append(names[i]);
- if (!inodes[i].isDirectory()) {
+ if (!curNode.isDirectory()) {
throw new FileAlreadyExistsException(
"Parent path is not a directory: "
- + pathbuilder + " "+inodes[i].getLocalName());
+ + pathbuilder + " " + curNode.getLocalName());
}
}
@@ -152,8 +151,8 @@ static boolean mkdirsRecursively(
// if inheriting (ie. creating a file or symlink), use the parent dir,
// else the supplied permissions
// NOTE: the permissions of the auto-created directories violate posix
- FsPermission parentFsPerm = inheritPermission
- ? inodes[i-1].getFsPermission() : permissions.getPermission();
+ FsPermission parentFsPerm = inheritPermission ?
+ iip.getINode(i-1).getFsPermission() : permissions.getPermission();
// ensure that the permissions allow user write+execute
if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
@@ -176,11 +175,12 @@ static boolean mkdirsRecursively(
}
// create directories beginning from the first null index
- for(; i < inodes.length; i++) {
+ for(; i < length; i++) {
pathbuilder.append(Path.SEPARATOR).append(names[i]);
- unprotectedMkdir(fsd, fsd.allocateNewInodeId(), iip, i, components[i],
- (i < lastInodeIndex) ? parentPermissions : permissions, null, now);
- if (inodes[i] == null) {
+ iip = unprotectedMkdir(fsd, fsd.allocateNewInodeId(), iip, i,
+ components[i], (i < lastInodeIndex) ? parentPermissions :
+ permissions, null, now);
+ if (iip.getINode(i) == null) {
return false;
}
// Directory creation also count towards FilesCreated
@@ -188,7 +188,7 @@ static boolean mkdirsRecursively(
NameNode.getNameNodeMetrics().incrFilesCreated();
final String cur = pathbuilder.toString();
- fsd.getEditLog().logMkDir(cur, inodes[i]);
+ fsd.getEditLog().logMkDir(cur, iip.getINode(i));
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"mkdirs: created directory " + cur);
@@ -219,7 +219,7 @@ private static boolean isDirMutable(FSDirectory fsd, INodesInPath iip)
* The parent path to the directory is at [0, pos-1].
* All ancestors exist. Newly created one stored at index pos.
*/
- private static void unprotectedMkdir(
+ private static INodesInPath unprotectedMkdir(
FSDirectory fsd, long inodeId, INodesInPath inodesInPath, int pos,
byte[] name, PermissionStatus permission, List aclEntries,
long timestamp)
@@ -231,7 +231,9 @@ private static void unprotectedMkdir(
if (aclEntries != null) {
AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
}
- inodesInPath.setINode(pos, dir);
+ return INodesInPath.replace(inodesInPath, pos, dir);
+ } else {
+ return inodesInPath;
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 9f3983a50c182..511de7a155897 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -25,7 +25,6 @@
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
@@ -42,6 +41,8 @@
import java.util.List;
import java.util.Map;
+import static org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
+import static org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import static org.apache.hadoop.util.Time.now;
class FSDirRenameOp {
@@ -77,44 +78,40 @@ static RenameOldResult renameToInt(
* Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
* dstInodes[dstInodes.length-1]
*/
- static void verifyQuotaForRename(FSDirectory fsd,
- INode[] src, INode[] dst)
- throws QuotaExceededException {
+ private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
+ INodesInPath dst) throws QuotaExceededException {
if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
// Do not check quota if edits log is still being processed
return;
}
int i = 0;
- while(src[i] == dst[i]) { i++; }
+ while(src.getINode(i) == dst.getINode(i)) { i++; }
// src[i - 1] is the last common ancestor.
- final Quota.Counts delta = src[src.length - 1].computeQuotaUsage();
+ final Quota.Counts delta = src.getLastINode().computeQuotaUsage();
// Reduce the required quota by dst that is being removed
- final int dstIndex = dst.length - 1;
- if (dst[dstIndex] != null) {
- delta.subtract(dst[dstIndex].computeQuotaUsage());
+ final INode dstINode = dst.getLastINode();
+ if (dstINode != null) {
+ delta.subtract(dstINode.computeQuotaUsage());
}
- FSDirectory.verifyQuota(dst, dstIndex, delta.get(Quota.NAMESPACE),
- delta.get(Quota.DISKSPACE), src[i - 1]);
+ FSDirectory.verifyQuota(dst, dst.length() - 1, delta.get(Quota.NAMESPACE),
+ delta.get(Quota.DISKSPACE), src.getINode(i - 1));
}
/**
* Checks file system limits (max component length and max directory items)
* during a rename operation.
*/
- static void verifyFsLimitsForRename(FSDirectory fsd,
- INodesInPath srcIIP,
+ static void verifyFsLimitsForRename(FSDirectory fsd, INodesInPath srcIIP,
INodesInPath dstIIP)
- throws FSLimitException.PathComponentTooLongException,
- FSLimitException.MaxDirectoryItemsExceededException {
+ throws PathComponentTooLongException, MaxDirectoryItemsExceededException {
byte[] dstChildName = dstIIP.getLastLocalName();
- INode[] dstInodes = dstIIP.getINodes();
- int pos = dstInodes.length - 1;
- fsd.verifyMaxComponentLength(dstChildName, dstInodes, pos);
+ final String parentPath = dstIIP.getParentPath();
+ fsd.verifyMaxComponentLength(dstChildName, parentPath);
// Do not enforce max directory items if renaming within same directory.
if (srcIIP.getINode(-2) != dstIIP.getINode(-2)) {
- fsd.verifyMaxDirItems(dstInodes, pos);
+ fsd.verifyMaxDirItems(dstIIP.getINode(-2).asDirectory(), parentPath);
}
}
@@ -176,7 +173,7 @@ static boolean unprotectedRenameTo(
fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
// Ensure dst has quota to accommodate rename
verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
- verifyQuotaForRename(fsd, srcIIP.getINodes(), dstIIP.getINodes());
+ verifyQuotaForRename(fsd, srcIIP, dstIIP);
RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP);
@@ -184,7 +181,7 @@ static boolean unprotectedRenameTo(
try {
// remove src
- final long removedSrc = fsd.removeLastINode(srcIIP);
+ final long removedSrc = fsd.removeLastINode(tx.srcIIP);
if (removedSrc == -1) {
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+ "failed to rename " + src + " to " + dst + " because the source" +
@@ -326,7 +323,7 @@ static boolean unprotectedRenameTo(
validateDestination(src, dst, srcInode);
INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
- if (dstIIP.getINodes().length == 1) {
+ if (dstIIP.length() == 1) {
error = "rename destination cannot be the root";
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
error);
@@ -357,12 +354,12 @@ static boolean unprotectedRenameTo(
// Ensure dst has quota to accommodate rename
verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
- verifyQuotaForRename(fsd, srcIIP.getINodes(), dstIIP.getINodes());
+ verifyQuotaForRename(fsd, srcIIP, dstIIP);
RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP);
boolean undoRemoveSrc = true;
- final long removedSrc = fsd.removeLastINode(srcIIP);
+ final long removedSrc = fsd.removeLastINode(tx.srcIIP);
if (removedSrc == -1) {
error = "Failed to rename " + src + " to " + dst +
" because the source can not be removed";
@@ -594,7 +591,7 @@ private static void validateRenameSource(String src, INodesInPath srcIIP)
+ error);
throw new FileNotFoundException(error);
}
- if (srcIIP.getINodes().length == 1) {
+ if (srcIIP.length() == 1) {
error = "rename source cannot be the root";
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+ error);
@@ -624,7 +621,6 @@ private static class RenameOperation {
INodesInPath srcIIP, INodesInPath dstIIP)
throws QuotaExceededException {
this.fsd = fsd;
- this.srcIIP = srcIIP;
this.dstIIP = dstIIP;
this.src = src;
this.dst = dst;
@@ -652,7 +648,7 @@ private static class RenameOperation {
srcChild, srcIIP.getLatestSnapshotId());
withCount = (INodeReference.WithCount) withName.getReferredINode();
srcChild = withName;
- srcIIP.setLastINode(srcChild);
+ srcIIP = INodesInPath.replace(srcIIP, srcIIP.length() - 1, srcChild);
// get the counts before rename
withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true);
} else if (srcChildIsReference) {
@@ -662,6 +658,7 @@ private static class RenameOperation {
} else {
withCount = null;
}
+ this.srcIIP = srcIIP;
}
boolean addSourceToDestination() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index f295e060ca0d4..ea7dc24043ce7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -45,7 +45,7 @@ static void verifySnapshotName(FSDirectory fsd, String snapshotName,
}
final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
fsd.verifyINodeName(bytes);
- fsd.verifyMaxComponentLength(bytes, path, 0);
+ fsd.verifyMaxComponentLength(bytes, path);
}
/** Allow snapshot on a directory. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index a8c3c16287757..2e7ed6be39ddc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -122,9 +122,7 @@ static boolean isFileClosed(FSDirectory fsd, String src) throws IOException {
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
- INode[] inodes = iip.getINodes();
- return !INodeFile.valueOf(inodes[inodes.length - 1],
- src).isUnderConstruction();
+ return !INodeFile.valueOf(iip.getLastINode(), src).isUnderConstruction();
}
static ContentSummary getContentSummary(
@@ -167,9 +165,8 @@ private static DirectoryListing getListing(
return getSnapshotsListing(fsd, srcs, startAfter);
}
final INodesInPath inodesInPath = fsd.getINodesInPath(srcs, true);
- final INode[] inodes = inodesInPath.getINodes();
final int snapshot = inodesInPath.getPathSnapshotId();
- final INode targetNode = inodes[inodes.length - 1];
+ final INode targetNode = inodesInPath.getLastINode();
if (targetNode == null)
return null;
byte parentStoragePolicy = isSuperUser ?
@@ -278,8 +275,7 @@ static HdfsFileStatus getFileInfo(
return getFileInfo4DotSnapshot(fsd, srcs);
}
final INodesInPath inodesInPath = fsd.getINodesInPath(srcs, resolveLink);
- final INode[] inodes = inodesInPath.getINodes();
- final INode i = inodes[inodes.length - 1];
+ final INode i = inodesInPath.getLastINode();
byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
return i == null ? null : createFileStatus(fsd,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index e8026274b7402..81b0eb6f255fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -642,8 +642,7 @@ private void setDirStoragePolicy(INodeDirectory inode, byte policyId,
* @param path the file path
* @return the block size of the file.
*/
- long getPreferredBlockSize(String path) throws UnresolvedLinkException,
- FileNotFoundException, IOException {
+ long getPreferredBlockSize(String path) throws IOException {
readLock();
try {
return INodeFile.valueOf(getNode(path, false), path
@@ -740,15 +739,13 @@ long delete(String src, BlocksMapUpdateInfo collectedBlocks,
private static boolean deleteAllowed(final INodesInPath iip,
final String src) {
- final INode[] inodes = iip.getINodes();
- if (inodes == null || inodes.length == 0
- || inodes[inodes.length - 1] == null) {
+ if (iip.length() < 1 || iip.getLastINode() == null) {
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+ "failed to remove " + src + " because it does not exist");
}
return false;
- } else if (inodes.length == 1) { // src is the root
+ } else if (iip.length() == 1) { // src is the root
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
+ "failed to remove " + src
+ " because the root is not allowed to be deleted");
@@ -763,8 +760,7 @@ private static boolean deleteAllowed(final INodesInPath iip,
boolean isNonEmptyDirectory(INodesInPath inodesInPath) {
readLock();
try {
- final INode[] inodes = inodesInPath.getINodes();
- final INode inode = inodes[inodes.length - 1];
+ final INode inode = inodesInPath.getLastINode();
if (inode == null || !inode.isDirectory()) {
//not found or not a directory
return false;
@@ -991,7 +987,7 @@ void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
private void updateCount(INodesInPath iip, long nsDelta, long dsDelta,
boolean checkQuota) throws QuotaExceededException {
- updateCount(iip, iip.getINodes().length - 1, nsDelta, dsDelta, checkQuota);
+ updateCount(iip, iip.length() - 1, nsDelta, dsDelta, checkQuota);
}
/** update count of each inode with quota
@@ -1011,12 +1007,11 @@ private void updateCount(INodesInPath iip, int numOfINodes,
//still initializing. do not check or update quotas.
return;
}
- final INode[] inodes = iip.getINodes();
- if (numOfINodes > inodes.length) {
- numOfINodes = inodes.length;
+ if (numOfINodes > iip.length()) {
+ numOfINodes = iip.length();
}
if (checkQuota && !skipQuotaCheck) {
- verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
+ verifyQuota(iip, numOfINodes, nsDelta, dsDelta, null);
}
unprotectedUpdateCount(iip, numOfINodes, nsDelta, dsDelta);
}
@@ -1039,11 +1034,11 @@ private void updateCountNoQuotaCheck(INodesInPath inodesInPath,
* updates quota without verification
* callers responsibility is to make sure quota is not exceeded
*/
- static void unprotectedUpdateCount(INodesInPath inodesInPath, int numOfINodes, long nsDelta, long dsDelta) {
- final INode[] inodes = inodesInPath.getINodes();
+ static void unprotectedUpdateCount(INodesInPath inodesInPath,
+ int numOfINodes, long nsDelta, long dsDelta) {
for(int i=0; i < numOfINodes; i++) {
- if (inodes[i].isQuotaSet()) { // a directory with quota
- inodes[i].asDirectory().getDirectoryWithQuotaFeature()
+ if (inodesInPath.getINode(i).isQuotaSet()) { // a directory with quota
+ inodesInPath.getINode(i).asDirectory().getDirectoryWithQuotaFeature()
.addSpaceConsumed2Cache(nsDelta, dsDelta);
}
}
@@ -1105,14 +1100,15 @@ static String getFullPathName(INode inode) {
* @param src The full path name of the child node.
* @throws QuotaExceededException is thrown if it violates quota limit
*/
- private boolean addINode(String src, INode child
- ) throws QuotaExceededException, UnresolvedLinkException {
+ private boolean addINode(String src, INode child)
+ throws QuotaExceededException, UnresolvedLinkException {
byte[][] components = INode.getPathComponents(src);
child.setLocalName(components[components.length-1]);
cacheName(child);
writeLock();
try {
- return addLastINode(getExistingPathINodes(components), child, true);
+ final INodesInPath iip = getExistingPathINodes(components);
+ return addLastINode(iip, child, true);
} finally {
writeUnlock();
}
@@ -1122,7 +1118,7 @@ private boolean addINode(String src, INode child
* Verify quota for adding or moving a new INode with required
* namespace and diskspace to a given position.
*
- * @param inodes INodes corresponding to a path
+ * @param iip INodes corresponding to a path
* @param pos position where a new INode will be added
* @param nsDelta needed namespace
* @param dsDelta needed diskspace
@@ -1131,7 +1127,7 @@ private boolean addINode(String src, INode child
* Pass null if a node is not being moved.
* @throws QuotaExceededException if quota limit is exceeded.
*/
- static void verifyQuota(INode[] inodes, int pos, long nsDelta,
+ static void verifyQuota(INodesInPath iip, int pos, long nsDelta,
long dsDelta, INode commonAncestor) throws QuotaExceededException {
if (nsDelta <= 0 && dsDelta <= 0) {
// if quota is being freed or not being consumed
@@ -1139,18 +1135,20 @@ static void verifyQuota(INode[] inodes, int pos, long nsDelta,
}
// check existing components in the path
- for(int i = (pos > inodes.length? inodes.length: pos) - 1; i >= 0; i--) {
- if (commonAncestor == inodes[i]) {
+ for(int i = (pos > iip.length() ? iip.length(): pos) - 1; i >= 0; i--) {
+ if (commonAncestor == iip.getINode(i)) {
// Stop checking for quota when common ancestor is reached
return;
}
final DirectoryWithQuotaFeature q
- = inodes[i].asDirectory().getDirectoryWithQuotaFeature();
+ = iip.getINode(i).asDirectory().getDirectoryWithQuotaFeature();
if (q != null) { // a directory with quota
try {
q.verifyQuota(nsDelta, dsDelta);
} catch (QuotaExceededException e) {
- e.setPathName(getFullPathName(inodes, i));
+ List<INode> inodes = iip.getReadOnlyINodes();
+ final String path = getFullPathName(inodes.toArray(new INode[inodes.size()]), i);
+ e.setPathName(path);
throw e;
}
}
@@ -1172,22 +1170,20 @@ void verifyINodeName(byte[] childName) throws HadoopIllegalArgumentException {
* Verify child's name for fs limit.
*
* @param childName byte[] containing new child name
- * @param parentPath Object either INode[] or String containing parent path
- * @param pos int position of new child in path
+ * @param parentPath String containing parent path
* @throws PathComponentTooLongException child's name is too long.
*/
- void verifyMaxComponentLength(byte[] childName, Object parentPath,
- int pos) throws PathComponentTooLongException {
+ void verifyMaxComponentLength(byte[] childName, String parentPath)
+ throws PathComponentTooLongException {
if (maxComponentLength == 0) {
return;
}
final int length = childName.length;
if (length > maxComponentLength) {
- final String p = parentPath instanceof INode[]?
- getFullPathName((INode[])parentPath, pos - 1): (String)parentPath;
final PathComponentTooLongException e = new PathComponentTooLongException(
- maxComponentLength, length, p, DFSUtil.bytes2String(childName));
+ maxComponentLength, length, parentPath,
+ DFSUtil.bytes2String(childName));
if (namesystem.isImageLoaded()) {
throw e;
} else {
@@ -1200,20 +1196,16 @@ void verifyMaxComponentLength(byte[] childName, Object parentPath,
/**
* Verify children size for fs limit.
*
- * @param pathComponents INode[] containing full path of inodes to new child
- * @param pos int position of new child in pathComponents
* @throws MaxDirectoryItemsExceededException too many children.
*/
- void verifyMaxDirItems(INode[] pathComponents, int pos)
+ void verifyMaxDirItems(INodeDirectory parent, String parentPath)
throws MaxDirectoryItemsExceededException {
-
- final INodeDirectory parent = pathComponents[pos-1].asDirectory();
final int count = parent.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
if (count >= maxDirItems) {
final MaxDirectoryItemsExceededException e
= new MaxDirectoryItemsExceededException(maxDirItems, count);
if (namesystem.isImageLoaded()) {
- e.setPathName(getFullPathName(pathComponents, pos - 1));
+ e.setPathName(parentPath);
throw e;
} else {
// Do not throw if edits log is still being processed
@@ -1227,9 +1219,9 @@ void verifyMaxDirItems(INode[] pathComponents, int pos)
* The same as {@link #addChild(INodesInPath, int, INode, boolean)}
* with pos = length - 1.
*/
- private boolean addLastINode(INodesInPath inodesInPath,
- INode inode, boolean checkQuota) throws QuotaExceededException {
- final int pos = inodesInPath.getINodes().length - 1;
+ private boolean addLastINode(INodesInPath inodesInPath, INode inode,
+ boolean checkQuota) throws QuotaExceededException {
+ final int pos = inodesInPath.length() - 1;
return addChild(inodesInPath, pos, inode, checkQuota);
}
@@ -1241,18 +1233,18 @@ private boolean addLastINode(INodesInPath inodesInPath,
*/
boolean addChild(INodesInPath iip, int pos, INode child, boolean checkQuota)
throws QuotaExceededException {
- final INode[] inodes = iip.getINodes();
// Disallow creation of /.reserved. This may be created when loading
// editlog/fsimage during upgrade since /.reserved was a valid name in older
// release. This may also be called when a user tries to create a file
// or directory /.reserved.
- if (pos == 1 && inodes[0] == rootDir && isReservedName(child)) {
+ if (pos == 1 && iip.getINode(0) == rootDir && isReservedName(child)) {
throw new HadoopIllegalArgumentException(
"File name \"" + child.getLocalName() + "\" is reserved and cannot "
+ "be created. If this is during upgrade change the name of the "
+ "existing file or directory to another name before upgrading "
+ "to the new release.");
}
+ final INodeDirectory parent = iip.getINode(pos-1).asDirectory();
// The filesystem limits are not really quotas, so this check may appear
// odd. It's because a rename operation deletes the src, tries to add
// to the dest, if that fails, re-adds the src from whence it came.
@@ -1260,8 +1252,9 @@ boolean addChild(INodesInPath iip, int pos, INode child, boolean checkQuota)
// original location because a quota violation would cause the item
// to go "poof". The fs limits must be bypassed for the same reason.
if (checkQuota) {
- verifyMaxComponentLength(child.getLocalNameBytes(), inodes, pos);
- verifyMaxDirItems(inodes, pos);
+ final String parentPath = iip.getPath(pos - 1);
+ verifyMaxComponentLength(child.getLocalNameBytes(), parentPath);
+ verifyMaxDirItems(parent, parentPath);
}
// always verify inode name
verifyINodeName(child.getLocalNameBytes());
@@ -1270,7 +1263,6 @@ boolean addChild(INodesInPath iip, int pos, INode child, boolean checkQuota)
updateCount(iip, pos,
counts.get(Quota.NAMESPACE), counts.get(Quota.DISKSPACE), checkQuota);
boolean isRename = (child.getParent() != null);
- final INodeDirectory parent = inodes[pos-1].asDirectory();
boolean added;
try {
added = parent.addChild(child, true, iip.getLatestSnapshotId());
@@ -1283,7 +1275,6 @@ boolean addChild(INodesInPath iip, int pos, INode child, boolean checkQuota)
updateCountNoQuotaCheck(iip, pos,
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
} else {
- iip.setINode(pos - 1, child.getParent());
if (!isRename) {
AclStorage.copyINodeDefaultAcl(child);
}
@@ -1320,7 +1311,7 @@ long removeLastINode(final INodesInPath iip)
if (!last.isInLatestSnapshot(latestSnapshot)) {
final Quota.Counts counts = last.computeQuotaUsage();
- updateCountNoQuotaCheck(iip, iip.getINodes().length - 1,
+ updateCountNoQuotaCheck(iip, iip.length() - 1,
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
if (INodeReference.tryRemoveReference(last) > 0) {
@@ -1715,10 +1706,10 @@ FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId,
static INode resolveLastINode(String src, INodesInPath iip)
throws FileNotFoundException {
- INode[] inodes = iip.getINodes();
- INode inode = inodes[inodes.length - 1];
- if (inode == null)
+ INode inode = iip.getLastINode();
+ if (inode == null) {
throw new FileNotFoundException("cannot find " + src);
+ }
return inode;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index d12ae1543b4de..2721f85dee644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -343,9 +343,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// See if the file already exists (persistBlocks call)
final INodesInPath iip = fsDir.getINodesInPath(path, true);
- final INode[] inodes = iip.getINodes();
- INodeFile oldFile = INodeFile.valueOf(
- inodes[inodes.length - 1], path, true);
+ INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
if (oldFile != null && addCloseOp.overwrite) {
// This is OP_ADD with overwrite
fsDir.unprotectedDelete(path, addCloseOp.mtime);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2b530fa520cc2..30ac941ccb3f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1861,9 +1861,7 @@ private LocatedBlocks getBlockLocationsUpdateTimes(final String srcArg,
doAccessTime = false;
}
- final INode[] inodes = iip.getINodes();
- final INodeFile inode = INodeFile.valueOf(
- inodes[inodes.length - 1], src);
+ final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
if (isPermissionEnabled) {
checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
}
@@ -8027,8 +8025,8 @@ void checkAccess(String src, FsAction mode) throws IOException {
checkOperation(OperationCategory.READ);
src = FSDirectory.resolvePath(src, pathComponents, dir);
final INodesInPath iip = dir.getINodesInPath(src, true);
- INode[] inodes = iip.getINodes();
- if (inodes[inodes.length - 1] == null) {
+ INode inode = iip.getLastINode();
+ if (inode == null) {
throw new FileNotFoundException("Path not found");
}
if (isPermissionEnabled) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index a0455dc6b824e..8de8c54bfc574 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -20,6 +20,7 @@
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import java.util.Stack;
@@ -144,22 +145,25 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
// check if (parentAccess != null) && file exists, then check sb
// If resolveLink, the check is performed on the link target.
final int snapshotId = inodesInPath.getPathSnapshotId();
- final INode[] inodes = inodesInPath.getINodes();
- int ancestorIndex = inodes.length - 2;
- for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
- ancestorIndex--);
- checkTraverse(inodes, ancestorIndex, snapshotId);
+ final int length = inodesInPath.length();
+ final INode last = length > 0 ? inodesInPath.getLastINode() : null;
+ final INode parent = length > 1 ? inodesInPath.getINode(-2) : null;
+
+ checkTraverse(inodesInPath, snapshotId);
- final INode last = inodes[inodes.length - 1];
if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
- && inodes.length > 1 && last != null) {
- checkStickyBit(inodes[inodes.length - 2], last, snapshotId);
+ && length > 1 && last != null) {
+ checkStickyBit(parent, last, snapshotId);
}
- if (ancestorAccess != null && inodes.length > 1) {
- check(inodes, ancestorIndex, snapshotId, ancestorAccess);
+ if (ancestorAccess != null && length > 1) {
+ List<INode> inodes = inodesInPath.getReadOnlyINodes();
+ INode ancestor = null;
+ for (int i = inodes.size() - 2; i >= 0 && (ancestor = inodes.get(i)) ==
+ null; i--);
+ check(ancestor, snapshotId, ancestorAccess);
}
- if (parentAccess != null && inodes.length > 1) {
- check(inodes, inodes.length - 2, snapshotId, parentAccess);
+ if (parentAccess != null && length > 1 && parent != null) {
+ check(parent, snapshotId, parentAccess);
}
if (access != null) {
check(last, snapshotId, access);
@@ -184,10 +188,15 @@ private void checkOwner(INode inode, int snapshotId
}
/** Guarded by {@link FSNamesystem#readLock()} */
- private void checkTraverse(INode[] inodes, int last, int snapshotId
- ) throws AccessControlException {
- for(int j = 0; j <= last; j++) {
- check(inodes[j], snapshotId, FsAction.EXECUTE);
+ private void checkTraverse(INodesInPath iip, int snapshotId)
+ throws AccessControlException {
+ List<INode> inodes = iip.getReadOnlyINodes();
+ for (int i = 0; i < inodes.size() - 1; i++) {
+ INode inode = inodes.get(i);
+ if (inode == null) {
+ break;
+ }
+ check(inode, snapshotId, FsAction.EXECUTE);
}
}
@@ -215,14 +224,8 @@ private void checkSubAccess(INode inode, int snapshotId, FsAction access,
}
/** Guarded by {@link FSNamesystem#readLock()} */
- private void check(INode[] inodes, int i, int snapshotId, FsAction access
- ) throws AccessControlException {
- check(i >= 0? inodes[i]: null, snapshotId, access);
- }
-
- /** Guarded by {@link FSNamesystem#readLock()} */
- private void check(INode inode, int snapshotId, FsAction access
- ) throws AccessControlException {
+ private void check(INode inode, int snapshotId, FsAction access)
+ throws AccessControlException {
if (inode == null) {
return;
}
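The rewritten permission check above walks backwards from the parent position and skips null entries to find the deepest ancestor that actually exists in the resolved path. A minimal standalone sketch of that scan follows; the AncestorScan class and its sample path are illustrative only and are not part of the patch.

import java.util.Arrays;
import java.util.List;

public class AncestorScan {
  // Return the deepest non-null entry strictly above the last path component,
  // mirroring the backwards scan done before the ancestor access check.
  static <T> T lastExistingAncestor(List<T> resolved) {
    T ancestor = null;
    for (int i = resolved.size() - 2; i >= 0 && (ancestor = resolved.get(i)) == null; i--) {
      // keep walking up while the entry is null (path component does not exist yet)
    }
    return ancestor;
  }

  public static void main(String[] args) {
    // "/dir/sub/file" where "sub" and "file" do not exist resolves to {/, dir, null, null}
    List<String> resolved = Arrays.asList("/", "dir", null, null);
    System.out.println(lastExistingAncestor(resolved)); // prints: dir
  }
}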
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 58f5f3d029fe3..1501fce75f7e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -18,6 +18,9 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.NoSuchElementException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -31,6 +34,9 @@
import com.google.common.base.Preconditions;
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.ID_INTEGER_COMPARATOR;
+
/**
* Contains INodes information resolved from a given path.
*/
@@ -54,7 +60,6 @@ static INodesInPath fromINode(INode inode) {
}
final byte[][] path = new byte[depth][];
final INode[] inodes = new INode[depth];
- final INodesInPath iip = new INodesInPath(path, depth);
tmp = inode;
index = depth;
while (tmp != null) {
@@ -63,8 +68,7 @@ static INodesInPath fromINode(INode inode) {
inodes[index] = tmp;
tmp = tmp.getParent();
}
- iip.setINodes(inodes);
- return iip;
+ return new INodesInPath(inodes, path);
}
/**
@@ -134,30 +138,34 @@ static INodesInPath resolve(final INodeDirectory startingDir,
* @return the specified number of existing INodes in the path
*/
static INodesInPath resolve(final INodeDirectory startingDir,
- final byte[][] components, final int numOfINodes,
+ final byte[][] components, final int numOfINodes,
final boolean resolveLink) throws UnresolvedLinkException {
Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
INode curNode = startingDir;
- final INodesInPath existing = new INodesInPath(components, numOfINodes);
int count = 0;
- int index = numOfINodes - components.length;
- if (index > 0) {
- index = 0;
- }
+ int index = numOfINodes <= components.length ?
+ numOfINodes - components.length : 0;
+ int inodeNum = 0;
+ int capacity = numOfINodes;
+ INode[] inodes = new INode[numOfINodes];
+ boolean isSnapshot = false;
+ int snapshotId = CURRENT_STATE_ID;
+
while (count < components.length && curNode != null) {
final boolean lastComp = (count == components.length - 1);
if (index >= 0) {
- existing.addNode(curNode);
+ inodes[inodeNum++] = curNode;
}
final boolean isRef = curNode.isReference();
final boolean isDir = curNode.isDirectory();
final INodeDirectory dir = isDir? curNode.asDirectory(): null;
if (!isRef && isDir && dir.isWithSnapshot()) {
//if the path is a non-snapshot path, update the latest snapshot.
- if (!existing.isSnapshot()) {
- existing.updateLatestSnapshotId(dir.getDirectoryWithSnapshotFeature()
- .getLastSnapshotId());
+ if (!isSnapshot && shouldUpdateLatestId(
+ dir.getDirectoryWithSnapshotFeature().getLastSnapshotId(),
+ snapshotId)) {
+ snapshotId = dir.getDirectoryWithSnapshotFeature().getLastSnapshotId();
}
} else if (isRef && isDir && !lastComp) {
// If the curNode is a reference node, need to check its dstSnapshot:
@@ -170,19 +178,18 @@ static INodesInPath resolve(final INodeDirectory startingDir,
// the latest snapshot if lastComp is true. In case of the operation is
// a modification operation, we do a similar check in corresponding
// recordModification method.
- if (!existing.isSnapshot()) {
+ if (!isSnapshot) {
int dstSnapshotId = curNode.asReference().getDstSnapshotId();
- int latest = existing.getLatestSnapshotId();
- if (latest == Snapshot.CURRENT_STATE_ID || // no snapshot in dst tree of rename
- (dstSnapshotId != Snapshot.CURRENT_STATE_ID &&
- dstSnapshotId >= latest)) { // the above scenario
- int lastSnapshot = Snapshot.CURRENT_STATE_ID;
+ if (snapshotId == CURRENT_STATE_ID || // no snapshot in dst tree of rename
+ (dstSnapshotId != CURRENT_STATE_ID &&
+ dstSnapshotId >= snapshotId)) { // the above scenario
+ int lastSnapshot = CURRENT_STATE_ID;
DirectoryWithSnapshotFeature sf;
if (curNode.isDirectory() &&
(sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) {
lastSnapshot = sf.getLastSnapshotId();
}
- existing.setSnapshotId(lastSnapshot);
+ snapshotId = lastSnapshot;
}
}
}
@@ -211,9 +218,9 @@ static INodesInPath resolve(final INodeDirectory startingDir,
// skip the ".snapshot" in components
count++;
index++;
- existing.isSnapshot = true;
+ isSnapshot = true;
if (index >= 0) { // decrease the capacity by 1 to account for .snapshot
- existing.capacity--;
+ capacity--;
}
// check if ".snapshot" is the last element of components
if (count == components.length - 1) {
@@ -222,65 +229,82 @@ static INodesInPath resolve(final INodeDirectory startingDir,
// Resolve snapshot root
final Snapshot s = dir.getSnapshot(components[count + 1]);
if (s == null) {
- //snapshot not found
- curNode = null;
+ curNode = null; // snapshot not found
} else {
curNode = s.getRoot();
- existing.setSnapshotId(s.getId());
- }
- if (index >= -1) {
- existing.snapshotRootIndex = existing.numNonNull;
+ snapshotId = s.getId();
}
} else {
// normal case, and also for resolving file/dir under snapshot root
- curNode = dir.getChild(childName, existing.getPathSnapshotId());
+ curNode = dir.getChild(childName,
+ isSnapshot ? snapshotId : CURRENT_STATE_ID);
}
count++;
index++;
}
- return existing;
+ if (isSnapshot && capacity < numOfINodes &&
+ !isDotSnapshotDir(components[components.length - 1])) {
+ // for a snapshot path, shrink the inode array; however, for a path ending
+ // with .snapshot, still keep the last null inode in the array
+ INode[] newNodes = new INode[capacity];
+ System.arraycopy(inodes, 0, newNodes, 0, capacity);
+ inodes = newNodes;
+ }
+ return new INodesInPath(inodes, components, isSnapshot, snapshotId);
+ }
+
+ private static boolean shouldUpdateLatestId(int sid, int snapshotId) {
+ return snapshotId == CURRENT_STATE_ID || (sid != CURRENT_STATE_ID &&
+ ID_INTEGER_COMPARATOR.compare(snapshotId, sid) < 0);
}
- private final byte[][] path;
- /**
- * Array with the specified number of INodes resolved for a given path.
- */
- private INode[] inodes;
/**
- * Indicate the number of non-null elements in {@link #inodes}
+ * Replace an inode of the given INodesInPath in the given position. We do a
+ * deep copy of the INode array.
+ * @param pos the position of the replacement
+ * @param inode the new inode
+ * @return a new INodesInPath instance
*/
- private int numNonNull;
+ public static INodesInPath replace(INodesInPath iip, int pos, INode inode) {
+ Preconditions.checkArgument(iip.length() > 0 && pos > 0 // not for root
+ && pos < iip.length());
+ if (iip.getINode(pos) == null) {
+ Preconditions.checkState(iip.getINode(pos - 1) != null);
+ }
+ INode[] inodes = new INode[iip.inodes.length];
+ System.arraycopy(iip.inodes, 0, inodes, 0, inodes.length);
+ inodes[pos] = inode;
+ return new INodesInPath(inodes, iip.path, iip.isSnapshot, iip.snapshotId);
+ }
+
+ private final byte[][] path;
/**
- * The path for a snapshot file/dir contains the .snapshot thus makes the
- * length of the path components larger the number of inodes. We use
- * the capacity to control this special case.
+ * Array with the specified number of INodes resolved for a given path.
*/
- private int capacity;
+ private final INode[] inodes;
/**
* true if this path corresponds to a snapshot
*/
- private boolean isSnapshot;
- /**
- * index of the {@link Snapshot.Root} node in the inodes array,
- * -1 for non-snapshot paths.
- */
- private int snapshotRootIndex;
+ private final boolean isSnapshot;
/**
* For snapshot paths, it is the id of the snapshot; or
* {@link Snapshot#CURRENT_STATE_ID} if the snapshot does not exist. For
* non-snapshot paths, it is the id of the latest snapshot found in the path;
* or {@link Snapshot#CURRENT_STATE_ID} if no snapshot is found.
*/
- private int snapshotId = Snapshot.CURRENT_STATE_ID;
+ private final int snapshotId;
- private INodesInPath(byte[][] path, int number) {
+ private INodesInPath(INode[] inodes, byte[][] path, boolean isSnapshot,
+ int snapshotId) {
+ Preconditions.checkArgument(inodes != null && path != null);
+ this.inodes = inodes;
this.path = path;
- assert (number >= 0);
- inodes = new INode[number];
- capacity = number;
- numNonNull = 0;
- isSnapshot = false;
- snapshotRootIndex = -1;
+ this.isSnapshot = isSnapshot;
+ this.snapshotId = snapshotId;
+ }
+
+ private INodesInPath(INode[] inodes, byte[][] path) {
+ this(inodes, path, false, CURRENT_STATE_ID);
}
/**
@@ -296,49 +320,28 @@ public int getLatestSnapshotId() {
* For non-snapshot paths, return {@link Snapshot#CURRENT_STATE_ID}.
*/
public int getPathSnapshotId() {
- return isSnapshot ? snapshotId : Snapshot.CURRENT_STATE_ID;
- }
-
- private void setSnapshotId(int sid) {
- snapshotId = sid;
+ return isSnapshot ? snapshotId : CURRENT_STATE_ID;
}
-
- private void updateLatestSnapshotId(int sid) {
- if (snapshotId == Snapshot.CURRENT_STATE_ID
- || (sid != Snapshot.CURRENT_STATE_ID && Snapshot.ID_INTEGER_COMPARATOR
- .compare(snapshotId, sid) < 0)) {
- snapshotId = sid;
- }
- }
-
- /**
- * @return a new array of inodes excluding the null elements introduced by
- * snapshot path elements. E.g., after resolving path "/dir/.snapshot",
- * {@link #inodes} is {/, dir, null}, while the returned array only contains
- * inodes of "/" and "dir". Note the length of the returned array is always
- * equal to {@link #capacity}.
- */
- INode[] getINodes() {
- if (capacity == inodes.length) {
- return inodes;
- }
- INode[] newNodes = new INode[capacity];
- System.arraycopy(inodes, 0, newNodes, 0, capacity);
- return newNodes;
- }
-
/**
* @return the i-th inode if i >= 0;
* otherwise, i < 0, return the (length + i)-th inode.
*/
public INode getINode(int i) {
- return inodes[i >= 0? i: inodes.length + i];
+ if (inodes == null || inodes.length == 0) {
+ throw new NoSuchElementException("inodes is null or empty");
+ }
+ int index = i >= 0 ? i : inodes.length + i;
+ if (index < inodes.length && index >= 0) {
+ return inodes[index];
+ } else {
+ throw new NoSuchElementException("inodes.length == " + inodes.length);
+ }
}
/** @return the last inode. */
public INode getLastINode() {
- return inodes[inodes.length - 1];
+ return getINode(-1);
}
byte[] getLastLocalName() {
@@ -350,48 +353,29 @@ public String getPath() {
return DFSUtil.byteArray2PathString(path);
}
- /**
- * @return index of the {@link Snapshot.Root} node in the inodes array,
- * -1 for non-snapshot paths.
- */
- int getSnapshotRootIndex() {
- return this.snapshotRootIndex;
- }
-
- /**
- * @return isSnapshot true for a snapshot path
- */
- boolean isSnapshot() {
- return this.isSnapshot;
- }
-
- /**
- * Add an INode at the end of the array
- */
- private void addNode(INode node) {
- inodes[numNonNull++] = node;
+ public String getParentPath() {
+ return getPath(path.length - 1);
}
- private void setINodes(INode inodes[]) {
- this.inodes = inodes;
- this.numNonNull = this.inodes.length;
+ public String getPath(int pos) {
+ return DFSUtil.byteArray2PathString(path, 0, pos);
}
-
- void setINode(int i, INode inode) {
- inodes[i >= 0? i: inodes.length + i] = inode;
+
+ public int length() {
+ return inodes.length;
}
-
- void setLastINode(INode last) {
- inodes[inodes.length - 1] = last;
+
+ public List<INode> getReadOnlyINodes() {
+ return Collections.unmodifiableList(Arrays.asList(inodes));
}
-
+
/**
- * @return The number of non-null elements
+ * @return isSnapshot true for a snapshot path
*/
- int getNumNonNull() {
- return numNonNull;
+ boolean isSnapshot() {
+ return this.isSnapshot;
}
-
+
private static String toString(INode inode) {
return inode == null? null: inode.getLocalName();
}
@@ -420,20 +404,16 @@ private String toString(boolean vaildateObject) {
}
b.append("], length=").append(inodes.length);
}
- b.append("\n numNonNull = ").append(numNonNull)
- .append("\n capacity = ").append(capacity)
- .append("\n isSnapshot = ").append(isSnapshot)
- .append("\n snapshotRootIndex = ").append(snapshotRootIndex)
+ b.append("\n isSnapshot = ").append(isSnapshot)
.append("\n snapshotId = ").append(snapshotId);
return b.toString();
}
void validate() {
- // check parent up to snapshotRootIndex or numNonNull
- final int n = snapshotRootIndex >= 0? snapshotRootIndex + 1: numNonNull;
+ // check parent up to snapshotRootIndex if this is a snapshot path
int i = 0;
if (inodes[i] != null) {
- for(i++; i < n && inodes[i] != null; i++) {
+ for(i++; i < inodes.length && inodes[i] != null; i++) {
final INodeDirectory parent_i = inodes[i].getParent();
final INodeDirectory parent_i_1 = inodes[i-1].getParent();
if (parent_i != inodes[i-1] &&
@@ -447,8 +427,8 @@ void validate() {
}
}
}
- if (i != n) {
- throw new AssertionError("i = " + i + " != " + n
+ if (i != inodes.length) {
+ throw new AssertionError("i = " + i + " != " + inodes.length
+ ", this=" + toString(false));
}
}
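The new INodesInPath above is immutable and exposes its resolved nodes through length(), getINode(i) (where a negative index counts back from the end), getLastINode() and getReadOnlyINodes(), instead of handing out the raw INode[] array. A minimal standalone sketch of that accessor convention follows; the ResolvedPath class is illustrative only and is not part of the patch.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.NoSuchElementException;

public class ResolvedPath<T> {
  private final T[] nodes;

  ResolvedPath(T[] nodes) {
    // defensive copy keeps the view immutable once constructed
    this.nodes = Arrays.copyOf(nodes, nodes.length);
  }

  T get(int i) {
    int index = i >= 0 ? i : nodes.length + i; // negative index => offset from the end
    if (index < 0 || index >= nodes.length) {
      throw new NoSuchElementException("length == " + nodes.length);
    }
    return nodes[index];
  }

  T getLast() {
    return get(-1);
  }

  int length() {
    return nodes.length;
  }

  List<T> readOnlyView() {
    return Collections.unmodifiableList(Arrays.asList(nodes));
  }

  public static void main(String[] args) {
    ResolvedPath<String> p = new ResolvedPath<>(new String[] {"/", "dir", "file1"});
    System.out.println(p.getLast());  // file1
    System.out.println(p.get(-2));    // dir
    System.out.println(p.length());   // 3
  }
}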
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index d1a23779a7d68..354bff1ab855c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -23,6 +23,7 @@
import static org.junit.Assert.assertTrue;
import java.io.FileNotFoundException;
+import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -104,19 +105,18 @@ public void testAllowSnapshot() throws Exception {
}
}
- static Snapshot getSnapshot(INodesInPath inodesInPath, String name) {
+ static Snapshot getSnapshot(INodesInPath inodesInPath, String name,
+ int index) {
if (name == null) {
return null;
}
- final int i = inodesInPath.getSnapshotRootIndex() - 1;
- final INode inode = inodesInPath.getINodes()[i];
+ final INode inode = inodesInPath.getINode(index - 1);
return inode.asDirectory().getSnapshot(DFSUtil.string2Bytes(name));
}
static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot,
final Snapshot snapshot, int index) {
assertEquals(isSnapshot, inodesInPath.isSnapshot());
- assertEquals(index, inodesInPath.getSnapshotRootIndex());
assertEquals(Snapshot.getSnapshotId(isSnapshot ? snapshot : null),
inodesInPath.getPathSnapshotId());
if (!isSnapshot) {
@@ -124,7 +124,7 @@ static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot,
inodesInPath.getLatestSnapshotId());
}
if (isSnapshot && index >= 0) {
- assertEquals(Snapshot.Root.class, inodesInPath.getINodes()[index].getClass());
+ assertEquals(Snapshot.Root.class, inodesInPath.getINode(index).getClass());
}
}
@@ -142,38 +142,35 @@ public void testNonSnapshotPathINodes() throws Exception {
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- INode[] inodes = nodesInPath.getINodes();
// The number of inodes should be equal to components.length
- assertEquals(inodes.length, components.length);
+ assertEquals(nodesInPath.length(), components.length);
// The returned nodesInPath should be non-snapshot
assertSnapshot(nodesInPath, false, null, -1);
// The last INode should be associated with file1
assertTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath,
- inodes[components.length - 1] != null);
- assertEquals(inodes[components.length - 1].getFullPathName(),
+ nodesInPath.getINode(components.length - 1) != null);
+ assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
file1.toString());
- assertEquals(inodes[components.length - 2].getFullPathName(),
+ assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
sub1.toString());
- assertEquals(inodes[components.length - 3].getFullPathName(),
+ assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
dir.toString());
// Call getExistingPathINodes and request only one INode. This is used
// when identifying the INode for a given path.
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
- inodes = nodesInPath.getINodes();
- assertEquals(inodes.length, 1);
+ assertEquals(nodesInPath.length(), 1);
assertSnapshot(nodesInPath, false, null, -1);
- assertEquals(inodes[0].getFullPathName(), file1.toString());
+ assertEquals(nodesInPath.getINode(0).getFullPathName(), file1.toString());
// Call getExistingPathINodes and request 2 INodes. This is usually used
// when identifying the parent INode of a given path.
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
- inodes = nodesInPath.getINodes();
- assertEquals(inodes.length, 2);
+ assertEquals(nodesInPath.length(), 2);
assertSnapshot(nodesInPath, false, null, -1);
- assertEquals(inodes[1].getFullPathName(), file1.toString());
- assertEquals(inodes[0].getFullPathName(), sub1.toString());
+ assertEquals(nodesInPath.getINode(1).getFullPathName(), file1.toString());
+ assertEquals(nodesInPath.getINode(0).getFullPathName(), sub1.toString());
}
/**
@@ -191,53 +188,49 @@ public void testSnapshotPathINodes() throws Exception {
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- INode[] inodes = nodesInPath.getINodes();
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
- assertEquals(inodes.length, components.length - 1);
+ assertEquals(nodesInPath.length(), components.length - 1);
// SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1}
- final Snapshot snapshot = getSnapshot(nodesInPath, "s1");
+ final Snapshot snapshot = getSnapshot(nodesInPath, "s1", 3);
assertSnapshot(nodesInPath, true, snapshot, 3);
// Check the INode for file1 (snapshot file)
- INode snapshotFileNode = inodes[inodes.length - 1];
+ INode snapshotFileNode = nodesInPath.getLastINode();
assertINodeFile(snapshotFileNode, file1);
assertTrue(snapshotFileNode.getParent().isWithSnapshot());
// Call getExistingPathINodes and request only one INode.
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
- inodes = nodesInPath.getINodes();
- assertEquals(inodes.length, 1);
+ assertEquals(nodesInPath.length(), 1);
// The snapshotroot (s1) is not included in inodes. Thus the
// snapshotRootIndex should be -1.
assertSnapshot(nodesInPath, true, snapshot, -1);
// Check the INode for file1 (snapshot file)
- assertINodeFile(inodes[inodes.length - 1], file1);
+ assertINodeFile(nodesInPath.getLastINode(), file1);
// Call getExistingPathINodes and request 2 INodes.
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
- inodes = nodesInPath.getINodes();
- assertEquals(inodes.length, 2);
+ assertEquals(nodesInPath.length(), 2);
// There should be two INodes in inodes: s1 and snapshot of file1. Thus the
// SnapshotRootIndex should be 0.
assertSnapshot(nodesInPath, true, snapshot, 0);
- assertINodeFile(inodes[inodes.length - 1], file1);
+ assertINodeFile(nodesInPath.getLastINode(), file1);
// Resolve the path "/TestSnapshot/sub1/.snapshot"
String dotSnapshotPath = sub1.toString() + "/.snapshot";
names = INode.getPathNames(dotSnapshotPath);
components = INode.getPathComponents(names);
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- inodes = nodesInPath.getINodes();
- // The number of INodes returned should be components.length - 1 since we
- // will ignore ".snapshot"
- assertEquals(inodes.length, components.length - 1);
+ // The number of INodes returned should still be components.length
+ // since we put a null in the inode array for ".snapshot"
+ assertEquals(nodesInPath.length(), components.length);
// No SnapshotRoot dir is included in the resolved inodes
assertSnapshot(nodesInPath, true, snapshot, -1);
- // The last INode should be the INode for sub1
- final INode last = inodes[inodes.length - 1];
- assertEquals(last.getFullPathName(), sub1.toString());
- assertFalse(last instanceof INodeFile);
+ // The last INode should be null, the last but 1 should be sub1
+ assertNull(nodesInPath.getLastINode());
+ assertEquals(nodesInPath.getINode(-2).getFullPathName(), sub1.toString());
+ assertTrue(nodesInPath.getINode(-2).isDirectory());
String[] invalidPathComponent = {"invalidDir", "foo", ".snapshot", "bar"};
Path invalidPath = new Path(invalidPathComponent[0]);
@@ -275,16 +268,15 @@ public void testSnapshotPathINodesAfterDeletion() throws Exception {
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- INode[] inodes = nodesInPath.getINodes();
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
- assertEquals(inodes.length, components.length - 1);
+ assertEquals(nodesInPath.length(), components.length - 1);
// SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s2, file1}
- snapshot = getSnapshot(nodesInPath, "s2");
+ snapshot = getSnapshot(nodesInPath, "s2", 3);
assertSnapshot(nodesInPath, true, snapshot, 3);
// Check the INode for file1 (snapshot file)
- final INode inode = inodes[inodes.length - 1];
+ final INode inode = nodesInPath.getLastINode();
assertEquals(file1.getName(), inode.getLocalName());
assertTrue(inode.asFile().isWithSnapshot());
}
@@ -293,25 +285,34 @@ public void testSnapshotPathINodesAfterDeletion() throws Exception {
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- INode[] inodes = nodesInPath.getINodes();
// The length of inodes should be equal to components.length
- assertEquals(inodes.length, components.length);
+ assertEquals(nodesInPath.length(), components.length);
// The number of non-null elements should be components.length - 1 since
// file1 has been deleted
- assertEquals(nodesInPath.getNumNonNull(), components.length - 1);
+ assertEquals(getNumNonNull(nodesInPath), components.length - 1);
// The returned nodesInPath should be non-snapshot
assertSnapshot(nodesInPath, false, snapshot, -1);
// The last INode should be null, and the one before should be associated
// with sub1
- assertNull(inodes[components.length - 1]);
- assertEquals(inodes[components.length - 2].getFullPathName(),
+ assertNull(nodesInPath.getINode(components.length - 1));
+ assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
sub1.toString());
- assertEquals(inodes[components.length - 3].getFullPathName(),
+ assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
dir.toString());
hdfs.deleteSnapshot(sub1, "s2");
hdfs.disallowSnapshot(sub1);
}
+ private int getNumNonNull(INodesInPath iip) {
List<INode> inodes = iip.getReadOnlyINodes();
+ for (int i = inodes.size() - 1; i >= 0; i--) {
+ if (inodes.get(i) != null) {
+ return i+1;
+ }
+ }
+ return 0;
+ }
+
/**
* for snapshot file while adding a new file after snapshot.
*/
@@ -333,39 +334,37 @@ public void testSnapshotPathINodesWithAddedFile() throws Exception {
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- INode[] inodes = nodesInPath.getINodes();
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
- assertEquals(inodes.length, components.length - 1);
+ assertEquals(nodesInPath.length(), components.length - 1);
// The number of non-null inodes should be components.length - 2, since
// snapshot of file3 does not exist
- assertEquals(nodesInPath.getNumNonNull(), components.length - 2);
- s4 = getSnapshot(nodesInPath, "s4");
+ assertEquals(getNumNonNull(nodesInPath), components.length - 2);
+ s4 = getSnapshot(nodesInPath, "s4", 3);
// SnapshotRootIndex should still be 3: {root, Testsnapshot, sub1, s4, null}
assertSnapshot(nodesInPath, true, s4, 3);
// Check the last INode in inodes, which should be null
- assertNull(inodes[inodes.length - 1]);
+ assertNull(nodesInPath.getINode(nodesInPath.length() - 1));
}
// Check the inodes for /TestSnapshot/sub1/file3
String[] names = INode.getPathNames(file3.toString());
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- INode[] inodes = nodesInPath.getINodes();
// The number of inodes should be equal to components.length
- assertEquals(inodes.length, components.length);
+ assertEquals(nodesInPath.length(), components.length);
// The returned nodesInPath should be non-snapshot
assertSnapshot(nodesInPath, false, s4, -1);
// The last INode should be associated with file3
- assertEquals(inodes[components.length - 1].getFullPathName(),
+ assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
file3.toString());
- assertEquals(inodes[components.length - 2].getFullPathName(),
+ assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
sub1.toString());
- assertEquals(inodes[components.length - 3].getFullPathName(),
+ assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
dir.toString());
hdfs.deleteSnapshot(sub1, "s4");
hdfs.disallowSnapshot(sub1);
@@ -380,15 +379,15 @@ public void testSnapshotPathINodesAfterModification() throws Exception {
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- INode[] inodes = nodesInPath.getINodes();
// The number of inodes should be equal to components.length
- assertEquals(inodes.length, components.length);
+ assertEquals(nodesInPath.length(), components.length);
// The last INode should be associated with file1
- assertEquals(inodes[components.length - 1].getFullPathName(),
+ assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
file1.toString());
// record the modification time of the inode
- final long modTime = inodes[inodes.length - 1].getModificationTime();
+ final long modTime = nodesInPath.getINode(nodesInPath.length() - 1)
+ .getModificationTime();
// Create a snapshot for the dir, and check the inodes for the path
// pointing to a snapshot file
@@ -403,14 +402,13 @@ public void testSnapshotPathINodesAfterModification() throws Exception {
names = INode.getPathNames(snapshotPath);
components = INode.getPathComponents(names);
INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
- INode[] ssInodes = ssNodesInPath.getINodes();
// Length of ssInodes should be (components.length - 1), since we will
// ignore ".snapshot"
- assertEquals(ssInodes.length, components.length - 1);
- final Snapshot s3 = getSnapshot(ssNodesInPath, "s3");
+ assertEquals(ssNodesInPath.length(), components.length - 1);
+ final Snapshot s3 = getSnapshot(ssNodesInPath, "s3", 3);
assertSnapshot(ssNodesInPath, true, s3, 3);
// Check the INode for snapshot of file1
- INode snapshotFileNode = ssInodes[ssInodes.length - 1];
+ INode snapshotFileNode = ssNodesInPath.getLastINode();
assertEquals(snapshotFileNode.getLocalName(), file1.getName());
assertTrue(snapshotFileNode.asFile().isWithSnapshot());
// The modification time of the snapshot INode should be the same with the
@@ -423,14 +421,14 @@ public void testSnapshotPathINodesAfterModification() throws Exception {
components = INode.getPathComponents(names);
INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
assertSnapshot(newNodesInPath, false, s3, -1);
- INode[] newInodes = newNodesInPath.getINodes();
// The number of inodes should be equal to components.length
- assertEquals(newInodes.length, components.length);
+ assertEquals(newNodesInPath.length(), components.length);
// The last INode should be associated with file1
final int last = components.length - 1;
- assertEquals(newInodes[last].getFullPathName(), file1.toString());
+ assertEquals(newNodesInPath.getINode(last).getFullPathName(),
+ file1.toString());
// The modification time of the INode for file3 should have been changed
- Assert.assertFalse(modTime == newInodes[last].getModificationTime());
+ Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
hdfs.deleteSnapshot(sub1, "s3");
hdfs.disallowSnapshot(sub1);
}
From 75f9aa2b95bbc4071ddd3cd76dd806bb58512ecb Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 13:08:51 -0800
Subject: [PATCH 022/432] HADOOP-11379. Fix new findbugs warnings in
hadoop-auth*. Contributed by Li Lu.
---
.../hadoop/security/authentication/examples/WhoClient.java | 5 ++++-
.../authentication/util/RandomSignerSecretProvider.java | 4 +++-
.../apache/hadoop/security/authentication/util/Signer.java | 3 ++-
.../authentication/util/StringSignerSecretProvider.java | 3 ++-
.../security/authentication/util/ZKSignerSecretProvider.java | 3 ++-
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
6 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
index 2299ae1fd8089..f5cff2b529a5f 100644
--- a/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
+++ b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
@@ -19,6 +19,7 @@
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
+import java.nio.charset.Charset;
/**
* Example that uses AuthenticatedURL.
@@ -39,7 +40,9 @@ public static void main(String[] args) {
System.out.println("Status code: " + conn.getResponseCode() + " " + conn.getResponseMessage());
System.out.println();
if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
- BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+ BufferedReader reader = new BufferedReader(
+ new InputStreamReader(
+ conn.getInputStream(), Charset.forName("UTF-8")));
String line = reader.readLine();
while (line != null) {
System.out.println(line);
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
index 29e5661cb0bd5..41059a7e00900 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
@@ -14,6 +14,8 @@
package org.apache.hadoop.security.authentication.util;
import com.google.common.annotations.VisibleForTesting;
+
+import java.nio.charset.Charset;
import java.util.Random;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -46,6 +48,6 @@ public RandomSignerSecretProvider(long seed) {
@Override
protected byte[] generateNewSecret() {
- return Long.toString(rand.nextLong()).getBytes();
+ return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
}
}
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
index f639503bd6f10..aa63e403c6364 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
@@ -15,6 +15,7 @@
import org.apache.commons.codec.binary.Base64;
+import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
@@ -86,7 +87,7 @@ public String verifyAndExtract(String signedStr) throws SignerException {
protected String computeSignature(byte[] secret, String str) {
try {
MessageDigest md = MessageDigest.getInstance("SHA");
- md.update(str.getBytes());
+ md.update(str.getBytes(Charset.forName("UTF-8")));
md.update(secret);
byte[] digest = md.digest();
return new Base64(0).encodeToString(digest);
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
index 7aaccd2914c22..57ddd372fe4b1 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
@@ -13,6 +13,7 @@
*/
package org.apache.hadoop.security.authentication.util;
+import java.nio.charset.Charset;
import java.util.Properties;
import javax.servlet.ServletContext;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -36,7 +37,7 @@ public void init(Properties config, ServletContext servletContext,
long tokenValidity) throws Exception {
String signatureSecret = config.getProperty(
AuthenticationFilter.SIGNATURE_SECRET, null);
- secret = signatureSecret.getBytes();
+ secret = signatureSecret.getBytes(Charset.forName("UTF-8"));
secrets = new byte[][]{secret};
}
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
index f8db2ee743ce4..11bfccd05c6e9 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
@@ -15,6 +15,7 @@
import com.google.common.annotations.VisibleForTesting;
import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -369,7 +370,7 @@ private synchronized void pullFromZK(boolean isInit) {
}
private byte[] generateRandomSecret() {
- return Long.toString(rand.nextLong()).getBytes();
+ return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
}
/**
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b030bf7d5fe55..e6b44e9f2773a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -548,6 +548,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11273. TestMiniKdc failure: login options not compatible with IBM
JDK. (Gao Zhong Liang via wheat9)
+ HADOOP-11379. Fix new findbugs warnings in hadoop-auth*. (Li Lu via wheat9)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
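The findbugs fixes above all follow the same pattern: replace the platform-default String-to-bytes conversion with an explicit UTF-8 charset so the produced bytes are deterministic across platforms. A minimal standalone sketch follows; the CharsetExample class is illustrative only and is not part of the patch, and StandardCharsets.UTF_8 (Java 7+) is shown as an equivalent alternative to Charset.forName("UTF-8").

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

public class CharsetExample {
  public static void main(String[] args) {
    String token = "secret-123";
    byte[] defaultBytes = token.getBytes();                           // uses the JVM default charset, platform dependent
    byte[] explicitBytes = token.getBytes(Charset.forName("UTF-8"));  // what the patch uses
    byte[] constantBytes = token.getBytes(StandardCharsets.UTF_8);    // equivalent, avoids the name lookup
    System.out.println(defaultBytes.length + " " + explicitBytes.length + " " + constantBytes.length);
  }
}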
From de68820d831acb21ff899abe4db7639e971b0b19 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 13:10:03 -0800
Subject: [PATCH 023/432] HADOOP-11378. Fix new findbugs warnings in
hadoop-kms. Contributed by Li Lu.
---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java | 4 +++-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e6b44e9f2773a..40aab856461fc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -550,6 +550,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11379. Fix new findbugs warnings in hadoop-auth*. (Li Lu via wheat9)
+ HADOOP-11378. Fix new findbugs warnings in hadoop-kms. (Li Lu via wheat9)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
index 3674e7a87aaf3..31fac9f7b2061 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
@@ -32,6 +32,7 @@
import java.io.Writer;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
+import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
@@ -62,7 +63,8 @@ public void writeTo(Object obj, Class<?> aClass, Type type,
Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, Object> stringObjectMultivaluedMap,
OutputStream outputStream) throws IOException, WebApplicationException {
- Writer writer = new OutputStreamWriter(outputStream);
+ Writer writer = new OutputStreamWriter(outputStream, Charset
+ .forName("UTF-8"));
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
}
From 5b75ea7b5f8225067453fc9deb6e4c0023a0f7a2 Mon Sep 17 00:00:00 2001
From: Karthik Kambatla
Date: Tue, 9 Dec 2014 14:00:31 -0800
Subject: [PATCH 024/432] YARN-2910. FSLeafQueue can throw
ConcurrentModificationException. (Wilfred Spiegelenburg via kasha)
---
hadoop-yarn-project/CHANGES.txt | 3 +
.../scheduler/fair/FSAppAttempt.java | 2 +-
.../scheduler/fair/FSLeafQueue.java | 151 ++++++++++++------
.../scheduler/fair/TestFSLeafQueue.java | 93 ++++++++++-
4 files changed, 199 insertions(+), 50 deletions(-)
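This change guards FSLeafQueue's runnable and non-runnable app lists with a fair ReentrantReadWriteLock, as the diffs that follow show: mutations take the write lock and iterations take the read lock, so a scheduler thread can no longer hit a ConcurrentModificationException while another thread adds or removes an application. A minimal standalone sketch of that pattern follows; the GuardedList class is illustrative only and is not part of the patch.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class GuardedList<T> {
  private final List<T> items = new ArrayList<>();
  // "true" requests fair ordering so readers and writers are served roughly in arrival order
  private final ReadWriteLock rwl = new ReentrantReadWriteLock(true);
  private final Lock readLock = rwl.readLock();
  private final Lock writeLock = rwl.writeLock();

  public void add(T item) {
    writeLock.lock();
    try {
      items.add(item);
    } finally {
      writeLock.unlock();
    }
  }

  public int size() {
    readLock.lock();
    try {
      int count = 0;
      for (T ignored : items) {  // safe: no writer can modify the list while the read lock is held
        count++;
      }
      return count;
    } finally {
      readLock.unlock();
    }
  }

  public static void main(String[] args) {
    GuardedList<String> apps = new GuardedList<>();
    apps.add("app-1");
    apps.add("app-2");
    System.out.println(apps.size()); // 2
  }
}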
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d06c8312c5ff0..d87322ffab76c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -203,6 +203,9 @@ Release 2.7.0 - UNRELEASED
YARN-2931. PublicLocalizer may fail until directory is initialized by
LocalizeRunner. (Anubhav Dhoot via kasha)
+ YARN-2910. FSLeafQueue can throw ConcurrentModificationException.
+ (Wilfred Spiegelenburg via kasha)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index b9966e7f5511a..b23ec3ed30e21 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -172,7 +172,7 @@ private synchronized void unreserveInternal(
}
@Override
- public synchronized Resource getHeadroom() {
+ public Resource getHeadroom() {
final FSQueue queue = (FSQueue) this.queue;
SchedulingPolicy policy = queue.getPolicy();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 345ea8b7c365a..bbf1be7175583 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -23,6 +23,9 @@
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
@@ -50,6 +53,10 @@ public class FSLeafQueue extends FSQueue {
new ArrayList<FSAppAttempt>();
private final List<FSAppAttempt> nonRunnableApps =
new ArrayList<FSAppAttempt>();
+ // get a lock with fair distribution for app list updates
+ private final ReadWriteLock rwl = new ReentrantReadWriteLock(true);
+ private final Lock readLock = rwl.readLock();
+ private final Lock writeLock = rwl.writeLock();
private Resource demand = Resources.createResource(0);
@@ -72,16 +79,26 @@ public FSLeafQueue(String name, FairScheduler scheduler,
}
public void addApp(FSAppAttempt app, boolean runnable) {
- if (runnable) {
- runnableApps.add(app);
- } else {
- nonRunnableApps.add(app);
+ writeLock.lock();
+ try {
+ if (runnable) {
+ runnableApps.add(app);
+ } else {
+ nonRunnableApps.add(app);
+ }
+ } finally {
+ writeLock.unlock();
}
}
// for testing
void addAppSchedulable(FSAppAttempt appSched) {
- runnableApps.add(appSched);
+ writeLock.lock();
+ try {
+ runnableApps.add(appSched);
+ } finally {
+ writeLock.unlock();
+ }
}
/**
@@ -89,18 +106,25 @@ void addAppSchedulable(FSAppAttempt appSched) {
* @return whether or not the app was runnable
*/
public boolean removeApp(FSAppAttempt app) {
- if (runnableApps.remove(app)) {
- // Update AM resource usage
- if (app.isAmRunning() && app.getAMResource() != null) {
- Resources.subtractFrom(amResourceUsage, app.getAMResource());
+ boolean runnable = false;
+ writeLock.lock();
+ try {
+ if (runnableApps.remove(app)) {
+ runnable = true;
+ } else if (nonRunnableApps.remove(app)) {
+ runnable = false; //nop, runnable is initialised to false already
+ } else {
+ throw new IllegalStateException("Given app to remove " + app +
+ " does not exist in queue " + this);
}
- return true;
- } else if (nonRunnableApps.remove(app)) {
- return false;
- } else {
- throw new IllegalStateException("Given app to remove " + app +
- " does not exist in queue " + this);
+ } finally {
+ writeLock.unlock();
+ }
+ // Update AM resource usage if needed
+ if (runnable && app.isAmRunning() && app.getAMResource() != null) {
+ Resources.subtractFrom(amResourceUsage, app.getAMResource());
}
+ return runnable;
}
public Collection<FSAppAttempt> getRunnableAppSchedulables() {
@@ -114,11 +138,16 @@ public List<FSAppAttempt> getNonRunnableAppSchedulables() {
@Override
public void collectSchedulerApplications(
Collection<ApplicationAttemptId> apps) {
- for (FSAppAttempt appSched : runnableApps) {
- apps.add(appSched.getApplicationAttemptId());
- }
- for (FSAppAttempt appSched : nonRunnableApps) {
- apps.add(appSched.getApplicationAttemptId());
+ readLock.lock();
+ try {
+ for (FSAppAttempt appSched : runnableApps) {
+ apps.add(appSched.getApplicationAttemptId());
+ }
+ for (FSAppAttempt appSched : nonRunnableApps) {
+ apps.add(appSched.getApplicationAttemptId());
+ }
+ } finally {
+ readLock.unlock();
}
}
@@ -144,11 +173,16 @@ public Resource getDemand() {
@Override
public Resource getResourceUsage() {
Resource usage = Resources.createResource(0);
- for (FSAppAttempt app : runnableApps) {
- Resources.addTo(usage, app.getResourceUsage());
- }
- for (FSAppAttempt app : nonRunnableApps) {
- Resources.addTo(usage, app.getResourceUsage());
+ readLock.lock();
+ try {
+ for (FSAppAttempt app : runnableApps) {
+ Resources.addTo(usage, app.getResourceUsage());
+ }
+ for (FSAppAttempt app : nonRunnableApps) {
+ Resources.addTo(usage, app.getResourceUsage());
+ }
+ } finally {
+ readLock.unlock();
}
return usage;
}
@@ -164,17 +198,22 @@ public void updateDemand() {
Resource maxRes = scheduler.getAllocationConfiguration()
.getMaxResources(getName());
demand = Resources.createResource(0);
- for (FSAppAttempt sched : runnableApps) {
- if (Resources.equals(demand, maxRes)) {
- break;
+ readLock.lock();
+ try {
+ for (FSAppAttempt sched : runnableApps) {
+ if (Resources.equals(demand, maxRes)) {
+ break;
+ }
+ updateDemandForApp(sched, maxRes);
}
- updateDemandForApp(sched, maxRes);
- }
- for (FSAppAttempt sched : nonRunnableApps) {
- if (Resources.equals(demand, maxRes)) {
- break;
+ for (FSAppAttempt sched : nonRunnableApps) {
+ if (Resources.equals(demand, maxRes)) {
+ break;
+ }
+ updateDemandForApp(sched, maxRes);
}
- updateDemandForApp(sched, maxRes);
+ } finally {
+ readLock.unlock();
}
if (LOG.isDebugEnabled()) {
LOG.debug("The updated demand for " + getName() + " is " + demand
@@ -198,7 +237,8 @@ private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) {
public Resource assignContainer(FSSchedulerNode node) {
Resource assigned = Resources.none();
if (LOG.isDebugEnabled()) {
- LOG.debug("Node " + node.getNodeName() + " offered to queue: " + getName());
+ LOG.debug("Node " + node.getNodeName() + " offered to queue: " +
+ getName());
}
if (!assignContainerPreCheck(node)) {
@@ -206,16 +246,26 @@ public Resource assignContainer(FSSchedulerNode node) {
}
Comparator<Schedulable> comparator = policy.getComparator();
- Collections.sort(runnableApps, comparator);
- for (FSAppAttempt sched : runnableApps) {
- if (SchedulerAppUtils.isBlacklisted(sched, node, LOG)) {
- continue;
- }
-
- assigned = sched.assignContainer(node);
- if (!assigned.equals(Resources.none())) {
- break;
+ writeLock.lock();
+ try {
+ Collections.sort(runnableApps, comparator);
+ } finally {
+ writeLock.unlock();
+ }
+ readLock.lock();
+ try {
+ for (FSAppAttempt sched : runnableApps) {
+ if (SchedulerAppUtils.isBlacklisted(sched, node, LOG)) {
+ continue;
+ }
+
+ assigned = sched.assignContainer(node);
+ if (!assigned.equals(Resources.none())) {
+ break;
+ }
}
+ } finally {
+ readLock.unlock();
}
return assigned;
}
@@ -237,11 +287,16 @@ public RMContainer preemptContainer() {
// Choose the app that is most over fair share
Comparator<Schedulable> comparator = policy.getComparator();
FSAppAttempt candidateSched = null;
- for (FSAppAttempt sched : runnableApps) {
- if (candidateSched == null ||
- comparator.compare(sched, candidateSched) > 0) {
- candidateSched = sched;
+ readLock.lock();
+ try {
+ for (FSAppAttempt sched : runnableApps) {
+ if (candidateSched == null ||
+ comparator.compare(sched, candidateSched) > 0) {
+ candidateSched = sched;
+ }
}
+ } finally {
+ readLock.unlock();
}
// Preempt from the selected app
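
Note: the FSLeafQueue changes above wrap every mutation of the app lists in the write lock and every traversal in the read lock of a fair ReentrantReadWriteLock, which is what removes the ConcurrentModificationException reported in YARN-2910. A minimal, self-contained sketch of the same guard pattern (the class and method names below are illustrative, not the Hadoop ones):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class GuardedList<T> {
  // Fair lock: a waiting writer is not starved by a steady stream of readers.
  private final ReadWriteLock rwl = new ReentrantReadWriteLock(true);
  private final Lock readLock = rwl.readLock();
  private final Lock writeLock = rwl.writeLock();
  private final List<T> items = new ArrayList<T>();

  public void add(T item) {
    writeLock.lock();
    try {
      items.add(item);          // structural change, so take the write lock
    } finally {
      writeLock.unlock();
    }
  }

  public int sizeByIteration() {
    int n = 0;
    readLock.lock();
    try {
      for (T ignored : items) { // iteration is safe while no writer can interleave
        n++;
      }
    } finally {
      readLock.unlock();
    }
    return n;
  }
}

Iterating under the read lock prevents a concurrent add or remove from invalidating the iterator, while still letting any number of readers proceed in parallel.
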
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
index 97736bedd0427..385ea0be76b15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
@@ -28,12 +28,22 @@
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
+import java.util.ArrayList;
import java.util.Collection;
-
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
@@ -222,4 +232,85 @@ public void testIsStarvedForFairShare() throws Exception {
assertFalse(queueB1.isStarvedForFairShare());
assertFalse(queueB2.isStarvedForFairShare());
}
+
+ @Test
+ public void testConcurrentAccess() {
+ conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false");
+ resourceManager = new MockRM(conf);
+ resourceManager.start();
+ scheduler = (FairScheduler) resourceManager.getResourceScheduler();
+
+ String queueName = "root.queue1";
+ final FSLeafQueue schedulable = scheduler.getQueueManager().
+ getLeafQueue(queueName, true);
+ ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
+ RMContext rmContext = resourceManager.getRMContext();
+ final FSAppAttempt app =
+ new FSAppAttempt(scheduler, applicationAttemptId, "user1",
+ schedulable, null, rmContext);
+
+ // this needs to be in sync with the number of runnables declared below
+ int testThreads = 2;
+ List<Runnable> runnables = new ArrayList<Runnable>();
+
+ // add applications to modify the list
+ runnables.add(new Runnable() {
+ @Override
+ public void run() {
+ for (int i=0; i < 500; i++) {
+ schedulable.addAppSchedulable(app);
+ }
+ }
+ });
+
+ // iterate over the list a couple of times in a different thread
+ runnables.add(new Runnable() {
+ @Override
+ public void run() {
+ for (int i=0; i < 500; i++) {
+ schedulable.getResourceUsage();
+ }
+ }
+ });
+
+ final List<Throwable> exceptions = Collections.synchronizedList(
+ new ArrayList<Throwable>());
+ final ExecutorService threadPool = Executors.newFixedThreadPool(
+ testThreads);
+
+ try {
+ final CountDownLatch allExecutorThreadsReady =
+ new CountDownLatch(testThreads);
+ final CountDownLatch startBlocker = new CountDownLatch(1);
+ final CountDownLatch allDone = new CountDownLatch(testThreads);
+ for (final Runnable submittedTestRunnable : runnables) {
+ threadPool.submit(new Runnable() {
+ public void run() {
+ allExecutorThreadsReady.countDown();
+ try {
+ startBlocker.await();
+ submittedTestRunnable.run();
+ } catch (final Throwable e) {
+ exceptions.add(e);
+ } finally {
+ allDone.countDown();
+ }
+ }
+ });
+ }
+ // wait until all threads are ready
+ allExecutorThreadsReady.await();
+ // start all test runners
+ startBlocker.countDown();
+ int testTimeout = 2;
+ assertTrue("Timeout waiting for more than " + testTimeout + " seconds",
+ allDone.await(testTimeout, TimeUnit.SECONDS));
+ } catch (InterruptedException ie) {
+ exceptions.add(ie);
+ } finally {
+ threadPool.shutdownNow();
+ }
+ assertTrue("Test failed with exception(s)" + exceptions,
+ exceptions.isEmpty());
+ }
}
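
Note: the new test drives the queue from two threads and relies on the usual three-latch harness (ready, start, done) so both runnables enter their loops at the same moment and the assertion only fires after both finish or a timeout elapses. A stripped-down sketch of that harness, with the actual work replaced by a placeholder comment:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class StartGateHarness {
  public static void main(String[] args) throws InterruptedException {
    final int threads = 2;
    final List<Throwable> errors =
        Collections.synchronizedList(new ArrayList<Throwable>());
    final CountDownLatch ready = new CountDownLatch(threads);
    final CountDownLatch start = new CountDownLatch(1);
    final CountDownLatch done = new CountDownLatch(threads);
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    for (int i = 0; i < threads; i++) {
      pool.submit(new Runnable() {
        @Override
        public void run() {
          ready.countDown();          // this worker is parked and waiting
          try {
            start.await();            // all workers are released together
            // ... exercise the shared data structure here ...
          } catch (Throwable t) {
            errors.add(t);
          } finally {
            done.countDown();
          }
        }
      });
    }
    ready.await();                    // wait until every worker is in place
    start.countDown();                // fire the start gate
    boolean finished = done.await(2, TimeUnit.SECONDS);
    pool.shutdownNow();
    System.out.println("finished=" + finished + ", errors=" + errors);
  }
}
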
From f7670e92bec7b5407fa32d881c79d384451aeab2 Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Tue, 9 Dec 2014 14:31:44 -0800
Subject: [PATCH 025/432] HADOOP-11349. RawLocalFileSystem leaks file
descriptor while creating a file if creat succeeds but chmod fails. (Varun
Saxena via Colin P. McCabe)
---
.../hadoop-common/CHANGES.txt | 4 +++
.../apache/hadoop/fs/RawLocalFileSystem.java | 25 ++++++++++++++++---
2 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 40aab856461fc..0019b3a629dd3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -552,6 +552,10 @@ Release 2.7.0 - UNRELEASED
HADOOP-11378. Fix new findbugs warnings in hadoop-kms. (Li Lu via wheat9)
+ HADOOP-11349. RawLocalFileSystem leaks file descriptor while creating a
+ file if creat succeeds but chmod fails. (Varun Saxena via Colin P. McCabe)
+
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index b6b6f5905491f..858789e5f3cc2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Shell;
@@ -295,8 +296,16 @@ public FSDataOutputStream create(Path f, FsPermission permission,
FSDataOutputStream out = create(f,
overwrite, bufferSize, replication, blockSize, progress);
- setPermission(f, permission);
- return out;
+ boolean success = false;
+ try {
+ setPermission(f, permission);
+ success = true;
+ return out;
+ } finally {
+ if (!success) {
+ IOUtils.cleanup(LOG, out);
+ }
+ }
}
@Override
@@ -306,8 +315,16 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
Progressable progress) throws IOException {
FSDataOutputStream out = create(f,
overwrite, false, bufferSize, replication, blockSize, progress);
- setPermission(f, permission);
- return out;
+ boolean success = false;
+ try {
+ setPermission(f, permission);
+ success = true;
+ return out;
+ } finally {
+ if (!success) {
+ IOUtils.cleanup(LOG, out);
+ }
+ }
}
@Override
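
Note: both create() overloads above now use the "success flag" idiom: the freshly opened stream is closed only when the follow-up call (setPermission) fails, otherwise it is returned to the caller untouched, so a chmod failure no longer leaks the descriptor. A hedged sketch of the same idiom with plain java.io (the permission step here is illustrative, not the Hadoop call):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class CreateThenConfigure {
  static OutputStream createAndMarkExecutable(File f) throws IOException {
    OutputStream out = new FileOutputStream(f);   // resource acquired first
    boolean success = false;
    try {
      if (!f.setExecutable(true)) {               // the step that may fail afterwards
        throw new IOException("could not change permissions on " + f);
      }
      success = true;
      return out;                                 // caller now owns the stream
    } finally {
      if (!success) {
        out.close();                              // close only on the failure path
      }
    }
  }
}
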
From 8d2fa14f32cc644e60a9c804abefce3bec8c7c78 Mon Sep 17 00:00:00 2001
From: Jian He
Date: Tue, 9 Dec 2014 16:47:24 -0800
Subject: [PATCH 026/432] YARN-2930. Fixed
TestRMRestart#testRMRestartRecoveringNodeLabelManager intermittent failure.
Contributed by Wangda Tan
---
hadoop-yarn-project/CHANGES.txt | 3 +++
.../yarn/server/resourcemanager/TestRMRestart.java | 14 ++++++++++++++
2 files changed, 17 insertions(+)
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d87322ffab76c..0173782750c90 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -206,6 +206,9 @@ Release 2.7.0 - UNRELEASED
YARN-2910. FSLeafQueue can throw ConcurrentModificationException.
(Wilfred Spiegelenburg via kasha)
+ YARN-2930. Fixed TestRMRestart#testRMRestartRecoveringNodeLabelManager
+ intermittent failure. (Wangda Tan via jianhe)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 29f0208f28e79..fcb2be78737c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -39,6 +39,7 @@
import java.util.Map;
import java.util.Set;
+import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataOutputBuffer;
@@ -2048,6 +2049,19 @@ protected void doSecureLogin() throws IOException {
// 4. Get cluster and node lobel, it should be present by recovering it
@Test(timeout = 20000)
public void testRMRestartRecoveringNodeLabelManager() throws Exception {
+ // Initial FS node label store root dir to a random tmp dir
+ File nodeLabelFsStoreDir =
+ new File("target", this.getClass().getSimpleName()
+ + "-testRMRestartRecoveringNodeLabelManager");
+ if (nodeLabelFsStoreDir.exists()) {
+ FileUtils.deleteDirectory(nodeLabelFsStoreDir);
+ }
+ nodeLabelFsStoreDir.deleteOnExit();
+
+ String nodeLabelFsStoreDirURI = nodeLabelFsStoreDir.toURI().toString();
+ conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
+ nodeLabelFsStoreDirURI);
+
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1 = new MockRM(conf, memStore) {
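
Note: the intermittent failure was removed by pointing the FS node label store at a test-specific directory under target/ and wiping it up front, so state written by an earlier run or another test cannot bleed into this one. A minimal sketch of that setup step; the configuration key is the one used in the patch, the directory name is made up, and hadoop-common, hadoop-yarn-api and commons-io are assumed to be on the classpath:

import java.io.File;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class IsolatedLabelStore {
  static void pointLabelStoreAtTempDir(Configuration conf, String testName)
      throws Exception {
    File storeDir = new File("target", testName + "-node-labels");
    if (storeDir.exists()) {
      FileUtils.deleteDirectory(storeDir);  // drop leftovers from earlier runs
    }
    storeDir.deleteOnExit();
    conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
        storeDir.toURI().toString());
  }
}
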
From 0b25ed68b9ff68923a8b6710d61aad27e8032a72 Mon Sep 17 00:00:00 2001
From: Jian He
Date: Tue, 9 Dec 2014 17:56:04 -0800
Subject: [PATCH 027/432] YARN-2924. Fixed RMAdminCLI to not convert node
labels to lower case. Contributed by Wangda Tan
---
hadoop-yarn-project/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java | 2 +-
.../org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java | 4 ++--
3 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0173782750c90..81d5707e4f16e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -209,6 +209,9 @@ Release 2.7.0 - UNRELEASED
YARN-2930. Fixed TestRMRestart#testRMRestartRecoveringNodeLabelManager
intermittent failure. (Wangda Tan via jianhe)
+ YARN-2924. Fixed RMAdminCLI to not convert node labels to lower case.
+ (Wangda Tan via jianhe)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 89d87cfc4e2a0..c7cc4d2332104 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -399,7 +399,7 @@ private Map<NodeId, Set<String>> buildNodeLabelsFromStr(String args)
for (int i = 1; i < splits.length; i++) {
if (!splits[i].trim().isEmpty()) {
- map.get(nodeId).add(splits[i].trim().toLowerCase());
+ map.get(nodeId).add(splits[i].trim());
}
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index 6176a3e39c3cc..bee114be04818 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -468,9 +468,9 @@ public void testRemoveFromClusterNodeLabels() throws Exception {
@Test
public void testReplaceLabelsOnNode() throws Exception {
// Successfully replace labels
- dummyNodeLabelsManager.addToCluserNodeLabels(ImmutableSet.of("x", "y"));
+ dummyNodeLabelsManager.addToCluserNodeLabels(ImmutableSet.of("x", "Y"));
String[] args =
- { "-replaceLabelsOnNode", "node1,x,y node2,y",
+ { "-replaceLabelsOnNode", "node1,x,Y node2,Y",
"-directlyAccessNodeLabelStore" };
assertEquals(0, rmAdminCLI.run(args));
assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey(
From cd5dcfe0151a7a069c4ff21900e53675c7cf0b90 Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Wed, 10 Dec 2014 08:27:15 +0530
Subject: [PATCH 028/432] HDFS-7481. Add ACL indicator to the 'Permission
Denied' exception. (Contributed by Vinayakumar B )
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hdfs/server/namenode/FSPermissionChecker.java | 11 ++++++++++-
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9398429fd808d..d1414399ac2ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -558,6 +558,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7473. Document setting dfs.namenode.fs-limits.max-directory-items to 0
is invalid. (Akira AJISAKA via cnauroth)
+ HDFS-7481. Add ACL indicator to the "Permission Denied" exception.
+ (vinayakumarb)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 8de8c54bfc574..050848492d4d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -47,6 +47,12 @@ class FSPermissionChecker {
/** @return a string for throwing {@link AccessControlException} */
private String toAccessControlString(INode inode, int snapshotId,
FsAction access, FsPermission mode) {
+ return toAccessControlString(inode, snapshotId, access, mode, false);
+ }
+
+ /** @return a string for throwing {@link AccessControlException} */
+ private String toAccessControlString(INode inode, int snapshotId, FsAction access,
+ FsPermission mode, boolean deniedFromAcl) {
StringBuilder sb = new StringBuilder("Permission denied: ")
.append("user=").append(user).append(", ")
.append("access=").append(access).append(", ")
@@ -55,6 +61,9 @@ private String toAccessControlString(INode inode, int snapshotId,
.append(inode.getGroupName(snapshotId)).append(':')
.append(inode.isDirectory() ? 'd' : '-')
.append(mode);
+ if (deniedFromAcl) {
+ sb.append("+");
+ }
return sb.toString();
}
@@ -338,7 +347,7 @@ private void checkAccessAcl(INode inode, int snapshotId, FsAction access,
}
throw new AccessControlException(
- toAccessControlString(inode, snapshotId, access, mode));
+ toAccessControlString(inode, snapshotId, access, mode, true));
}
/** Guarded by {@link FSNamesystem#readLock()} */
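
Note: the extra boolean mirrors the ls convention of flagging ACL-bearing entries with a trailing '+': when the denial was decided by an ACL entry rather than the plain owner/group/other bits, the exception text now ends in '+', which makes these failures easier to diagnose. A small illustrative sketch of assembling such a message (the layout is modeled on the code above, the names are hypothetical):

public class PermissionMessage {
  static String denied(String user, String access, String path,
      String owner, String group, String mode, boolean deniedFromAcl) {
    StringBuilder sb = new StringBuilder("Permission denied: ")
        .append("user=").append(user)
        .append(", access=").append(access)
        .append(", inode=\"").append(path).append("\":")
        .append(owner).append(':').append(group).append(':').append(mode);
    if (deniedFromAcl) {
      sb.append("+");  // an ACL entry, not the mode bits, denied the access
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(
        denied("alice", "WRITE", "/data", "bob", "staff", "drwxr-x---", true));
  }
}
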
From 15b15bccb7f1301541693cb8aa6cad795d984f22 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 20:42:42 -0800
Subject: [PATCH 029/432] HDFS-7502. Fix findbugs warning in hdfs-nfs project.
Contributed by Brandon Li.
---
.../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 15 +++++++++------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index c860dd51813d5..aaac797b5311f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -25,6 +25,7 @@
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
@@ -651,15 +652,16 @@ READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler,
}
int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
- if (rtmax < target.getBytes().length) {
- LOG.error("Link size: " + target.getBytes().length
+ if (rtmax < target.getBytes(Charset.forName("UTF-8")).length) {
+ LOG.error("Link size: "
+ + target.getBytes(Charset.forName("UTF-8")).length
+ " is larger than max transfer size: " + rtmax);
return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr,
new byte[0]);
}
return new READLINK3Response(Nfs3Status.NFS3_OK, postOpAttr,
- target.getBytes());
+ target.getBytes(Charset.forName("UTF-8")));
} catch (IOException e) {
LOG.warn("Readlink error: " + e.getClass(), e);
@@ -1462,7 +1464,8 @@ private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
throw io;
}
// This happens when startAfter was just deleted
- LOG.info("Cookie couldn't be found: " + new String(startAfter)
+ LOG.info("Cookie couldn't be found: "
+ + new String(startAfter, Charset.forName("UTF-8"))
+ ", do listing from beginning");
dlisting = dfsClient
.listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
@@ -1571,7 +1574,7 @@ public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler,
startAfter = HdfsFileStatus.EMPTY_NAME;
} else {
String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
- startAfter = inodeIdPath.getBytes();
+ startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
}
dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
@@ -1733,7 +1736,7 @@ READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler,
startAfter = HdfsFileStatus.EMPTY_NAME;
} else {
String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
- startAfter = inodeIdPath.getBytes();
+ startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
}
dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d1414399ac2ef..9f3f9ee23b984 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -561,6 +561,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7481. Add ACL indicator to the "Permission Denied" exception.
(vinayakumarb)
+ HDFS-7502. Fix findbugs warning in hdfs-nfs project.
+ (Brandon Li via wheat9)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
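
Note: every hunk in this patch replaces a charset-less String.getBytes() or new String(byte[]) with an explicit UTF-8 charset; the findbugs complaint is that the no-argument forms use the JVM's default charset, which can differ between the NFS gateway and its clients. The patch spells it Charset.forName("UTF-8"); on Java 7 and later the equivalent constant is StandardCharsets.UTF_8, as in this sketch:

import java.nio.charset.StandardCharsets;

public class CharsetExplicit {
  public static void main(String[] args) {
    String target = "/export/data";
    // Encoding and decoding with a pinned charset is stable across JVMs and locales.
    byte[] wire = target.getBytes(StandardCharsets.UTF_8);
    String roundTrip = new String(wire, StandardCharsets.UTF_8);
    System.out.println(roundTrip.equals(target) ? "round-trip ok" : "mismatch");
  }
}
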
From fc15f64b46c1b1a5cdd9a23b68ccac2c21d9cb02 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 9 Dec 2014 20:45:21 -0800
Subject: [PATCH 030/432] HADOOP-11381. Fix findbugs warnings in hadoop-distcp,
hadoop-aws, hadoop-azure, and hadoop-openstack. Contributed by Li Lu.
---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java | 4 ++++
.../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 5 +++--
.../java/org/apache/hadoop/fs/azure/SelfRenewingLease.java | 7 ++++---
.../java/org/apache/hadoop/tools/FileBasedCopyListing.java | 4 +++-
.../fs/swift/snative/SwiftNativeFileSystemStore.java | 7 ++++---
.../org/apache/hadoop/fs/swift/util/SwiftTestUtils.java | 4 ++--
7 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0019b3a629dd3..9065ff5c00388 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -555,6 +555,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11349. RawLocalFileSystem leaks file descriptor while creating a
file if creat succeeds but chmod fails. (Varun Saxena via Colin P. McCabe)
+ HADOOP-11381. Fix findbugs warnings in hadoop-distcp, hadoop-aws,
+ hadoop-azure, and hadoop-openstack. (Li Lu via wheat9)
Release 2.6.0 - 2014-11-18
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 6bdd233506574..457351d0242d0 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -875,6 +875,8 @@ public void progressChanged(ProgressEvent progressEvent) {
case ProgressEvent.PART_COMPLETED_EVENT_CODE:
statistics.incrementWriteOps(1);
break;
+ default:
+ break;
}
}
};
@@ -933,6 +935,8 @@ public void progressChanged(ProgressEvent progressEvent) {
case ProgressEvent.PART_COMPLETED_EVENT_CODE:
statistics.incrementWriteOps(1);
break;
+ default:
+ break;
}
}
};
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index ad2e2e6635c95..c13600222016f 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -25,6 +25,7 @@
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
+import java.nio.charset.Charset;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
@@ -153,7 +154,7 @@ public FolderRenamePending(Path redoFile, NativeAzureFileSystem fs)
"Error reading pending rename file contents -- "
+ "maximum file size exceeded");
}
- String contents = new String(bytes, 0, l);
+ String contents = new String(bytes, 0, l, Charset.forName("UTF-8"));
// parse the JSON
ObjectMapper objMapper = new ObjectMapper();
@@ -253,7 +254,7 @@ public void writeFile(FileSystem fs) throws IOException {
// Write file.
try {
output = fs.create(path);
- output.write(contents.getBytes());
+ output.write(contents.getBytes(Charset.forName("UTF-8")));
} catch (IOException e) {
throw new IOException("Unable to write RenamePending file for folder rename from "
+ srcKey + " to " + dstKey, e);
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 2d5c0c8ebde40..bda6006d60225 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.fs.azure;
-import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
@@ -27,6 +26,8 @@
import com.microsoft.windowsazure.storage.StorageException;
import com.microsoft.windowsazure.storage.blob.CloudBlob;
+import java.util.concurrent.atomic.AtomicInteger;
+
/**
* An Azure blob lease that automatically renews itself indefinitely
* using a background thread. Use it to synchronize distributed processes,
@@ -56,7 +57,7 @@ public class SelfRenewingLease {
private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class);
// Used to allocate thread serial numbers in thread name
- private static volatile int threadNumber = 0;
+ private static AtomicInteger threadNumber = new AtomicInteger(0);
// Time to wait to retry getting the lease in milliseconds
@@ -99,7 +100,7 @@ public SelfRenewingLease(CloudBlobWrapper blobWrapper)
// A Renewer running should not keep JVM from exiting, so make it a daemon.
renewer.setDaemon(true);
- renewer.setName("AzureLeaseRenewer-" + threadNumber++);
+ renewer.setName("AzureLeaseRenewer-" + threadNumber.getAndIncrement());
renewer.start();
LOG.debug("Acquired lease " + leaseID + " on " + blob.getUri()
+ " managed by thread " + renewer.getName());
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
index 0fe93c2f1367b..2bc343e1727fd 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
@@ -27,6 +27,7 @@
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
@@ -74,7 +75,8 @@ private List<Path> fetchFileList(Path sourceListing) throws IOException {
FileSystem fs = sourceListing.getFileSystem(getConf());
BufferedReader input = null;
try {
- input = new BufferedReader(new InputStreamReader(fs.open(sourceListing)));
+ input = new BufferedReader(new InputStreamReader(fs.open(sourceListing),
+ Charset.forName("UTF-8")));
String line = input.readLine();
while (line != null) {
result.add(new Path(line));
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
index b3e6b9417950d..0138eae412d7d 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
@@ -45,6 +45,7 @@
import java.io.InterruptedIOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.nio.charset.Charset;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
@@ -352,8 +353,8 @@ private List<FileStatus> listDirectory(SwiftObjectPath path,
final CollectionType collectionType = JSONUtil.getJsonMapper().getTypeFactory().
constructCollectionType(List.class, SwiftObjectFileStatus.class);
- final List<SwiftObjectFileStatus> fileStatusList =
- JSONUtil.toObject(new String(bytes), collectionType);
+ final List<SwiftObjectFileStatus> fileStatusList = JSONUtil.toObject(
+ new String(bytes, Charset.forName("UTF-8")), collectionType);
//this can happen if user lists file /data/files/file
//in this case swift will return empty array
@@ -447,7 +448,7 @@ public List<URI> getObjectLocation(Path path) throws IOException {
//no object location, return an empty list
return new LinkedList<URI>();
}
- return extractUris(new String(objectLocation), path);
+ return extractUris(new String(objectLocation, Charset.forName("UTF-8")), path);
}
/**
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
index 7e850e713de46..c9e26acf3d4e1 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
@@ -219,9 +219,9 @@ public static void compareByteArrays(byte[] src,
byte actual = dest[i];
byte expected = src[i];
String letter = toChar(actual);
- String line = String.format("[%04d] %2x %s\n", i, actual, letter);
+ String line = String.format("[%04d] %2x %s%n", i, actual, letter);
if (expected != actual) {
- line = String.format("[%04d] %2x %s -expected %2x %s\n",
+ line = String.format("[%04d] %2x %s -expected %2x %s%n",
i,
actual,
letter,
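
Note: besides more explicit-charset and missing-default-case fixes, the SelfRenewingLease hunk replaces a volatile int that was post-incremented (threadNumber++) from multiple threads with an AtomicInteger; the increment on a volatile is a separate read and write, so two leases could end up with the same renewer name. A minimal sketch of the replacement pattern (the class and thread names are illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class RenewerNaming {
  // getAndIncrement() is one atomic step, unlike ++ on a volatile field.
  private static final AtomicInteger THREAD_NUMBER = new AtomicInteger(0);

  static Thread newRenewer(Runnable work) {
    Thread t = new Thread(work);
    t.setDaemon(true);  // a renewer should not keep the JVM alive
    t.setName("LeaseRenewer-" + THREAD_NUMBER.getAndIncrement());
    return t;
  }

  public static void main(String[] args) {
    Runnable noop = new Runnable() {
      @Override
      public void run() { }
    };
    System.out.println(newRenewer(noop).getName());
    System.out.println(newRenewer(noop).getName());
  }
}
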
From 47b6c60f47f9b5a2c76d58ab74b364bf93015902 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Wed, 10 Dec 2014 12:44:25 -0800
Subject: [PATCH 031/432] HADOOP-10482. Fix various findbugs warnings in
hadoop-common. Contributed by Haohui Mai.
---
.../hadoop-common/CHANGES.txt | 2 ++
.../dev-support/findbugsExcludeFile.xml | 10 +++++++
.../org/apache/hadoop/conf/Configuration.java | 7 ++---
.../crypto/key/JavaKeyStoreProvider.java | 9 ++----
.../java/org/apache/hadoop/fs/FileSystem.java | 3 --
.../org/apache/hadoop/fs/HarFileSystem.java | 23 +++-----------
.../apache/hadoop/fs/LocalDirAllocator.java | 2 +-
.../hadoop/fs/MD5MD5CRC32FileChecksum.java | 8 ++---
.../apache/hadoop/fs/ftp/FTPFileSystem.java | 21 +++++--------
.../java/org/apache/hadoop/fs/shell/Ls.java | 2 +-
.../java/org/apache/hadoop/fs/shell/Stat.java | 4 +--
.../java/org/apache/hadoop/fs/shell/Test.java | 2 ++
.../java/org/apache/hadoop/ha/HAAdmin.java | 6 ----
.../apache/hadoop/ha/SshFenceByTcpPort.java | 2 ++
.../org/apache/hadoop/io/LongWritable.java | 4 +--
.../main/java/org/apache/hadoop/io/Text.java | 2 ++
.../io/compress/DecompressorStream.java | 2 +-
.../java/org/apache/hadoop/ipc/Server.java | 15 +++++-----
.../metrics/ganglia/GangliaContext31.java | 5 ----
.../hadoop/metrics/spi/CompositeContext.java | 2 +-
.../metrics2/lib/MutableCounterInt.java | 17 ++++++-----
.../metrics2/lib/MutableCounterLong.java | 19 ++++++------
.../hadoop/metrics2/lib/MutableGaugeInt.java | 30 +++++++++----------
.../hadoop/metrics2/lib/MutableGaugeLong.java | 30 +++++++++----------
.../java/org/apache/hadoop/net/NetUtils.java | 2 +-
.../net/ScriptBasedMappingWithDependency.java | 3 +-
.../hadoop/security/LdapGroupsMapping.java | 6 +---
.../apache/hadoop/util/ComparableVersion.java | 2 ++
.../apache/hadoop/util/PrintJarMainClass.java | 17 +++++------
.../org/apache/hadoop/util/ServletUtil.java | 6 ++--
30 files changed, 118 insertions(+), 145 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9065ff5c00388..53004cefc8feb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -558,6 +558,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11381. Fix findbugs warnings in hadoop-distcp, hadoop-aws,
hadoop-azure, and hadoop-openstack. (Li Lu via wheat9)
+ HADOOP-10482. Fix various findbugs warnings in hadoop-common. (wheat9)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index 8de3c378bd6f7..1a05896ceaf98 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -241,6 +241,16 @@
+
+
+
+
+
+
+
+
+
+
mapreduce.job.end-notification.max.attempts5
mapreduce.jobhistory.max-age-ms10000000000
yarn.resourcemanager.zookeeper-store.session.timeout-ms60000
-mapreduce.task.tmp.dir./tmp
dfs.default.chunk.view.size32768
kfs.bytes-per-checksum512
mapreduce.reduce.memory.mb512
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
index 17b5fd28f1a8c..6dc1e29024901 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
@@ -391,36 +391,7 @@ public static void tearDown() {
ioe.printStackTrace();
}
}
-
- /**
- * Tests task's temp directory.
- *
- * In this test, we give different values to mapreduce.task.tmp.dir
- * both relative and absolute. And check whether the temp directory
- * is created. We also check whether java.io.tmpdir value is same as
- * the directory specified. We create a temp file and check if is is
- * created in the directory specified.
- */
- @Test
- public void testTaskTempDir(){
- try {
- JobConf conf = new JobConf(mr.getConfig());
-
- // intialize input, output directories
- Path inDir = new Path("testing/wc/input");
- Path outDir = new Path("testing/wc/output");
- String input = "The input";
- configure(conf, inDir, outDir, input,
- MapClass.class, IdentityReducer.class);
- launchTest(conf, inDir, outDir, input);
-
- } catch(Exception e) {
- e.printStackTrace();
- fail("Exception in testing temp dir");
- tearDown();
- }
- }
-
+
/**
* To test OS dependent setting of default execution path for a MapRed task.
* Mainly that we can use MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV to set -
From b5fe2b935b711128d566ad293dfe5e7bd64e8c5f Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Wed, 10 Dec 2014 23:01:17 -0800
Subject: [PATCH 040/432] HDFS-7463. Simplify
FSNamesystem#getBlockLocationsUpdateTimes. Contributed by Haohui Mai.
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../hdfs/server/namenode/FSNamesystem.java | 218 +++++++++---------
.../hdfs/server/namenode/NamenodeFsck.java | 9 +-
.../org/apache/hadoop/hdfs/TestGetBlocks.java | 4 +-
.../hdfs/server/namenode/NameNodeAdapter.java | 4 +-
.../hadoop/hdfs/server/namenode/TestFsck.java | 6 +-
6 files changed, 118 insertions(+), 125 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7b4e0c5159a15..fd486051b8131 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -449,6 +449,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7498. Simplify the logic in INodesInPath. (jing9)
+ HDFS-7463. Simplify FSNamesystem#getBlockLocationsUpdateTimes. (wheat9)
+
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 30ac941ccb3f6..c17c4f51e1d1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1749,27 +1749,76 @@ private void setOwnerInt(final String srcArg, String username, String group)
logAuditEvent(true, "setOwner", srcArg, null, resultingStat);
}
+ static class GetBlockLocationsResult {
+ final INodesInPath iip;
+ final LocatedBlocks blocks;
+ boolean updateAccessTime() {
+ return iip != null;
+ }
+ private GetBlockLocationsResult(INodesInPath iip, LocatedBlocks blocks) {
+ this.iip = iip;
+ this.blocks = blocks;
+ }
+ }
+
/**
* Get block locations within the specified range.
* @see ClientProtocol#getBlockLocations(String, long, long)
*/
LocatedBlocks getBlockLocations(String clientMachine, String src,
- long offset, long length) throws AccessControlException,
- FileNotFoundException, UnresolvedLinkException, IOException {
- LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true,
- true);
+ long offset, long length) throws IOException {
+ checkOperation(OperationCategory.READ);
+ GetBlockLocationsResult res = null;
+ readLock();
+ try {
+ checkOperation(OperationCategory.READ);
+ res = getBlockLocations(src, offset, length, true, true);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "open", src);
+ throw e;
+ } finally {
+ readUnlock();
+ }
+
+ logAuditEvent(true, "open", src);
+
+ if (res == null) {
+ return null;
+ }
+
+ if (res.updateAccessTime()) {
+ writeLock();
+ final long now = now();
+ try {
+ checkOperation(OperationCategory.WRITE);
+ INode inode = res.iip.getLastINode();
+ boolean updateAccessTime = now > inode.getAccessTime() +
+ getAccessTimePrecision();
+ if (!isInSafeMode() && updateAccessTime) {
+ boolean changed = dir.setTimes(
+ inode, -1, now, false, res.iip.getLatestSnapshotId());
+ if (changed) {
+ getEditLog().logTimes(src, -1, now);
+ }
+ }
+ } catch (Throwable e) {
+ LOG.warn("Failed to update the access time of " + src, e);
+ } finally {
+ writeUnlock();
+ }
+ }
+
+ LocatedBlocks blocks = res.blocks;
if (blocks != null) {
- blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine,
- blocks.getLocatedBlocks());
+ blockManager.getDatanodeManager().sortLocatedBlocks(
+ clientMachine, blocks.getLocatedBlocks());
// lastBlock is not part of getLocatedBlocks(), might need to sort it too
LocatedBlock lastBlock = blocks.getLastLocatedBlock();
if (lastBlock != null) {
- ArrayList<LocatedBlock> lastBlockList =
- Lists.newArrayListWithCapacity(1);
- lastBlockList.add(lastBlock);
- blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine,
- lastBlockList);
+ ArrayList<LocatedBlock> lastBlockList = Lists.newArrayList(lastBlock);
+ blockManager.getDatanodeManager().sortLocatedBlocks(
+ clientMachine, lastBlockList);
}
}
return blocks;
@@ -1778,24 +1827,11 @@ LocatedBlocks getBlockLocations(String clientMachine, String src,
/**
* Get block locations within the specified range.
* @see ClientProtocol#getBlockLocations(String, long, long)
- * @throws FileNotFoundException, UnresolvedLinkException, IOException
+ * @throws IOException
*/
- LocatedBlocks getBlockLocations(String src, long offset, long length,
- boolean doAccessTime, boolean needBlockToken, boolean checkSafeMode)
- throws FileNotFoundException, UnresolvedLinkException, IOException {
- try {
- return getBlockLocationsInt(src, offset, length, doAccessTime,
- needBlockToken, checkSafeMode);
- } catch (AccessControlException e) {
- logAuditEvent(false, "open", src);
- throw e;
- }
- }
-
- private LocatedBlocks getBlockLocationsInt(String src, long offset,
- long length, boolean doAccessTime, boolean needBlockToken,
- boolean checkSafeMode)
- throws FileNotFoundException, UnresolvedLinkException, IOException {
+ GetBlockLocationsResult getBlockLocations(
+ String src, long offset, long length, boolean needBlockToken,
+ boolean checkSafeMode) throws IOException {
if (offset < 0) {
throw new HadoopIllegalArgumentException(
"Negative offset is not supported. File: " + src);
@@ -1804,16 +1840,16 @@ private LocatedBlocks getBlockLocationsInt(String src, long offset,
throw new HadoopIllegalArgumentException(
"Negative length is not supported. File: " + src);
}
- final LocatedBlocks ret = getBlockLocationsUpdateTimes(src,
- offset, length, doAccessTime, needBlockToken);
- logAuditEvent(true, "open", src);
+ final GetBlockLocationsResult ret = getBlockLocationsInt(
+ src, offset, length, needBlockToken);
+
if (checkSafeMode && isInSafeMode()) {
- for (LocatedBlock b : ret.getLocatedBlocks()) {
+ for (LocatedBlock b : ret.blocks.getLocatedBlocks()) {
// if safemode & no block locations yet then throw safemodeException
if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
SafeModeException se = new SafeModeException(
"Zero blocklocations for " + src, safeMode);
- if (haEnabled && haContext != null &&
+ if (haEnabled && haContext != null &&
haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
throw new RetriableException(se);
} else {
@@ -1825,95 +1861,49 @@ private LocatedBlocks getBlockLocationsInt(String src, long offset,
return ret;
}
- /*
- * Get block locations within the specified range, updating the
- * access times if necessary.
- */
- private LocatedBlocks getBlockLocationsUpdateTimes(final String srcArg,
- long offset, long length, boolean doAccessTime, boolean needBlockToken)
+ private GetBlockLocationsResult getBlockLocationsInt(
+ final String srcArg, long offset, long length, boolean needBlockToken)
throws IOException {
String src = srcArg;
FSPermissionChecker pc = getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
- for (int attempt = 0; attempt < 2; attempt++) {
- boolean isReadOp = (attempt == 0);
- if (isReadOp) { // first attempt is with readlock
- checkOperation(OperationCategory.READ);
- readLock();
- } else { // second attempt is with write lock
- checkOperation(OperationCategory.WRITE);
- writeLock(); // writelock is needed to set accesstime
- }
- try {
- if (isReadOp) {
- checkOperation(OperationCategory.READ);
- } else {
- checkOperation(OperationCategory.WRITE);
- }
- src = dir.resolvePath(pc, src, pathComponents);
- final INodesInPath iip = dir.getINodesInPath(src, true);
- if (isPermissionEnabled) {
- dir.checkPathAccess(pc, iip, FsAction.READ);
- }
+ src = dir.resolvePath(pc, src, pathComponents);
+ final INodesInPath iip = dir.getINodesInPath(src, true);
+ final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
+ if (isPermissionEnabled) {
+ dir.checkPathAccess(pc, iip, FsAction.READ);
+ checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
+ }
- // if the namenode is in safemode, then do not update access time
- if (isInSafeMode()) {
- doAccessTime = false;
- }
+ final long fileSize = iip.isSnapshot()
+ ? inode.computeFileSize(iip.getPathSnapshotId())
+ : inode.computeFileSizeNotIncludingLastUcBlock();
+ boolean isUc = inode.isUnderConstruction();
+ if (iip.isSnapshot()) {
+ // if src indicates a snapshot file, we need to make sure the returned
+ // blocks do not exceed the size of the snapshot file.
+ length = Math.min(length, fileSize - offset);
+ isUc = false;
+ }
- final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
- if (isPermissionEnabled) {
- checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
- }
- if (!iip.isSnapshot() //snapshots are readonly, so don't update atime.
- && doAccessTime && isAccessTimeSupported()) {
- final long now = now();
- if (now > inode.getAccessTime() + getAccessTimePrecision()) {
- // if we have to set access time but we only have the readlock, then
- // restart this entire operation with the writeLock.
- if (isReadOp) {
- continue;
- }
- boolean changed = dir.setTimes(inode, -1, now, false,
- iip.getLatestSnapshotId());
- if (changed) {
- getEditLog().logTimes(src, -1, now);
- }
- }
- }
- final long fileSize = iip.isSnapshot() ?
- inode.computeFileSize(iip.getPathSnapshotId())
- : inode.computeFileSizeNotIncludingLastUcBlock();
- boolean isUc = inode.isUnderConstruction();
- if (iip.isSnapshot()) {
- // if src indicates a snapshot file, we need to make sure the returned
- // blocks do not exceed the size of the snapshot file.
- length = Math.min(length, fileSize - offset);
- isUc = false;
- }
+ final FileEncryptionInfo feInfo =
+ FSDirectory.isReservedRawName(srcArg) ? null
+ : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip);
- final FileEncryptionInfo feInfo =
- FSDirectory.isReservedRawName(srcArg) ?
- null : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId(),
- iip);
-
- final LocatedBlocks blocks =
- blockManager.createLocatedBlocks(inode.getBlocks(), fileSize,
- isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo);
- // Set caching information for the located blocks.
- for (LocatedBlock lb: blocks.getLocatedBlocks()) {
- cacheManager.setCachedLocations(lb);
- }
- return blocks;
- } finally {
- if (isReadOp) {
- readUnlock();
- } else {
- writeUnlock();
- }
- }
+ final LocatedBlocks blocks = blockManager.createLocatedBlocks(
+ inode.getBlocks(), fileSize, isUc, offset, length, needBlockToken,
+ iip.isSnapshot(), feInfo);
+
+ // Set caching information for the located blocks.
+ for (LocatedBlock lb : blocks.getLocatedBlocks()) {
+ cacheManager.setCachedLocations(lb);
}
- return null; // can never reach here
+
+ final long now = now();
+ boolean updateAccessTime = isAccessTimeSupported() && !isInSafeMode()
+ && !iip.isSnapshot()
+ && now > inode.getAccessTime() + getAccessTimePrecision();
+ return new GetBlockLocationsResult(updateAccessTime ? iip : null, blocks);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index f82f0ea06a3e7..bab8f5e1b3176 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -443,12 +443,15 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException {
long fileLen = file.getLen();
// Get block locations without updating the file access time
// and without block access tokens
- LocatedBlocks blocks;
+ LocatedBlocks blocks = null;
+ FSNamesystem fsn = namenode.getNamesystem();
+ fsn.readLock();
try {
- blocks = namenode.getNamesystem().getBlockLocations(path, 0,
- fileLen, false, false, false);
+ blocks = fsn.getBlockLocations(path, 0, fileLen, false, false).blocks;
} catch (FileNotFoundException fnfe) {
blocks = null;
+ } finally {
+ fsn.readUnlock();
}
if (blocks == null) { // the file is deleted
return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 2af86bd6bb048..cc898527882c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -167,9 +167,7 @@ public void testReadSelectNonStaleDatanode() throws Exception {
if (stm != null) {
stm.close();
}
- if (client != null) {
- client.close();
- }
+ client.close();
cluster.shutdown();
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 61e7f145fb5c3..7aad37854eff6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -64,8 +64,8 @@ public static FSNamesystem getNamesystem(NameNode namenode) {
*/
public static LocatedBlocks getBlockLocations(NameNode namenode,
String src, long offset, long length) throws IOException {
- return namenode.getNamesystem().getBlockLocations(
- src, offset, length, false, true, true);
+ return namenode.getNamesystem().getBlockLocations("foo",
+ src, offset, length);
}
public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index ef7de0d1c57f9..aecf55ef02e23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -996,9 +996,9 @@ public void testFsckFileNotFound() throws Exception {
DatanodeManager dnManager = mock(DatanodeManager.class);
when(namenode.getNamesystem()).thenReturn(fsName);
- when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
- anyBoolean(), anyBoolean(), anyBoolean())).
- thenThrow(new FileNotFoundException()) ;
+ when(fsName.getBlockLocations(
+ anyString(), anyLong(), anyLong(), anyBoolean(), anyBoolean()))
+ .thenThrow(new FileNotFoundException());
when(fsName.getBlockManager()).thenReturn(blockManager);
when(blockManager.getDatanodeManager()).thenReturn(dnManager);
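
Note: the refactor collapses the old two-attempt loop (read lock first, retry everything under the write lock if the access time needs bumping) into a simpler shape: compute the block locations once under the read lock, remember whether an atime update is due, and only then take the write lock for that small follow-up update. A schematic sketch of that locking shape, using a ReentrantReadWriteLock as a stand-in for the namesystem lock (none of this is the FSNamesystem API):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReadThenMaybeWrite {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private long accessTime;
  private static final long PRECISION_MS = 3600000L;

  String readWithOptionalAtimeUpdate() {
    String result;
    boolean atimeDue;
    lock.readLock().lock();
    try {
      result = "block locations";   // the expensive read-only part of the call
      atimeDue = System.currentTimeMillis() > accessTime + PRECISION_MS;
    } finally {
      lock.readLock().unlock();
    }
    if (atimeDue) {
      lock.writeLock().lock();
      try {
        accessTime = System.currentTimeMillis();  // small follow-up mutation
      } catch (Throwable t) {
        // a failed atime update must not fail the read itself
      } finally {
        lock.writeLock().unlock();
      }
    }
    return result;
  }
}

The real code re-checks the condition under the write lock and merely warns if the update fails, which keeps getBlockLocations itself entirely on the read path.
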
From fb1137baa8d5a3ad12f0d7c22c95334923147842 Mon Sep 17 00:00:00 2001
From: arp
Date: Wed, 10 Dec 2014 23:37:26 -0800
Subject: [PATCH 041/432] HDFS-7503. Namenode restart after large deletions can
cause slow processReport (Arpit Agarwal)
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../server/blockmanagement/BlockManager.java | 22 ++++++++++++++-----
2 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd486051b8131..9049083033f9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -593,6 +593,9 @@ Release 2.6.1 - UNRELEASED
HDFS-7489. Incorrect locking in FsVolumeList#checkDirs can hang datanodes
(Noah Lorang via Colin P. McCabe)
+ HDFS-7503. Namenode restart after large deletions can cause slow
+ processReport. (Arpit Agarwal)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 26766961423f4..5f718e7e76d33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1785,6 +1785,8 @@ public boolean processReport(final DatanodeID nodeID,
final long startTime = Time.now(); //after acquiring write lock
final long endTime;
DatanodeDescriptor node;
+ Collection<Block> invalidatedBlocks = null;
+
try {
node = datanodeManager.getDatanode(nodeID);
if (node == null || !node.isAlive) {
@@ -1813,7 +1815,7 @@ public boolean processReport(final DatanodeID nodeID,
// ordinary block reports. This shortens restart times.
processFirstBlockReport(storageInfo, newReport);
} else {
- processReport(storageInfo, newReport);
+ invalidatedBlocks = processReport(storageInfo, newReport);
}
// Now that we have an up-to-date block report, we know that any
@@ -1832,6 +1834,14 @@ public boolean processReport(final DatanodeID nodeID,
namesystem.writeUnlock();
}
+ if (invalidatedBlocks != null) {
+ for (Block b : invalidatedBlocks) {
+ blockLog.info("BLOCK* processReport: " + b + " on " + node
+ + " size " + b.getNumBytes()
+ + " does not belong to any file");
+ }
+ }
+
// Log the block report processing stats from Namenode perspective
final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
if (metrics != null) {
@@ -1875,8 +1885,9 @@ private void rescanPostponedMisreplicatedBlocks() {
}
}
- private void processReport(final DatanodeStorageInfo storageInfo,
- final BlockListAsLongs report) throws IOException {
+ private Collection<Block> processReport(
+ final DatanodeStorageInfo storageInfo,
+ final BlockListAsLongs report) throws IOException {
// Normal case:
// Modify the (block-->datanode) map, according to the difference
// between the old and new block report.
@@ -1907,14 +1918,13 @@ private void processReport(final DatanodeStorageInfo storageInfo,
+ " of " + numBlocksLogged + " reported.");
}
for (Block b : toInvalidate) {
- blockLog.info("BLOCK* processReport: "
- + b + " on " + node + " size " + b.getNumBytes()
- + " does not belong to any file");
addToInvalidates(b, node);
}
for (BlockToMarkCorrupt b : toCorrupt) {
markBlockAsCorrupt(b, storageInfo, node);
}
+
+ return toInvalidate;
}
/**
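
Note: the processReport change is about lock hold time rather than what gets logged; after a restart with a large deletion backlog, writing one blockLog line per invalidated block while still holding the namesystem write lock stalled every other namenode operation. The fix collects the blocks and emits the log lines after writeUnlock(). A compact sketch of that defer-the-logging shape (the names and the lock are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

public class DeferredLogging {
  private final ReentrantLock namesystemLock = new ReentrantLock();

  void processReport(List<String> reportedBlocks) {
    List<String> toInvalidate;
    namesystemLock.lock();
    try {
      // do the real work under the lock, but only remember what to log
      toInvalidate = new ArrayList<String>(reportedBlocks);
    } finally {
      namesystemLock.unlock();
    }
    // the per-block logging happens with the lock already released
    for (String b : toInvalidate) {
      System.out.println("BLOCK* processReport: " + b
          + " does not belong to any file");
    }
  }
}
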
From 5ed973f18a0dee1cd4fd0ca6297e8dc265baf336 Mon Sep 17 00:00:00 2001
From: Allen Wittenauer
Date: Thu, 11 Dec 2014 09:00:35 -0800
Subject: [PATCH 042/432] HADOOP-11353. Add support for .hadooprc (aw)
---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../hadoop-common/src/main/bin/hadoop-config.sh | 2 ++
.../hadoop-common/src/main/bin/hadoop-functions.sh | 11 +++++++++++
3 files changed, 15 insertions(+)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8a4f13c10ac67..47d36e4d7d2d6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -26,6 +26,8 @@ Trunk (Unreleased)
Alexander Stojanovich, Brian Swan, and Min Wei via cnauroth)
HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via aw)
+
+ HADOOP-11353. Add support for .hadooprc (aw)
IMPROVEMENTS
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
index 89b0c932b32a6..06fb0efdcc53f 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -168,6 +168,8 @@ hadoop_exec_userfuncs
# IMPORTANT! User provided code is now available!
#
+hadoop_exec_hadooprc
+
# do all the OS-specific startup bits here
# this allows us to get a decent JAVA_HOME,
# call crle for LD_LIBRARY_PATH, etc.
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index dfd7315e345ad..af45cec51a0ce 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -113,6 +113,17 @@ function hadoop_exec_userfuncs
fi
}
+function hadoop_exec_hadooprc
+{
+ # Read the user's settings. This provides for users to override
+ # and/or append hadoop-env.sh. It is not meant as a complete system override.
+
+ if [[ -f "${HOME}/.hadooprc" ]]; then
+ hadoop_debug "Applying the user's .hadooprc"
+ . "${HOME}/.hadooprc"
+ fi
+}
+
function hadoop_basic_init
{
# Some of these are also set in hadoop-env.sh.
From a6f9c201503359d34bda1f6e45a2b9d9dec7b16d Mon Sep 17 00:00:00 2001
From: Jian He
Date: Thu, 11 Dec 2014 11:16:45 -0800
Subject: [PATCH 043/432] YARN-2917. Fixed potential deadlock when system.exit
is called in AsyncDispatcher. Contributed by Rohith Sharmaks
---
hadoop-yarn-project/CHANGES.txt | 3 +++
.../apache/hadoop/yarn/event/AsyncDispatcher.java | 15 +++++++++++++--
2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 832efeec4ae75..3432f6f8bbabd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -214,6 +214,9 @@ Release 2.7.0 - UNRELEASED
YARN-2924. Fixed RMAdminCLI to not convert node labels to lower case.
(Wangda Tan via jianhe)
+ YARN-2917. Fixed potential deadlock when system.exit is called in AsyncDispatcher
+ (Rohith Sharmaks via jianhe)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 370b0f7324f15..28be6acf732bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -181,8 +181,9 @@ protected void dispatch(Event event) {
if (exitOnDispatchException
&& (ShutdownHookManager.get().isShutdownInProgress()) == false
&& stopped == false) {
- LOG.info("Exiting, bbye..");
- System.exit(-1);
+ Thread shutDownThread = new Thread(createShutDownThread());
+ shutDownThread.setName("AsyncDispatcher ShutDown handler");
+ shutDownThread.start();
}
}
}
@@ -271,4 +272,14 @@ void addHandler(EventHandler handler) {
}
}
+
+ Runnable createShutDownThread() {
+ return new Runnable() {
+ @Override
+ public void run() {
+ LOG.info("Exiting, bbye..");
+ System.exit(-1);
+ }
+ };
+ }
}
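The patch above stops calling System.exit(-1) directly on the dispatcher thread and hands the exit to a dedicated thread instead. System.exit blocks until registered shutdown hooks finish, and a hook that stops the dispatcher typically joins the dispatcher thread, so exiting from that thread can deadlock. A small sketch of the pattern, with hypothetical class and thread names rather than the YARN code:

    class FatalErrorExit {
      static void exitInSeparateThread() {
        // System.exit waits for shutdown hooks; running it on its own thread means
        // a hook that joins the calling thread is not waiting on the very thread
        // that is blocked inside exit().
        Thread t = new Thread(new Runnable() {
          @Override
          public void run() {
            System.err.println("Exiting after fatal dispatcher error");
            System.exit(-1);
          }
        });
        t.setName("shutdown handler");
        t.start();
      }
    }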
From d6d3c1bbc6632ba5c5f20363fb38c583686b80c2 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Thu, 11 Dec 2014 12:36:13 -0800
Subject: [PATCH 044/432] HDFS-7515. Fix new findbugs warnings in hadoop-hdfs.
Contributed by Haohui Mai.
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../hadoop/hdfs/BlockReaderFactory.java | 2 -
.../apache/hadoop/hdfs/DFSOutputStream.java | 52 +++++++-------
.../hdfs/qjournal/server/JournalNode.java | 1 +
.../hadoop/hdfs/server/common/Storage.java | 2 +-
.../hdfs/server/datanode/BlockReceiver.java | 7 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 8 ++-
.../hdfs/server/datanode/DataStorage.java | 2 +-
.../fsdataset/impl/BlockPoolSlice.java | 16 ++---
.../fsdataset/impl/FsDatasetImpl.java | 33 ++++-----
.../web/webhdfs/ExceptionHandler.java | 7 +-
.../datanode/web/webhdfs/WebHdfsHandler.java | 8 ++-
.../hadoop/hdfs/server/mover/Mover.java | 5 +-
.../hdfs/server/namenode/FSDirRenameOp.java | 1 -
.../namenode/FSDirStatAndListingOp.java | 6 +-
.../hdfs/server/namenode/FSImageUtil.java | 4 +-
.../server/namenode/FileJournalManager.java | 2 +-
.../hadoop/hdfs/server/namenode/INode.java | 4 +-
.../server/namenode/NameNodeRpcServer.java | 8 ---
.../hdfs/server/namenode/NamenodeFsck.java | 4 --
.../namenode/XAttrPermissionFilter.java | 2 +-
.../apache/hadoop/hdfs/tools/DFSAdmin.java | 2 +-
.../DelimitedImageVisitor.java | 2 +-
.../offlineImageViewer/FSImageHandler.java | 71 +++++++++++--------
.../offlineImageViewer/FSImageLoader.java | 8 +--
.../FileDistributionCalculator.java | 13 ++--
.../FileDistributionVisitor.java | 4 +-
.../offlineImageViewer/LsImageVisitor.java | 6 +-
.../OfflineImageViewerPB.java | 47 ++++++------
.../offlineImageViewer/PBImageXmlWriter.java | 13 ++--
.../hadoop/hdfs/TestEncryptionZones.java | 6 +-
.../namenode/snapshot/TestSnapshot.java | 8 +--
.../TestOfflineImageViewer.java | 30 ++++----
33 files changed, 187 insertions(+), 199 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9049083033f9f..e2db1f68f44b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -572,6 +572,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7475. Make TestLazyPersistFiles#testLazyPersistBlocksAreSaved
deterministic. (Xiaoyu Yao via Arpit Agarwal)
+ HDFS-7515. Fix new findbugs warnings in hadoop-hdfs. (wheat9)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 13e0a5226854a..7e40917fff009 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -668,7 +668,6 @@ private BlockReader getRemoteBlockReaderFromTcp() throws IOException {
Peer peer = null;
try {
curPeer = nextTcpPeer();
- if (curPeer == null) break;
if (curPeer.fromCache) remainingCacheTries--;
peer = curPeer.peer;
blockReader = getRemoteBlockReader(peer);
@@ -699,7 +698,6 @@ private BlockReader getRemoteBlockReaderFromTcp() throws IOException {
}
}
}
- return null;
}
public static class BlockReaderPeer {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index e574d1d080d8f..67d314302641f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -39,6 +39,7 @@
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -241,8 +242,6 @@ private static class Packet {
/**
* Create a new packet.
*
- * @param pktSize maximum size of the packet,
- * including checksum data and actual data.
* @param chunksPerPkt maximum number of chunks per packet.
* @param offsetInBlock offset in bytes into the HDFS block.
*/
@@ -405,7 +404,8 @@ public DatanodeInfo load(DatanodeInfo key) throws Exception {
private String[] favoredNodes;
volatile boolean hasError = false;
volatile int errorIndex = -1;
- volatile int restartingNodeIndex = -1; // Restarting node index
+ // Restarting node index
+ AtomicInteger restartingNodeIndex = new AtomicInteger(-1);
private long restartDeadline = 0; // Deadline of DN restart
private BlockConstructionStage stage; // block construction stage
private long bytesSent = 0; // number of bytes that've been sent
@@ -556,7 +556,7 @@ public void run() {
try {
// process datanode IO errors if any
boolean doSleep = false;
- if (hasError && (errorIndex >= 0 || restartingNodeIndex >= 0)) {
+ if (hasError && (errorIndex >= 0 || restartingNodeIndex.get() >= 0)) {
doSleep = processDatanodeError();
}
@@ -699,7 +699,7 @@ public void run() {
}
} catch (Throwable e) {
// Log warning if there was a real error.
- if (restartingNodeIndex == -1) {
+ if (restartingNodeIndex.get() == -1) {
DFSClient.LOG.warn("DataStreamer Exception", e);
}
if (e instanceof IOException) {
@@ -708,7 +708,7 @@ public void run() {
setLastException(new IOException("DataStreamer Exception: ",e));
}
hasError = true;
- if (errorIndex == -1 && restartingNodeIndex == -1) {
+ if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
// Not a datanode issue
streamerClosed = true;
}
@@ -806,7 +806,7 @@ synchronized void setErrorIndex(int idx) {
/** Set the restarting node index. Called by responder */
synchronized void setRestartingNodeIndex(int idx) {
- restartingNodeIndex = idx;
+ restartingNodeIndex.set(idx);
// If the data streamer has already set the primary node
// bad, clear it. It is likely that the write failed due to
// the DN shutdown. Even if it was a real failure, the pipeline
@@ -821,7 +821,7 @@ synchronized void setRestartingNodeIndex(int idx) {
*/
synchronized void tryMarkPrimaryDatanodeFailed() {
// There should be no existing error and no ongoing restart.
- if ((errorIndex == -1) && (restartingNodeIndex == -1)) {
+ if ((errorIndex == -1) && (restartingNodeIndex.get() == -1)) {
errorIndex = 0;
}
}
@@ -962,7 +962,7 @@ public void run() {
synchronized (dataQueue) {
dataQueue.notifyAll();
}
- if (restartingNodeIndex == -1) {
+ if (restartingNodeIndex.get() == -1) {
DFSClient.LOG.warn("DFSOutputStream ResponseProcessor exception "
+ " for block " + block, e);
}
@@ -1186,7 +1186,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
// Sleep before reconnect if a dn is restarting.
// This process will be repeated until the deadline or the datanode
// starts back up.
- if (restartingNodeIndex >= 0) {
+ if (restartingNodeIndex.get() >= 0) {
// 4 seconds or the configured deadline period, whichever is shorter.
// This is the retry interval and recovery will be retried in this
// interval until timeout or success.
@@ -1196,7 +1196,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
Thread.sleep(delay);
} catch (InterruptedException ie) {
lastException.set(new IOException("Interrupted while waiting for " +
- "datanode to restart. " + nodes[restartingNodeIndex]));
+ "datanode to restart. " + nodes[restartingNodeIndex.get()]));
streamerClosed = true;
return false;
}
@@ -1237,21 +1237,21 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
setPipeline(newnodes, newStorageTypes, newStorageIDs);
// Just took care of a node error while waiting for a node restart
- if (restartingNodeIndex >= 0) {
+ if (restartingNodeIndex.get() >= 0) {
// If the error came from a node further away than the restarting
// node, the restart must have been complete.
- if (errorIndex > restartingNodeIndex) {
- restartingNodeIndex = -1;
- } else if (errorIndex < restartingNodeIndex) {
+ if (errorIndex > restartingNodeIndex.get()) {
+ restartingNodeIndex.set(-1);
+ } else if (errorIndex < restartingNodeIndex.get()) {
// the node index has shifted.
- restartingNodeIndex--;
+ restartingNodeIndex.decrementAndGet();
} else {
// this shouldn't happen...
assert false;
}
}
- if (restartingNodeIndex == -1) {
+ if (restartingNodeIndex.get() == -1) {
hasError = false;
}
lastException.set(null);
@@ -1293,10 +1293,10 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
}
- if (restartingNodeIndex >= 0) {
+ if (restartingNodeIndex.get() >= 0) {
assert hasError == true;
// check errorIndex set above
- if (errorIndex == restartingNodeIndex) {
+ if (errorIndex == restartingNodeIndex.get()) {
// ignore, if came from the restarting node
errorIndex = -1;
}
@@ -1306,8 +1306,8 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
}
// expired. declare the restarting node dead
restartDeadline = 0;
- int expiredNodeIndex = restartingNodeIndex;
- restartingNodeIndex = -1;
+ int expiredNodeIndex = restartingNodeIndex.get();
+ restartingNodeIndex.set(-1);
DFSClient.LOG.warn("Datanode did not restart in time: " +
nodes[expiredNodeIndex]);
// Mark the restarting node as failed. If there is any other failed
@@ -1459,7 +1459,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes,
// from the local datanode. Thus it is safe to treat this as a
// regular node error.
if (PipelineAck.isRestartOOBStatus(pipelineStatus) &&
- restartingNodeIndex == -1) {
+ restartingNodeIndex.get() == -1) {
checkRestart = true;
throw new IOException("A datanode is restarting.");
}
@@ -1476,10 +1476,10 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes,
assert null == blockStream : "Previous blockStream unclosed";
blockStream = out;
result = true; // success
- restartingNodeIndex = -1;
+ restartingNodeIndex.set(-1);
hasError = false;
} catch (IOException ie) {
- if (restartingNodeIndex == -1) {
+ if (restartingNodeIndex.get() == -1) {
DFSClient.LOG.info("Exception in createBlockOutputStream", ie);
}
if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
@@ -1511,10 +1511,10 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes,
if (checkRestart && shouldWaitForRestart(errorIndex)) {
restartDeadline = dfsClient.getConf().datanodeRestartTimeout +
Time.now();
- restartingNodeIndex = errorIndex;
+ restartingNodeIndex.set(errorIndex);
errorIndex = -1;
DFSClient.LOG.info("Waiting for the datanode to be restarted: " +
- nodes[restartingNodeIndex]);
+ nodes[restartingNodeIndex.get()]);
}
hasError = true;
setLastException(ie);
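Above, restartingNodeIndex moves from a volatile int to an AtomicInteger. volatile gives cross-thread visibility, but a compound update such as restartingNodeIndex-- is still three separate steps (read, modify, write) that two threads can interleave; AtomicInteger makes those updates atomic. A generic sketch of the difference, independent of DFSOutputStream:

    import java.util.concurrent.atomic.AtomicInteger;

    class RestartIndex {
      // Visible to other threads, but "indexVolatile--" on it is not atomic.
      private volatile int indexVolatile = -1;

      // Same visibility, plus atomic read-modify-write operations.
      private final AtomicInteger index = new AtomicInteger(-1);

      void shiftDown() {
        index.decrementAndGet();   // single atomic step
        // indexVolatile--;        // read + subtract + write; updates can be lost
      }

      boolean isRestarting() {
        return index.get() >= 0;
      }
    }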
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 588bc580ce54c..a5a40f1158fea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -233,6 +233,7 @@ private File getLogDir(String jid) {
Preconditions.checkArgument(jid != null &&
!jid.isEmpty(),
"bad journal identifier: %s", jid);
+ assert jid != null;
return new File(new File(dir), jid);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 31fdb84385683..e6bd5b289332d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -727,7 +727,7 @@ FileLock tryLock() throws IOException {
file.close();
throw e;
}
- if (res != null && !deletionHookAdded) {
+ if (!deletionHookAdded) {
// If the file existed prior to our startup, we didn't
// call deleteOnExit above. But since we successfully locked
// the dir, we can take care of cleaning it up.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2e388f9b9c6e2..08c96be86e6b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -29,6 +29,8 @@
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.LinkedList;
@@ -836,9 +838,8 @@ void receiveBlock(
LOG.warn("Failed to delete restart meta file: " +
restartMeta.getPath());
}
- FileWriter out = null;
- try {
- out = new FileWriter(restartMeta);
+ try (Writer out = new OutputStreamWriter(
+ new FileOutputStream(restartMeta), "UTF-8")) {
// write out the current time.
out.write(Long.toString(Time.now() + restartBudget));
out.flush();
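The BlockReceiver hunk above replaces FileWriter, which always uses the platform default charset, with an OutputStreamWriter that names UTF-8 explicitly, and wraps it in try-with-resources so the file is closed on both the normal and the error path. A standalone sketch of the same idiom; the class and method names here are illustrative only:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.OutputStreamWriter;
    import java.io.Writer;

    class TimestampFile {
      static void write(File f, long value) throws IOException {
        // try-with-resources closes the writer even if write() throws.
        try (Writer out = new OutputStreamWriter(new FileOutputStream(f), "UTF-8")) {
          out.write(Long.toString(value));
        }
      }
    }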
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 13c32d530adf2..899a7294e86b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -580,7 +580,8 @@ public IOException call() {
try {
IOException ioe = ioExceptionFuture.get();
if (ioe != null) {
- errorMessageBuilder.append(String.format("FAILED TO ADD: %s: %s\n",
+ errorMessageBuilder.append(
+ String.format("FAILED TO ADD: %s: %s%n",
volume, ioe.getMessage()));
LOG.error("Failed to add volume: " + volume, ioe);
} else {
@@ -588,8 +589,9 @@ public IOException call() {
LOG.info("Successfully added volume: " + volume);
}
} catch (Exception e) {
- errorMessageBuilder.append(String.format("FAILED to ADD: %s: %s\n",
- volume, e.getMessage()));
+ errorMessageBuilder.append(
+ String.format("FAILED to ADD: %s: %s%n", volume,
+ e.toString()));
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index c90ef954d923f..15e7f552bd09f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -425,7 +425,7 @@ synchronized void removeVolumes(Collection locations)
LOG.warn(String.format(
"I/O error attempting to unlock storage directory %s.",
sd.getRoot()), e);
- errorMsgBuilder.append(String.format("Failed to remove %s: %s\n",
+ errorMsgBuilder.append(String.format("Failed to remove %s: %s%n",
sd.getRoot(), e.getMessage()));
}
}
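The two hunks above change \n to %n inside String.format (and the DataNode hunk also switches to e.toString(), which stays informative when an exception has a null message). In a format string, %n expands to the platform line separator, while a literal \n is always a bare line feed, which is what findbugs flags. A short illustration:

    public class LineSeparators {
      public static void main(String[] args) {
        // %n -> System.lineSeparator(); \n -> always a single line feed
        String platform = String.format("FAILED TO ADD: %s: %s%n", "/data1", "disk error");
        String literal  = String.format("FAILED TO ADD: %s: %s\n", "/data1", "disk error");
        System.out.print(platform);
        System.out.print(literal);
      }
    }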
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 77cdb91ea4549..5a69e1e4d666d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -22,10 +22,13 @@
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStreamWriter;
import java.io.RandomAccessFile;
+import java.io.Writer;
import java.util.Scanner;
import org.apache.commons.io.FileUtils;
@@ -186,7 +189,7 @@ long loadDfsUsed() {
Scanner sc;
try {
- sc = new Scanner(new File(currentDir, DU_CACHE_FILE));
+ sc = new Scanner(new File(currentDir, DU_CACHE_FILE), "UTF-8");
} catch (FileNotFoundException fnfe) {
return -1;
}
@@ -227,23 +230,18 @@ void saveDfsUsed() {
outFile.getParent());
}
- FileWriter out = null;
try {
long used = getDfsUsed();
- if (used > 0) {
- out = new FileWriter(outFile);
+ try (Writer out = new OutputStreamWriter(
+ new FileOutputStream(outFile), "UTF-8")) {
// mtime is written last, so that truncated writes won't be valid.
out.write(Long.toString(used) + " " + Long.toString(Time.now()));
out.flush();
- out.close();
- out = null;
}
} catch (IOException ioe) {
// If write failed, the volume might be bad. Since the cache file is
// not critical, log the error and continue.
FsDatasetImpl.LOG.warn("Failed to write dfsUsed to " + outFile, ioe);
- } finally {
- IOUtils.cleanup(null, out);
}
}
@@ -447,7 +445,7 @@ void addToReplicasMap(ReplicaMap volumeMap, File dir,
File.pathSeparator + "." + file.getName() + ".restart");
Scanner sc = null;
try {
- sc = new Scanner(restartMeta);
+ sc = new Scanner(restartMeta, "UTF-8");
// The restart meta file exists
if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
// It didn't expire. Load the replica as a RBW.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2c6f409e050e0..538c796e791f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -769,7 +769,6 @@ private static void computeChecksum(File srcMeta, File dstMeta, File blockFile)
final byte[] crcs = new byte[checksum.getChecksumSize(data.length)];
DataOutputStream metaOut = null;
- InputStream dataIn = null;
try {
File parentFile = dstMeta.getParentFile();
if (parentFile != null) {
@@ -782,22 +781,23 @@ private static void computeChecksum(File srcMeta, File dstMeta, File blockFile)
new FileOutputStream(dstMeta), HdfsConstants.SMALL_BUFFER_SIZE));
BlockMetadataHeader.writeHeader(metaOut, checksum);
- dataIn = isNativeIOAvailable ?
+ int offset = 0;
+ try (InputStream dataIn = isNativeIOAvailable ?
NativeIO.getShareDeleteFileInputStream(blockFile) :
- new FileInputStream(blockFile);
+ new FileInputStream(blockFile)) {
- int offset = 0;
- for(int n; (n = dataIn.read(data, offset, data.length - offset)) != -1; ) {
- if (n > 0) {
- n += offset;
- offset = n % checksum.getBytesPerChecksum();
- final int length = n - offset;
+ for (int n; (n = dataIn.read(data, offset, data.length - offset)) != -1; ) {
+ if (n > 0) {
+ n += offset;
+ offset = n % checksum.getBytesPerChecksum();
+ final int length = n - offset;
- if (length > 0) {
- checksum.calculateChunkedSums(data, 0, length, crcs, 0);
- metaOut.write(crcs, 0, checksum.getChecksumSize(length));
+ if (length > 0) {
+ checksum.calculateChunkedSums(data, 0, length, crcs, 0);
+ metaOut.write(crcs, 0, checksum.getChecksumSize(length));
- System.arraycopy(data, length, data, 0, offset);
+ System.arraycopy(data, length, data, 0, offset);
+ }
}
}
}
@@ -806,7 +806,7 @@ private static void computeChecksum(File srcMeta, File dstMeta, File blockFile)
checksum.calculateChunkedSums(data, 0, offset, crcs, 0);
metaOut.write(crcs, 0, 4);
} finally {
- IOUtils.cleanup(LOG, dataIn, metaOut);
+ IOUtils.cleanup(LOG, metaOut);
}
}
@@ -1599,11 +1599,6 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
}
f = info.getBlockFile();
v = (FsVolumeImpl)info.getVolume();
- if (f == null) {
- errors.add("Failed to delete replica " + invalidBlks[i]
- + ": File not found, volume=" + v);
- continue;
- }
if (v == null) {
errors.add("Failed to delete replica " + invalidBlks[i]
+ ". No volume for this replica, file=" + f);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
index fea40d7ce9644..a7bb4907d9cff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
+import com.google.common.base.Charsets;
import com.sun.jersey.api.ParamException;
import com.sun.jersey.api.container.ContainerException;
import io.netty.buffer.Unpooled;
@@ -39,7 +40,7 @@
import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
import static io.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
-import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON;
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON_UTF8;
class ExceptionHandler {
static Log LOG = WebHdfsHandler.LOG;
@@ -82,11 +83,11 @@ static DefaultFullHttpResponse exceptionCaught(Throwable cause) {
s = INTERNAL_SERVER_ERROR;
}
- final byte[] js = JsonUtil.toJsonString(e).getBytes();
+ final byte[] js = JsonUtil.toJsonString(e).getBytes(Charsets.UTF_8);
DefaultFullHttpResponse resp =
new DefaultFullHttpResponse(HTTP_1_1, s, Unpooled.wrappedBuffer(js));
- resp.headers().set(CONTENT_TYPE, APPLICATION_JSON);
+ resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
resp.headers().set(CONTENT_LENGTH, js.length);
return resp;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index cf7021828836b..f02780a01d783 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -29,6 +29,7 @@
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.netty.handler.stream.ChunkedStream;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -77,7 +78,8 @@ public class WebHdfsHandler extends SimpleChannelInboundHandler {
public static final int WEBHDFS_PREFIX_LENGTH = WEBHDFS_PREFIX.length();
public static final String APPLICATION_OCTET_STREAM =
"application/octet-stream";
- public static final String APPLICATION_JSON = "application/json";
+ public static final String APPLICATION_JSON_UTF8 =
+ "application/json; charset=utf-8";
private final Configuration conf;
private final Configuration confForCreate;
@@ -224,11 +226,11 @@ private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
} finally {
IOUtils.cleanup(LOG, dfsclient);
}
- final byte[] js = JsonUtil.toJsonString(checksum).getBytes();
+ final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
DefaultFullHttpResponse resp =
new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));
- resp.headers().set(CONTENT_TYPE, APPLICATION_JSON);
+ resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
resp.headers().set(CONTENT_LENGTH, js.length);
resp.headers().set(CONNECTION, CLOSE);
ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
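Both WebHDFS hunks above make the JSON encoding explicit: a bare String.getBytes() uses the platform default charset, which may not match the application/json; charset=utf-8 header the handler now sends. A minimal sketch of encoding a response body with a fixed charset; it uses the JDK's StandardCharsets rather than the Guava/commons-io Charsets classes the patch imports:

    import java.nio.charset.StandardCharsets;

    class JsonBody {
      static byte[] encode(String json) {
        // Independent of -Dfile.encoding and the platform default charset.
        return json.getBytes(StandardCharsets.UTF_8);
      }

      public static void main(String[] args) {
        byte[] body = encode("{\"status\":\"ok\"}");
        System.out.println(body.length + " bytes for content-type application/json; charset=utf-8");
      }
    }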
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 108eb386a7457..a22f920099a40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -48,8 +48,10 @@
import org.apache.hadoop.util.ToolRunner;
import java.io.BufferedReader;
+import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
+import java.io.InputStreamReader;
import java.net.URI;
import java.text.DateFormat;
import java.util.*;
@@ -579,7 +581,8 @@ private static Options buildCliOptions() {
private static String[] readPathFile(String file) throws IOException {
List<String> list = Lists.newArrayList();
- BufferedReader reader = new BufferedReader(new FileReader(file));
+ BufferedReader reader = new BufferedReader(
+ new InputStreamReader(new FileInputStream(file), "UTF-8"));
try {
String line;
while ((line = reader.readLine()) != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 511de7a155897..c62c88e9f66b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -433,7 +433,6 @@ static boolean unprotectedRenameTo(
} else {
fsd.addLastINodeNoQuotaCheck(dstIIP, removedDst);
}
- assert removedDst != null;
if (removedDst.isReference()) {
final INodeReference removedDstRef = removedDst.asReference();
final INodeReference.WithCount wc = (INodeReference.WithCount)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 2e7ed6be39ddc..0f941710e6bde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
import org.apache.hadoop.fs.FileEncryptionInfo;
@@ -50,7 +51,7 @@ static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory
.getPathComponentsForReservedPath(srcArg);
- final String startAfterString = new String(startAfter);
+ final String startAfterString = new String(startAfter, Charsets.UTF_8);
final String src = fsd.resolvePath(pc, srcArg, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, true);
@@ -195,8 +196,7 @@ private static DirectoryListing getListing(
cur.getLocalStoragePolicyID():
BlockStoragePolicySuite.ID_UNSPECIFIED;
listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), cur,
- needLocation, fsd.getStoragePolicyID(curPolicy,
- parentStoragePolicy), snapshot, isRawPath, inodesInPath);
+ needLocation, fsd.getStoragePolicyID(curPolicy, parentStoragePolicy), snapshot, isRawPath, inodesInPath);
listingCnt++;
if (needLocation) {
// Once we hit lsLimit locations, stop.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
index 931386cae9a55..388a1bf0cce2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
@@ -23,6 +23,7 @@
import java.io.RandomAccessFile;
import java.util.Arrays;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
@@ -32,7 +33,8 @@
@InterfaceAudience.Private
public final class FSImageUtil {
- public static final byte[] MAGIC_HEADER = "HDFSIMG1".getBytes();
+ public static final byte[] MAGIC_HEADER =
+ "HDFSIMG1".getBytes(Charsets.UTF_8);
public static final int FILE_VERSION = 1;
public static boolean checkFileFormat(RandomAccessFile file)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 6001db5ccea6a..921803c96c4b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -300,7 +300,7 @@ private static List matchEditLogs(File[] filesInStorage,
.matcher(name);
if (staleInprogressEditsMatch.matches()) {
try {
- long startTxId = Long.valueOf(staleInprogressEditsMatch.group(1));
+ long startTxId = Long.parseLong(staleInprogressEditsMatch.group(1));
ret.add(new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID,
true));
continue;
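Long.valueOf returns a boxed Long that is then auto-unboxed into the long variable; Long.parseLong parses straight to a primitive, which is what findbugs recommends when the result is assigned to a primitive. The same reasoning applies to the Integer.parseInt and Long.parseLong changes in the offline image viewer classes later in this patch. For instance:

    public class ParseExample {
      public static void main(String[] args) {
        String txId = "42";
        long boxedThenUnboxed = Long.valueOf(txId);   // boxes, then auto-unboxes
        long primitiveDirect  = Long.parseLong(txId); // parses directly to a primitive
        System.out.println(boxedThenUnboxed == primitiveDirect);
      }
    }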
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 44549303f925a..55430b7ead529 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -20,6 +20,7 @@
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.StringWriter;
+import java.nio.charset.Charset;
import java.util.List;
import org.apache.commons.logging.Log;
@@ -769,8 +770,7 @@ public final StringBuffer dumpTreeRecursively() {
@VisibleForTesting
public final void dumpTreeRecursively(PrintStream out) {
- dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(),
- Snapshot.CURRENT_STATE_ID);
+ out.println(dumpTreeRecursively().toString());
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 876afbaa2df67..db22c4be817d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -974,10 +974,6 @@ public long[] getStats() throws IOException {
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
throws IOException {
DatanodeInfo results[] = namesystem.datanodeReport(type);
- if (results == null ) {
- throw new IOException("Failed to get datanode report for " + type
- + " datanodes.");
- }
return results;
}
@@ -985,10 +981,6 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
public DatanodeStorageReport[] getDatanodeStorageReport(
DatanodeReportType type) throws IOException {
final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
- if (reports == null ) {
- throw new IOException("Failed to get datanode storage report for " + type
- + " datanodes.");
- }
return reports;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index bab8f5e1b3176..5eddeea4cffad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -643,10 +643,6 @@ private void copyBlocksToLostFound(String parent, HdfsFileStatus file,
}
if (fos == null) {
fos = dfs.create(target + "/" + chain, true);
- if (fos == null) {
- throw new IOException("Failed to copy " + fullName +
- " to /lost+found: could not store chain " + chain);
- }
chain++;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index 79dabb33899e8..95f943da9289c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -100,7 +100,7 @@ static void checkPermissionForApi(FSPermissionChecker pc,
static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
List<XAttr> xAttrs, boolean isRawPath) {
assert xAttrs != null : "xAttrs can not be null";
- if (xAttrs == null || xAttrs.isEmpty()) {
+ if (xAttrs.isEmpty()) {
return xAttrs;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 484ac1238be07..4073d5fa8f187 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1476,7 +1476,7 @@ int getReconfigurationStatus(String nodeType, String address,
} else {
out.print("FAILED: ");
}
- out.printf("Change property %s\n\tFrom: \"%s\"\n\tTo: \"%s\"\n",
+ out.printf("Change property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n",
result.getKey().prop, result.getKey().oldVal,
result.getKey().newVal);
if (result.getValue().isPresent()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
index eb6cae3d58a7a..bc5ff5608c812 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
@@ -144,7 +144,7 @@ void visit(ImageElement element, String value) throws IOException {
// Special case of file size, which is sum of the num bytes in each block
if(element == ImageElement.NUM_BYTES)
- fileSize += Long.valueOf(value);
+ fileSize += Long.parseLong(value);
if(elements.containsKey(element) && element != ImageElement.NUM_BYTES)
elements.put(element, value);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
index eb93c87fc1eba..43fcd69a839a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
@@ -17,11 +17,7 @@
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
+import com.google.common.base.Charsets;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
@@ -30,19 +26,31 @@
import io.netty.channel.group.ChannelGroup;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.DefaultHttpResponse;
-import static io.netty.handler.codec.http.HttpResponseStatus.*;
-
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponseStatus;
-import static io.netty.handler.codec.http.HttpVersion.*;
import io.netty.handler.codec.http.QueryStringDecoder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.web.JsonUtil;
-import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE;
+import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
+import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
+import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
+import static io.netty.handler.codec.http.HttpResponseStatus.METHOD_NOT_ALLOWED;
+import static io.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
+import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON_UTF8;
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX;
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX_LENGTH;
/**
* Implement the read-only WebHDFS API for fsimage.
*/
@@ -67,7 +75,7 @@ public void channelRead0(ChannelHandlerContext ctx, HttpRequest request)
if (request.getMethod() != HttpMethod.GET) {
DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1,
METHOD_NOT_ALLOWED);
- resp.headers().set("Connection", "close");
+ resp.headers().set(CONNECTION, CLOSE);
ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
return;
}
@@ -77,24 +85,29 @@ public void channelRead0(ChannelHandlerContext ctx, HttpRequest request)
final String content;
String path = getPath(decoder);
- if ("GETFILESTATUS".equals(op)) {
- content = image.getFileStatus(path);
- } else if ("LISTSTATUS".equals(op)) {
- content = image.listStatus(path);
- } else if ("GETACLSTATUS".equals(op)) {
- content = image.getAclStatus(path);
- } else {
- throw new IllegalArgumentException("Invalid value for webhdfs parameter" + " \"op\"");
+ switch (op) {
+ case "GETFILESTATUS":
+ content = image.getFileStatus(path);
+ break;
+ case "LISTSTATUS":
+ content = image.listStatus(path);
+ break;
+ case "GETACLSTATUS":
+ content = image.getAclStatus(path);
+ break;
+ default:
+ throw new IllegalArgumentException(
+ "Invalid value for webhdfs parameter" + " \"op\"");
}
LOG.info("op=" + op + " target=" + path);
DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
HTTP_1_1, HttpResponseStatus.OK,
- Unpooled.wrappedBuffer(content.getBytes()));
- resp.headers().set("Content-Type", "application/json");
- resp.headers().set("Content-Length", resp.content().readableBytes());
- resp.headers().set("Connection", "close");
+ Unpooled.wrappedBuffer(content.getBytes(Charsets.UTF_8)));
+ resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
+ resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
+ resp.headers().set(CONNECTION, CLOSE);
ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
}
@@ -109,19 +122,19 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
Exception e = cause instanceof Exception ? (Exception) cause : new
Exception(cause);
final String output = JsonUtil.toJsonString(e);
- ByteBuf content = Unpooled.wrappedBuffer(output.getBytes());
+ ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
HTTP_1_1, INTERNAL_SERVER_ERROR, content);
- resp.headers().set("Content-Type", "application/json");
+ resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
if (e instanceof IllegalArgumentException) {
resp.setStatus(BAD_REQUEST);
} else if (e instanceof FileNotFoundException) {
resp.setStatus(NOT_FOUND);
}
- resp.headers().set("Content-Length", resp.content().readableBytes());
- resp.headers().set("Connection", "close");
+ resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
+ resp.headers().set(CONNECTION, CLOSE);
ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
}
@@ -134,11 +147,11 @@ private static String getOp(QueryStringDecoder decoder) {
private static String getPath(QueryStringDecoder decoder)
throws FileNotFoundException {
String path = decoder.path();
- if (path.startsWith("/webhdfs/v1/")) {
- return path.substring(11);
+ if (path.startsWith(WEBHDFS_PREFIX)) {
+ return path.substring(WEBHDFS_PREFIX_LENGTH);
} else {
throw new FileNotFoundException("Path: " + path + " should " +
- "start with \"/webhdfs/v1/\"");
+ "start with " + WEBHDFS_PREFIX);
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index a26f1bf6cd7a9..2f2fa5fa38694 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -111,17 +111,15 @@ static FSImageLoader load(String inputFile) throws IOException {
}
FsImageProto.FileSummary summary = FSImageUtil.loadSummary(file);
- FileInputStream fin = null;
- try {
+
+ try (FileInputStream fin = new FileInputStream(file.getFD())) {
// Map to record INodeReference to the referred id
ImmutableList refIdList = null;
String[] stringTable = null;
byte[][] inodes = null;
Map dirmap = null;
- fin = new FileInputStream(file.getFD());
-
ArrayList sections =
Lists.newArrayList(summary.getSectionsList());
Collections.sort(sections,
@@ -169,8 +167,6 @@ public int compare(FsImageProto.FileSummary.Section s1,
}
}
return new FSImageLoader(stringTable, inodes, dirmap);
- } finally {
- IOUtils.cleanup(null, fin);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index 61c36506097d7..056ad96a5ba1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -21,7 +21,7 @@
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
-import java.io.PrintWriter;
+import java.io.PrintStream;
import java.io.RandomAccessFile;
import org.apache.hadoop.conf.Configuration;
@@ -30,7 +30,6 @@
import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
-import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.LimitInputStream;
import com.google.common.base.Preconditions;
@@ -67,7 +66,7 @@ final class FileDistributionCalculator {
private final Configuration conf;
private final long maxSize;
private final int steps;
- private final PrintWriter out;
+ private final PrintStream out;
private final int[] distribution;
private int totalFiles;
@@ -77,7 +76,7 @@ final class FileDistributionCalculator {
private long maxFileSize;
FileDistributionCalculator(Configuration conf, long maxSize, int steps,
- PrintWriter out) {
+ PrintStream out) {
this.conf = conf;
this.maxSize = maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize;
this.steps = steps == 0 ? INTERVAL_DEFAULT : steps;
@@ -96,9 +95,7 @@ void visit(RandomAccessFile file) throws IOException {
}
FileSummary summary = FSImageUtil.loadSummary(file);
- FileInputStream in = null;
- try {
- in = new FileInputStream(file.getFD());
+ try (FileInputStream in = new FileInputStream(file.getFD())) {
for (FileSummary.Section s : summary.getSectionsList()) {
if (SectionName.fromString(s.getName()) != SectionName.INODE) {
continue;
@@ -111,8 +108,6 @@ void visit(RandomAccessFile file) throws IOException {
run(is);
output();
}
- } finally {
- IOUtils.cleanup(null, in);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
index f293db44d3d67..146d00a85ee20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
@@ -159,10 +159,10 @@ void visit(ImageElement element, String value) throws IOException {
current.path = (value.equals("") ? "/" : value);
break;
case REPLICATION:
- current.replication = Integer.valueOf(value);
+ current.replication = Integer.parseInt(value);
break;
case NUM_BYTES:
- current.fileSize += Long.valueOf(value);
+ current.fileSize += Long.parseLong(value);
break;
default:
break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
index 6e303a9f16159..7d229dbf98d73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
@@ -135,7 +135,7 @@ void visit(ImageElement element, String value) throws IOException {
perms = value;
break;
case REPLICATION:
- replication = Integer.valueOf(value);
+ replication = Integer.parseInt(value);
break;
case USER_NAME:
username = value;
@@ -144,7 +144,7 @@ void visit(ImageElement element, String value) throws IOException {
group = value;
break;
case NUM_BYTES:
- filesize += Long.valueOf(value);
+ filesize += Long.parseLong(value);
break;
case MODIFICATION_TIME:
modTime = value;
@@ -173,6 +173,6 @@ void visitEnclosingElement(ImageElement element,
if(element == ImageElement.INODE)
newLine();
else if (element == ImageElement.BLOCKS)
- numBlocks = Integer.valueOf(value);
+ numBlocks = Integer.parseInt(value);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index f02acaed64956..4fce6a3c0711a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -18,9 +18,8 @@
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.EOFException;
-import java.io.File;
import java.io.IOException;
-import java.io.PrintWriter;
+import java.io.PrintStream;
import java.io.RandomAccessFile;
import org.apache.commons.cli.CommandLine;
@@ -33,7 +32,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
/**
@@ -144,36 +142,33 @@ public static int run(String[] args) throws Exception {
String processor = cmd.getOptionValue("p", "Web");
String outputFile = cmd.getOptionValue("o", "-");
- PrintWriter out = outputFile.equals("-") ?
- new PrintWriter(System.out) : new PrintWriter(new File(outputFile));
-
Configuration conf = new Configuration();
- try {
- if (processor.equals("FileDistribution")) {
- long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
- int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
- new FileDistributionCalculator(conf, maxSize, step, out)
- .visit(new RandomAccessFile(inputFile, "r"));
- } else if (processor.equals("XML")) {
- new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
- "r"));
- } else if (processor.equals("Web")) {
- String addr = cmd.getOptionValue("addr", "localhost:5978");
- WebImageViewer viewer = new WebImageViewer(NetUtils.createSocketAddr
- (addr));
- try {
- viewer.start(inputFile);
- } finally {
- viewer.close();
- }
+ try (PrintStream out = outputFile.equals("-") ?
+ System.out : new PrintStream(outputFile, "UTF-8")) {
+ switch (processor) {
+ case "FileDistribution":
+ long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
+ int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
+ new FileDistributionCalculator(conf, maxSize, step, out).visit(
+ new RandomAccessFile(inputFile, "r"));
+ break;
+ case "XML":
+ new PBImageXmlWriter(conf, out).visit(
+ new RandomAccessFile(inputFile, "r"));
+ break;
+ case "Web":
+ String addr = cmd.getOptionValue("addr", "localhost:5978");
+ try (WebImageViewer viewer = new WebImageViewer(
+ NetUtils.createSocketAddr(addr))) {
+ viewer.start(inputFile);
+ }
+ break;
}
return 0;
} catch (EOFException e) {
System.err.println("Input file ended unexpectedly. Exiting");
} catch (IOException e) {
System.err.println("Encountered exception. Exiting: " + e.getMessage());
- } finally {
- IOUtils.cleanup(null, out);
}
return -1;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 3e3f0217ab34a..f3fe886089705 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -21,7 +21,7 @@
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
-import java.io.PrintWriter;
+import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Collections;
@@ -50,7 +50,6 @@
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
import org.apache.hadoop.hdfs.util.XMLUtils;
-import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.LimitInputStream;
import com.google.common.collect.Lists;
@@ -62,10 +61,10 @@
@InterfaceAudience.Private
public final class PBImageXmlWriter {
private final Configuration conf;
- private final PrintWriter out;
+ private final PrintStream out;
private String[] stringTable;
- public PBImageXmlWriter(Configuration conf, PrintWriter out) {
+ public PBImageXmlWriter(Configuration conf, PrintStream out) {
this.conf = conf;
this.out = out;
}
@@ -76,9 +75,7 @@ public void visit(RandomAccessFile file) throws IOException {
}
FileSummary summary = FSImageUtil.loadSummary(file);
- FileInputStream fin = null;
- try {
- fin = new FileInputStream(file.getFD());
+ try (FileInputStream fin = new FileInputStream(file.getFD())) {
out.print("\n");
ArrayList sections = Lists.newArrayList(summary
@@ -140,8 +137,6 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
}
}
out.print("\n");
- } finally {
- IOUtils.cleanup(null, fin);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 603bf6e0cb224..cc00055c34a37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -1265,11 +1265,11 @@ public void testOfflineImageViewerOnEncryptionZones() throws Exception {
}
// Run the XML OIV processor
- StringWriter output = new StringWriter();
- PrintWriter pw = new PrintWriter(output);
+ ByteArrayOutputStream output = new ByteArrayOutputStream();
+ PrintStream pw = new PrintStream(output);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), pw);
v.visit(new RandomAccessFile(originalFsimage, "r"));
- final String xml = output.getBuffer().toString();
+ final String xml = output.toString();
SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index 12fba733818de..b20e2ad4de515 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -25,15 +25,15 @@
import java.io.File;
import java.io.IOException;
-import java.io.PrintWriter;
+import java.io.PrintStream;
import java.io.RandomAccessFile;
-import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Random;
+import org.apache.commons.io.output.NullOutputStream;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -57,6 +57,7 @@
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
@@ -256,8 +257,7 @@ public void testOfflineImageViewer() throws Exception {
FSImageTestUtil.getFSImage(
cluster.getNameNode()).getStorage().getStorageDir(0));
assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
- StringWriter output = new StringWriter();
- PrintWriter o = new PrintWriter(output);
+ PrintStream o = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
v.visit(new RandomAccessFile(originalFsimage, "r"));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 36b5201bdc8f1..4bb2b79593e21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -20,18 +20,18 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
-import java.io.FileWriter;
import java.io.IOException;
+import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.RandomAccessFile;
import java.io.StringReader;
import java.io.StringWriter;
import java.net.HttpURLConnection;
import java.net.URI;
-import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collections;
import java.util.Comparator;
@@ -43,6 +43,7 @@
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
+import org.apache.commons.io.output.NullOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -186,10 +187,10 @@ private static FileStatus pathToFileEntry(FileSystem hdfs, String file)
@Test(expected = IOException.class)
public void testTruncatedFSImage() throws IOException {
File truncatedFile = folder.newFile();
- StringWriter output = new StringWriter();
+ PrintStream output = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
copyPartOfFile(originalFsimage, truncatedFile);
- new FileDistributionCalculator(new Configuration(), 0, 0, new PrintWriter(
- output)).visit(new RandomAccessFile(truncatedFile, "r"));
+ new FileDistributionCalculator(new Configuration(), 0, 0, output)
+ .visit(new RandomAccessFile(truncatedFile, "r"));
}
private void copyPartOfFile(File src, File dest) throws IOException {
@@ -208,20 +209,21 @@ private void copyPartOfFile(File src, File dest) throws IOException {
@Test
public void testFileDistributionCalculator() throws IOException {
- StringWriter output = new StringWriter();
- PrintWriter o = new PrintWriter(output);
+ ByteArrayOutputStream output = new ByteArrayOutputStream();
+ PrintStream o = new PrintStream(output);
new FileDistributionCalculator(new Configuration(), 0, 0, o)
.visit(new RandomAccessFile(originalFsimage, "r"));
o.close();
+ String outputString = output.toString();
Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
- Matcher matcher = p.matcher(output.getBuffer());
+ Matcher matcher = p.matcher(outputString);
assertTrue(matcher.find() && matcher.groupCount() == 1);
int totalFiles = Integer.parseInt(matcher.group(1));
assertEquals(NUM_DIRS * FILES_PER_DIR, totalFiles);
p = Pattern.compile("totalDirectories = (\\d+)\n");
- matcher = p.matcher(output.getBuffer());
+ matcher = p.matcher(outputString);
assertTrue(matcher.find() && matcher.groupCount() == 1);
int totalDirs = Integer.parseInt(matcher.group(1));
// totalDirs includes root directory, empty directory, and xattr directory
@@ -236,7 +238,7 @@ public int compare(FileStatus first, FileStatus second) {
}
});
p = Pattern.compile("maxFileSize = (\\d+)\n");
- matcher = p.matcher(output.getBuffer());
+ matcher = p.matcher(output.toString("UTF-8"));
assertTrue(matcher.find() && matcher.groupCount() == 1);
assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
}
@@ -252,13 +254,13 @@ public void testFileDistributionCalculatorWithOptions() throws Exception {
@Test
public void testPBImageXmlWriter() throws IOException, SAXException,
ParserConfigurationException {
- StringWriter output = new StringWriter();
- PrintWriter o = new PrintWriter(output);
+ ByteArrayOutputStream output = new ByteArrayOutputStream();
+ PrintStream o = new PrintStream(output);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
v.visit(new RandomAccessFile(originalFsimage, "r"));
SAXParserFactory spf = SAXParserFactory.newInstance();
SAXParser parser = spf.newSAXParser();
- final String xml = output.getBuffer().toString();
+ final String xml = output.toString();
parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
}
@@ -298,7 +300,7 @@ public void testWebImageViewer() throws Exception {
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
// LISTSTATUS operation to an invalid prefix
- url = new URL("http://localhost:" + port + "/webhdfs/v1?op=LISTSTATUS");
+ url = new URL("http://localhost:" + port + "/foo");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
// GETFILESTATUS operation
From 0649a5cb5acbd0fd39fcf032993ff1a66ef923f4 Mon Sep 17 00:00:00 2001
From: Gera Shegalov
Date: Thu, 11 Dec 2014 12:25:25 -0800
Subject: [PATCH 045/432] HADOOP-11211.
mapreduce.job.classloader.system.classes semantics should be
order-independent. (Yitong Zhou via gera)
---
.../hadoop-common/CHANGES.txt | 3 +++
.../hadoop/util/ApplicationClassLoader.java | 25 +++++++++++++++----
.../util/TestApplicationClassLoader.java | 8 ++++--
.../src/main/resources/mapred-default.xml | 21 +++++++++++++---
4 files changed, 46 insertions(+), 11 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 47d36e4d7d2d6..d923b87238b33 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -571,6 +571,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11386. Replace \n by %n in format hadoop-common format strings.
(Li Lu via wheat9)
+
+ HADOOP-11211. mapreduce.job.classloader.system.classes semantics should be
+ order-independent. (Yitong Zhou via gera)
Release 2.6.0 - 2014-11-18
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
index d2ab015567d86..9f16b61044f2b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
@@ -216,28 +216,43 @@ protected synchronized Class<?> loadClass(String name, boolean resolve)
return c;
}
+ /**
+ * Checks if a class should be included as a system class.
+ *
+ * A class is a system class if and only if it matches one of the positive
+ * patterns and none of the negative ones.
+ *
+ * @param name the class name to check
+ * @param systemClasses a list of system class configurations.
+ * @return true if the class is a system class
+ */
public static boolean isSystemClass(String name, List<String> systemClasses) {
+ boolean result = false;
if (systemClasses != null) {
String canonicalName = name.replace('/', '.');
while (canonicalName.startsWith(".")) {
canonicalName=canonicalName.substring(1);
}
for (String c : systemClasses) {
- boolean result = true;
+ boolean shouldInclude = true;
if (c.startsWith("-")) {
c = c.substring(1);
- result = false;
+ shouldInclude = false;
}
if (canonicalName.startsWith(c)) {
if ( c.endsWith(".") // package
|| canonicalName.length() == c.length() // class
|| canonicalName.length() > c.length() // nested
&& canonicalName.charAt(c.length()) == '$' ) {
- return result;
+ if (shouldInclude) {
+ result = true;
+ } else {
+ return false;
+ }
}
}
}
}
- return false;
+ return result;
}
-}
\ No newline at end of file
+}
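For illustration, a minimal standalone sketch (not part of the patch) of how the rewritten isSystemClass() evaluates a mixed positive/negative list; the org.example class names are hypothetical:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.util.ApplicationClassLoader;

    public class SystemClassOrderDemo {
      public static void main(String[] args) {
        // Same patterns, opposite order.
        List<String> a = Arrays.asList("-org.example.Foo", "org.example.");
        List<String> b = Arrays.asList("org.example.", "-org.example.Foo");

        // With the fix, a matching negative pattern wins regardless of its position:
        System.out.println(ApplicationClassLoader.isSystemClass("org.example.Foo", a)); // false
        System.out.println(ApplicationClassLoader.isSystemClass("org.example.Foo", b)); // false (previously true)

        // Classes that only match the positive package pattern remain system classes.
        System.out.println(ApplicationClassLoader.isSystemClass("org.example.Bar", b)); // true
      }
    }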
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
index cc16493758e23..be8e61ea23ae6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
@@ -87,7 +87,7 @@ public void testConstructUrlsFromClasspath() throws Exception {
assertEquals(jarFile.toURI().toURL(), urls[2]);
// nofile should be ignored
}
-
+
@Test
public void testIsSystemClass() {
testIsSystemClassInternal("");
@@ -112,8 +112,12 @@ private void testIsSystemClassInternal(String nestedClass) {
classes("-org.example.Foo,org.example.")));
assertTrue(isSystemClass("org.example.Bar" + nestedClass,
classes("-org.example.Foo.,org.example.")));
+ assertFalse(isSystemClass("org.example.Foo" + nestedClass,
+ classes("org.example.,-org.example.Foo")));
+ assertFalse(isSystemClass("org.example.Foo" + nestedClass,
+ classes("org.example.Foo,-org.example.Foo")));
}
-
+
private List<String> classes(String classes) {
return Lists.newArrayList(Splitter.on(',').split(classes));
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 00a89c95b12b9..6e0deaa83733f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1363,10 +1363,23 @@
Used to override the default definition of the system classes for
the job classloader. The system classes are a comma-separated list of
- classes that should be loaded from the system classpath, not the
- user-supplied JARs, when mapreduce.job.classloader is enabled. Names ending
- in '.' (period) are treated as package names, and names starting with a '-'
- are treated as negative matches.
+ patterns that indicate whether to load a class from the system classpath,
+ instead of from the user-supplied JARs, when mapreduce.job.classloader is
+ enabled.
+
+ A positive pattern is defined as:
+ 1. A single class name 'C' that matches 'C' and transitively all nested
+ classes 'C$*' defined in C;
+ 2. A package name ending with a '.' (e.g., "com.example.") that matches
+ all classes from that package.
+ A negative pattern is defined by a '-' in front of a positive pattern
+ (e.g., "-com.example.").
+
+ A class is considered a system class if and only if it matches one of the
+ positive patterns and none of the negative ones. More formally:
+ A class is a member of the inclusion set I if it matches one of the positive
+ patterns. A class is a member of the exclusion set E if it matches one of
+ the negative patterns. The set of system classes S = I \ E.
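A short sketch of the pattern grammar described above, evaluated with the same isSystemClass() helper; the list and the class names are made up for illustration:

    List<String> systemClasses = Arrays.asList(
        "java.",                          // package pattern
        "org.apache.hadoop.",             // package pattern
        "-org.apache.hadoop.examples.",   // negative package pattern
        "com.example.Util");              // single-class pattern (also covers Util$* nested classes)

    isSystemClass("java.lang.String", systemClasses);                     // true:  in I, not in E
    isSystemClass("org.apache.hadoop.examples.WordCount", systemClasses); // false: in I and in E
    isSystemClass("com.example.Util$Helper", systemClasses);              // true:  nested class of Util
    isSystemClass("com.example.Other", systemClasses);                    // false: not in I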
From 7974ee15d501b8e25d00a0f55585ff3c91f70845 Mon Sep 17 00:00:00 2001
From: Brandon Li
Date: Thu, 11 Dec 2014 15:40:45 -0800
Subject: [PATCH 046/432] HDFS-7449. Add metrics to NFS gateway. Contributed by
Brandon Li
---
.../hadoop/hdfs/nfs/conf/NfsConfigKeys.java | 3 +
.../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java | 3 +-
.../hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java | 220 ++++++++++++++++++
.../hadoop/hdfs/nfs/nfs3/Nfs3Utils.java | 4 +
.../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 17 +-
.../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 49 +++-
.../apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java | 4 +-
.../hadoop/hdfs/nfs/nfs3/WriteManager.java | 6 +-
.../hdfs/nfs/nfs3/TestNfs3HttpServer.java | 4 +
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
10 files changed, 297 insertions(+), 15 deletions(-)
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 7566791b06358..9e4aaf538f0ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -70,4 +70,7 @@ public class NfsConfigKeys {
public static final int NFS_HTTPS_PORT_DEFAULT = 50579;
public static final String NFS_HTTPS_ADDRESS_KEY = "nfs.https.address";
public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + NFS_HTTPS_PORT_DEFAULT;
+
+ public static final String NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals";
+ public static final String NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
index 3daf7bb68db9b..ac9abf8b02b00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
@@ -42,7 +42,8 @@ public Nfs3(NfsConfiguration conf) throws IOException {
public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
boolean allowInsecurePorts) throws IOException {
- super(new RpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf);
+ super(RpcProgramNfs3.createRpcProgramNfs3(conf, registrationSocket,
+ allowInsecurePorts), conf);
mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
new file mode 100644
index 0000000000000..d36ea732f0f2d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * This class is for maintaining the various NFS gateway activity statistics and
+ * publishing them through the metrics interfaces.
+ */
+@InterfaceAudience.Private
+@Metrics(about = "Nfs3 metrics", context = "dfs")
+public class Nfs3Metrics {
+ // All mutable rates are in nanoseconds
+ // No metric for nullProcedure;
+ @Metric MutableRate getattr;
+ @Metric MutableRate setattr;
+ @Metric MutableRate lookup;
+ @Metric MutableRate access;
+ @Metric MutableRate readlink;
+ @Metric MutableRate read;
+ final MutableQuantiles[] readNanosQuantiles;
+ @Metric MutableRate write;
+ final MutableQuantiles[] writeNanosQuantiles;
+ @Metric MutableRate create;
+ @Metric MutableRate mkdir;
+ @Metric MutableRate symlink;
+ @Metric MutableRate mknod;
+ @Metric MutableRate remove;
+ @Metric MutableRate rmdir;
+ @Metric MutableRate rename;
+ @Metric MutableRate link;
+ @Metric MutableRate readdir;
+ @Metric MutableRate readdirplus;
+ @Metric MutableRate fsstat;
+ @Metric MutableRate fsinfo;
+ @Metric MutableRate pathconf;
+ @Metric MutableRate commit;
+ final MutableQuantiles[] commitNanosQuantiles;
+
+ @Metric MutableCounterLong bytesWritten;
+ @Metric MutableCounterLong bytesRead;
+
+ final MetricsRegistry registry = new MetricsRegistry("nfs3");
+ final String name;
+ JvmMetrics jvmMetrics = null;
+
+ public Nfs3Metrics(String name, String sessionId, int[] intervals,
+ final JvmMetrics jvmMetrics) {
+ this.name = name;
+ this.jvmMetrics = jvmMetrics;
+ registry.tag(SessionId, sessionId);
+
+ final int len = intervals.length;
+ readNanosQuantiles = new MutableQuantiles[len];
+ writeNanosQuantiles = new MutableQuantiles[len];
+ commitNanosQuantiles = new MutableQuantiles[len];
+
+ for (int i = 0; i < len; i++) {
+ int interval = intervals[i];
+ readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
+ + interval + "s", "Read process in ns", "ops", "latency", interval);
+ writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
+ + interval + "s", " process in ns", "ops", "latency", interval);
+ commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
+ + interval + "s", "Read process in ns", "ops", "latency", interval);
+ }
+ }
+
+ public static Nfs3Metrics create(Configuration conf, String gatewayName) {
+ String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+ MetricsSystem ms = DefaultMetricsSystem.instance();
+ JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
+
+ // Percentile measurement is disabled by default (no intervals configured)
+ int[] intervals = conf.getInts(conf.get(
+ NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY,
+ NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT));
+ return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public JvmMetrics getJvmMetrics() {
+ return jvmMetrics;
+ }
+
+ public void incrBytesWritten(long bytes) {
+ bytesWritten.incr(bytes);
+ }
+
+ public void incrBytesRead(long bytes) {
+ bytesRead.incr(bytes);
+ }
+
+ public void addGetattr(long latencyNanos) {
+ getattr.add(latencyNanos);
+ }
+
+ public void addSetattr(long latencyNanos) {
+ setattr.add(latencyNanos);
+ }
+
+ public void addLookup(long latencyNanos) {
+ lookup.add(latencyNanos);
+ }
+
+ public void addAccess(long latencyNanos) {
+ access.add(latencyNanos);
+ }
+
+ public void addReadlink(long latencyNanos) {
+ readlink.add(latencyNanos);
+ }
+
+ public void addRead(long latencyNanos) {
+ read.add(latencyNanos);
+ for (MutableQuantiles q : readNanosQuantiles) {
+ q.add(latencyNanos);
+ }
+ }
+
+ public void addWrite(long latencyNanos) {
+ write.add(latencyNanos);
+ for (MutableQuantiles q : writeNanosQuantiles) {
+ q.add(latencyNanos);
+ }
+ }
+
+ public void addCreate(long latencyNanos) {
+ create.add(latencyNanos);
+ }
+
+ public void addMkdir(long latencyNanos) {
+ mkdir.add(latencyNanos);
+ }
+
+ public void addSymlink(long latencyNanos) {
+ symlink.add(latencyNanos);
+ }
+
+ public void addMknod(long latencyNanos) {
+ mknod.add(latencyNanos);
+ }
+
+ public void addRemove(long latencyNanos) {
+ remove.add(latencyNanos);
+ }
+
+ public void addRmdir(long latencyNanos) {
+ rmdir.add(latencyNanos);
+ }
+
+ public void addRename(long latencyNanos) {
+ rename.add(latencyNanos);
+ }
+
+ public void addLink(long latencyNanos) {
+ link.add(latencyNanos);
+ }
+
+ public void addReaddir(long latencyNanos) {
+ readdir.add(latencyNanos);
+ }
+
+ public void addReaddirplus(long latencyNanos) {
+ readdirplus.add(latencyNanos);
+ }
+
+ public void addFsstat(long latencyNanos) {
+ fsstat.add(latencyNanos);
+ }
+
+ public void addFsinfo(long latencyNanos) {
+ fsinfo.add(latencyNanos);
+ }
+
+ public void addPathconf(long latencyNanos) {
+ pathconf.add(latencyNanos);
+ }
+
+ public void addCommit(long latencyNanos) {
+ commit.add(latencyNanos);
+ for (MutableQuantiles q : commitNanosQuantiles) {
+ q.add(latencyNanos);
+ }
+ }
+
+}
\ No newline at end of file
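As a usage sketch (not part of the patch), this is roughly how a caller records NFS gateway activity with the new class; the "Nfs3Demo" name is made up, and the quantile windows come from the nfs.metrics.percentiles.intervals key added above (empty, i.e. disabled, by default):

    import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
    import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Metrics;
    import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public class Nfs3MetricsDemo {
      public static void main(String[] args) {
        NfsConfiguration conf = new NfsConfiguration();
        DefaultMetricsSystem.initialize("Nfs3Demo");       // mirrors what createRpcProgramNfs3() does
        Nfs3Metrics metrics = Nfs3Metrics.create(conf, "Nfs3Demo");

        long start = System.nanoTime();
        // ... serve a READ request here ...
        metrics.incrBytesRead(4096);
        metrics.addRead(Nfs3Utils.getElapsedTime(start));  // updates the rate and any configured quantiles
      }
    }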
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index 50e83ed4faeb7..cc17394197a5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -213,4 +213,8 @@ public static byte[] longToByte(long v) {
data[7] = (byte) (v >>> 0);
return data;
}
+
+ public static long getElapsedTime(long startTimeNano) {
+ return System.nanoTime() - startTimeNano;
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index b31baf58f5d03..a06d1c5c02b35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -129,9 +129,8 @@ static class CommitCtx {
private final Channel channel;
private final int xid;
private final Nfs3FileAttributes preOpAttr;
-
- // Remember time for debug purpose
- private final long startTime;
+
+ public final long startTime;
long getOffset() {
return offset;
@@ -159,7 +158,7 @@ long getStartTime() {
this.channel = channel;
this.xid = xid;
this.preOpAttr = preOpAttr;
- this.startTime = Time.monotonicNow();
+ this.startTime = System.nanoTime();
}
@Override
@@ -687,6 +686,8 @@ private void receivedNewWriteInternal(DFSClient dfsClient,
WccData fileWcc = new WccData(preOpAttr, latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
+ RpcProgramNfs3.metrics.addWrite(Nfs3Utils
+ .getElapsedTime(writeCtx.startTime));
Nfs3Utils
.writeChannel(channel, response.serialize(new XDR(),
xid, new VerifierNone()), xid);
@@ -1131,14 +1132,16 @@ private void processCommits(long offset) {
COMMIT3Response response = new COMMIT3Response(status, wccData,
Nfs3Constant.WRITE_COMMIT_VERF);
+ RpcProgramNfs3.metrics.addCommit(Nfs3Utils
+ .getElapsedTime(commit.startTime));
Nfs3Utils.writeChannelCommit(commit.getChannel(), response
.serialize(new XDR(), commit.getXid(),
new VerifierNone()), commit.getXid());
if (LOG.isDebugEnabled()) {
LOG.debug("FileId: " + latestAttr.getFileId() + " Service time:"
- + (Time.monotonicNow() - commit.getStartTime())
- + "ms. Sent response for commit:" + commit);
+ + Nfs3Utils.getElapsedTime(commit.startTime)
+ + "ns. Sent response for commit:" + commit);
}
entry = pendingCommits.firstEntry();
}
@@ -1162,6 +1165,7 @@ private void doSingleWrite(final WriteCtx writeCtx) {
// The write is not protected by lock. asyncState is used to make sure
// there is one thread doing write back at any time
writeCtx.writeData(fos);
+ RpcProgramNfs3.metrics.incrBytesWritten(writeCtx.getCount());
long flushedOffset = getFlushedOffset();
if (flushedOffset != (offset + count)) {
@@ -1213,6 +1217,7 @@ private void doSingleWrite(final WriteCtx writeCtx) {
}
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
+ RpcProgramNfs3.metrics.addWrite(Nfs3Utils.getElapsedTime(writeCtx.startTime));
Nfs3Utils.writeChannel(channel, response.serialize(
new XDR(), xid, new VerifierNone()), xid);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index aaac797b5311f..148d4f7630b91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -48,6 +48,8 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.net.DNS;
import org.apache.hadoop.nfs.AccessPrivilege;
import org.apache.hadoop.nfs.NfsExports;
import org.apache.hadoop.nfs.NfsFileType;
@@ -164,6 +166,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
private final RpcCallCache rpcCallCache;
private JvmPauseMonitor pauseMonitor;
private Nfs3HttpServer infoServer = null;
+ static Nfs3Metrics metrics;
public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket,
boolean allowInsecurePorts) throws IOException {
@@ -209,6 +212,17 @@ public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket
infoServer = new Nfs3HttpServer(config);
}
+ public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
+ DatagramSocket registrationSocket, boolean allowInsecurePorts)
+ throws IOException {
+ DefaultMetricsSystem.initialize("Nfs3");
+ String displayName = DNS.getDefaultHost("default", "default")
+ + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
+ NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
+ metrics = Nfs3Metrics.create(config, displayName);
+ return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
+ }
+
private void clearDirectory(String writeDumpDir) throws IOException {
File dumpDir = new File(writeDumpDir);
if (dumpDir.exists()) {
@@ -225,10 +239,11 @@ private void clearDirectory(String writeDumpDir) throws IOException {
}
@Override
- public void startDaemons() {
+ public void startDaemons() {
if (pauseMonitor == null) {
pauseMonitor = new JvmPauseMonitor(config);
pauseMonitor.start();
+ metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
}
writeManager.startAsyncDataSerivce();
try {
@@ -770,6 +785,7 @@ READ3Response read(XDR xdr, SecurityHandler securityHandler,
try {
readCount = fis.read(offset, readbuffer, 0, count);
+ metrics.incrBytesRead(readCount);
} catch (IOException e) {
// TODO: A cleaner way is to throw a new type of exception
// which requires incompatible changes.
@@ -2049,8 +2065,8 @@ COMMIT3Response commit(XDR xdr, Channel channel, int xid,
: (request.getOffset() + request.getCount());
// Insert commit as an async request
- writeManager.handleCommit(dfsClient, handle, commitOffset,
- channel, xid, preOpAttr);
+ writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid,
+ preOpAttr);
return null;
} catch (IOException e) {
LOG.warn("Exception ", e);
@@ -2132,20 +2148,29 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
}
}
}
-
+
+ // Since write and commit could be async, they use their own startTime and
+ // only record successful requests.
+ final long startTime = System.nanoTime();
+
NFS3Response response = null;
if (nfsproc3 == NFSPROC3.NULL) {
response = nullProcedure();
} else if (nfsproc3 == NFSPROC3.GETATTR) {
response = getattr(xdr, info);
+ metrics.addGetattr(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.SETATTR) {
response = setattr(xdr, info);
+ metrics.addSetattr(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.LOOKUP) {
response = lookup(xdr, info);
+ metrics.addLookup(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.ACCESS) {
response = access(xdr, info);
+ metrics.addAccess(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.READLINK) {
response = readlink(xdr, info);
+ metrics.addReadlink(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.READ) {
if (LOG.isDebugEnabled()) {
LOG.debug(Nfs3Utils.READ_RPC_START + xid);
@@ -2154,6 +2179,7 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
LOG.debug(Nfs3Utils.READ_RPC_END + xid);
}
+ metrics.addRead(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.WRITE) {
if (LOG.isDebugEnabled()) {
LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
@@ -2162,30 +2188,43 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
// Write end debug trace is in Nfs3Utils.writeChannel
} else if (nfsproc3 == NFSPROC3.CREATE) {
response = create(xdr, info);
+ metrics.addCreate(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.MKDIR) {
response = mkdir(xdr, info);
+ metrics.addMkdir(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.SYMLINK) {
response = symlink(xdr, info);
+ metrics.addSymlink(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.MKNOD) {
response = mknod(xdr, info);
+ metrics.addMknod(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.REMOVE) {
response = remove(xdr, info);
+ metrics.addRemove(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.RMDIR) {
response = rmdir(xdr, info);
+ metrics.addRmdir(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.RENAME) {
response = rename(xdr, info);
+ metrics.addRename(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.LINK) {
response = link(xdr, info);
+ metrics.addLink(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.READDIR) {
response = readdir(xdr, info);
+ metrics.addReaddir(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.READDIRPLUS) {
response = readdirplus(xdr, info);
+ metrics.addReaddirplus(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.FSSTAT) {
response = fsstat(xdr, info);
+ metrics.addFsstat(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.FSINFO) {
response = fsinfo(xdr, info);
+ metrics.addFsinfo(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.PATHCONF) {
- response = pathconf(xdr,info);
+ response = pathconf(xdr, info);
+ metrics.addPathconf(Nfs3Utils.getElapsedTime(startTime));
} else if (nfsproc3 == NFSPROC3.COMMIT) {
response = commit(xdr, info);
} else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
index 758fd3998b868..82c826fda1eef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
@@ -84,7 +84,8 @@ public int getOriginalCount() {
private long dumpFileOffset;
private volatile DataState dataState;
-
+ public final long startTime;
+
public DataState getDataState() {
return dataState;
}
@@ -235,6 +236,7 @@ void setReplied(boolean replied) {
this.replied = replied;
this.dataState = dataState;
raf = null;
+ this.startTime = System.nanoTime();
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
index e71eaa51488d4..df02e04fb6bd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
@@ -224,6 +224,7 @@ int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
status = Nfs3Status.NFS3_OK;
} else {
+ // commit request triggered by read won't create pending commit obj
COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
null, 0, null, true);
switch (ret) {
@@ -260,6 +261,7 @@ int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
+ long startTime = System.nanoTime();
int status;
OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
@@ -306,9 +308,9 @@ void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
COMMIT3Response response = new COMMIT3Response(status, fileWcc,
Nfs3Constant.WRITE_COMMIT_VERF);
+ RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(startTime));
Nfs3Utils.writeChannelCommit(channel,
- response.serialize(new XDR(), xid, new VerifierNone()),
- xid);
+ response.serialize(new XDR(), xid, new VerifierNone()), xid);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
index d44e9abe68022..46dbd42f4c97b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
@@ -48,6 +48,10 @@ public static void setUp() throws Exception {
HttpConfig.Policy.HTTP_AND_HTTPS.name());
conf.set(NfsConfigKeys.NFS_HTTP_ADDRESS_KEY, "localhost:0");
conf.set(NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY, "localhost:0");
+ // Use ephemeral ports in case tests are running in parallel
+ conf.setInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0);
+ conf.setInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0);
+
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e2db1f68f44b0..5e75424063388 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -273,6 +273,8 @@ Release 2.7.0 - UNRELEASED
(Maysam Yabandeh via wang)
HDFS-7424. Add web UI for NFS gateway (brandonli)
+
+ HDFS-7449. Add metrics to NFS gateway (brandonli)
IMPROVEMENTS
From dd147e2c569c7068d3130bcafb7d4a2a0056eddb Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Thu, 11 Dec 2014 16:41:30 -0800
Subject: [PATCH 047/432] HADOOP-11389. Clean up byte to string encoding issues
in hadoop-common. Contributed by Haohui Mai.
---
.../hadoop-common/CHANGES.txt | 3 +++
.../org/apache/hadoop/conf/Configuration.java | 3 ++-
.../apache/hadoop/crypto/key/KeyProvider.java | 6 ++++--
.../crypto/key/kms/KMSClientProvider.java | 3 ++-
.../org/apache/hadoop/fs/shell/Display.java | 8 +++++---
.../org/apache/hadoop/ha/StreamPumper.java | 4 +++-
.../org/apache/hadoop/http/HtmlQuoting.java | 19 +++++++++++--------
.../org/apache/hadoop/http/HttpServer2.java | 11 +++++++----
.../apache/hadoop/io/DefaultStringifier.java | 3 ++-
.../org/apache/hadoop/io/SequenceFile.java | 4 +++-
.../apache/hadoop/io/compress/BZip2Codec.java | 5 +++--
.../hadoop/io/file/tfile/TFileDumper.java | 3 ++-
.../org/apache/hadoop/ipc/RpcConstants.java | 4 +++-
.../java/org/apache/hadoop/ipc/Server.java | 5 +++--
.../java/org/apache/hadoop/log/LogLevel.java | 3 ++-
.../metrics/ganglia/GangliaContext.java | 3 ++-
.../hadoop/metrics2/impl/MetricsConfig.java | 8 ++++----
.../apache/hadoop/metrics2/sink/FileSink.java | 12 ++++++------
.../hadoop/metrics2/sink/GraphiteSink.java | 4 +++-
.../sink/ganglia/AbstractGangliaSink.java | 3 ++-
.../org/apache/hadoop/net/TableMapping.java | 19 +++++++------------
.../AuthenticationFilterInitializer.java | 11 +++++++----
.../apache/hadoop/security/Credentials.java | 4 +++-
.../hadoop/security/LdapGroupsMapping.java | 10 +++++++---
.../apache/hadoop/security/SaslRpcServer.java | 8 +++++---
.../hadoop/security/ShellBasedIdMapping.java | 8 ++++++--
.../security/alias/JavaKeyStoreProvider.java | 3 ++-
.../hadoop/security/alias/UserProvider.java | 4 +++-
.../hadoop/tracing/SpanReceiverHost.java | 4 +++-
.../org/apache/hadoop/tracing/TraceAdmin.java | 9 ++++++---
.../apache/hadoop/util/FileBasedIPList.java | 8 +++++++-
.../apache/hadoop/util/HostsFileReader.java | 4 +++-
.../apache/hadoop/util/ReflectionUtils.java | 14 ++++++++++----
.../java/org/apache/hadoop/util/Shell.java | 9 +++++----
.../hdfs/TestDataTransferKeepalive.java | 4 +---
35 files changed, 147 insertions(+), 86 deletions(-)
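The theme of this commit is replacing platform-default charset conversions with an explicit UTF-8 charset. A small standalone sketch of the pitfall (not taken from the patch; the patch itself uses Charsets.UTF_8 rather than StandardCharsets):

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class CharsetPitfall {
      public static void main(String[] args) {
        String s = "r\u00e9sum\u00e9";                          // "résumé"
        byte[] platformDefault = s.getBytes();                  // depends on the JVM's file.encoding
        byte[] explicitUtf8 = s.getBytes(StandardCharsets.UTF_8);
        // false on a JVM whose default charset is not UTF-8 (e.g. windows-1252)
        System.out.println(Arrays.equals(platformDefault, explicitUtf8));
      }
    }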
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d923b87238b33..45f226fc4c944 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -574,6 +574,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11211. mapreduce.job.classloader.system.classes semantics should be
order-independent. (Yitong Zhou via gera)
+
+ HADOOP-11389. Clean up byte to string encoding issues in hadoop-common.
+ (wheat9)
Release 2.6.0 - 2014-11-18
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index d2c805280daa4..c71f35a08cd1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -67,6 +67,7 @@
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
+import com.google.common.base.Charsets;
import org.apache.commons.collections.map.UnmodifiableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -2263,7 +2264,7 @@ public Reader getConfResourceAsReader(String name) {
LOG.info("found resource " + name + " at " + url);
}
- return new InputStreamReader(url.openStream());
+ return new InputStreamReader(url.openStream(), Charsets.UTF_8);
} catch (Exception e) {
return null;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index dd2d5b99fb252..a0675c2f16b9b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -32,6 +32,7 @@
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -207,7 +208,8 @@ protected int addVersion() {
*/
protected byte[] serialize() throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
- JsonWriter writer = new JsonWriter(new OutputStreamWriter(buffer));
+ JsonWriter writer = new JsonWriter(
+ new OutputStreamWriter(buffer, Charsets.UTF_8));
try {
writer.beginObject();
if (cipher != null) {
@@ -251,7 +253,7 @@ protected Metadata(byte[] bytes) throws IOException {
String description = null;
Map<String, String> attributes = null;
JsonReader reader = new JsonReader(new InputStreamReader
- (new ByteArrayInputStream(bytes)));
+ (new ByteArrayInputStream(bytes), Charsets.UTF_8));
try {
reader.beginObject();
while (reader.hasNext()) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 50dd1ad239ccb..0464f5537672c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.crypto.key.kms;
import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
@@ -209,7 +210,7 @@ private static Metadata parseJSONMetadata(Map valueMap) {
}
private static void writeJson(Map map, OutputStream os) throws IOException {
- Writer writer = new OutputStreamWriter(os);
+ Writer writer = new OutputStreamWriter(os, Charsets.UTF_8);
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
index ba65cd2e3b62a..f0d7b8de4453c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
@@ -32,6 +32,7 @@
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.io.JsonEncoder;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -234,10 +235,10 @@ public int read() throws IOException {
if (!r.next(key, val)) {
return -1;
}
- byte[] tmp = key.toString().getBytes();
+ byte[] tmp = key.toString().getBytes(Charsets.UTF_8);
outbuf.write(tmp, 0, tmp.length);
outbuf.write('\t');
- tmp = val.toString().getBytes();
+ tmp = val.toString().getBytes(Charsets.UTF_8);
outbuf.write(tmp, 0, tmp.length);
outbuf.write('\n');
inbuf.reset(outbuf.getData(), outbuf.getLength());
@@ -299,7 +300,8 @@ public int read() throws IOException {
encoder.flush();
if (!fileReader.hasNext()) {
// Write a new line after the last Avro record.
- output.write(System.getProperty("line.separator").getBytes());
+ output.write(System.getProperty("line.separator")
+ .getBytes(Charsets.UTF_8));
output.flush();
}
pos = 0;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
index 8bc16af2afa99..00c6401d88d9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
@@ -22,6 +22,7 @@
import java.io.InputStream;
import java.io.InputStreamReader;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
/**
@@ -76,7 +77,8 @@ void start() {
}
protected void pump() throws IOException {
- InputStreamReader inputStreamReader = new InputStreamReader(stream);
+ InputStreamReader inputStreamReader = new InputStreamReader(
+ stream, Charsets.UTF_8);
BufferedReader br = new BufferedReader(inputStreamReader);
String line = null;
while ((line = br.readLine()) != null) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
index 99befeea6eb43..57acebd85f4a7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.http;
+import org.apache.commons.io.Charsets;
+
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
@@ -25,11 +27,11 @@
* This class is responsible for quoting HTML characters.
*/
public class HtmlQuoting {
- private static final byte[] ampBytes = "&amp;".getBytes();
- private static final byte[] aposBytes = "&apos;".getBytes();
- private static final byte[] gtBytes = "&gt;".getBytes();
- private static final byte[] ltBytes = "&lt;".getBytes();
- private static final byte[] quotBytes = "&quot;".getBytes();
+ private static final byte[] ampBytes = "&amp;".getBytes(Charsets.UTF_8);
+ private static final byte[] aposBytes = "&apos;".getBytes(Charsets.UTF_8);
+ private static final byte[] gtBytes = "&gt;".getBytes(Charsets.UTF_8);
+ private static final byte[] ltBytes = "&lt;".getBytes(Charsets.UTF_8);
+ private static final byte[] quotBytes = "&quot;".getBytes(Charsets.UTF_8);
/**
* Does the given string need to be quoted?
@@ -63,7 +65,7 @@ public static boolean needsQuoting(String str) {
if (str == null) {
return false;
}
- byte[] bytes = str.getBytes();
+ byte[] bytes = str.getBytes(Charsets.UTF_8);
return needsQuoting(bytes, 0 , bytes.length);
}
@@ -98,15 +100,16 @@ public static String quoteHtmlChars(String item) {
if (item == null) {
return null;
}
- byte[] bytes = item.getBytes();
+ byte[] bytes = item.getBytes(Charsets.UTF_8);
if (needsQuoting(bytes, 0, bytes.length)) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try {
quoteHtmlChars(buffer, bytes, 0, bytes.length);
+ return buffer.toString("UTF-8");
} catch (IOException ioe) {
// Won't happen, since it is a bytearrayoutputstream
+ return null;
}
- return buffer.toString();
} else {
return item;
}
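For reference, a quick standalone example (not from the patch) of the public HtmlQuoting helpers touched above:

    import org.apache.hadoop.http.HtmlQuoting;

    public class HtmlQuotingDemo {
      public static void main(String[] args) {
        String raw = "<b>Tom & \"Jerry\"</b>";
        System.out.println(HtmlQuoting.needsQuoting(raw));   // true
        System.out.println(HtmlQuoting.quoteHtmlChars(raw));
        // prints: &lt;b&gt;Tom &amp; &quot;Jerry&quot;&lt;/b&gt;
      }
    }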
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 45b641957077c..63a32fbc2baff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -20,6 +20,8 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
import java.io.PrintWriter;
import java.net.BindException;
import java.net.InetSocketAddress;
@@ -1065,13 +1067,14 @@ public static class StackServlet extends HttpServlet {
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
- request, response)) {
+ request, response)) {
return;
}
response.setContentType("text/plain; charset=UTF-8");
- PrintWriter out = response.getWriter();
- ReflectionUtils.printThreadInfo(out, "");
- out.close();
+ try (PrintStream out = new PrintStream(
+ response.getOutputStream(), false, "UTF-8")) {
+ ReflectionUtils.printThreadInfo(out, "");
+ }
ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
index d32d58b6008be..3ba577fc4f4de 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
@@ -23,6 +23,7 @@
import java.util.ArrayList;
import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -90,7 +91,7 @@ public String toString(T obj) throws IOException {
serializer.serialize(obj);
byte[] buf = new byte[outBuf.getLength()];
System.arraycopy(outBuf.getData(), 0, buf, 0, buf.length);
- return new String(Base64.encodeBase64(buf));
+ return new String(Base64.encodeBase64(buf), Charsets.UTF_8);
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 4cda107748208..7a59149ff0596 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -22,6 +22,8 @@
import java.util.*;
import java.rmi.server.UID;
import java.security.MessageDigest;
+
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.*;
import org.apache.hadoop.util.Options;
import org.apache.hadoop.fs.*;
@@ -849,7 +851,7 @@ public static class Writer implements java.io.Closeable, Syncable {
try {
MessageDigest digester = MessageDigest.getInstance("MD5");
long time = Time.now();
- digester.update((new UID()+"@"+time).getBytes());
+ digester.update((new UID()+"@"+time).getBytes(Charsets.UTF_8));
sync = digester.digest();
} catch (Exception e) {
throw new RuntimeException(e);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 37b97f2a641a9..91178ecdc2e62 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -23,6 +23,7 @@
import java.io.InputStream;
import java.io.OutputStream;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
@@ -281,7 +282,7 @@ private void writeStreamHeader() throws IOException {
// The compressed bzip2 stream should start with the
// identifying characters BZ. Caller of CBZip2OutputStream
// i.e. this class must write these characters.
- out.write(HEADER.getBytes());
+ out.write(HEADER.getBytes(Charsets.UTF_8));
}
}
@@ -415,7 +416,7 @@ private BufferedInputStream readStreamHeader() throws IOException {
byte[] headerBytes = new byte[HEADER_LEN];
int actualRead = bufferedIn.read(headerBytes, 0, HEADER_LEN);
if (actualRead != -1) {
- String header = new String(headerBytes);
+ String header = new String(headerBytes, Charsets.UTF_8);
if (header.compareTo(HEADER) != 0) {
bufferedIn.reset();
} else {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
index ad94c4297bd8b..aabdf57a26661 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
@@ -24,6 +24,7 @@
import java.util.Map;
import java.util.Set;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -233,7 +234,7 @@ static public void dumpInfo(String file, PrintStream out, Configuration conf)
out.printf("%X", b);
}
} else {
- out.print(new String(key, 0, sampleLen));
+ out.print(new String(key, 0, sampleLen, Charsets.UTF_8));
}
if (sampleLen < key.length) {
out.print("...");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
index c457500e902b5..d5e795b92f17c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
@@ -19,6 +19,7 @@
import java.nio.ByteBuffer;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
@@ -53,7 +54,8 @@ private RpcConstants() {
/**
* The first four bytes of Hadoop RPC connections
*/
- public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());
+ public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes
+ (Charsets.UTF_8));
public static final int HEADER_LEN_AFTER_HRPC_PART = 3; // 3 bytes that follow
// 1 : Introduce ping and server does not throw away RPCs
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index a4d669ae740ae..e508d4e01f896 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -69,6 +69,7 @@
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -182,7 +183,7 @@ boolean isTerse(Class<?> t) {
* and send back a nicer response.
*/
private static final ByteBuffer HTTP_GET_BYTES = ByteBuffer.wrap(
- "GET ".getBytes());
+ "GET ".getBytes(Charsets.UTF_8));
/**
* An HTTP response to send back if we detect an HTTP request to our IPC
@@ -1709,7 +1710,7 @@ private void setupBadVersionResponse(int clientVersion) throws IOException {
private void setupHttpRequestOnIpcPortResponse() throws IOException {
Call fakeCall = new Call(0, RpcConstants.INVALID_RETRY_COUNT, null, this);
fakeCall.setResponse(ByteBuffer.wrap(
- RECEIVED_HTTP_REQ_RESPONSE.getBytes()));
+ RECEIVED_HTTP_REQ_RESPONSE.getBytes(Charsets.UTF_8)));
responder.doRespond(fakeCall);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 77f74cc404911..4749ce19a65f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -24,6 +24,7 @@
import javax.servlet.*;
import javax.servlet.http.*;
+import com.google.common.base.Charsets;
import org.apache.commons.logging.*;
import org.apache.commons.logging.impl.*;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -66,7 +67,7 @@ private static void process(String urlstring) {
connection.connect();
BufferedReader in = new BufferedReader(new InputStreamReader(
- connection.getInputStream()));
+ connection.getInputStream(), Charsets.UTF_8));
for(String line; (line = in.readLine()) != null; )
if (line.startsWith(MARKER)) {
System.out.println(TAG.matcher(line).replaceAll(""));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
index 841874fc08e34..0e707780c4de2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
@@ -29,6 +29,7 @@
import java.util.List;
import java.util.Map;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -225,7 +226,7 @@ protected int getDmax(String metricName) {
* a multiple of 4.
*/
protected void xdr_string(String s) {
- byte[] bytes = s.getBytes();
+ byte[] bytes = s.getBytes(Charsets.UTF_8);
int len = bytes.length;
xdr_int(len);
System.arraycopy(bytes, 0, buffer, offset, len);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index e4b5580536bb5..167205e93e3c2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -269,14 +269,14 @@ public String toString() {
static String toString(Configuration c) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
- PrintStream ps = new PrintStream(buffer);
- PropertiesConfiguration tmp = new PropertiesConfiguration();
- tmp.copy(c);
try {
+ PrintStream ps = new PrintStream(buffer, false, "UTF-8");
+ PropertiesConfiguration tmp = new PropertiesConfiguration();
+ tmp.copy(c);
tmp.save(ps);
+ return buffer.toString("UTF-8");
} catch (Exception e) {
throw new MetricsConfigException(e);
}
- return buffer.toString();
}
}
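The MetricsConfig hunk above constructs the PrintStream inside the try block because the encoding-aware constructor can throw, and it decodes the buffer with the same charset it wrote. A minimal standalone sketch of that pattern, with hypothetical names and not taken from the patch:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;

public class Utf8DumpSketch {
  // Renders key/value text into a String using an explicit charset, so the
  // result does not depend on the JVM's platform default encoding.
  static String dump(String key, String value) {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try {
      PrintStream ps = new PrintStream(buffer, false, "UTF-8"); // may throw
      ps.println(key + " = " + value);
      ps.flush();
      return buffer.toString("UTF-8"); // decode with the same charset
    } catch (UnsupportedEncodingException e) {
      throw new IllegalStateException("UTF-8 is always supported", e);
    }
  }

  public static void main(String[] args) {
    System.out.print(dump("sink.file.filename", "metrics.out"));
  }
}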
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
index d1364160e2d67..ab121bcf67f6b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
@@ -20,9 +20,9 @@
import java.io.Closeable;
import java.io.File;
-import java.io.FileWriter;
+import java.io.FileOutputStream;
import java.io.IOException;
-import java.io.PrintWriter;
+import java.io.PrintStream;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -40,15 +40,15 @@
@InterfaceStability.Evolving
public class FileSink implements MetricsSink, Closeable {
private static final String FILENAME_KEY = "filename";
- private PrintWriter writer;
+ private PrintStream writer;
@Override
public void init(SubsetConfiguration conf) {
String filename = conf.getString(FILENAME_KEY);
try {
- writer = filename == null
- ? new PrintWriter(System.out)
- : new PrintWriter(new FileWriter(new File(filename), true));
+ writer = filename == null ? System.out
+ : new PrintStream(new FileOutputStream(new File(filename)),
+ true, "UTF-8");
} catch (Exception e) {
throw new MetricsException("Error creating "+ filename, e);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
index 9bc3f15d97e91..e72fe248449b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
@@ -25,6 +25,7 @@
import java.net.Socket;
import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -64,7 +65,8 @@ public void init(SubsetConfiguration conf) {
try {
// Open an connection to Graphite server.
socket = new Socket(serverHost, serverPort);
- writer = new OutputStreamWriter(socket.getOutputStream());
+ writer = new OutputStreamWriter(
+ socket.getOutputStream(), Charsets.UTF_8);
} catch (Exception e) {
throw new MetricsException("Error creating connection, "
+ serverHost + ":" + serverPort, e);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
index b3581f9e8d9bd..164ea085d71c8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
@@ -29,6 +29,7 @@
import java.util.Map;
import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsSink;
@@ -223,7 +224,7 @@ protected String getHostName() {
* @param s the string to be written to buffer at offset location
*/
protected void xdr_string(String s) {
- byte[] bytes = s.getBytes();
+ byte[] bytes = s.getBytes(Charsets.UTF_8);
int len = bytes.length;
xdr_int(len);
System.arraycopy(bytes, 0, buffer, offset, len);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
index 2662108124d60..59c0ca96750a2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
@@ -20,13 +20,16 @@
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;
import java.io.BufferedReader;
+import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
+import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import org.apache.commons.io.Charsets;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -96,9 +99,10 @@ private Map load() {
return null;
}
- BufferedReader reader = null;
- try {
- reader = new BufferedReader(new FileReader(filename));
+
+ try (BufferedReader reader =
+ new BufferedReader(new InputStreamReader(
+ new FileInputStream(filename), Charsets.UTF_8))) {
String line = reader.readLine();
while (line != null) {
line = line.trim();
@@ -115,15 +119,6 @@ private Map load() {
} catch (Exception e) {
LOG.warn(filename + " cannot be read.", e);
return null;
- } finally {
- if (reader != null) {
- try {
- reader.close();
- } catch (IOException e) {
- LOG.warn(filename + " cannot be read.", e);
- return null;
- }
- }
}
return loadMap;
}
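The TableMapping hunk swaps FileReader, which uses the platform default charset, for an explicit UTF-8 InputStreamReader and lets try-with-resources replace the manual finally block. A minimal sketch of the same idiom, assuming a plain text file of whitespace-separated host/rack pairs; StandardCharsets stands in for the commons-io Charsets class used by the patch:

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class TopologyFileSketch {
  // Reads "host rack" lines with an explicit charset; the reader is closed
  // automatically even if readLine() throws.
  static Map<String, String> load(String filename) throws IOException {
    Map<String, String> map = new HashMap<String, String>();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
        new FileInputStream(filename), StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        String[] fields = line.trim().split("\\s+");
        if (fields.length == 2) {
          map.put(fields[0], fields[1]);
        }
      }
    }
    return map;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(load(args[0]));
  }
}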
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 4fb9e45614133..43d1b66d44f6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.security;
+import com.google.common.base.Charsets;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.conf.Configuration;
@@ -24,8 +25,10 @@
import org.apache.hadoop.http.FilterInitializer;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
+import java.io.InputStreamReader;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
@@ -78,10 +81,10 @@ public void initFilter(FilterContainer container, Configuration conf) {
if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);
}
-
- try {
- StringBuilder secret = new StringBuilder();
- Reader reader = new FileReader(signatureSecretFile);
+
+ StringBuilder secret = new StringBuilder();
+ try (Reader reader = new InputStreamReader(
+ new FileInputStream(signatureSecretFile), Charsets.UTF_8)) {
int c = reader.read();
while (c > -1) {
secret.append((char)c);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
index b81e810f191c7..e6b8722c3558c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
@@ -32,6 +32,7 @@
import java.util.List;
import java.util.Map;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -217,7 +218,8 @@ public void readTokenStorageStream(DataInputStream in) throws IOException {
readFields(in);
}
- private static final byte[] TOKEN_STORAGE_MAGIC = "HDTS".getBytes();
+ private static final byte[] TOKEN_STORAGE_MAGIC =
+ "HDTS".getBytes(Charsets.UTF_8);
private static final byte TOKEN_STORAGE_VERSION = 0;
public void writeTokenStorageToStream(DataOutputStream os)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index e72d988bdf0b2..c0c8d2b64f598 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -17,8 +17,10 @@
*/
package org.apache.hadoop.security;
+import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
+import java.io.InputStreamReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Hashtable;
@@ -34,6 +36,7 @@
import javax.naming.directory.SearchControls;
import javax.naming.directory.SearchResult;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -366,9 +369,10 @@ String extractPassword(String pwFile) {
// an anonymous bind
return "";
}
-
- try (Reader reader = new FileReader(pwFile)) {
- StringBuilder password = new StringBuilder();
+
+ StringBuilder password = new StringBuilder();
+ try (Reader reader = new InputStreamReader(
+ new FileInputStream(pwFile), Charsets.UTF_8)) {
int c = reader.read();
while (c > -1) {
password.append((char)c);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
index 83f46efd6e456..f2b21e851bbc2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
@@ -44,6 +44,7 @@
import javax.security.sasl.SaslServerFactory;
import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -184,11 +185,11 @@ public static void init(Configuration conf) {
}
static String encodeIdentifier(byte[] identifier) {
- return new String(Base64.encodeBase64(identifier));
+ return new String(Base64.encodeBase64(identifier), Charsets.UTF_8);
}
static byte[] decodeIdentifier(String identifier) {
- return Base64.decodeBase64(identifier.getBytes());
+ return Base64.decodeBase64(identifier.getBytes(Charsets.UTF_8));
}
public static <T extends TokenIdentifier> T getIdentifier(String id,
@@ -206,7 +207,8 @@ public static T getIdentifier(String id,
}
static char[] encodePassword(byte[] password) {
- return new String(Base64.encodeBase64(password)).toCharArray();
+ return new String(Base64.encodeBase64(password),
+ Charsets.UTF_8).toCharArray();
}
/** Splitting fully qualified Kerberos name into parts */
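The SaslRpcServer hunk pins the charset used around Base64 text; Base64 output is ASCII, so any fixed charset round-trips it, and the point is only to avoid the platform-default getBytes() and new String(). A standalone sketch using the JDK codec instead of commons-codec, purely for illustration:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Base64;

public class IdentifierCodecSketch {
  // Encode raw token bytes to a String and back with an explicit charset.
  static String encodeIdentifier(byte[] identifier) {
    return new String(Base64.getEncoder().encode(identifier),
        StandardCharsets.UTF_8);
  }

  static byte[] decodeIdentifier(String identifier) {
    return Base64.getDecoder().decode(
        identifier.getBytes(StandardCharsets.UTF_8));
  }

  public static void main(String[] args) {
    byte[] raw = {1, 2, 3, 4};
    // Prints true: the round trip preserves the bytes regardless of locale.
    System.out.println(
        Arrays.equals(raw, decodeIdentifier(encodeIdentifier(raw))));
  }
}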
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
index e152d46c49d54..e995cb6cc6f55 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
@@ -22,11 +22,13 @@
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -217,7 +219,7 @@ public static boolean updateMapInternal(BiMap<Integer, Integer> map,
try {
Process process = Runtime.getRuntime().exec(
new String[] { "bash", "-c", command });
- br = new BufferedReader(new InputStreamReader(process.getInputStream()));
+ br = new BufferedReader(
+ new InputStreamReader(process.getInputStream(),
+ Charset.defaultCharset()));
String line = null;
while ((line = br.readLine()) != null) {
String[] nameId = line.split(regex);
@@ -552,7 +556,7 @@ static StaticMapping parseStaticMap(File staticMapFile)
Map<Integer, Integer> gidMapping = new HashMap<Integer, Integer>();
BufferedReader in = new BufferedReader(new InputStreamReader(
- new FileInputStream(staticMapFile)));
+ new FileInputStream(staticMapFile), Charsets.UTF_8));
try {
String line = null;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
index 5dc2abfd13ff6..05958a058a3c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.security.alias;
+import org.apache.commons.io.Charsets;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -165,7 +166,7 @@ public CredentialEntry getCredentialEntry(String alias) throws IOException {
}
public static char[] bytesToChars(byte[] bytes) {
- String pass = new String(bytes);
+ String pass = new String(bytes, Charsets.UTF_8);
return pass.toCharArray();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
index 262cbadd71ae7..127ccf005d8ca 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
@@ -23,6 +23,7 @@
import java.util.ArrayList;
import java.util.List;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
@@ -56,7 +57,8 @@ public synchronized CredentialEntry getCredentialEntry(String alias) {
if (bytes == null) {
return null;
}
- return new CredentialEntry(alias, new String(bytes).toCharArray());
+ return new CredentialEntry(
+ alias, new String(bytes, Charsets.UTF_8).toCharArray());
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
index 81993e9af2b81..f461dacab2ac7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
@@ -31,6 +31,7 @@
import java.util.TreeMap;
import java.util.UUID;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -100,7 +101,8 @@ private static String getUniqueLocalTraceFileName() {
// out of /proc/self/stat. (There isn't any portable way to get the
// process ID from Java.)
reader = new BufferedReader(
- new InputStreamReader(new FileInputStream("/proc/self/stat")));
+ new InputStreamReader(new FileInputStream("/proc/self/stat"),
+ Charsets.UTF_8));
String line = reader.readLine();
if (line == null) {
throw new EOFException();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
index 4ae5aedccfa6c..5fdfbfadd2d92 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
@@ -25,6 +25,7 @@
import java.util.LinkedList;
import java.util.List;
+import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -91,7 +92,7 @@ private int addSpanReceiver(List<String> args) throws IOException {
return 1;
}
ByteArrayOutputStream configStream = new ByteArrayOutputStream();
- PrintStream configsOut = new PrintStream(configStream);
+ PrintStream configsOut = new PrintStream(configStream, false, "UTF-8");
SpanReceiverInfoBuilder factory = new SpanReceiverInfoBuilder(className);
String prefix = "";
for (int i = 0; i < args.size(); ++i) {
@@ -113,13 +114,15 @@ private int addSpanReceiver(List args) throws IOException {
configsOut.print(prefix + key + " = " + value);
prefix = ", ";
}
+
+ String configStreamStr = configStream.toString("UTF-8");
try {
long id = remote.addSpanReceiver(factory.build());
System.out.println("Added trace span receiver " + id +
- " with configuration " + configStream.toString());
+ " with configuration " + configStreamStr);
} catch (IOException e) {
System.out.println("addSpanReceiver error with configuration " +
- configStream.toString());
+ configStreamStr);
throw e;
}
return 0;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
index 8bfb5d93aef88..8020b7a10fbab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
@@ -19,13 +19,18 @@
import java.io.BufferedReader;
import java.io.File;
+import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
+import org.apache.commons.io.Charsets;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -78,7 +83,8 @@ private static String[] readLines(String fileName) {
if (fileName != null) {
File file = new File (fileName);
if (file.exists()) {
- FileReader fileReader = new FileReader(file);
+ Reader fileReader = new InputStreamReader(
+ new FileInputStream(file), Charsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(fileReader);
List<String> lines = new ArrayList<String>();
String line = null;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index b012add42c5fb..ae77e6c33335f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -22,6 +22,7 @@
import java.util.Set;
import java.util.HashSet;
+import org.apache.commons.io.Charsets;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -72,7 +73,8 @@ public static void readFileToSetWithFileInputStream(String type,
throws IOException {
BufferedReader reader = null;
try {
- reader = new BufferedReader(new InputStreamReader(fileInputStream));
+ reader = new BufferedReader(
+ new InputStreamReader(fileInputStream, Charsets.UTF_8));
String line;
while ((line = reader.readLine()) != null) {
String[] nodes = line.split("[ \t\n\f\r]+");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
index 3977e60287a4a..d9a73263d8521 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
@@ -20,13 +20,16 @@
import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.PrintStream;
import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
+import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -154,7 +157,7 @@ private static String getTaskName(long id, String name) {
* @param stream the stream to
* @param title a string title for the stack trace
*/
- public synchronized static void printThreadInfo(PrintWriter stream,
+ public synchronized static void printThreadInfo(PrintStream stream,
String title) {
final int STACK_DEPTH = 20;
boolean contention = threadBean.isThreadContentionMonitoringEnabled();
@@ -215,9 +218,12 @@ public static void logThreadInfo(Log log,
}
}
if (dumpStack) {
- ByteArrayOutputStream buffer = new ByteArrayOutputStream();
- printThreadInfo(new PrintWriter(buffer), title);
- log.info(buffer.toString());
+ try {
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+ printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title);
+ log.info(buffer.toString(Charset.defaultCharset().name()));
+ } catch (UnsupportedEncodingException ignored) {
+ }
}
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index a44e99212674d..f0100d440ab35 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -22,6 +22,7 @@
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.InputStream;
+import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.Map;
import java.util.Timer;
@@ -493,11 +494,11 @@ private void runCommand() throws IOException {
timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
}
final BufferedReader errReader =
- new BufferedReader(new InputStreamReader(process
- .getErrorStream()));
+ new BufferedReader(new InputStreamReader(
+ process.getErrorStream(), Charset.defaultCharset()));
BufferedReader inReader =
- new BufferedReader(new InputStreamReader(process
- .getInputStream()));
+ new BufferedReader(new InputStreamReader(
+ process.getInputStream(), Charset.defaultCharset()));
final StringBuffer errMsg = new StringBuffer();
// read error and input streams as this would free up the buffers
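Unlike the file and buffer cases, the Shell hunk keeps Charset.defaultCharset() on purpose: a child process writes its stdout and stderr in the platform encoding. A minimal sketch of reading a subprocess that way, with a hypothetical command:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;

public class ProcessOutputSketch {
  public static void main(String[] args)
      throws IOException, InterruptedException {
    // The child emits bytes in the platform encoding, so decode with
    // Charset.defaultCharset() rather than forcing UTF-8.
    Process process = new ProcessBuilder("hostname").start();
    try (BufferedReader out = new BufferedReader(new InputStreamReader(
        process.getInputStream(), Charset.defaultCharset()))) {
      String line;
      while ((line = out.readLine()) != null) {
        System.out.println(line);
      }
    }
    process.waitFor();
  }
}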
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index eae8ea7681bd7..08aa2c9bb88ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -245,9 +245,7 @@ public void testManyClosedSocketsInCache() throws Exception {
private void assertXceiverCount(int expected) {
int count = getXceiverCountWithoutServer();
if (count != expected) {
- ReflectionUtils.printThreadInfo(
- new PrintWriter(System.err),
- "Thread dumps");
+ ReflectionUtils.printThreadInfo(System.err, "Thread dumps");
fail("Expected " + expected + " xceivers, found " +
count);
}
From 5c4848d49ef0213eb5eeb0b87dee47309c99c5ad Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Thu, 11 Dec 2014 18:12:47 -0800
Subject: [PATCH 048/432] HDFS-7497. Inconsistent report of decommissioning
DataNodes between dfsadmin and NameNode webui. Contributed by Yongjun Zhang.
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hdfs/server/blockmanagement/DatanodeManager.java | 12 ++----------
.../server/namenode/TestDecommissioningStatus.java | 7 ++++++-
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5e75424063388..5977ed79d2290 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -576,6 +576,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7515. Fix new findbugs warnings in hadoop-hdfs. (wheat9)
+ HDFS-7497. Inconsistent report of decommissioning DataNodes between
+ dfsadmin and NameNode webui. (Yongjun Zhang via wang)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 356a4a3cf0dac..0ff469a8c8254 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1112,16 +1112,8 @@ public int getNumDeadDataNodes() {
public List<DatanodeDescriptor> getDecommissioningNodes() {
// There is no need to take namesystem reader lock as
// getDatanodeListForReport will synchronize on datanodeMap
- final List<DatanodeDescriptor> decommissioningNodes
- = new ArrayList<DatanodeDescriptor>();
- final List<DatanodeDescriptor> results = getDatanodeListForReport(
- DatanodeReportType.LIVE);
- for(DatanodeDescriptor node : results) {
- if (node.isDecommissionInProgress()) {
- decommissioningNodes.add(node);
- }
- }
- return decommissioningNodes;
+ // A decommissioning DN may be "alive" or "dead".
+ return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING);
}
/* Getter and Setter for stale DataNodes related attributes */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 28f5eb497b203..a9aba864e94e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -239,10 +239,10 @@ private void checkDFSAdminDecommissionStatus(
System.setOut(oldOut);
}
}
+
/**
* Tests Decommissioning Status in DFS.
*/
-
@Test
public void testDecommissionStatus() throws IOException, InterruptedException {
InetSocketAddress addr = new InetSocketAddress("localhost", cluster
@@ -351,6 +351,11 @@ public void testDecommissionStatusAfterDNRestart()
assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
dead.get(0).isDecommissionInProgress());
+ // Check DatanodeManager#getDecommissioningNodes, make sure it returns
+ // the node as decommissioning, even if it's dead
+ List<DatanodeDescriptor> decomlist = dm.getDecommissioningNodes();
+ assertTrue("The node should be decommissioning", decomlist.size() == 1);
+
// Delete the under-replicated file, which should let the
// DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
cleanupFile(fileSys, f);
From 221d5b357f550ffe358bd760b5f99de12fb874d1 Mon Sep 17 00:00:00 2001
From: Devaraj K
Date: Fri, 12 Dec 2014 11:42:03 +0530
Subject: [PATCH 049/432] MAPREDUCE-6046. Change the class name for logs in
RMCommunicator. Contributed by Sahil Takiar.
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index bbab097b9c0f7..ee2485766db4a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -241,6 +241,9 @@ Release 2.7.0 - UNRELEASED
MAPREDUCE-5932. Provide an option to use a dedicated reduce-side shuffle
log (Gera Shegalov via jlowe)
+ MAPREDUCE-6046. Change the class name for logs in RMCommunicator
+ (Sahil Takiar via devaraj)
+
OPTIMIZATIONS
MAPREDUCE-6169. MergeQueue should release reference to the current item
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index cd4e272e60f38..5d4fa12ead2a6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -67,7 +67,7 @@
*/
public abstract class RMCommunicator extends AbstractService
implements RMHeartbeatHandler {
- private static final Log LOG = LogFactory.getLog(RMContainerAllocator.class);
+ private static final Log LOG = LogFactory.getLog(RMCommunicator.class);
private int rmPollInterval;//millis
protected ApplicationId applicationId;
private final AtomicBoolean stopped;
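The one-line fix above addresses the usual copy-paste hazard with per-class loggers. A minimal sketch of the convention the patch restores, with a hypothetical class name:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class RpcCommunicatorSketch {
  // Pass this class to getLog() so log lines are attributed to it rather than
  // to whichever class the declaration was copied from.
  private static final Log LOG =
      LogFactory.getLog(RpcCommunicatorSketch.class);

  void heartbeat() {
    LOG.info("sending heartbeat");
  }

  public static void main(String[] args) {
    new RpcCommunicatorSketch().heartbeat();
  }
}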
From d2a7f162ff41ccb832ee45319b0690efc61caeb5 Mon Sep 17 00:00:00 2001
From: Devaraj K
Date: Fri, 12 Dec 2014 12:34:43 +0530
Subject: [PATCH 050/432] YARN-2243. Order of arguments for
Preconditions.checkNotNull() is wrong in SchedulerApplicationAttempt ctor.
Contributed by Devaraj K.
---
hadoop-yarn-project/CHANGES.txt | 3 +++
.../resourcemanager/scheduler/SchedulerApplicationAttempt.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3432f6f8bbabd..58d28b8ad6ecf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -217,6 +217,9 @@ Release 2.7.0 - UNRELEASED
YARN-2917. Fixed potential deadlock when system.exit is called in AsyncDispatcher
(Rohith Sharmaks via jianhe)
+ YARN-2243. Order of arguments for Preconditions.checkNotNull() is wrong in
+ SchedulerApplicationAttempt ctor. (devaraj)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 84975b6b84e93..d5b6ce6698736 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -125,7 +125,7 @@ public class SchedulerApplicationAttempt {
public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId,
String user, Queue queue, ActiveUsersManager activeUsersManager,
RMContext rmContext) {
- Preconditions.checkNotNull("RMContext should not be null", rmContext);
+ Preconditions.checkNotNull(rmContext, "RMContext should not be null");
this.rmContext = rmContext;
this.appSchedulingInfo =
new AppSchedulingInfo(applicationAttemptId, user, queue,
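Guava's Preconditions.checkNotNull takes the reference first and the message second; with the arguments swapped, the message literal is what gets null-checked and a null reference slips through. A minimal sketch, assuming Guava on the classpath and hypothetical names:

import com.google.common.base.Preconditions;

public class NullCheckSketch {
  private final Object rmContextLike;

  public NullCheckSketch(Object rmContextLike) {
    // Value under test first, message second; reversing them validates the
    // string literal instead of the reference.
    this.rmContextLike = Preconditions.checkNotNull(
        rmContextLike, "RMContext should not be null");
  }

  public Object get() {
    return rmContextLike;
  }

  public static void main(String[] args) {
    System.out.println(new NullCheckSketch(new Object()).get()); // fine
    new NullCheckSketch(null); // throws NullPointerException with the message
  }
}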
From 017d60cb55bcf553ecebaf1a3a30cc2281846d5e Mon Sep 17 00:00:00 2001
From: Steve Loughran
Date: Fri, 12 Dec 2014 17:10:54 +0000
Subject: [PATCH 051/432] YARN-2912 Jersey Tests failing with port in use.
(varun saxena via stevel)
---
hadoop-yarn-project/CHANGES.txt | 2 +
.../hadoop-yarn/hadoop-yarn-common/pom.xml | 5 +++
.../hadoop/yarn/webapp/JerseyTestBase.java | 42 +++++++++++++++++++
.../webapp/TestAHSWebServices.java | 4 +-
.../webapp/TestTimelineWebServices.java | 4 +-
.../nodemanager/webapp/TestNMWebServices.java | 4 +-
.../webapp/TestNMWebServicesApps.java | 4 +-
.../webapp/TestNMWebServicesContainers.java | 4 +-
.../webapp/TestRMWebServices.java | 4 +-
.../webapp/TestRMWebServicesApps.java | 4 +-
.../TestRMWebServicesAppsModification.java | 4 +-
.../TestRMWebServicesCapacitySched.java | 4 +-
.../TestRMWebServicesDelegationTokens.java | 3 +-
.../TestRMWebServicesFairScheduler.java | 4 +-
.../webapp/TestRMWebServicesNodeLabels.java | 4 +-
.../webapp/TestRMWebServicesNodes.java | 4 +-
16 files changed, 75 insertions(+), 25 deletions(-)
create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 58d28b8ad6ecf..cd0bf7c55eb12 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -220,6 +220,8 @@ Release 2.7.0 - UNRELEASED
YARN-2243. Order of arguments for Preconditions.checkNotNull() is wrong in
SchedulerApplicationAttempt ctor. (devaraj)
+ YARN-2912 Jersey Tests failing with port in use. (varun saxena via stevel)
+
Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 3adfe8b788174..2301399c3557f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -142,6 +142,11 @@
      <artifactId>junit</artifactId>
      <scope>test</scope>
    </dependency>
+   <dependency>
+     <groupId>com.sun.jersey.jersey-test-framework</groupId>
+     <artifactId>jersey-test-framework-grizzly2</artifactId>
+     <scope>test</scope>
+   </dependency>
    <dependency>
      <groupId>commons-io</groupId>
      <artifactId>commons-io</artifactId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
new file mode 100644
index 0000000000000..0b177f91d665f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.webapp;
+
+import org.junit.Before;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+public abstract class JerseyTestBase extends JerseyTest {
+ public JerseyTestBase(WebAppDescriptor appDescriptor) {
+ super(appDescriptor);
+ }
+
+ @Before
+ public void initializeJerseyPort() {
+ int jerseyPort = 9998;
+ String port = System.getProperty("jersey.test.port");
+ if(null != port) {
+ jerseyPort = Integer.parseInt(port) + 10;
+ if(jerseyPort > 65535) {
+ jerseyPort = 9998;
+ }
+ }
+ System.setProperty("jersey.test.port", Integer.toString(jerseyPort));
+ }
+}
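The test diffs that follow consume this base class by swapping their JerseyTest superclass for JerseyTestBase, so successive Jersey suites in one JVM steer jersey.test.port to different values. A hypothetical subclass, not one of the patched tests and omitting the Guice wiring those tests configure, just to show the shape:

package org.example.webapp;

import com.sun.jersey.test.framework.WebAppDescriptor;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.junit.Assert;
import org.junit.Test;

public class TestExampleWebServices extends JerseyTestBase {
  public TestExampleWebServices() {
    // Package to scan for JAX-RS resources; purely illustrative.
    super(new WebAppDescriptor.Builder("org.example.webapp").build());
  }

  @Test
  public void testResourceIsReachable() throws Exception {
    // resource() comes from JerseyTest and targets the port selected by the
    // base class for this suite.
    String body = resource().path("example").get(String.class);
    Assert.assertNotNull(body);
  }
}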
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
index 76bf8c3c75594..41dda91479901 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.codehaus.jettison.json.JSONArray;
@@ -73,11 +74,10 @@
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
@RunWith(Parameterized.class)
-public class TestAHSWebServices extends JerseyTest {
+public class TestAHSWebServices extends JerseyTestBase {
private static ApplicationHistoryManagerOnTimelineStore historyManager;
private static final String[] USERS = new String[] { "foo" , "bar" };
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
index fe2ed5c8b9772..7e96d2a36465a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
@@ -60,6 +60,7 @@
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.junit.Assert;
import org.junit.Test;
@@ -72,10 +73,9 @@
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestTimelineWebServices extends JerseyTest {
+public class TestTimelineWebServices extends JerseyTestBase {
private static TimelineStore store;
private static TimelineACLsManager timelineACLsManager;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
index 61bdf1036c467..7caad4ad1ea01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
@@ -57,6 +57,7 @@
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONException;
@@ -78,13 +79,12 @@
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
* Test the nodemanager node info web services api's
*/
-public class TestNMWebServices extends JerseyTest {
+public class TestNMWebServices extends JerseyTestBase {
private static Context nmContext;
private static ResourceView resourceView;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
index 87aa85268942e..3e7aac8c8ae89 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
@@ -50,6 +50,7 @@
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
@@ -73,10 +74,9 @@
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestNMWebServicesApps extends JerseyTest {
+public class TestNMWebServicesApps extends JerseyTestBase {
private static Context nmContext;
private static ResourceView resourceView;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
index 62d9cb7b3b4b7..ceb1d571323d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
@@ -51,6 +51,7 @@
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
@@ -73,10 +74,9 @@
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestNMWebServicesContainers extends JerseyTest {
+public class TestNMWebServicesContainers extends JerseyTestBase {
private static Context nmContext;
private static ResourceView resourceView;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 9f091d2398f65..5e1ab74c054b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
@@ -61,10 +62,9 @@
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestRMWebServices extends JerseyTest {
+public class TestRMWebServices extends JerseyTestBase {
private static MockRM rm;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 23ea22e37fb27..705fd316af1e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
@@ -67,10 +68,9 @@
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.core.util.MultivaluedMapImpl;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestRMWebServicesApps extends JerseyTest {
+public class TestRMWebServicesApps extends JerseyTestBase {
private static MockRM rm;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
index df23e85ab1b2b..632eeb86d37d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
@@ -74,6 +74,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LocalResourceInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
@@ -102,11 +103,10 @@
import com.sun.jersey.api.json.JSONJAXBContext;
import com.sun.jersey.api.json.JSONMarshaller;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
@RunWith(Parameterized.class)
-public class TestRMWebServicesAppsModification extends JerseyTest {
+public class TestRMWebServicesAppsModification extends JerseyTestBase {
private static MockRM rm;
private static final int CONTAINER_MB = 1024;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index 87bacc6eca3fb..c7c403d85d9d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
@@ -55,10 +56,9 @@
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestRMWebServicesCapacitySched extends JerseyTest {
+public class TestRMWebServicesCapacitySched extends JerseyTestBase {
private static MockRM rm;
private CapacitySchedulerConfiguration csConf;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
index c5c048fa65ed4..dab83433a45b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
@@ -53,6 +53,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
@@ -87,7 +88,7 @@
import com.sun.jersey.test.framework.WebAppDescriptor;
@RunWith(Parameterized.class)
-public class TestRMWebServicesDelegationTokens extends JerseyTest {
+public class TestRMWebServicesDelegationTokens extends JerseyTestBase {
private static File testRootDir;
private static File httpSpnegoKeytabFile = new File(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
index 9de3f76f12c94..21ca6a724ac72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Test;
@@ -39,10 +40,9 @@
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestRMWebServicesFairScheduler extends JerseyTest {
+public class TestRMWebServicesFairScheduler extends JerseyTestBase {
private static MockRM rm;
private YarnConfiguration conf;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
index df5aecbba71e6..ae27c02b74922 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
@@ -51,10 +52,9 @@
import com.sun.jersey.api.json.JSONMarshaller;
import com.sun.jersey.api.json.JSONUnmarshaller;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestRMWebServicesNodeLabels extends JerseyTest {
+public class TestRMWebServicesNodeLabels extends JerseyTestBase {
private static final Log LOG = LogFactory
.getLog(TestRMWebServicesNodeLabels.class);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
index e685f221ad2e5..f507e1789f714 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
@@ -43,6 +43,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
@@ -64,10 +65,9 @@
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
-public class TestRMWebServicesNodes extends JerseyTest {
+public class TestRMWebServicesNodes extends JerseyTestBase {
private static MockRM rm;
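Each of the hunks above makes the same mechanical change: the resource-manager web-service tests stop extending com.sun.jersey.test.framework.JerseyTest directly and instead extend a shared org.apache.hadoop.yarn.webapp.JerseyTestBase. As a rough sketch of the idea only (the class name SharedJerseyTestBase and its body below are hypothetical, not the real Hadoop class), such a base class simply sits between the tests and JerseyTest so that any framework-level setup can live in one place instead of being repeated in every test:

    package org.apache.hadoop.yarn.webapp;

    import com.sun.jersey.test.framework.JerseyTest;
    import com.sun.jersey.test.framework.WebAppDescriptor;

    // Hypothetical sketch only: a thin common superclass so every web-service
    // test inherits shared JerseyTest configuration from a single place.
    public abstract class SharedJerseyTestBase extends JerseyTest {
      protected SharedJerseyTestBase(WebAppDescriptor appDescriptor) {
        super(appDescriptor);
      }
    }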
From 344a3eac7b4b4f58e8039662d0f37fa08ac71234 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Fri, 12 Dec 2014 11:51:17 -0800
Subject: [PATCH 052/432] HDFS-7517. Remove redundant non-null checks in
FSNamesystem#getBlockLocations. Contributed by Haohui Mai.
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 4 ----
2 files changed, 3 insertions(+), 4 deletions(-)
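The hunk below deletes a null check that can never fire: by that point in getBlockLocations the result has already been produced by a helper that, as the patch title implies, either returns a real result or throws, so res cannot be null. A minimal, self-contained illustration of that pattern (simplified names, not the actual FSNamesystem code):

    import java.io.FileNotFoundException;

    class RedundantNullCheckExample {
      static class Result {
        boolean updateAccessTime() { return true; }
      }

      // The helper never returns null; it signals failure by throwing instead.
      static Result resolve(String src) throws FileNotFoundException {
        if (src == null || src.isEmpty()) {
          throw new FileNotFoundException("File does not exist: " + src);
        }
        return new Result();
      }

      static void open(String src) throws FileNotFoundException {
        Result res = resolve(src);
        // A guard like "if (res == null) return;" here is dead code and can go.
        if (res.updateAccessTime()) {
          // ... update the access time under the write lock ...
        }
      }
    }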
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5977ed79d2290..9cd5b0591c160 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -579,6 +579,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7497. Inconsistent report of decommissioning DataNodes between
dfsadmin and NameNode webui. (Yongjun Zhang via wang)
+ HDFS-7517. Remove redundant non-null checks in FSNamesystem#
+ getBlockLocations. (wheat9)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c17c4f51e1d1c..5dd5920c06d8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1782,10 +1782,6 @@ LocatedBlocks getBlockLocations(String clientMachine, String src,
logAuditEvent(true, "open", src);
- if (res == null) {
- return null;
- }
-
if (res.updateAccessTime()) {
writeLock();
final long now = now();
From 55705d457459f59ed09fd9f2f85aaaadff24b2cb Mon Sep 17 00:00:00 2001
From: arp
Date: Fri, 12 Dec 2014 14:27:50 -0800
Subject: [PATCH 053/432] HDFS-7514. TestTextCommand fails on Windows. (Arpit
Agarwal)
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java | 3 +--
2 files changed, 3 insertions(+), 2 deletions(-)
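The fix below replaces a test root derived from the local test.build.data directory with a fixed HDFS path. On Windows the local value contains a drive letter and backslashes, and such a string is not a valid absolute path inside the MiniDFSCluster namespace, while a plain "/test/data/testText" is valid on every platform. A small illustration of the difference (a hypothetical main class, not part of the test):

    import org.apache.hadoop.fs.Path;

    public class TestRootDirExample {
      public static void main(String[] args) {
        // What test.build.data can look like on Windows vs. the fixed HDFS path.
        Path windowsStyle = new Path("C:\\hadoop\\build\\test\\data" + "/testText");
        Path hdfsStyle = new Path("/test/data/testText");
        // The drive-letter form carries a ':' that HDFS path validation rejects;
        // the plain absolute path is legal on HDFS regardless of the client OS.
        System.out.println(windowsStyle);
        System.out.println(hdfsStyle);
      }
    }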
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9cd5b0591c160..d635400fb5c9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -582,6 +582,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7517. Remove redundant non-null checks in FSNamesystem#
getBlockLocations. (wheat9)
+ HDFS-7514. TestTextCommand fails on Windows. (Arpit Agarwal)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
index f589d7e27a4b1..76c32bfbaa9c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
@@ -43,8 +43,7 @@
* by the Text command.
*/
public class TestHdfsTextCommand {
- private static final String TEST_ROOT_DIR =
- System.getProperty("test.build.data", "build/test/data/") + "/testText";
+ private static final String TEST_ROOT_DIR = "/test/data/testText";
private static final Path AVRO_FILENAME = new Path(TEST_ROOT_DIR, "weather.avro");
private static MiniDFSCluster cluster;
private static FileSystem fs;
From 529629559bfcf8efe73c7fc752380f0275695104 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Fri, 12 Dec 2014 14:15:06 -0800
Subject: [PATCH 054/432] HDFS-7509. Avoid resolving path multiple times.
Contributed by Jing Zhao.
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../hdfs/server/namenode/FSDirAclOp.java | 27 +-
.../hdfs/server/namenode/FSDirMkdirOp.java | 33 ++-
.../hdfs/server/namenode/FSDirRenameOp.java | 173 ++++++-------
.../namenode/FSDirStatAndListingOp.java | 34 +--
.../hdfs/server/namenode/FSDirXAttrOp.java | 11 +-
.../hdfs/server/namenode/FSDirectory.java | 243 +++++++-----------
.../hdfs/server/namenode/FSEditLogLoader.java | 37 +--
.../hdfs/server/namenode/FSImageFormat.java | 6 +-
.../hdfs/server/namenode/FSNamesystem.java | 200 +++++++-------
.../hdfs/server/namenode/INodesInPath.java | 137 ++++++----
.../hdfs/server/namenode/LeaseManager.java | 6 +-
.../hdfs/server/namenode/FSAclBaseTest.java | 6 +-
.../hadoop/hdfs/server/namenode/TestFsck.java | 4 +-
.../server/namenode/TestLeaseManager.java | 2 +
.../namenode/TestSnapshotPathINodes.java | 64 ++---
.../snapshot/TestOpenFilesWithSnapshot.java | 3 +-
.../snapshot/TestSnapshotReplication.java | 11 +-
18 files changed, 473 insertions(+), 526 deletions(-)
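The theme running through every file below is the same: a path string used to be re-parsed and re-resolved into an INodesInPath by each helper it passed through, and after this patch the caller resolves the path once and hands the resolved INodesInPath down. A self-contained toy version of the before/after shape (all of these types and names are simplified stand-ins, not the real namenode classes):

    import java.util.Arrays;
    import java.util.List;

    // Toy stand-ins; none of these are the real HDFS classes.
    class ResolveOnceExample {
      /** Plays the role of INodesInPath: a path already resolved to components. */
      static final class Resolved {
        final List<String> components;
        Resolved(List<String> components) { this.components = components; }
        String last() { return components.get(components.size() - 1); }
      }

      static int resolutions = 0;

      /** Plays the role of FSDirectory resolution: the step worth doing once. */
      static Resolved resolve(String src) {
        resolutions++;
        return new Resolved(Arrays.asList(src.split("/")));
      }

      // Before: each helper takes the string and resolves it again.
      static String checkOwnerBefore(String src) { return resolve(src).last(); }
      static String readAclBefore(String src)    { return resolve(src).last(); }

      // After: helpers take the already-resolved object.
      static String checkOwnerAfter(Resolved iip) { return iip.last(); }
      static String readAclAfter(Resolved iip)    { return iip.last(); }

      public static void main(String[] args) {
        checkOwnerBefore("/a/b/c");
        readAclBefore("/a/b/c");
        System.out.println("before: " + resolutions + " resolutions"); // 2
        resolutions = 0;
        Resolved iip = resolve("/a/b/c");
        checkOwnerAfter(iip);
        readAclAfter(iip);
        System.out.println("after:  " + resolutions + " resolution");  // 1
      }
    }

Besides saving repeated work, passing the resolved object down also avoids cases where two resolutions of the same string could disagree if the namespace changes in between, which is presumably why several of the helpers below now accept only the INodesInPath.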
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d635400fb5c9f..eeedb0d8333a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -453,6 +453,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7463. Simplify FSNamesystem#getBlockLocationsUpdateTimes. (wheat9)
+ HDFS-7509. Avoid resolving path multiple times. (jing9)
+
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index c2dee207c32b4..0d2b34ce8cfe2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -46,7 +46,7 @@ static HdfsFileStatus modifyAclEntries(
INodesInPath iip = fsd.getINodesInPath4Write(
FSDirectory.normalizePath(src), true);
fsd.checkOwner(pc, iip);
- INode inode = FSDirectory.resolveLastINode(src, iip);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.mergeAclEntries(
@@ -72,7 +72,7 @@ static HdfsFileStatus removeAclEntries(
INodesInPath iip = fsd.getINodesInPath4Write(
FSDirectory.normalizePath(src), true);
fsd.checkOwner(pc, iip);
- INode inode = FSDirectory.resolveLastINode(src, iip);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.filterAclEntriesByAclSpec(
@@ -97,7 +97,7 @@ static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
INodesInPath iip = fsd.getINodesInPath4Write(
FSDirectory.normalizePath(src), true);
fsd.checkOwner(pc, iip);
- INode inode = FSDirectory.resolveLastINode(src, iip);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.filterDefaultAclEntries(
@@ -121,7 +121,7 @@ static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
try {
INodesInPath iip = fsd.getINodesInPath4Write(src);
fsd.checkOwner(pc, iip);
- unprotectedRemoveAcl(fsd, src);
+ unprotectedRemoveAcl(fsd, iip);
} finally {
fsd.writeUnlock();
}
@@ -168,7 +168,7 @@ static AclStatus getAclStatus(
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
- INode inode = FSDirectory.resolveLastINode(srcs, iip);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getPathSnapshotId();
List<AclEntry> acl = AclStorage.readINodeAcl(inode, snapshotId);
FsPermission fsPermission = inode.getFsPermission(snapshotId);
@@ -185,16 +185,17 @@ static AclStatus getAclStatus(
static List<AclEntry> unprotectedSetAcl(
FSDirectory fsd, String src, List<AclEntry> aclSpec)
throws IOException {
+ assert fsd.hasWriteLock();
+ final INodesInPath iip = fsd.getINodesInPath4Write(
+ FSDirectory.normalizePath(src), true);
+
// ACL removal is logged to edits as OP_SET_ACL with an empty list.
if (aclSpec.isEmpty()) {
- unprotectedRemoveAcl(fsd, src);
+ unprotectedRemoveAcl(fsd, iip);
return AclFeature.EMPTY_ENTRY_LIST;
}
- assert fsd.hasWriteLock();
- INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath
- (src), true);
- INode inode = FSDirectory.resolveLastINode(src, iip);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.replaceAclEntries(existingAcl,
@@ -212,12 +213,10 @@ private static void checkAclsConfigFlag(FSDirectory fsd) throws AclException {
}
}
- private static void unprotectedRemoveAcl(FSDirectory fsd, String src)
+ private static void unprotectedRemoveAcl(FSDirectory fsd, INodesInPath iip)
throws IOException {
assert fsd.hasWriteLock();
- INodesInPath iip = fsd.getINodesInPath4Write(
- FSDirectory.normalizePath(src), true);
- INode inode = FSDirectory.resolveLastINode(src, iip);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
AclFeature f = inode.getAclFeature();
if (f == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index c8c5cb2961af3..7e62d2c64df25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -50,8 +50,7 @@ static HdfsFileStatus mkdirs(
throw new InvalidPathException(src);
}
FSPermissionChecker pc = fsd.getPermissionChecker();
- byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath
- (src);
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip = fsd.getINodesInPath4Write(src);
if (fsd.isPermissionEnabled()) {
@@ -72,7 +71,7 @@ static HdfsFileStatus mkdirs(
// create multiple inodes.
fsn.checkFsObjectLimit();
- if (!mkdirsRecursively(fsd, src, permissions, false, now())) {
+ if (mkdirsRecursively(fsd, iip, permissions, false, now()) == null) {
throw new IOException("Failed to create directory: " + src);
}
}
@@ -97,33 +96,34 @@ static INode unprotectedMkdir(
* If ancestor directories do not exist, automatically create them.
* @param fsd FSDirectory
- * @param src string representation of the path to the directory
+ * @param iip the INodesInPath instance containing all the existing INodes
+ * and null elements for non-existing components in the path
* @param permissions the permission of the directory
* @param inheritPermission
* if the permission of the directory should inherit from its parent or not.
* u+wx is implicitly added to the automatically created directories,
* and to the given directory if inheritPermission is true
* @param now creation time
- * @return true if the operation succeeds false otherwise
+ * @return non-null INodesInPath instance if operation succeeds
* @throws QuotaExceededException if directory creation violates
* any quota limit
* @throws UnresolvedLinkException if a symlink is encountered in src.
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
- static boolean mkdirsRecursively(
- FSDirectory fsd, String src, PermissionStatus permissions,
- boolean inheritPermission, long now)
+ static INodesInPath mkdirsRecursively(FSDirectory fsd, INodesInPath iip,
+ PermissionStatus permissions, boolean inheritPermission, long now)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException,
AclException {
- src = FSDirectory.normalizePath(src);
- String[] names = INode.getPathNames(src);
- byte[][] components = INode.getPathComponents(names);
- final int lastInodeIndex = components.length - 1;
+ final int lastInodeIndex = iip.length() - 1;
+ final byte[][] components = iip.getPathComponents();
+ final String[] names = new String[components.length];
+ for (int i = 0; i < components.length; i++) {
+ names[i] = DFSUtil.bytes2String(components[i]);
+ }
fsd.writeLock();
try {
- INodesInPath iip = fsd.getExistingPathINodes(components);
if (iip.isSnapshot()) {
throw new SnapshotAccessControlException(
"Modification on RO snapshot is disallowed");
@@ -136,8 +136,7 @@ static boolean mkdirsRecursively(
for(; i < length && (curNode = iip.getINode(i)) != null; i++) {
pathbuilder.append(Path.SEPARATOR).append(names[i]);
if (!curNode.isDirectory()) {
- throw new FileAlreadyExistsException(
- "Parent path is not a directory: "
+ throw new FileAlreadyExistsException("Parent path is not a directory: "
+ pathbuilder + " " + curNode.getLocalName());
}
}
@@ -181,7 +180,7 @@ static boolean mkdirsRecursively(
components[i], (i < lastInodeIndex) ? parentPermissions :
permissions, null, now);
if (iip.getINode(i) == null) {
- return false;
+ return null;
}
// Directory creation also count towards FilesCreated
// to match count of FilesDeleted metric.
@@ -197,7 +196,7 @@ static boolean mkdirsRecursively(
} finally {
fsd.writeUnlock();
}
- return true;
+ return iip;
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c62c88e9f66b7..e3020eaf26789 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ChunkedArrayList;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Time;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -43,9 +44,9 @@
import static org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import static org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
-import static org.apache.hadoop.util.Time.now;
class FSDirRenameOp {
+ @Deprecated
static RenameOldResult renameToInt(
FSDirectory fsd, final String srcArg, final String dstArg,
boolean logRetryCache)
@@ -67,7 +68,7 @@ static RenameOldResult renameToInt(
src = fsd.resolvePath(pc, src, srcComponents);
dst = fsd.resolvePath(pc, dst, dstComponents);
@SuppressWarnings("deprecation")
- final boolean status = renameToInternal(fsd, pc, src, dst, logRetryCache);
+ final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
if (status) {
resultingStat = fsd.getAuditFileInfo(dst, false);
}
@@ -115,6 +116,22 @@ static void verifyFsLimitsForRename(FSDirectory fsd, INodesInPath srcIIP,
}
}
+ /**
+ *
+ * Note: This is to be used by {@link FSEditLogLoader} only.
+ *
+ */
+ @Deprecated
+ static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
+ long timestamp) throws IOException {
+ if (fsd.isDir(dst)) {
+ dst += Path.SEPARATOR + new Path(src).getName();
+ }
+ final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+ final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
+ return unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp);
+ }
+
/**
* Change a path name
*
@@ -126,24 +143,19 @@ static void verifyFsLimitsForRename(FSDirectory fsd, INodesInPath srcIIP,
* boolean, Options.Rename...)}
*/
@Deprecated
- static boolean unprotectedRenameTo(
- FSDirectory fsd, String src, String dst, long timestamp)
+ static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
+ final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp)
throws IOException {
assert fsd.hasWriteLock();
- INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
final INode srcInode = srcIIP.getLastINode();
try {
- validateRenameSource(src, srcIIP);
+ validateRenameSource(srcIIP);
} catch (SnapshotException e) {
throw e;
} catch (IOException ignored) {
return false;
}
- if (fsd.isDir(dst)) {
- dst += Path.SEPARATOR + new Path(src).getName();
- }
-
// validate the destination
if (dst.equals(src)) {
return true;
@@ -155,7 +167,6 @@ static boolean unprotectedRenameTo(
return false;
}
- INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
if (dstIIP.getLastINode() != null) {
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
"failed to rename " + src + " to " + dst + " because destination " +
@@ -234,8 +245,7 @@ static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
src = fsd.resolvePath(pc, src, srcComponents);
dst = fsd.resolvePath(pc, dst, dstComponents);
- renameToInternal(fsd, pc, src, dst, logRetryCache, collectedBlocks,
- options);
+ renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dst, false);
return new AbstractMap.SimpleImmutableEntry<BlocksMapUpdateInfo,
    HdfsFileStatus>(collectedBlocks, resultingStat);
@@ ... @@ static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(
* @see #unprotectedRenameTo(FSDirectory, String, String, long,
* org.apache.hadoop.fs.Options.Rename...)
*/
- static void renameTo(
- FSDirectory fsd, String src, String dst, long mtime,
- BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
- throws IOException {
+ static void renameTo(FSDirectory fsd, FSPermissionChecker pc, String src,
+ String dst, BlocksMapUpdateInfo collectedBlocks, boolean logRetryCache,
+ Options.Rename... options) throws IOException {
+ final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+ final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
+ if (fsd.isPermissionEnabled()) {
+ // Rename does not operate on link targets
+ // Do not resolveLink when checking permissions of src and dst
+ // Check write access to parent of src
+ fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
+ false);
+ // Check write access to ancestor of dst
+ fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null,
+ false);
+ }
+
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src + " to "
+ dst);
}
+ final long mtime = Time.now();
fsd.writeLock();
try {
- if (unprotectedRenameTo(fsd, src, dst, mtime, collectedBlocks, options)) {
+ if (unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, mtime,
+ collectedBlocks, options)) {
fsd.getFSNamesystem().incrDeletedFileCount(1);
}
} finally {
fsd.writeUnlock();
}
+ fsd.getEditLog().logRename(src, dst, mtime, logRetryCache, options);
}
/**
* Rename src to dst.
*
* Note: This is to be used by {@link org.apache.hadoop.hdfs.server
- * .namenode.FSEditLog} only.
+ * .namenode.FSEditLogLoader} only.
*
*
* @param fsd FSDirectory
@@ -282,7 +307,9 @@ static boolean unprotectedRenameTo(
Options.Rename... options)
throws IOException {
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
- boolean ret = unprotectedRenameTo(fsd, src, dst, timestamp,
+ final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+ final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
+ boolean ret = unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp,
collectedBlocks, options);
if (!collectedBlocks.getToDeleteList().isEmpty()) {
fsd.getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
@@ -302,8 +329,8 @@ static boolean unprotectedRenameTo(
* @param collectedBlocks blocks to be removed
* @param options Rename options
*/
- static boolean unprotectedRenameTo(
- FSDirectory fsd, String src, String dst, long timestamp,
+ static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
+ final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp,
BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
throws IOException {
assert fsd.hasWriteLock();
@@ -311,9 +338,8 @@ static boolean unprotectedRenameTo(
&& Arrays.asList(options).contains(Options.Rename.OVERWRITE);
final String error;
- final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
final INode srcInode = srcIIP.getLastINode();
- validateRenameSource(src, srcIIP);
+ validateRenameSource(srcIIP);
// validate the destination
if (dst.equals(src)) {
@@ -322,7 +348,6 @@ static boolean unprotectedRenameTo(
}
validateDestination(src, dst, srcInode);
- INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
if (dstIIP.length() == 1) {
error = "rename destination cannot be the root";
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
@@ -373,8 +398,8 @@ static boolean unprotectedRenameTo(
long removedNum = 0;
try {
if (dstInode != null) { // dst exists remove it
- if ((removedNum = fsd.removeLastINode(dstIIP)) != -1) {
- removedDst = dstIIP.getLastINode();
+ if ((removedNum = fsd.removeLastINode(tx.dstIIP)) != -1) {
+ removedDst = tx.dstIIP.getLastINode();
undoRemoveDst = true;
}
}
@@ -395,13 +420,13 @@ static boolean unprotectedRenameTo(
undoRemoveDst = false;
if (removedNum > 0) {
List<INode> removedINodes = new ChunkedArrayList<>();
- if (!removedDst.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
+ if (!removedDst.isInLatestSnapshot(tx.dstIIP.getLatestSnapshotId())) {
removedDst.destroyAndCollectBlocks(collectedBlocks,
removedINodes);
filesDeleted = true;
} else {
filesDeleted = removedDst.cleanSubtree(
- Snapshot.CURRENT_STATE_ID, dstIIP.getLatestSnapshotId(),
+ Snapshot.CURRENT_STATE_ID, tx.dstIIP.getLatestSnapshotId(),
collectedBlocks, removedINodes, true)
.get(Quota.NAMESPACE) >= 0;
}
@@ -431,7 +456,7 @@ static boolean unprotectedRenameTo(
dstParent.asDirectory().undoRename4DstParent(removedDst,
dstIIP.getLatestSnapshotId());
} else {
- fsd.addLastINodeNoQuotaCheck(dstIIP, removedDst);
+ fsd.addLastINodeNoQuotaCheck(tx.dstIIP, removedDst);
}
if (removedDst.isReference()) {
final INodeReference removedDstRef = removedDst.asReference();
@@ -447,59 +472,41 @@ static boolean unprotectedRenameTo(
}
/**
- * @see #unprotectedRenameTo(FSDirectory, String, String, long)
* @deprecated Use {@link #renameToInt(FSDirectory, String, String,
* boolean, Options.Rename...)}
*/
@Deprecated
@SuppressWarnings("deprecation")
- private static boolean renameTo(
- FSDirectory fsd, String src, String dst, long mtime)
- throws IOException {
+ private static boolean renameTo(FSDirectory fsd, FSPermissionChecker pc,
+ String src, String dst, boolean logRetryCache) throws IOException {
+ // Rename does not operate on link targets
+ // Do not resolveLink when checking permissions of src and dst
+ // Check write access to parent of src
+ final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+ // Note: We should not be doing this. This is move() not renameTo().
+ final String actualDst = fsd.isDir(dst) ?
+ dst + Path.SEPARATOR + new Path(src).getName() : dst;
+ final INodesInPath dstIIP = fsd.getINodesInPath4Write(actualDst, false);
+ if (fsd.isPermissionEnabled()) {
+ fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
+ false);
+ // Check write access to ancestor of dst
+ fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null,
+ null, false);
+ }
+
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src + " to "
+ dst);
}
+ final long mtime = Time.now();
boolean stat = false;
fsd.writeLock();
try {
- stat = unprotectedRenameTo(fsd, src, dst, mtime);
+ stat = unprotectedRenameTo(fsd, src, actualDst, srcIIP, dstIIP, mtime);
} finally {
fsd.writeUnlock();
}
- return stat;
- }
-
- /**
- * @deprecated See {@link #renameTo(FSDirectory, String, String, long)}
- */
- @Deprecated
- private static boolean renameToInternal(
- FSDirectory fsd, FSPermissionChecker pc, String src, String dst,
- boolean logRetryCache)
- throws IOException {
- if (fsd.isPermissionEnabled()) {
- //We should not be doing this. This is move() not renameTo().
- //but for now,
- //NOTE: yes, this is bad! it's assuming much lower level behavior
- // of rewriting the dst
- String actualdst = fsd.isDir(dst) ? dst + Path.SEPARATOR + new Path
- (src).getName() : dst;
- // Rename does not operates on link targets
- // Do not resolveLink when checking permissions of src and dst
- // Check write access to parent of src
- INodesInPath srcIIP = fsd.getINodesInPath(src, false);
- fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
- false);
- INodesInPath dstIIP = fsd.getINodesInPath(actualdst, false);
- // Check write access to ancestor of dst
- fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null,
- null, false);
- }
-
- long mtime = now();
- @SuppressWarnings("deprecation")
- final boolean stat = renameTo(fsd, src, dst, mtime);
if (stat) {
fsd.getEditLog().logRename(src, dst, mtime, logRetryCache);
return true;
@@ -507,29 +514,6 @@ private static boolean renameToInternal(
return false;
}
- private static void renameToInternal(
- FSDirectory fsd, FSPermissionChecker pc, String src, String dst,
- boolean logRetryCache, BlocksMapUpdateInfo collectedBlocks,
- Options.Rename... options)
- throws IOException {
- if (fsd.isPermissionEnabled()) {
- // Rename does not operates on link targets
- // Do not resolveLink when checking permissions of src and dst
- // Check write access to parent of src
- INodesInPath srcIIP = fsd.getINodesInPath(src, false);
- fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
- false);
- // Check write access to ancestor of dst
- INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
- fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null,
- false);
- }
-
- long mtime = now();
- renameTo(fsd, src, dst, mtime, collectedBlocks, options);
- fsd.getEditLog().logRename(src, dst, mtime, logRetryCache, options);
- }
-
private static void validateDestination(
String src, String dst, INode srcInode)
throws IOException {
@@ -579,13 +563,13 @@ private static void validateOverwrite(
}
}
- private static void validateRenameSource(String src, INodesInPath srcIIP)
+ private static void validateRenameSource(INodesInPath srcIIP)
throws IOException {
String error;
final INode srcInode = srcIIP.getLastINode();
// validate source
if (srcInode == null) {
- error = "rename source " + src + " is not found.";
+ error = "rename source " + srcIIP.getPath() + " is not found.";
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+ error);
throw new FileNotFoundException(error);
@@ -625,8 +609,7 @@ private static class RenameOperation {
this.dst = dst;
srcChild = srcIIP.getLastINode();
srcChildName = srcChild.getLocalNameBytes();
- isSrcInSnapshot = srcChild.isInLatestSnapshot(srcIIP
- .getLatestSnapshotId());
+ isSrcInSnapshot = srcChild.isInLatestSnapshot(srcIIP.getLatestSnapshotId());
srcChildIsReference = srcChild.isReference();
srcParent = srcIIP.getINode(-2).asDirectory();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 0f941710e6bde..5bc790ee5fcbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -72,14 +72,14 @@ static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
boolean isSuperUser = true;
if (fsd.isPermissionEnabled()) {
- if (fsd.isDir(src)) {
+ if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
} else {
fsd.checkTraverse(pc, iip);
}
isSuperUser = pc.isSuperUser();
}
- return getListing(fsd, src, startAfter, needLocation, isSuperUser);
+ return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
/**
@@ -131,12 +131,12 @@ static ContentSummary getContentSummary(
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
FSPermissionChecker pc = fsd.getPermissionChecker();
src = fsd.resolvePath(pc, src, pathComponents);
- final INodesInPath iip = fsd.getINodesInPath(src, true);
+ final INodesInPath iip = fsd.getINodesInPath(src, false);
if (fsd.isPermissionEnabled()) {
fsd.checkPermission(pc, iip, false, null, null, null,
FsAction.READ_EXECUTE);
}
- return getContentSummaryInt(fsd, src);
+ return getContentSummaryInt(fsd, iip);
}
/**
@@ -148,14 +148,15 @@ static ContentSummary getContentSummary(
* that at least this.lsLimit block locations are in the response
*
* @param fsd FSDirectory
+ * @param iip the INodesInPath instance containing all the INodes along the
+ * path
* @param src the directory name
* @param startAfter the name to start listing after
* @param needLocation if block locations are returned
* @return a partial listing starting after startAfter
*/
- private static DirectoryListing getListing(
- FSDirectory fsd, String src, byte[] startAfter, boolean needLocation,
- boolean isSuperUser)
+ private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
+ String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
throws IOException {
String srcs = FSDirectory.normalizePath(src);
final boolean isRawPath = FSDirectory.isReservedRawName(src);
@@ -165,9 +166,8 @@ private static DirectoryListing getListing(
if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
return getSnapshotsListing(fsd, srcs, startAfter);
}
- final INodesInPath inodesInPath = fsd.getINodesInPath(srcs, true);
- final int snapshot = inodesInPath.getPathSnapshotId();
- final INode targetNode = inodesInPath.getLastINode();
+ final int snapshot = iip.getPathSnapshotId();
+ final INode targetNode = iip.getLastINode();
if (targetNode == null)
return null;
byte parentStoragePolicy = isSuperUser ?
@@ -178,7 +178,7 @@ private static DirectoryListing getListing(
return new DirectoryListing(
new HdfsFileStatus[]{createFileStatus(fsd,
HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
- parentStoragePolicy, snapshot, isRawPath, inodesInPath)}, 0);
+ parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
}
final INodeDirectory dirInode = targetNode.asDirectory();
@@ -196,7 +196,8 @@ private static DirectoryListing getListing(
cur.getLocalStoragePolicyID():
BlockStoragePolicySuite.ID_UNSPECIFIED;
listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), cur,
- needLocation, fsd.getStoragePolicyID(curPolicy, parentStoragePolicy), snapshot, isRawPath, inodesInPath);
+ needLocation, fsd.getStoragePolicyID(curPolicy,
+ parentStoragePolicy), snapshot, isRawPath, iip);
listingCnt++;
if (needLocation) {
// Once we hit lsLimit locations, stop.
@@ -453,14 +454,13 @@ private static FsPermission getPermissionForFileStatus(
return perm;
}
- private static ContentSummary getContentSummaryInt(
- FSDirectory fsd, String src) throws IOException {
- String srcs = FSDirectory.normalizePath(src);
+ private static ContentSummary getContentSummaryInt(FSDirectory fsd,
+ INodesInPath iip) throws IOException {
fsd.readLock();
try {
- INode targetNode = fsd.getNode(srcs, false);
+ INode targetNode = iip.getLastINode();
if (targetNode == null) {
- throw new FileNotFoundException("File does not exist: " + srcs);
+ throw new FileNotFoundException("File does not exist: " + iip.getPath());
}
else {
// Make it relinquish locks everytime contentCountLimit entries are
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 303b9e3a12ef3..47a995d8dab61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -191,7 +191,7 @@ static List<XAttr> unprotectedRemoveXAttrs(
assert fsd.hasWriteLock();
INodesInPath iip = fsd.getINodesInPath4Write(
FSDirectory.normalizePath(src), true);
- INode inode = FSDirectory.resolveLastINode(src, iip);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
List<XAttr> removedXAttrs = Lists.newArrayListWithCapacity(toRemove.size());
@@ -260,8 +260,9 @@ static INode unprotectedSetXAttrs(
final EnumSet<XAttrSetFlag> flag)
throws IOException {
assert fsd.hasWriteLock();
- INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
- INode inode = FSDirectory.resolveLastINode(src, iip);
+ INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src),
+ true);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
List<XAttr> newXAttrs = setINodeXAttrs(fsd, existingXAttrs, xAttrs, flag);
@@ -444,8 +445,8 @@ private static List<XAttr> getXAttrs(FSDirectory fsd,
String srcs = FSDirectory.normalizePath(src);
fsd.readLock();
try {
- INodesInPath iip = fsd.getLastINodeInPath(srcs, true);
- INode inode = FSDirectory.resolveLastINode(src, iip);
+ INodesInPath iip = fsd.getINodesInPath(srcs, true);
+ INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getPathSnapshotId();
return XAttrStorage.readINodeXAttrs(inode, snapshotId);
} finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 81b0eb6f255fd..ee9bdd0019ba6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -335,8 +335,8 @@ void disableQuotaChecks() {
private static INodeFile newINodeFile(long id, PermissionStatus permissions,
long mtime, long atime, short replication, long preferredBlockSize) {
- return newINodeFile(id, permissions, mtime, atime, replication, preferredBlockSize,
- (byte)0);
+ return newINodeFile(id, permissions, mtime, atime, replication,
+ preferredBlockSize, (byte)0);
}
private static INodeFile newINodeFile(long id, PermissionStatus permissions,
@@ -354,20 +354,21 @@ private static INodeFile newINodeFile(long id, PermissionStatus permissions,
* @throws UnresolvedLinkException
* @throws SnapshotAccessControlException
*/
- INodeFile addFile(String path, PermissionStatus permissions,
+ INodeFile addFile(INodesInPath iip, String path, PermissionStatus permissions,
short replication, long preferredBlockSize,
String clientName, String clientMachine)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException, AclException {
long modTime = now();
- INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime, modTime, replication, preferredBlockSize);
+ INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime,
+ modTime, replication, preferredBlockSize);
newNode.toUnderConstruction(clientName, clientMachine);
boolean added = false;
writeLock();
try {
- added = addINode(path, newNode);
+ added = addINode(iip, newNode);
} finally {
writeUnlock();
}
@@ -382,8 +383,8 @@ INodeFile addFile(String path, PermissionStatus permissions,
return newNode;
}
- INodeFile unprotectedAddFile( long id,
- String path,
+ INodeFile unprotectedAddFile(long id,
+ INodesInPath iip,
PermissionStatus permissions,
List<AclEntry> aclEntries,
List<XAttr> xAttrs,
@@ -401,14 +402,13 @@ INodeFile unprotectedAddFile( long id,
newNode = newINodeFile(id, permissions, modificationTime,
modificationTime, replication, preferredBlockSize, storagePolicyId);
newNode.toUnderConstruction(clientName, clientMachine);
-
} else {
newNode = newINodeFile(id, permissions, modificationTime, atime,
replication, preferredBlockSize, storagePolicyId);
}
try {
- if (addINode(path, newNode)) {
+ if (addINode(iip, newNode)) {
if (aclEntries != null) {
AclStorage.updateINodeAcl(newNode, aclEntries,
Snapshot.CURRENT_STATE_ID);
@@ -422,8 +422,8 @@ INodeFile unprotectedAddFile( long id,
} catch (IOException e) {
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
- "DIR* FSDirectory.unprotectedAddFile: exception when add " + path
- + " to the file system", e);
+ "DIR* FSDirectory.unprotectedAddFile: exception when add "
+ + iip.getPath() + " to the file system", e);
}
}
return null;
@@ -468,18 +468,18 @@ BlockInfo addBlock(String path, INodesInPath inodesInPath, Block block,
* Remove a block from the file.
* @return Whether the block exists in the corresponding file
*/
- boolean removeBlock(String path, INodeFile fileNode, Block block)
- throws IOException {
+ boolean removeBlock(String path, INodesInPath iip, INodeFile fileNode,
+ Block block) throws IOException {
Preconditions.checkArgument(fileNode.isUnderConstruction());
writeLock();
try {
- return unprotectedRemoveBlock(path, fileNode, block);
+ return unprotectedRemoveBlock(path, iip, fileNode, block);
} finally {
writeUnlock();
}
}
- boolean unprotectedRemoveBlock(String path,
+ boolean unprotectedRemoveBlock(String path, INodesInPath iip,
INodeFile fileNode, Block block) throws IOException {
// modify file-> block and blocksMap
// fileNode should be under construction
@@ -496,7 +496,6 @@ boolean unprotectedRemoveBlock(String path,
}
// update space consumed
- final INodesInPath iip = getINodesInPath4Write(path, true);
updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
return true;
}
@@ -638,20 +637,6 @@ private void setDirStoragePolicy(INodeDirectory inode, byte policyId,
XAttrStorage.updateINodeXAttrs(inode, newXAttrs, latestSnapshotId);
}
- /**
- * @param path the file path
- * @return the block size of the file.
- */
- long getPreferredBlockSize(String path) throws IOException {
- readLock();
- try {
- return INodeFile.valueOf(getNode(path, false), path
- ).getPreferredBlockSize();
- } finally {
- readUnlock();
- }
- }
-
void setPermission(String src, FsPermission permission)
throws FileNotFoundException, UnresolvedLinkException,
QuotaExceededException, SnapshotAccessControlException {
@@ -706,28 +691,26 @@ void unprotectedSetOwner(String src, String username, String groupname)
/**
* Delete the target directory and collect the blocks under it
- *
- * @param src Path of a directory to delete
+ *
+ * @param iip the INodesInPath instance containing all the INodes for the path
* @param collectedBlocks Blocks under the deleted directory
* @param removedINodes INodes that should be removed from {@link #inodeMap}
* @return the number of files that have been removed
*/
- long delete(String src, BlocksMapUpdateInfo collectedBlocks,
+ long delete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
List<INode> removedINodes, long mtime) throws IOException {
if (NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
+ NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath());
}
final long filesRemoved;
writeLock();
try {
- final INodesInPath inodesInPath = getINodesInPath4Write(
- normalizePath(src), false);
- if (!deleteAllowed(inodesInPath, src) ) {
+ if (!deleteAllowed(iip, iip.getPath()) ) {
filesRemoved = -1;
} else {
List snapshottableDirs = new ArrayList();
- FSDirSnapshotOp.checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
- filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
+ FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+ filesRemoved = unprotectedDelete(iip, collectedBlocks,
removedINodes, mtime);
namesystem.removeSnapshottableDirs(snapshottableDirs);
}
@@ -863,88 +846,15 @@ byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
parentPolicy;
}
- INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
- Preconditions.checkArgument(
- src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
- "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
-
- final String dirPath = normalizePath(src.substring(0,
- src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
-
- final INode node = this.getINode(dirPath);
- if (node != null && node.isDirectory()
- && node.asDirectory().isSnapshottable()) {
- return node;
- }
- return null;
- }
-
- INodesInPath getExistingPathINodes(byte[][] components)
- throws UnresolvedLinkException {
- return INodesInPath.resolve(rootDir, components);
- }
-
- /**
- * Get {@link INode} associated with the file / directory.
- */
- public INode getINode(String src) throws UnresolvedLinkException {
- return getLastINodeInPath(src).getINode(0);
- }
-
- /**
- * Get {@link INode} associated with the file / directory.
- */
- public INodesInPath getLastINodeInPath(String src)
- throws UnresolvedLinkException {
- readLock();
- try {
- return getLastINodeInPath(src, true);
- } finally {
- readUnlock();
- }
- }
-
- /**
- * Get {@link INode} associated with the file / directory.
- */
- public INodesInPath getINodesInPath4Write(String src
- ) throws UnresolvedLinkException, SnapshotAccessControlException {
- readLock();
- try {
- return getINodesInPath4Write(src, true);
- } finally {
- readUnlock();
- }
- }
-
- /**
- * Get {@link INode} associated with the file / directory.
- * @throws SnapshotAccessControlException if path is in RO snapshot
- */
- public INode getINode4Write(String src) throws UnresolvedLinkException,
- SnapshotAccessControlException {
- readLock();
- try {
- return getINode4Write(src, true);
- } finally {
- readUnlock();
- }
- }
-
/**
* Check whether the filepath could be created
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
- boolean isValidToCreate(String src) throws UnresolvedLinkException,
- SnapshotAccessControlException {
+ boolean isValidToCreate(String src, INodesInPath iip)
+ throws SnapshotAccessControlException {
String srcs = normalizePath(src);
- readLock();
- try {
- return srcs.startsWith("/") && !srcs.endsWith("/")
- && getINode4Write(srcs, false) == null;
- } finally {
- readUnlock();
- }
+ return srcs.startsWith("/") && !srcs.endsWith("/") &&
+ iip.getLastINode() == null;
}
/**
@@ -954,7 +864,7 @@ boolean isDir(String src) throws UnresolvedLinkException {
src = normalizePath(src);
readLock();
try {
- INode node = getNode(src, false);
+ INode node = getINode(src, false);
return node != null && node.isDirectory();
} finally {
readUnlock();
@@ -963,21 +873,21 @@ boolean isDir(String src) throws UnresolvedLinkException {
/** Updates namespace and diskspace consumed for all
* directories until the parent directory of file represented by path.
- *
- * @param path path for the file.
+ *
+ * @param iip the INodesInPath instance containing all the INodes for
+ * updating quota usage
* @param nsDelta the delta change of namespace
* @param dsDelta the delta change of diskspace
* @throws QuotaExceededException if the new count violates any quota limit
* @throws FileNotFoundException if path does not exist.
*/
- void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
+ void updateSpaceConsumed(INodesInPath iip, long nsDelta, long dsDelta)
throws QuotaExceededException, FileNotFoundException,
UnresolvedLinkException, SnapshotAccessControlException {
writeLock();
try {
- final INodesInPath iip = getINodesInPath4Write(path, false);
if (iip.getLastINode() == null) {
- throw new FileNotFoundException("Path not found: " + path);
+ throw new FileNotFoundException("Path not found: " + iip.getPath());
}
updateCount(iip, nsDelta, dsDelta, true);
} finally {
@@ -1097,17 +1007,15 @@ static String getFullPathName(INode inode) {
/**
* Add the given child to the namespace.
- * @param src The full path name of the child node.
+ * @param iip the INodesInPath instance containing all the ancestral INodes
* @throws QuotaExceededException is thrown if it violates quota limit
*/
- private boolean addINode(String src, INode child)
+ private boolean addINode(INodesInPath iip, INode child)
throws QuotaExceededException, UnresolvedLinkException {
- byte[][] components = INode.getPathComponents(src);
- child.setLocalName(components[components.length-1]);
+ child.setLocalName(iip.getLastLocalName());
cacheName(child);
writeLock();
try {
- final INodesInPath iip = getExistingPathINodes(components);
return addLastINode(iip, child, true);
} finally {
writeUnlock();
@@ -1504,7 +1412,7 @@ boolean setTimes(INode inode, long mtime, long atime, boolean force,
boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force)
throws UnresolvedLinkException, QuotaExceededException {
assert hasWriteLock();
- final INodesInPath i = getLastINodeInPath(src);
+ final INodesInPath i = getINodesInPath(src, true);
return unprotectedSetTimes(i.getLastINode(), mtime, atime, force,
i.getLatestSnapshotId());
}
@@ -1551,24 +1459,24 @@ void reset() {
/**
* Add the specified path into the namespace.
*/
- INodeSymlink addSymlink(long id, String path, String target,
+ INodeSymlink addSymlink(INodesInPath iip, long id, String target,
long mtime, long atime, PermissionStatus perm)
throws UnresolvedLinkException, QuotaExceededException {
writeLock();
try {
- return unprotectedAddSymlink(id, path, target, mtime, atime, perm);
+ return unprotectedAddSymlink(iip, id, target, mtime, atime, perm);
} finally {
writeUnlock();
}
}
- INodeSymlink unprotectedAddSymlink(long id, String path, String target,
+ INodeSymlink unprotectedAddSymlink(INodesInPath iip, long id, String target,
long mtime, long atime, PermissionStatus perm)
throws UnresolvedLinkException, QuotaExceededException {
assert hasWriteLock();
final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
target);
- return addINode(path, symlink) ? symlink : null;
+ return addINode(iip, symlink) ? symlink : null;
}
boolean isInAnEZ(INodesInPath iip)
@@ -1704,11 +1612,10 @@ FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId,
}
}
- static INode resolveLastINode(String src, INodesInPath iip)
- throws FileNotFoundException {
+ static INode resolveLastINode(INodesInPath iip) throws FileNotFoundException {
INode inode = iip.getLastINode();
if (inode == null) {
- throw new FileNotFoundException("cannot find " + src);
+ throw new FileNotFoundException("cannot find " + iip.getPath());
}
return inode;
}
@@ -1885,36 +1792,62 @@ private static String constructRemainingPath(String pathPrefix,
return path.toString();
}
- /** @return the {@link INodesInPath} containing only the last inode. */
- INodesInPath getLastINodeInPath(
- String path, boolean resolveLink) throws UnresolvedLinkException {
- return INodesInPath.resolve(rootDir, INode.getPathComponents(path), 1,
- resolveLink);
+ INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
+ Preconditions.checkArgument(
+ src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+ "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
+
+ final String dirPath = normalizePath(src.substring(0,
+ src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
+
+ final INode node = this.getINode(dirPath);
+ if (node != null && node.isDirectory()
+ && node.asDirectory().isSnapshottable()) {
+ return node;
+ }
+ return null;
+ }
+
+ INodesInPath getExistingPathINodes(byte[][] components)
+ throws UnresolvedLinkException {
+ return INodesInPath.resolve(rootDir, components, false);
+ }
+
+ /**
+ * Get {@link INode} associated with the file / directory.
+ */
+ public INodesInPath getINodesInPath4Write(String src)
+ throws UnresolvedLinkException, SnapshotAccessControlException {
+ return getINodesInPath4Write(src, true);
+ }
+
+ /**
+ * Get {@link INode} associated with the file / directory.
+ * @throws SnapshotAccessControlException if path is in RO snapshot
+ */
+ public INode getINode4Write(String src) throws UnresolvedLinkException,
+ SnapshotAccessControlException {
+ return getINodesInPath4Write(src, true).getLastINode();
}
/** @return the {@link INodesInPath} containing all inodes in the path. */
- INodesInPath getINodesInPath(String path, boolean resolveLink
- ) throws UnresolvedLinkException {
+ public INodesInPath getINodesInPath(String path, boolean resolveLink)
+ throws UnresolvedLinkException {
final byte[][] components = INode.getPathComponents(path);
- return INodesInPath.resolve(rootDir, components, components.length,
- resolveLink);
+ return INodesInPath.resolve(rootDir, components, resolveLink);
}
/** @return the last inode in the path. */
- INode getNode(String path, boolean resolveLink)
- throws UnresolvedLinkException {
- return getLastINodeInPath(path, resolveLink).getINode(0);
+ INode getINode(String path, boolean resolveLink)
+ throws UnresolvedLinkException {
+ return getINodesInPath(path, resolveLink).getLastINode();
}
/**
- * @return the INode of the last component in src, or null if the last
- * component does not exist.
- * @throws UnresolvedLinkException if symlink can't be resolved
- * @throws SnapshotAccessControlException if path is in RO snapshot
+ * Get {@link INode} associated with the file / directory.
*/
- INode getINode4Write(String src, boolean resolveLink)
- throws UnresolvedLinkException, SnapshotAccessControlException {
- return getINodesInPath4Write(src, resolveLink).getLastINode();
+ public INode getINode(String src) throws UnresolvedLinkException {
+ return getINode(src, true);
}
/**
@@ -1926,7 +1859,7 @@ INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
throws UnresolvedLinkException, SnapshotAccessControlException {
final byte[][] components = INode.getPathComponents(src);
INodesInPath inodesInPath = INodesInPath.resolve(rootDir, components,
- components.length, resolveLink);
+ resolveLink);
if (inodesInPath.isSnapshot()) {
throw new SnapshotAccessControlException(
"Modification on a read-only snapshot is disallowed");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 2721f85dee644..833b9dbaada20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -342,7 +342,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// 3. OP_ADD to open file for append
// See if the file already exists (persistBlocks call)
- final INodesInPath iip = fsDir.getINodesInPath(path, true);
+ INodesInPath iip = fsDir.getINodesInPath(path, true);
INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
if (oldFile != null && addCloseOp.overwrite) {
// This is OP_ADD with overwrite
@@ -361,11 +361,12 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
lastInodeId);
newFile = fsDir.unprotectedAddFile(inodeId,
- path, addCloseOp.permissions, addCloseOp.aclEntries,
+ iip, addCloseOp.permissions, addCloseOp.aclEntries,
addCloseOp.xAttrs,
replication, addCloseOp.mtime, addCloseOp.atime,
addCloseOp.blockSize, true, addCloseOp.clientName,
addCloseOp.clientMachine, addCloseOp.storagePolicyId);
+ iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
// add the op into retry cache if necessary
@@ -384,10 +385,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
FSNamesystem.LOG.debug("Reopening an already-closed file " +
"for append");
}
- LocatedBlock lb = fsNamesys.prepareFileForWrite(path,
- oldFile, addCloseOp.clientName, addCloseOp.clientMachine, false, iip.getLatestSnapshotId(), false);
- newFile = INodeFile.valueOf(fsDir.getINode(path),
- path, true);
+ // Note we do not replace the INodeFile when converting it to
+ // under-construction
+ LocatedBlock lb = fsNamesys.prepareFileForWrite(path, iip,
+ addCloseOp.clientName, addCloseOp.clientMachine, false, false);
// add the op into retry cache is necessary
if (toAddRetryCache) {
@@ -408,7 +409,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// Update the salient file attributes.
newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
- updateBlocks(fsDir, addCloseOp, newFile);
+ updateBlocks(fsDir, addCloseOp, iip, newFile);
break;
}
case OP_CLOSE: {
@@ -422,13 +423,13 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
" clientMachine " + addCloseOp.clientMachine);
}
- final INodesInPath iip = fsDir.getLastINodeInPath(path);
- final INodeFile file = INodeFile.valueOf(iip.getINode(0), path);
+ final INodesInPath iip = fsDir.getINodesInPath(path, true);
+ final INodeFile file = INodeFile.valueOf(iip.getLastINode(), path);
// Update the salient file attributes.
file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
- updateBlocks(fsDir, addCloseOp, file);
+ updateBlocks(fsDir, addCloseOp, iip, file);
// Now close the file
if (!file.isUnderConstruction() &&
@@ -455,10 +456,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
FSNamesystem.LOG.debug(op.opCode + ": " + path +
" numblocks : " + updateOp.blocks.length);
}
- INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path),
- path);
+ INodesInPath iip = fsDir.getINodesInPath(path, true);
+ INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
// Update in-memory data structures
- updateBlocks(fsDir, updateOp, oldFile);
+ updateBlocks(fsDir, updateOp, iip, oldFile);
if (toAddRetryCache) {
fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId);
@@ -587,8 +588,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
SymlinkOp symlinkOp = (SymlinkOp)op;
inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion,
lastInodeId);
- fsDir.unprotectedAddSymlink(inodeId,
- renameReservedPathsOnUpgrade(symlinkOp.path, logVersion),
+ final String path = renameReservedPathsOnUpgrade(symlinkOp.path,
+ logVersion);
+ final INodesInPath iip = fsDir.getINodesInPath(path, false);
+ fsDir.unprotectedAddSymlink(iip, inodeId,
symlinkOp.value, symlinkOp.mtime, symlinkOp.atime,
symlinkOp.permissionStatus);
@@ -922,7 +925,7 @@ private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
* @throws IOException
*/
private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
- INodeFile file) throws IOException {
+ INodesInPath iip, INodeFile file) throws IOException {
// Update its block list
BlockInfo[] oldBlocks = file.getBlocks();
Block[] newBlocks = op.getBlocks();
@@ -976,7 +979,7 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
+ path);
}
Block oldBlock = oldBlocks[oldBlocks.length - 1];
- boolean removed = fsDir.unprotectedRemoveBlock(path, file, oldBlock);
+ boolean removed = fsDir.unprotectedRemoveBlock(path, iip, file, oldBlock);
if (!removed && !(op instanceof UpdateBlocksOp)) {
throw new IOException("Trying to delete non-existant block " + oldBlock);
}
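
The hunks above repeatedly replace a string-path lookup with a pre-resolved INodesInPath that is then threaded through updateBlocks and unprotectedRemoveBlock, so the path is resolved once per edit-log op. A minimal, self-contained sketch of that resolve-once pattern follows; ResolvedPath and Namespace are hypothetical stand-ins for illustration, not HDFS classes.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins used only to illustrate the resolve-once pattern.
final class ResolvedPath {
  final String path;      // the full path string
  final Object lastInode; // whatever the last component resolved to

  ResolvedPath(String path, Object lastInode) {
    this.path = path;
    this.lastInode = lastInode;
  }
}

final class Namespace {
  private final Map<String, Object> inodes = new HashMap<>();

  void put(String path, Object inode) { inodes.put(path, inode); }

  // Resolve the string path exactly once...
  ResolvedPath resolve(String path) {
    return new ResolvedPath(path, inodes.get(path));
  }

  // ...and let every later operation accept the resolved object, mirroring
  // updateBlocks(fsDir, op, iip, file) in the hunks above.
  void updateBlocks(ResolvedPath rp) {
    System.out.println("updating blocks for " + rp.path + " -> " + rp.lastInode);
  }

  void removeLastBlock(ResolvedPath rp) {
    System.out.println("removing last block of " + rp.path);
  }
}

public class ResolveOnceDemo {
  public static void main(String[] args) {
    Namespace ns = new Namespace();
    ns.put("/foo/bar", "inode#1001");
    ResolvedPath rp = ns.resolve("/foo/bar"); // single lookup
    ns.updateBlocks(rp);                      // both calls reuse that resolution
    ns.removeLastBlock(rp);
  }
}
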
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index e26f052c6c1c4..0a92054d879b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -596,7 +596,7 @@ private int loadDirectory(DataInput in, Counter counter) throws IOException {
// Rename .snapshot paths if we're doing an upgrade
parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion());
final INodeDirectory parent = INodeDirectory.valueOf(
- namesystem.dir.getNode(parentPath, true), parentPath);
+ namesystem.dir.getINode(parentPath, true), parentPath);
return loadChildren(parent, in, counter);
}
@@ -940,8 +940,8 @@ LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) {
inSnapshot = true;
} else {
path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
- final INodesInPath iip = fsDir.getLastINodeInPath(path);
- oldnode = INodeFile.valueOf(iip.getINode(0), path);
+ final INodesInPath iip = fsDir.getINodesInPath(path, true);
+ oldnode = INodeFile.valueOf(iip.getLastINode(), path);
}
FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5dd5920c06d8f..b4b897a186b0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2028,7 +2028,7 @@ private void createSymlinkInt(String target, final String linkArg,
if (!createParent) {
dir.verifyParentDir(iip, link);
}
- if (!dir.isValidToCreate(link)) {
+ if (!dir.isValidToCreate(link, iip)) {
throw new IOException("failed to create link " + link
+" either because the filename is invalid or the file exists");
}
@@ -2039,7 +2039,7 @@ private void createSymlinkInt(String target, final String linkArg,
checkFsObjectLimit();
// add symbolic link to namespace
- addSymlink(link, target, dirPerms, createParent, logRetryCache);
+ addSymlink(link, iip, target, dirPerms, createParent, logRetryCache);
resultingStat = getAuditFileInfo(link, false);
} finally {
writeUnlock();
@@ -2191,11 +2191,12 @@ long getPreferredBlockSize(String filename) throws IOException {
try {
checkOperation(OperationCategory.READ);
filename = dir.resolvePath(pc, filename, pathComponents);
- final INodesInPath iip = dir.getINodesInPath(filename, true);
+ final INodesInPath iip = dir.getINodesInPath(filename, false);
if (isPermissionEnabled) {
dir.checkTraverse(pc, iip);
}
- return dir.getPreferredBlockSize(filename);
+ return INodeFile.valueOf(iip.getLastINode(), filename)
+ .getPreferredBlockSize();
} finally {
readUnlock();
}
@@ -2491,14 +2492,14 @@ private BlocksMapUpdateInfo startFileInternal(FSPermissionChecker pc,
if (overwrite) {
toRemoveBlocks = new BlocksMapUpdateInfo();
List<INode> toRemoveINodes = new ChunkedArrayList<INode>();
- long ret = dir.delete(src, toRemoveBlocks, toRemoveINodes, now());
+ long ret = dir.delete(iip, toRemoveBlocks, toRemoveINodes, now());
if (ret >= 0) {
incrDeletedFileCount(ret);
removePathAndBlocks(src, null, toRemoveINodes, true);
}
} else {
// If lease soft limit time is expired, recover the lease
- recoverLeaseInternal(myFile, src, holder, clientMachine, false);
+ recoverLeaseInternal(iip, src, holder, clientMachine, false);
throw new FileAlreadyExistsException(src + " for client " +
clientMachine + " already exists");
}
@@ -2508,10 +2509,11 @@ private BlocksMapUpdateInfo startFileInternal(FSPermissionChecker pc,
INodeFile newNode = null;
// Always do an implicit mkdirs for parent directory tree.
- Path parent = new Path(src).getParent();
- if (parent != null && FSDirMkdirOp.mkdirsRecursively(dir,
- parent.toString(), permissions, true, now())) {
- newNode = dir.addFile(src, permissions, replication, blockSize,
+ INodesInPath parentIIP = iip.getParentINodesInPath();
+ if (parentIIP != null && (parentIIP = FSDirMkdirOp.mkdirsRecursively(dir,
+ parentIIP, permissions, true, now())) != null) {
+ iip = INodesInPath.append(parentIIP, newNode, iip.getLastLocalName());
+ newNode = dir.addFile(iip, src, permissions, replication, blockSize,
holder, clientMachine);
}
@@ -2621,12 +2623,8 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc,
"Cannot append to lazy persist file " + src);
}
// Opening an existing file for write - may need to recover lease.
- recoverLeaseInternal(myFile, src, holder, clientMachine, false);
+ recoverLeaseInternal(iip, src, holder, clientMachine, false);
- // recoverLeaseInternal may create a new InodeFile via
- // finalizeINodeFileUnderConstruction so we need to refresh
- // the referenced file.
- myFile = INodeFile.valueOf(dir.getINode(src), src, true);
final BlockInfo lastBlock = myFile.getLastBlock();
// Check that the block has at least minimum replication.
if(lastBlock != null && lastBlock.isComplete() &&
@@ -2634,8 +2632,8 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc,
throw new IOException("append: lastBlock=" + lastBlock +
" of src=" + src + " is not sufficiently replicated yet.");
}
- return prepareFileForWrite(src, myFile, holder, clientMachine, true,
- iip.getLatestSnapshotId(), logRetryCache);
+ return prepareFileForWrite(src, iip, holder, clientMachine, true,
+ logRetryCache);
} catch (IOException ie) {
NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
throw ie;
@@ -2643,11 +2641,10 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc,
}
/**
- * Replace current node with a INodeUnderConstruction.
+ * Convert current node to under construction.
* Recreate in-memory lease record.
*
* @param src path to the file
- * @param file existing file object
* @param leaseHolder identifier of the lease holder on this file
* @param clientMachine identifier of the client machine
* @param writeToEditLog whether to persist this change to the edit log
@@ -2657,26 +2654,25 @@ private LocatedBlock appendFileInternal(FSPermissionChecker pc,
* @throws UnresolvedLinkException
* @throws IOException
*/
- LocatedBlock prepareFileForWrite(String src, INodeFile file,
- String leaseHolder, String clientMachine,
- boolean writeToEditLog,
- int latestSnapshot, boolean logRetryCache)
- throws IOException {
- file.recordModification(latestSnapshot);
- final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine);
+ LocatedBlock prepareFileForWrite(String src, INodesInPath iip,
+ String leaseHolder, String clientMachine, boolean writeToEditLog,
+ boolean logRetryCache) throws IOException {
+ final INodeFile file = iip.getLastINode().asFile();
+ file.recordModification(iip.getLatestSnapshotId());
+ file.toUnderConstruction(leaseHolder, clientMachine);
- leaseManager.addLease(cons.getFileUnderConstructionFeature()
- .getClientName(), src);
+ leaseManager.addLease(
+ file.getFileUnderConstructionFeature().getClientName(), src);
- LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
+ LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(file);
if (ret != null) {
// update the quota: use the preferred block size for UC block
final long diff = file.getPreferredBlockSize() - ret.getBlockSize();
- dir.updateSpaceConsumed(src, 0, diff * file.getBlockReplication());
+ dir.updateSpaceConsumed(iip, 0, diff * file.getBlockReplication());
}
if (writeToEditLog) {
- getEditLog().logOpenFile(src, cons, false, logRetryCache);
+ getEditLog().logOpenFile(src, file, false, logRetryCache);
}
return ret;
}
@@ -2716,7 +2712,7 @@ boolean recoverLease(String src, String holder, String clientMachine)
dir.checkPathAccess(pc, iip, FsAction.WRITE);
}
- recoverLeaseInternal(inode, src, holder, clientMachine, true);
+ recoverLeaseInternal(iip, src, holder, clientMachine, true);
} catch (StandbyException se) {
skipSync = true;
throw se;
@@ -2731,11 +2727,12 @@ boolean recoverLease(String src, String holder, String clientMachine)
return false;
}
- private void recoverLeaseInternal(INodeFile fileInode,
+ private void recoverLeaseInternal(INodesInPath iip,
String src, String holder, String clientMachine, boolean force)
throws IOException {
assert hasWriteLock();
- if (fileInode != null && fileInode.isUnderConstruction()) {
+ INodeFile file = iip.getLastINode().asFile();
+ if (file != null && file.isUnderConstruction()) {
//
// If the file is under construction , then it must be in our
// leases. Find the appropriate lease record.
@@ -2758,7 +2755,7 @@ private void recoverLeaseInternal(INodeFile fileInode,
//
// Find the original holder.
//
- FileUnderConstructionFeature uc = fileInode.getFileUnderConstructionFeature();
+ FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
String clientName = uc.getClientName();
lease = leaseManager.getLease(clientName);
if (lease == null) {
@@ -2772,7 +2769,7 @@ private void recoverLeaseInternal(INodeFile fileInode,
// close only the file src
LOG.info("recoverLease: " + lease + ", src=" + src +
" from client " + clientName);
- internalReleaseLease(lease, src, holder);
+ internalReleaseLease(lease, src, iip, holder);
} else {
assert lease.getHolder().equals(clientName) :
"Current lease holder " + lease.getHolder() +
@@ -2784,13 +2781,13 @@ private void recoverLeaseInternal(INodeFile fileInode,
if (lease.expiredSoftLimit()) {
LOG.info("startFile: recover " + lease + ", src=" + src + " client "
+ clientName);
- boolean isClosed = internalReleaseLease(lease, src, null);
+ boolean isClosed = internalReleaseLease(lease, src, iip, null);
if(!isClosed)
throw new RecoveryInProgressException(
"Failed to close file " + src +
". Lease recovery is in progress. Try again later.");
} else {
- final BlockInfo lastBlock = fileInode.getLastBlock();
+ final BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null
&& lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
throw new RecoveryInProgressException("Recovery in progress, file ["
@@ -2822,10 +2819,7 @@ LastBlockWithStatus appendFile(
}
private LastBlockWithStatus appendFileInt(final String srcArg, String holder,
- String clientMachine, boolean logRetryCache)
- throws AccessControlException, SafeModeException,
- FileAlreadyExistsException, FileNotFoundException,
- ParentNotDirectoryException, IOException {
+ String clientMachine, boolean logRetryCache) throws IOException {
String src = srcArg;
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
@@ -2892,10 +2886,7 @@ void setBlockPoolId(String bpid) {
*/
LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
ExtendedBlock previous, Set<Node> excludedNodes,
- List<String> favoredNodes)
- throws LeaseExpiredException, NotReplicatedYetException,
- QuotaExceededException, SafeModeException, UnresolvedLinkException,
- IOException {
+ List<String> favoredNodes) throws IOException {
final long blockSize;
final int replication;
final byte storagePolicyID;
@@ -2983,7 +2974,7 @@ LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
}
// commit the last block and complete it if it has minimum replicas
- commitOrCompleteLastBlock(pendingFile,
+ commitOrCompleteLastBlock(pendingFile, fileState.iip,
ExtendedBlock.getLocalBlock(previous));
// allocate new block, record block locations in INode.
@@ -3023,10 +3014,12 @@ private Node getClientNode(String clientMachine) {
static class FileState {
public final INodeFile inode;
public final String path;
+ public final INodesInPath iip;
- public FileState(INodeFile inode, String fullPath) {
+ public FileState(INodeFile inode, String fullPath, INodesInPath iip) {
this.inode = inode;
this.path = fullPath;
+ this.iip = iip;
}
}
@@ -3046,18 +3039,22 @@ FileState analyzeFileState(String src,
checkFsObjectLimit();
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
- INode inode;
+ final INode inode;
+ final INodesInPath iip;
if (fileId == INodeId.GRANDFATHER_INODE_ID) {
// Older clients may not have given us an inode ID to work with.
// In this case, we have to try to resolve the path and hope it
// hasn't changed or been deleted since the file was opened for write.
- final INodesInPath iip = dir.getINodesInPath4Write(src);
+ iip = dir.getINodesInPath4Write(src);
inode = iip.getLastINode();
} else {
// Newer clients pass the inode ID, so we can just get the inode
// directly.
inode = dir.getInode(fileId);
- if (inode != null) src = inode.getFullPathName();
+ iip = INodesInPath.fromINode(inode);
+ if (inode != null) {
+ src = iip.getPath();
+ }
}
final INodeFile pendingFile = checkLease(src, clientName, inode, fileId);
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
@@ -3117,7 +3114,7 @@ FileState analyzeFileState(String src,
onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
((BlockInfoUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
offset);
- return new FileState(pendingFile, src);
+ return new FileState(pendingFile, src, iip);
} else {
// Case 3
throw new IOException("Cannot allocate block in " + src + ": " +
@@ -3130,7 +3127,7 @@ FileState analyzeFileState(String src,
if (!checkFileProgress(src, pendingFile, false)) {
throw new NotReplicatedYetException("Not replicated yet: " + src);
}
- return new FileState(pendingFile, src);
+ return new FileState(pendingFile, src, iip);
}
LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
@@ -3208,8 +3205,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
* The client would like to let go of the given block
*/
boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
- throws LeaseExpiredException, FileNotFoundException,
- UnresolvedLinkException, IOException {
+ throws IOException {
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
+ "of file " + src);
@@ -3225,21 +3221,24 @@ boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
src = dir.resolvePath(pc, src, pathComponents);
final INode inode;
+ final INodesInPath iip;
if (fileId == INodeId.GRANDFATHER_INODE_ID) {
// Older clients may not have given us an inode ID to work with.
// In this case, we have to try to resolve the path and hope it
// hasn't changed or been deleted since the file was opened for write.
- inode = dir.getINode(src);
+ iip = dir.getINodesInPath(src, true);
+ inode = iip.getLastINode();
} else {
inode = dir.getInode(fileId);
- if (inode != null) src = inode.getFullPathName();
+ iip = INodesInPath.fromINode(inode);
+ if (inode != null) {
+ src = iip.getPath();
+ }
}
final INodeFile file = checkLease(src, holder, inode, fileId);
- //
// Remove the block from the pending creates list
- //
- boolean removed = dir.removeBlock(src, file,
+ boolean removed = dir.removeBlock(src, iip, file,
ExtendedBlock.getLocalBlock(b));
if (!removed) {
return true;
@@ -3258,8 +3257,7 @@ boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
}
private INodeFile checkLease(String src, String holder, INode inode,
- long fileId)
- throws LeaseExpiredException, FileNotFoundException {
+ long fileId) throws LeaseExpiredException, FileNotFoundException {
assert hasReadLock();
final String ident = src + " (inode " + fileId + ")";
if (inode == null) {
@@ -3336,29 +3334,30 @@ boolean completeFile(final String srcArg, String holder,
return success;
}
- private boolean completeFileInternal(String src,
- String holder, Block last, long fileId) throws SafeModeException,
- UnresolvedLinkException, IOException {
+ private boolean completeFileInternal(String src, String holder, Block last,
+ long fileId) throws IOException {
assert hasWriteLock();
final INodeFile pendingFile;
+ final INodesInPath iip;
+ INode inode = null;
try {
- final INode inode;
if (fileId == INodeId.GRANDFATHER_INODE_ID) {
// Older clients may not have given us an inode ID to work with.
// In this case, we have to try to resolve the path and hope it
// hasn't changed or been deleted since the file was opened for write.
- final INodesInPath iip = dir.getLastINodeInPath(src);
- inode = iip.getINode(0);
+ iip = dir.getINodesInPath(src, true);
+ inode = iip.getLastINode();
} else {
inode = dir.getInode(fileId);
- if (inode != null) src = inode.getFullPathName();
+ iip = INodesInPath.fromINode(inode);
+ if (inode != null) {
+ src = iip.getPath();
+ }
}
pendingFile = checkLease(src, holder, inode, fileId);
} catch (LeaseExpiredException lee) {
- final INode inode = dir.getINode(src);
- if (inode != null
- && inode.isFile()
- && !inode.asFile().isUnderConstruction()) {
+ if (inode != null && inode.isFile() &&
+ !inode.asFile().isUnderConstruction()) {
// This could be a retry RPC - i.e the client tried to close
// the file, but missed the RPC response. Thus, it is trying
// again to close the file. If the file still exists and
@@ -3383,7 +3382,7 @@ private boolean completeFileInternal(String src,
}
// commit the last block and complete it if it has minimum replicas
- commitOrCompleteLastBlock(pendingFile, last);
+ commitOrCompleteLastBlock(pendingFile, iip, last);
if (!checkFileProgress(src, pendingFile, true)) {
return false;
@@ -3618,7 +3617,7 @@ private boolean deleteInternal(String src, boolean recursive,
long mtime = now();
// Unlink the target directory from directory tree
- long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
+ long filesRemoved = dir.delete(iip, collectedBlocks, removedINodes,
mtime);
if (filesRemoved < 0) {
return false;
@@ -3885,7 +3884,7 @@ void setQuota(String path, long nsQuota, long dsQuota)
* @throws IOException if path does not exist
*/
void fsync(String src, long fileId, String clientName, long lastBlockLength)
- throws IOException, UnresolvedLinkException {
+ throws IOException {
NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -3933,15 +3932,13 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength)
* false if block recovery has been initiated. Since the lease owner
* has been changed and logged, caller should call logSync().
*/
- boolean internalReleaseLease(Lease lease, String src,
- String recoveryLeaseHolder) throws AlreadyBeingCreatedException,
- IOException, UnresolvedLinkException {
+ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip,
+ String recoveryLeaseHolder) throws IOException {
LOG.info("Recovering " + lease + ", src=" + src);
assert !isInSafeMode();
assert hasWriteLock();
- final INodesInPath iip = dir.getLastINodeInPath(src);
- final INodeFile pendingFile = iip.getINode(0).asFile();
+ final INodeFile pendingFile = iip.getLastINode().asFile();
int nrBlocks = pendingFile.numBlocks();
BlockInfo[] blocks = pendingFile.getBlocks();
@@ -4070,7 +4067,7 @@ Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
}
private void commitOrCompleteLastBlock(final INodeFile fileINode,
- final Block commitBlock) throws IOException {
+ final INodesInPath iip, final Block commitBlock) throws IOException {
assert hasWriteLock();
Preconditions.checkArgument(fileINode.isUnderConstruction());
if (!blockManager.commitOrCompleteLastBlock(fileINode, commitBlock)) {
@@ -4081,8 +4078,7 @@ private void commitOrCompleteLastBlock(final INodeFile fileINode,
final long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();
if (diff > 0) {
try {
- String path = fileINode.getFullPathName();
- dir.updateSpaceConsumed(path, 0, -diff*fileINode.getFileReplication());
+ dir.updateSpaceConsumed(iip, 0, -diff*fileINode.getFileReplication());
} catch (IOException e) {
LOG.warn("Unexpected exception while updating disk space.", e);
}
@@ -4090,8 +4086,7 @@ private void commitOrCompleteLastBlock(final INodeFile fileINode,
}
private void finalizeINodeFileUnderConstruction(String src,
- INodeFile pendingFile, int latestSnapshot) throws IOException,
- UnresolvedLinkException {
+ INodeFile pendingFile, int latestSnapshot) throws IOException {
assert hasWriteLock();
FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
@@ -4103,13 +4098,13 @@ private void finalizeINodeFileUnderConstruction(String src,
// The file is no longer pending.
// Create permanent INode, update blocks. No need to replace the inode here
// since we just remove the uc feature from pendingFile
- final INodeFile newFile = pendingFile.toCompleteFile(now());
+ pendingFile.toCompleteFile(now());
waitForLoadingFSImage();
// close file and persist block allocations for this file
- closeFile(src, newFile);
+ closeFile(src, pendingFile);
- blockManager.checkReplication(newFile);
+ blockManager.checkReplication(pendingFile);
}
@VisibleForTesting
@@ -4126,11 +4121,10 @@ public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) {
return false;
}
- INodeFile inodeUC = (INodeFile) bc;
- String fullName = inodeUC.getName();
+ String fullName = bc.getName();
try {
if (fullName != null && fullName.startsWith(Path.SEPARATOR)
- && dir.getINode(fullName) == inodeUC) {
+ && dir.getINode(fullName) == bc) {
// If file exists in normal path then no need to look in snapshot
return false;
}
@@ -4139,7 +4133,7 @@ public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) {
return false;
}
/*
- * 1. if bc is an instance of INodeFileUnderConstructionWithSnapshot, and
+ * 1. if bc is under construction and also with snapshot, and
* bc is not in the current fsdirectory tree, bc must represent a snapshot
* file.
* 2. if fullName is not an absolute path, bc cannot be existent in the
@@ -4153,8 +4147,7 @@ public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) {
void commitBlockSynchronization(ExtendedBlock lastblock,
long newgenerationstamp, long newlength,
boolean closeFile, boolean deleteblock, DatanodeID[] newtargets,
- String[] newtargetstorages)
- throws IOException, UnresolvedLinkException {
+ String[] newtargetstorages) throws IOException {
LOG.info("commitBlockSynchronization(lastblock=" + lastblock
+ ", newgenerationstamp=" + newgenerationstamp
+ ", newlength=" + newlength
@@ -4312,10 +4305,11 @@ void commitBlockSynchronization(ExtendedBlock lastblock,
@VisibleForTesting
String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
throws IOException {
- String src = pendingFile.getFullPathName();
+ final INodesInPath iip = INodesInPath.fromINode(pendingFile);
+ final String src = iip.getPath();
// commit the last block and complete it if it has minimum replicas
- commitOrCompleteLastBlock(pendingFile, storedBlock);
+ commitOrCompleteLastBlock(pendingFile, iip, storedBlock);
//remove lease, close file
finalizeINodeFileUnderConstruction(src, pendingFile,
@@ -4515,7 +4509,7 @@ private void closeFile(String path, INodeFile file) {
/**
* Add the given symbolic link to the fs. Record it in the edits log.
*/
- private INodeSymlink addSymlink(String path, String target,
+ private INodeSymlink addSymlink(String path, INodesInPath iip, String target,
PermissionStatus dirPerms,
boolean createParent, boolean logRetryCache)
throws UnresolvedLinkException, FileAlreadyExistsException,
@@ -4524,15 +4518,17 @@ private INodeSymlink addSymlink(String path, String target,
final long modTime = now();
if (createParent) {
- final String parent = new Path(path).getParent().toString();
- if (!FSDirMkdirOp.mkdirsRecursively(dir, parent, dirPerms, true,
- modTime)) {
+ INodesInPath parentIIP = iip.getParentINodesInPath();
+ if (parentIIP == null || (parentIIP = FSDirMkdirOp.mkdirsRecursively(dir,
+ parentIIP, dirPerms, true, modTime)) == null) {
return null;
+ } else {
+ iip = INodesInPath.append(parentIIP, null, iip.getLastLocalName());
}
}
final String userName = dirPerms.getUserName();
long id = dir.allocateNewInodeId();
- INodeSymlink newNode = dir.addSymlink(id, path, target, modTime, modTime,
+ INodeSymlink newNode = dir.addSymlink(iip, id, target, modTime, modTime,
new PermissionStatus(userName, null, FsPermission.getDefault()));
if (newNode == null) {
NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 1501fce75f7e6..cfc7a24b14d29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -89,18 +89,11 @@ private static String constructPath(byte[][] components, int start, int end) {
return buf.toString();
}
- static INodesInPath resolve(final INodeDirectory startingDir,
- final byte[][] components) throws UnresolvedLinkException {
- return resolve(startingDir, components, components.length, false);
- }
-
/**
- * Retrieve existing INodes from a path. If existing is big enough to store
- * all path components (existing and non-existing), then existing INodes
- * will be stored starting from the root INode into existing[0]; if
- * existing is not big enough to store all path components, then only the
- * last existing and non existing INodes will be stored so that
- * existing[existing.length-1] refers to the INode of the final component.
+ * Retrieve existing INodes from a path. For non-snapshot path,
+ * the number of INodes is equal to the number of path components. For
+ * snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
+ * (number_of_path_components - 1).
*
* An UnresolvedPathException is always thrown when an intermediate path
* component refers to a symbolic link. If the final path component refers
@@ -110,56 +103,38 @@ static INodesInPath resolve(final INodeDirectory startingDir,
*
* Example:
* Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
- * following path components: ["","c1","c2","c3"],
- *
- *
- * getExistingPathINodes(["","c1","c2"], [?]) should fill the
- * array with [c2]
- * getExistingPathINodes(["","c1","c2","c3"], [?]) should fill the
- * array with [null]
- *
- *
- * getExistingPathINodes(["","c1","c2"], [?,?]) should fill the
- * array with [c1,c2]
- * getExistingPathINodes(["","c1","c2","c3"], [?,?]) should fill
- * the array with [c2,null]
+ * following path components: ["","c1","c2","c3"]
*
*
- * getExistingPathINodes(["","c1","c2"], [?,?,?,?]) should fill
- * the array with [rootINode,c1,c2,null],
- * getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?]) should
+ * getExistingPathINodes(["","c1","c2"]) should fill
+ * the array with [rootINode,c1,c2],
+ * getExistingPathINodes(["","c1","c2","c3"]) should
* fill the array with [rootINode,c1,c2,null]
*
* @param startingDir the starting directory
* @param components array of path component name
- * @param numOfINodes number of INodes to return
* @param resolveLink indicates whether UnresolvedLinkException should
* be thrown when the path refers to a symbolic link.
* @return the specified number of existing INodes in the path
*/
static INodesInPath resolve(final INodeDirectory startingDir,
- final byte[][] components, final int numOfINodes,
- final boolean resolveLink) throws UnresolvedLinkException {
+ final byte[][] components, final boolean resolveLink)
+ throws UnresolvedLinkException {
Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
INode curNode = startingDir;
int count = 0;
- int index = numOfINodes <= components.length ?
- numOfINodes - components.length : 0;
int inodeNum = 0;
- int capacity = numOfINodes;
- INode[] inodes = new INode[numOfINodes];
+ INode[] inodes = new INode[components.length];
boolean isSnapshot = false;
int snapshotId = CURRENT_STATE_ID;
while (count < components.length && curNode != null) {
- final boolean lastComp = (count == components.length - 1);
- if (index >= 0) {
- inodes[inodeNum++] = curNode;
- }
+ final boolean lastComp = (count == components.length - 1);
+ inodes[inodeNum++] = curNode;
final boolean isRef = curNode.isReference();
final boolean isDir = curNode.isDirectory();
- final INodeDirectory dir = isDir? curNode.asDirectory(): null;
+ final INodeDirectory dir = isDir? curNode.asDirectory(): null;
if (!isRef && isDir && dir.isWithSnapshot()) {
//if the path is a non-snapshot path, update the latest snapshot.
if (!isSnapshot && shouldUpdateLatestId(
@@ -217,11 +192,7 @@ static INodesInPath resolve(final INodeDirectory startingDir,
if (isDotSnapshotDir(childName) && dir.isSnapshottable()) {
// skip the ".snapshot" in components
count++;
- index++;
isSnapshot = true;
- if (index >= 0) { // decrease the capacity by 1 to account for .snapshot
- capacity--;
- }
// check if ".snapshot" is the last element of components
if (count == components.length - 1) {
break;
@@ -240,14 +211,12 @@ static INodesInPath resolve(final INodeDirectory startingDir,
isSnapshot ? snapshotId : CURRENT_STATE_ID);
}
count++;
- index++;
}
- if (isSnapshot && capacity < numOfINodes &&
- !isDotSnapshotDir(components[components.length - 1])) {
+ if (isSnapshot && !isDotSnapshotDir(components[components.length - 1])) {
// for snapshot path shrink the inode array. however, for path ending with
// .snapshot, still keep last the null inode in the array
- INode[] newNodes = new INode[capacity];
- System.arraycopy(inodes, 0, newNodes, 0, capacity);
+ INode[] newNodes = new INode[components.length - 1];
+ System.arraycopy(inodes, 0, newNodes, 0, newNodes.length);
inodes = newNodes;
}
return new INodesInPath(inodes, components, isSnapshot, snapshotId);
@@ -277,6 +246,24 @@ public static INodesInPath replace(INodesInPath iip, int pos, INode inode) {
return new INodesInPath(inodes, iip.path, iip.isSnapshot, iip.snapshotId);
}
+ /**
+ * Extend a given INodesInPath with a child INode. The child INode will be
+ * appended to the end of the new INodesInPath.
+ */
+ public static INodesInPath append(INodesInPath iip, INode child,
+ byte[] childName) {
+ Preconditions.checkArgument(!iip.isSnapshot && iip.length() > 0);
+ Preconditions.checkArgument(iip.getLastINode() != null && iip
+ .getLastINode().isDirectory());
+ INode[] inodes = new INode[iip.length() + 1];
+ System.arraycopy(iip.inodes, 0, inodes, 0, inodes.length - 1);
+ inodes[inodes.length - 1] = child;
+ byte[][] path = new byte[iip.path.length + 1][];
+ System.arraycopy(iip.path, 0, path, 0, path.length - 1);
+ path[path.length - 1] = childName;
+ return new INodesInPath(inodes, path, false, iip.snapshotId);
+ }
+
private final byte[][] path;
/**
* Array with the specified number of INodes resolved for a given path.
@@ -348,6 +335,10 @@ byte[] getLastLocalName() {
return path[path.length - 1];
}
+ public byte[][] getPathComponents() {
+ return path;
+ }
+
/** @return the full path in string form */
public String getPath() {
return DFSUtil.byteArray2PathString(path);
@@ -369,6 +360,56 @@ public List getReadOnlyINodes() {
return Collections.unmodifiableList(Arrays.asList(inodes));
}
+ /**
+ * @param length number of ancestral INodes in the returned INodesInPath
+ * instance
+ * @return the INodesInPath instance containing ancestral INodes
+ */
+ private INodesInPath getAncestorINodesInPath(int length) {
+ Preconditions.checkArgument(length >= 0 && length < inodes.length);
+ final INode[] anodes = new INode[length];
+ final byte[][] apath;
+ final boolean isSnapshot;
+ final int snapshotId;
+ int dotSnapshotIndex = getDotSnapshotIndex();
+ if (this.isSnapshot && length >= dotSnapshotIndex + 1) {
+ apath = new byte[length + 1][];
+ isSnapshot = true;
+ snapshotId = this.snapshotId;
+ } else {
+ apath = new byte[length][];
+ isSnapshot = false;
+ snapshotId = this.isSnapshot ? CURRENT_STATE_ID : this.snapshotId;
+ }
+ System.arraycopy(this.inodes, 0, anodes, 0, length);
+ System.arraycopy(this.path, 0, apath, 0, apath.length);
+ return new INodesInPath(anodes, apath, isSnapshot, snapshotId);
+ }
+
+ /**
+ * @return an INodesInPath instance containing all the INodes in the parent
+ * path. We do a deep copy here.
+ */
+ public INodesInPath getParentINodesInPath() {
+ return inodes.length > 1 ? getAncestorINodesInPath(inodes.length - 1) :
+ null;
+ }
+
+ private int getDotSnapshotIndex() {
+ if (isSnapshot) {
+ for (int i = 0; i < path.length; i++) {
+ if (isDotSnapshotDir(path[i])) {
+ return i;
+ }
+ }
+ throw new IllegalStateException("The path " + getPath()
+ + " is a snapshot path but does not contain "
+ + HdfsConstants.DOT_SNAPSHOT_DIR);
+ } else {
+ return -1;
+ }
+ }
+
/**
* @return isSnapshot true for a snapshot path
*/
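
The new append and getParentINodesInPath helpers above are plain array copies that grow or shrink the resolved path by one slot. The sketch below demonstrates the same technique on ordinary string arrays, independent of the INodesInPath internals:

import java.util.Arrays;

public class ArrayPathDemo {
  // Grow by one, as INodesInPath.append does for the child inode and name.
  static String[] append(String[] path, String child) {
    String[] longer = Arrays.copyOf(path, path.length + 1);
    longer[longer.length - 1] = child;
    return longer;
  }

  // Shrink by one, as getParentINodesInPath does for the parent path.
  static String[] parent(String[] path) {
    return path.length > 1 ? Arrays.copyOf(path, path.length - 1) : null;
  }

  public static void main(String[] args) {
    String[] path = {"", "c1", "c2"};
    System.out.println(Arrays.toString(append(path, "c3"))); // [, c1, c2, c3]
    System.out.println(Arrays.toString(parent(path)));       // [, c1]
  }
}
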
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index e13a5c67e9e11..f07621579ad10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -116,7 +116,7 @@ synchronized long getNumUnderConstructionBlocks() {
final INodeFile cons;
try {
cons = this.fsnamesystem.getFSDirectory().getINode(path).asFile();
- Preconditions.checkState(cons.isUnderConstruction());
+ Preconditions.checkState(cons.isUnderConstruction());
} catch (UnresolvedLinkException e) {
throw new AssertionError("Lease files should reside on this FS");
}
@@ -481,8 +481,10 @@ synchronized boolean checkLeases() {
leaseToCheck.getPaths().toArray(leasePaths);
for(String p : leasePaths) {
try {
+ INodesInPath iip = fsnamesystem.getFSDirectory().getINodesInPath(p,
+ true);
boolean completed = fsnamesystem.internalReleaseLease(leaseToCheck, p,
- HdfsServerConstants.NAMENODE_LEASE_HOLDER);
+ iip, HdfsServerConstants.NAMENODE_LEASE_HOLDER);
if (LOG.isDebugEnabled()) {
if (completed) {
LOG.debug("Lease recovery for " + p + " is complete. File closed.");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index eda0a28580c7a..b0275e8c7fc96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -827,8 +827,8 @@ public void testSetPermissionCannotSetAclBit() throws IOException {
fs.setPermission(path,
new FsPermissionExtension(FsPermission.
createImmutable((short)0755), true, true));
- INode inode = cluster.getNamesystem().getFSDirectory().getNode(
- path.toUri().getPath(), false);
+ INode inode = cluster.getNamesystem().getFSDirectory().getINode(
+ path.toUri().getPath(), false);
assertNotNull(inode);
FsPermission perm = inode.getFsPermission();
assertNotNull(perm);
@@ -1433,7 +1433,7 @@ private static void assertAclFeature(boolean expectAclFeature)
private static void assertAclFeature(Path pathToCheck,
boolean expectAclFeature) throws IOException {
INode inode = cluster.getNamesystem().getFSDirectory()
- .getNode(pathToCheck.toUri().getPath(), false);
+ .getINode(pathToCheck.toUri().getPath(), false);
assertNotNull(inode);
AclFeature aclFeature = inode.getAclFeature();
if (expectAclFeature) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index aecf55ef02e23..5450cf7c5d58f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -711,8 +711,8 @@ public void testFsckError() throws Exception {
DFSTestUtil.waitReplication(fs, filePath, (short)1);
// intentionally corrupt NN data structure
- INodeFile node = (INodeFile)cluster.getNamesystem().dir.getNode(
- fileName, true);
+ INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
+ (fileName, true);
final BlockInfo[] blocks = node.getBlocks();
assertEquals(blocks.length, 1);
blocks[0].setNumBytes(-1L); // set the block length to be negative
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 9b454ea066229..2f114a78c6ac1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -62,9 +62,11 @@ public void testRemoveLeaseWithPrefixPath() throws Exception {
*/
@Test (timeout=1000)
public void testCheckLeaseNotInfiniteLoop() {
+ FSDirectory dir = Mockito.mock(FSDirectory.class);
FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
Mockito.when(fsn.isRunning()).thenReturn(true);
Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+ Mockito.when(fsn.getFSDirectory()).thenReturn(dir);
LeaseManager lm = new LeaseManager(fsn);
//Make sure the leases we are going to add exceed the hard limit
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index 354bff1ab855c..e416e00e1dace 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@@ -141,7 +140,8 @@ public void testNonSnapshotPathINodes() throws Exception {
// Get the inodes by resolving the path of a normal file
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
- INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
// The number of inodes should be equal to components.length
assertEquals(nodesInPath.length(), components.length);
// The returned nodesInPath should be non-snapshot
@@ -157,20 +157,10 @@ public void testNonSnapshotPathINodes() throws Exception {
assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
dir.toString());
- // Call getExistingPathINodes and request only one INode. This is used
- // when identifying the INode for a given path.
- nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
- assertEquals(nodesInPath.length(), 1);
- assertSnapshot(nodesInPath, false, null, -1);
- assertEquals(nodesInPath.getINode(0).getFullPathName(), file1.toString());
-
- // Call getExistingPathINodes and request 2 INodes. This is usually used
- // when identifying the parent INode of a given path.
- nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
- assertEquals(nodesInPath.length(), 2);
+ nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
+ assertEquals(nodesInPath.length(), components.length);
assertSnapshot(nodesInPath, false, null, -1);
- assertEquals(nodesInPath.getINode(1).getFullPathName(), file1.toString());
- assertEquals(nodesInPath.getINode(0).getFullPathName(), sub1.toString());
+ assertEquals(nodesInPath.getLastINode().getFullPathName(), file1.toString());
}
/**
@@ -187,7 +177,8 @@ public void testSnapshotPathINodes() throws Exception {
String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
- INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
assertEquals(nodesInPath.length(), components.length - 1);
@@ -200,27 +191,17 @@ public void testSnapshotPathINodes() throws Exception {
assertTrue(snapshotFileNode.getParent().isWithSnapshot());
// Call getExistingPathINodes and request only one INode.
- nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
- assertEquals(nodesInPath.length(), 1);
- // The snapshotroot (s1) is not included in inodes. Thus the
- // snapshotRootIndex should be -1.
- assertSnapshot(nodesInPath, true, snapshot, -1);
+ nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
+ assertEquals(nodesInPath.length(), components.length - 1);
+ assertSnapshot(nodesInPath, true, snapshot, 3);
// Check the INode for file1 (snapshot file)
assertINodeFile(nodesInPath.getLastINode(), file1);
-
- // Call getExistingPathINodes and request 2 INodes.
- nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
- assertEquals(nodesInPath.length(), 2);
- // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
- // SnapshotRootIndex should be 0.
- assertSnapshot(nodesInPath, true, snapshot, 0);
- assertINodeFile(nodesInPath.getLastINode(), file1);
-
+
// Resolve the path "/TestSnapshot/sub1/.snapshot"
String dotSnapshotPath = sub1.toString() + "/.snapshot";
names = INode.getPathNames(dotSnapshotPath);
components = INode.getPathComponents(names);
- nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
// The number of INodes returned should still be components.length
// since we put a null in the inode array for ".snapshot"
assertEquals(nodesInPath.length(), components.length);
@@ -267,7 +248,8 @@ public void testSnapshotPathINodesAfterDeletion() throws Exception {
String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
- INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
assertEquals(nodesInPath.length(), components.length - 1);
@@ -284,7 +266,8 @@ public void testSnapshotPathINodesAfterDeletion() throws Exception {
// Check the INodes for path /TestSnapshot/sub1/file1
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
- INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
// The length of inodes should be equal to components.length
assertEquals(nodesInPath.length(), components.length);
// The number of non-null elements should be components.length - 1 since
@@ -333,7 +316,8 @@ public void testSnapshotPathINodesWithAddedFile() throws Exception {
String snapshotPath = sub1.toString() + "/.snapshot/s4/file3";
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
- INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
assertEquals(nodesInPath.length(), components.length - 1);
@@ -352,7 +336,8 @@ public void testSnapshotPathINodesWithAddedFile() throws Exception {
// Check the inodes for /TestSnapshot/sub1/file3
String[] names = INode.getPathNames(file3.toString());
byte[][] components = INode.getPathComponents(names);
- INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
// The number of inodes should be equal to components.length
assertEquals(nodesInPath.length(), components.length);
@@ -378,7 +363,8 @@ public void testSnapshotPathINodesAfterModification() throws Exception {
// First check the INode for /TestSnapshot/sub1/file1
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
- INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
// The number of inodes should be equal to components.length
assertEquals(nodesInPath.length(), components.length);
@@ -401,7 +387,8 @@ public void testSnapshotPathINodesAfterModification() throws Exception {
String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
names = INode.getPathNames(snapshotPath);
components = INode.getPathComponents(names);
- INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
// Length of ssInodes should be (components.length - 1), since we will
// ignore ".snapshot"
assertEquals(ssNodesInPath.length(), components.length - 1);
@@ -419,7 +406,8 @@ public void testSnapshotPathINodesAfterModification() throws Exception {
// Check the INode for /TestSnapshot/sub1/file1 again
names = INode.getPathNames(file1.toString());
components = INode.getPathComponents(names);
- INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+ INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir,
+ components, false);
assertSnapshot(newNodesInPath, false, s3, -1);
// The number of inodes should be equal to components.length
assertEquals(newNodesInPath.length(), components.length);
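
The assertions above reduce to two length invariants of the rewritten resolve: a normal path yields one inode slot per component, a snapshot path such as /sub1/.snapshot/s1/file1 yields components.length - 1 because ".snapshot" gets no slot, and a path that ends in ".snapshot" keeps the full length with a trailing null. A tiny stand-alone check of that arithmetic follows; expectedInodeCount is a hypothetical helper, not the HDFS resolver.

public class SnapshotPathLengthDemo {
  // ".snapshot" in the middle of the path consumes a component without
  // producing an inode slot; a path that ends with ".snapshot" does not shrink.
  static int expectedInodeCount(String[] components) {
    int last = components.length - 1;
    for (int i = 0; i < last; i++) {
      if (".snapshot".equals(components[i])) {
        return components.length - 1;
      }
    }
    return components.length;
  }

  public static void main(String[] args) {
    String[] normal   = {"", "TestSnapshot", "sub1", "file1"};
    String[] snapshot = {"", "TestSnapshot", "sub1", ".snapshot", "s1", "file1"};
    String[] dotSnap  = {"", "TestSnapshot", "sub1", ".snapshot"};
    System.out.println(expectedInodeCount(normal));   // 4
    System.out.println(expectedInodeCount(snapshot)); // 5
    System.out.println(expectedInodeCount(dotSnap));  // 4
  }
}
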
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index 62041e8e3bcda..ba318dee63963 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -32,7 +32,6 @@
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.security.AccessControlException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -169,7 +168,7 @@ public void testOpenFilesWithMultipleSnapshotsWithoutCheckpoint()
}
private void doTestMultipleSnapshots(boolean saveNamespace)
- throws IOException, AccessControlException {
+ throws IOException {
Path path = new Path("/test");
doWriteAndAbort(fs, path);
fs.createSnapshot(path, "s2");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
index e1ca26393b92c..5264cb71b0bc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
@@ -40,7 +40,7 @@
/**
* This class tests the replication handling/calculation of snapshots. In
* particular, {@link INodeFile#getFileReplication()} and
- * {@link INodeFileWithSnapshot#getBlockReplication()} are tested to make sure
+ * {@link INodeFile#getBlockReplication()} are tested to make sure
* the number of replication is calculated correctly with/without snapshots.
*/
public class TestSnapshotReplication {
@@ -82,7 +82,7 @@ public void tearDown() throws Exception {
* Check the replication of a given file. We test both
* {@link INodeFile#getFileReplication()} and
* {@link INodeFile#getBlockReplication()}.
- *
+ *
* @param file The given file
* @param replication The expected replication number
* @param blockReplication The expected replication number for the block
@@ -132,8 +132,7 @@ INodeFile getINodeFile(Path p) throws Exception {
* as their expected replication number stored in their corresponding
* INodes
* @param expectedBlockRep
- * The expected replication number that should be returned by
- * {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
+ * The expected replication number
* @throws Exception
*/
private void checkSnapshotFileReplication(Path currentFile,
@@ -143,8 +142,8 @@ private void checkSnapshotFileReplication(Path currentFile,
assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
// Then check replication for every snapshot
for (Path ss : snapshotRepMap.keySet()) {
- final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
- final INodeFile ssInode = (INodeFile)iip.getLastINode();
+ final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
+ final INodeFile ssInode = iip.getLastINode().asFile();
// The replication number derived from the
// INodeFileWithLink#getBlockReplication should always == expectedBlockRep
assertEquals(expectedBlockRep, ssInode.getBlockReplication());
From e0b02b1d666ab1143def73d0eed8319da5181c05 Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Fri, 12 Dec 2014 16:30:52 -0800
Subject: [PATCH 055/432] HADOOP-11238. Update the NameNode's Group Cache in
the background when possible (Chris Li via Colin P. McCabe)
---
.../hadoop-common/CHANGES.txt | 3 +
.../org/apache/hadoop/security/Groups.java | 193 +++++++-------
.../hadoop/security/TestGroupsCaching.java | 236 ++++++++++++++++++
3 files changed, 342 insertions(+), 90 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 45f226fc4c944..1e59395343d01 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -437,6 +437,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11323. WritableComparator#compare keeps reference to byte array.
(Wilfred Spiegelenburg via wang)
+ HADOOP-11238. Update the NameNode's Group Cache in the background when
+ possible (Chris Li via Colin P. McCabe)
+
BUG FIXES
HADOOP-11236. NFS: Fix javadoc warning in RpcProgram.java (Abhiraj Butala via harsh)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index c5004197e5638..f3c50943039c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -24,7 +24,13 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import com.google.common.base.Ticker;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -52,10 +58,11 @@ public class Groups {
private static final Log LOG = LogFactory.getLog(Groups.class);
private final GroupMappingServiceProvider impl;
-
- private final Map<String, CachedGroups> userToGroupsMap =
- new ConcurrentHashMap<String, CachedGroups>();
- private final Map<String, List<String>> staticUserToGroupsMap =
+
+ private final LoadingCache<String, List<String>> cache;
+ private final ConcurrentHashMap<String, Long> negativeCacheMask =
+ new ConcurrentHashMap<String, Long>();
+ private final Map<String, List<String>> staticUserToGroupsMap =
new HashMap<String, List<String>>();
private final long cacheTimeout;
private final long negativeCacheTimeout;
@@ -66,7 +73,7 @@ public Groups(Configuration conf) {
this(conf, new Timer());
}
- public Groups(Configuration conf, Timer timer) {
+ public Groups(Configuration conf, final Timer timer) {
impl =
ReflectionUtils.newInstance(
conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
@@ -86,6 +93,11 @@ public Groups(Configuration conf, Timer timer) {
parseStaticMapping(conf);
this.timer = timer;
+ this.cache = CacheBuilder.newBuilder()
+ .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
+ .ticker(new TimerToTickerAdapter(timer))
+ .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
+ .build(new GroupCacheLoader());
if(LOG.isDebugEnabled())
LOG.debug("Group mapping impl=" + impl.getClass().getName() +
@@ -123,78 +135,112 @@ private void parseStaticMapping(Configuration conf) {
}
}
- /**
- * Determine whether the CachedGroups is expired.
- * @param groups cached groups for one user.
- * @return true if groups is expired from useToGroupsMap.
- */
- private boolean hasExpired(CachedGroups groups, long startMs) {
- if (groups == null) {
- return true;
- }
- long timeout = cacheTimeout;
- if (isNegativeCacheEnabled() && groups.getGroups().isEmpty()) {
- // This CachedGroups is in the negative cache, thus it should expire
- // sooner.
- timeout = negativeCacheTimeout;
- }
- return groups.getTimestamp() + timeout <= startMs;
- }
-
private boolean isNegativeCacheEnabled() {
return negativeCacheTimeout > 0;
}
+ private IOException noGroupsForUser(String user) {
+ return new IOException("No groups found for user " + user);
+ }
+
/**
* Get the group memberships of a given user.
+ * If the user's group is not cached, this method may block.
* @param user User's name
* @return the group memberships of the user
- * @throws IOException
+ * @throws IOException if user does not exist
*/
- public List<String> getGroups(String user) throws IOException {
+ public List<String> getGroups(final String user) throws IOException {
// No need to lookup for groups of static users
List<String> staticMapping = staticUserToGroupsMap.get(user);
if (staticMapping != null) {
return staticMapping;
}
- // Return cached value if available
- CachedGroups groups = userToGroupsMap.get(user);
- long startMs = timer.monotonicNow();
- if (!hasExpired(groups, startMs)) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Returning cached groups for '" + user + "'");
- }
- if (groups.getGroups().isEmpty()) {
- // Even with enabling negative cache, getGroups() has the same behavior
- // that throws IOException if the groups for the user is empty.
- throw new IOException("No groups found for user " + user);
+
+ // Check the negative cache first
+ if (isNegativeCacheEnabled()) {
+ Long expirationTime = negativeCacheMask.get(user);
+ if (expirationTime != null) {
+ if (timer.monotonicNow() < expirationTime) {
+ throw noGroupsForUser(user);
+ } else {
+ negativeCacheMask.remove(user, expirationTime);
+ }
}
- return groups.getGroups();
}
- // Create and cache user's groups
- List<String> groupList = impl.getGroups(user);
- long endMs = timer.monotonicNow();
- long deltaMs = endMs - startMs ;
- UserGroupInformation.metrics.addGetGroups(deltaMs);
- if (deltaMs > warningDeltaMs) {
- LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
- "took " + deltaMs + " milliseconds.");
+ try {
+ return cache.get(user);
+ } catch (ExecutionException e) {
+ throw (IOException)e.getCause();
}
- groups = new CachedGroups(groupList, endMs);
- if (groups.getGroups().isEmpty()) {
- if (isNegativeCacheEnabled()) {
- userToGroupsMap.put(user, groups);
+ }
+
+ /**
+ * Convert millisecond times from hadoop's timer to guava's nanosecond ticker.
+ */
+ private static class TimerToTickerAdapter extends Ticker {
+ private Timer timer;
+
+ public TimerToTickerAdapter(Timer timer) {
+ this.timer = timer;
+ }
+
+ @Override
+ public long read() {
+ final long NANOSECONDS_PER_MS = 1000000;
+ return timer.monotonicNow() * NANOSECONDS_PER_MS;
+ }
+ }
+
+ /**
+ * Deals with loading data into the cache.
+ */
+ private class GroupCacheLoader extends CacheLoader<String, List<String>> {
+ /**
+ * This method will block if a cache entry doesn't exist, and
+ * any subsequent requests for the same user will wait on this
+ * request to return. If a user already exists in the cache,
+ * this will be run in the background.
+ * @param user key of cache
+ * @return List of groups belonging to user
+ * @throws IOException to prevent caching negative entries
+ */
+ @Override
+ public List<String> load(String user) throws Exception {
+ List<String> groups = fetchGroupList(user);
+
+ if (groups.isEmpty()) {
+ if (isNegativeCacheEnabled()) {
+ long expirationTime = timer.monotonicNow() + negativeCacheTimeout;
+ negativeCacheMask.put(user, expirationTime);
+ }
+
+ // We throw here to prevent Cache from retaining an empty group
+ throw noGroupsForUser(user);
}
- throw new IOException("No groups found for user " + user);
+
+ return groups;
}
- userToGroupsMap.put(user, groups);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Returning fetched groups for '" + user + "'");
+
+ /**
+ * Queries impl for groups belonging to the user. This could involve I/O and take a while.
+ */
+ private List<String> fetchGroupList(String user) throws IOException {
+ long startMs = timer.monotonicNow();
+ List<String> groupList = impl.getGroups(user);
+ long endMs = timer.monotonicNow();
+ long deltaMs = endMs - startMs ;
+ UserGroupInformation.metrics.addGetGroups(deltaMs);
+ if (deltaMs > warningDeltaMs) {
+ LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
+ "took " + deltaMs + " milliseconds.");
+ }
+
+ return groupList;
}
- return groups.getGroups();
}
-
+
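
For context, this is the generic Guava pattern the patch adopts, reduced to a self-contained sketch (made-up keys, values and timeouts, not Hadoop code): refreshAfterWrite marks entries stale so a later read reloads them while readers still get a value, expireAfterWrite is the hard cut-off after which a read must block on load(), and a pluggable Ticker lets a fake clock (a simple array cell here, standing in for TimerToTickerAdapter/FakeTimer) drive expiry in tests.

import java.util.concurrent.TimeUnit;
import com.google.common.base.Ticker;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class RefreshingCacheSketch {
  public static void main(String[] args) throws Exception {
    final long[] nowNanos = {0};               // manually advanced test clock
    Ticker ticker = new Ticker() {
      @Override
      public long read() {
        return nowNanos[0];
      }
    };
    LoadingCache<String, String> cache = CacheBuilder.newBuilder()
        .refreshAfterWrite(5, TimeUnit.MINUTES)    // stale after 5 min
        .expireAfterWrite(50, TimeUnit.MINUTES)    // evicted after 50 min
        .ticker(ticker)
        .build(new CacheLoader<String, String>() {
          @Override
          public String load(String key) {
            // Blocks the first caller for a key; concurrent callers for the
            // same key wait on this load instead of issuing their own.
            return "groups-of-" + key;
          }
        });

    System.out.println(cache.get("me"));       // cold: runs load()
    nowNanos[0] = TimeUnit.MINUTES.toNanos(6);
    System.out.println(cache.get("me"));       // stale: reloads, while readers
                                               // still see a cached value
  }
}
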
/**
* Refresh all user-to-groups mappings.
*/
@@ -205,7 +251,8 @@ public void refresh() {
} catch (IOException e) {
LOG.warn("Error refreshing groups cache", e);
}
- userToGroupsMap.clear();
+ cache.invalidateAll();
+ negativeCacheMask.clear();
}
/**
@@ -221,40 +268,6 @@ public void cacheGroupsAdd(List<String> groups) {
}
}
- /**
- * Class to hold the cached groups
- */
- private static class CachedGroups {
- final long timestamp;
- final List<String> groups;
-
- /**
- * Create and initialize group cache
- */
- CachedGroups(List<String> groups, long timestamp) {
- this.groups = groups;
- this.timestamp = timestamp;
- }
-
- /**
- * Returns time of last cache update
- *
- * @return time of last cache update
- */
- public long getTimestamp() {
- return timestamp;
- }
-
- /**
- * Get list of cached groups
- *
- * @return cached groups
- */
- public List<String> getGroups() {
- return groups;
- }
- }
-
private static Groups GROUPS = null;
/**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index a814b0d9e6883..89e5b2d79d58d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -51,6 +51,9 @@ public class TestGroupsCaching {
@Before
public void setup() {
+ FakeGroupMapping.resetRequestCount();
+ ExceptionalGroupMapping.resetRequestCount();
+
conf = new Configuration();
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
FakeGroupMapping.class,
@@ -61,16 +64,32 @@ public static class FakeGroupMapping extends ShellBasedUnixGroupsMapping {
// any to n mapping
private static Set<String> allGroups = new HashSet<String>();
private static Set<String> blackList = new HashSet<String>();
+ private static int requestCount = 0;
+ private static long getGroupsDelayMs = 0;
@Override
public List<String> getGroups(String user) throws IOException {
LOG.info("Getting groups for " + user);
+ requestCount++;
+
+ delayIfNecessary();
+
if (blackList.contains(user)) {
return new LinkedList<String>();
}
return new LinkedList<String>(allGroups);
}
+ private void delayIfNecessary() {
+ if (getGroupsDelayMs > 0) {
+ try {
+ Thread.sleep(getGroupsDelayMs);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
@Override
public void cacheGroupsRefresh() throws IOException {
LOG.info("Cache is being refreshed.");
@@ -93,6 +112,36 @@ public static void addToBlackList(String user) throws IOException {
LOG.info("Adding " + user + " to the blacklist");
blackList.add(user);
}
+
+ public static int getRequestCount() {
+ return requestCount;
+ }
+
+ public static void resetRequestCount() {
+ requestCount = 0;
+ }
+
+ public static void setGetGroupsDelayMs(long delayMs) {
+ getGroupsDelayMs = delayMs;
+ }
+ }
+
+ public static class ExceptionalGroupMapping extends ShellBasedUnixGroupsMapping {
+ private static int requestCount = 0;
+
+ @Override
+ public List<String> getGroups(String user) throws IOException {
+ requestCount++;
+ throw new IOException("For test");
+ }
+
+ public static int getRequestCount() {
+ return requestCount;
+ }
+
+ public static void resetRequestCount() {
+ requestCount = 0;
+ }
}
@Test
@@ -219,4 +268,191 @@ public void testNegativeGroupCaching() throws Exception {
// groups for the user is fetched.
assertEquals(Arrays.asList(myGroups), groups.getGroups(user));
}
+
+ @Test
+ public void testCachePreventsImplRequest() throws Exception {
+ // Disable negative cache.
+ conf.setLong(
+ CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
+ Groups groups = new Groups(conf);
+ groups.cacheGroupsAdd(Arrays.asList(myGroups));
+ groups.refresh();
+ FakeGroupMapping.clearBlackList();
+
+ assertEquals(0, FakeGroupMapping.getRequestCount());
+
+ // First call hits the wire
+ assertTrue(groups.getGroups("me").size() == 2);
+ assertEquals(1, FakeGroupMapping.getRequestCount());
+
+ // Second call hits the cache
+ assertTrue(groups.getGroups("me").size() == 2);
+ assertEquals(1, FakeGroupMapping.getRequestCount());
+ }
+
+ @Test
+ public void testExceptionsFromImplNotCachedInNegativeCache() {
+ conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+ ExceptionalGroupMapping.class,
+ ShellBasedUnixGroupsMapping.class);
+ conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 10000);
+ Groups groups = new Groups(conf);
+ groups.cacheGroupsAdd(Arrays.asList(myGroups));
+ groups.refresh();
+
+ assertEquals(0, ExceptionalGroupMapping.getRequestCount());
+
+ // First call should hit the wire
+ try {
+ groups.getGroups("anything");
+ fail("Should have thrown");
+ } catch (IOException e) {
+ // okay
+ }
+ assertEquals(1, ExceptionalGroupMapping.getRequestCount());
+
+ // Second call should hit the wire (no negative caching)
+ try {
+ groups.getGroups("anything");
+ fail("Should have thrown");
+ } catch (IOException e) {
+ // okay
+ }
+ assertEquals(2, ExceptionalGroupMapping.getRequestCount());
+ }
+
+ @Test
+ public void testOnlyOneRequestWhenNoEntryIsCached() throws Exception {
+ // Disable negative cache.
+ conf.setLong(
+ CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
+ final Groups groups = new Groups(conf);
+ groups.cacheGroupsAdd(Arrays.asList(myGroups));
+ groups.refresh();
+ FakeGroupMapping.clearBlackList();
+ FakeGroupMapping.setGetGroupsDelayMs(100);
+
+ ArrayList<Thread> threads = new ArrayList<Thread>();
+ for (int i = 0; i < 10; i++) {
+ threads.add(new Thread() {
+ public void run() {
+ try {
+ assertEquals(2, groups.getGroups("me").size());
+ } catch (IOException e) {
+ fail("Should not happen");
+ }
+ }
+ });
+ }
+
+ // We start a bunch of threads who all see no cached value
+ for (Thread t : threads) {
+ t.start();
+ }
+
+ for (Thread t : threads) {
+ t.join();
+ }
+
+ // But only one thread should have made the request
+ assertEquals(1, FakeGroupMapping.getRequestCount());
+ }
+
+ @Test
+ public void testOnlyOneRequestWhenExpiredEntryExists() throws Exception {
+ conf.setLong(
+ CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
+ FakeTimer timer = new FakeTimer();
+ final Groups groups = new Groups(conf, timer);
+ groups.cacheGroupsAdd(Arrays.asList(myGroups));
+ groups.refresh();
+ FakeGroupMapping.clearBlackList();
+ FakeGroupMapping.setGetGroupsDelayMs(100);
+
+ // We make an initial request to populate the cache
+ groups.getGroups("me");
+ int startingRequestCount = FakeGroupMapping.getRequestCount();
+
+ // Then expire that entry
+ timer.advance(400 * 1000);
+ Thread.sleep(100);
+
+ ArrayList<Thread> threads = new ArrayList<Thread>();
+ for (int i = 0; i < 10; i++) {
+ threads.add(new Thread() {
+ public void run() {
+ try {
+ assertEquals(2, groups.getGroups("me").size());
+ } catch (IOException e) {
+ fail("Should not happen");
+ }
+ }
+ });
+ }
+
+ // We start a bunch of threads who all see the cached value
+ for (Thread t : threads) {
+ t.start();
+ }
+
+ for (Thread t : threads) {
+ t.join();
+ }
+
+ // Only one extra request is made
+ assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
+ }
+
+ @Test
+ public void testCacheEntriesExpire() throws Exception {
+ conf.setLong(
+ CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
+ FakeTimer timer = new FakeTimer();
+ final Groups groups = new Groups(conf, timer);
+ groups.cacheGroupsAdd(Arrays.asList(myGroups));
+ groups.refresh();
+ FakeGroupMapping.clearBlackList();
+
+ // We make an entry
+ groups.getGroups("me");
+ int startingRequestCount = FakeGroupMapping.getRequestCount();
+
+ timer.advance(20 * 1000);
+
+ // Cache entry has expired so it results in a new fetch
+ groups.getGroups("me");
+ assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
+ }
+
+ @Test
+ public void testNegativeCacheClearedOnRefresh() throws Exception {
+ conf.setLong(
+ CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 100);
+ final Groups groups = new Groups(conf);
+ groups.cacheGroupsAdd(Arrays.asList(myGroups));
+ groups.refresh();
+ FakeGroupMapping.clearBlackList();
+ FakeGroupMapping.addToBlackList("dne");
+
+ try {
+ groups.getGroups("dne");
+ fail("Should have failed to find this group");
+ } catch (IOException e) {
+ // pass
+ }
+
+ int startingRequestCount = FakeGroupMapping.getRequestCount();
+
+ groups.refresh();
+ FakeGroupMapping.addToBlackList("dne");
+
+ try {
+ List<String> g = groups.getGroups("dne");
+ fail("Should have failed to find this group");
+ } catch (IOException e) {
+ // pass
+ }
+
+ assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
+ }
}
From e722501c517798307cdc9214b1efb72a27ae0f80 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Fri, 12 Dec 2014 17:04:33 -0800
Subject: [PATCH 056/432] HDFS-7426. Change nntop JMX format to be a JSON blob.
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../hdfs/server/namenode/FSNamesystem.java | 38 ++-
.../namenode/metrics/FSNamesystemMBean.java | 7 +
.../server/namenode/top/TopAuditLogger.java | 20 +-
.../hdfs/server/namenode/top/TopConf.java | 29 ++-
.../namenode/top/metrics/TopMetrics.java | 216 ++++-------------
.../top/window/RollingWindowManager.java | 223 ++++++++++++------
.../namenode/TestFSNamesystemMBean.java | 2 +
.../server/namenode/TestNameNodeMXBean.java | 116 +++++++++
.../namenode/metrics/TestNameNodeMetrics.java | 59 -----
.../top/window/TestRollingWindowManager.java | 63 ++---
11 files changed, 417 insertions(+), 358 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eeedb0d8333a2..9dfecc1c5b5c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -455,6 +455,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7509. Avoid resolving path multiple times. (jing9)
+ HDFS-7426. Change nntop JMX format to be a JSON blob. (wang)
+
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b4b897a186b0c..1ac19fb30b5ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -120,6 +120,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
@@ -241,6 +242,7 @@
import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -281,6 +283,7 @@
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;
+import org.codehaus.jackson.map.ObjectMapper;
import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting;
@@ -539,6 +542,9 @@ private void logAuditEvent(boolean succeeded,
private final FSImage fsImage;
+ private final TopConf topConf;
+ private TopMetrics topMetrics;
+
/**
* Notify that loading of this FSDirectory is complete, and
* it is imageLoaded for use
@@ -842,6 +848,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
this.snapshotManager = new SnapshotManager(dir);
this.cacheManager = new CacheManager(this, conf, blockManager);
this.safeMode = new SafeModeInfo(conf);
+ this.topConf = new TopConf(conf);
this.auditLoggers = initAuditLoggers(conf);
this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
auditLoggers.get(0) instanceof DefaultAuditLogger;
@@ -952,13 +959,9 @@ private List<AuditLogger> initAuditLoggers(Configuration conf) {
}
// Add audit logger to calculate top users
- if (conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
- DFSConfigKeys.NNTOP_ENABLED_DEFAULT)) {
- String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
- TopConf nntopConf = new TopConf(conf);
- TopMetrics.initSingleton(conf, NamenodeRole.NAMENODE.name(), sessionId,
- nntopConf.nntopReportingPeriodsMs);
- auditLoggers.add(new TopAuditLogger());
+ if (topConf.isEnabled) {
+ topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
+ auditLoggers.add(new TopAuditLogger(topMetrics));
}
return Collections.unmodifiableList(auditLoggers);
@@ -6013,6 +6016,27 @@ public int getNumStaleStorages() {
return getBlockManager().getDatanodeManager().getNumStaleStorages();
}
+ @Override // FSNamesystemMBean
+ public String getTopUserOpCounts() {
+ if (!topConf.isEnabled) {
+ return null;
+ }
+
+ Date now = new Date();
+ final List<RollingWindowManager.TopWindow> topWindows =
+ topMetrics.getTopWindows();
+ Map<String, Object> topMap = new TreeMap<String, Object>();
+ topMap.put("windows", topWindows);
+ topMap.put("timestamp", DFSUtil.dateToIso8601String(now));
+ ObjectMapper mapper = new ObjectMapper();
+ try {
+ return mapper.writeValueAsString(topMap);
+ } catch (IOException e) {
+ LOG.warn("Failed to fetch TopUser metrics", e);
+ }
+ return null;
+ }
+
/**
* Increments, logs and then returns the stamp
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
index 708591b45eadf..86f4bd624ec92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -164,4 +164,11 @@ public interface FSNamesystemMBean {
*/
public int getNumStaleStorages();
+ /**
+ * Returns a nested JSON object listing the top users for different RPC
+ * operations over tracked time windows.
+ *
+ * @return JSON string
+ */
+ public String getTopUserOpCounts();
}
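
As a rough illustration of how a client might consume this attribute (a sketch, not part of the patch: it assumes the reader runs in the same JVM as the NameNode, the way the tests below do, and that Jackson maps the TopWindow getters to windowLenMs/ops keys; a remote monitor would go through a JMXConnector instead).

import java.lang.management.ManagementFactory;
import java.util.List;
import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.codehaus.jackson.map.ObjectMapper;

public class TopUserOpCountsReader {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName fsns =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    String json = (String) mbs.getAttribute(fsns, "TopUserOpCounts");
    if (json == null) {
      return;  // nntop disabled; the bean returns null
    }
    Map<String, Object> top = new ObjectMapper().readValue(json, Map.class);
    System.out.println("snapshot at " + top.get("timestamp"));
    List<Map<String, Object>> windows =
        (List<Map<String, Object>>) top.get("windows");
    for (Map<String, Object> window : windows) {
      System.out.println(window.get("windowLenMs") + " ms window, ops="
          + window.get("ops"));
    }
  }
}
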
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java
index 4f26b171c83aa..49c91536215d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java
@@ -19,6 +19,7 @@
import java.net.InetAddress;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -36,6 +37,14 @@
public class TopAuditLogger implements AuditLogger {
public static final Logger LOG = LoggerFactory.getLogger(TopAuditLogger.class);
+ private final TopMetrics topMetrics;
+
+ public TopAuditLogger(TopMetrics topMetrics) {
+ Preconditions.checkNotNull(topMetrics, "Cannot init with a null " +
+ "TopMetrics");
+ this.topMetrics = topMetrics;
+ }
+
@Override
public void initialize(Configuration conf) {
}
@@ -43,12 +52,11 @@ public void initialize(Configuration conf) {
@Override
public void logAuditEvent(boolean succeeded, String userName,
InetAddress addr, String cmd, String src, String dst, FileStatus status) {
-
- TopMetrics instance = TopMetrics.getInstance();
- if (instance != null) {
- instance.report(succeeded, userName, addr, cmd, src, dst, status);
- } else {
- LOG.error("TopMetrics is not initialized yet!");
+ try {
+ topMetrics.report(succeeded, userName, addr, cmd, src, dst, status);
+ } catch (Throwable t) {
+ LOG.error("An error occurred while reflecting the event in top service, "
+ + "event: (cmd={},userName={})", cmd, userName);
}
if (LOG.isDebugEnabled()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java
index 0f4ebac5fd1d2..ba820323b20b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.namenode.top;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.primitives.Ints;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -27,34 +30,34 @@
*/
@InterfaceAudience.Private
public final class TopConf {
-
- public static final String TOP_METRICS_REGISTRATION_NAME = "topusers";
- public static final String TOP_METRICS_RECORD_NAME = "topparam";
/**
- * A meta command representing the total number of commands
+ * Whether TopMetrics are enabled
*/
- public static final String CMD_TOTAL = "total";
+ public final boolean isEnabled;
+
/**
- * A meta user representing all users
+ * A meta command representing the total number of calls to all commands
*/
- public static String ALL_USERS = "ALL";
+ public static final String ALL_CMDS = "*";
/**
* nntop reporting periods in milliseconds
*/
- public final long[] nntopReportingPeriodsMs;
+ public final int[] nntopReportingPeriodsMs;
public TopConf(Configuration conf) {
+ isEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
+ DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
String[] periodsStr = conf.getTrimmedStrings(
DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY,
DFSConfigKeys.NNTOP_WINDOWS_MINUTES_DEFAULT);
- nntopReportingPeriodsMs = new long[periodsStr.length];
+ nntopReportingPeriodsMs = new int[periodsStr.length];
for (int i = 0; i < periodsStr.length; i++) {
- nntopReportingPeriodsMs[i] = Integer.parseInt(periodsStr[i]) *
- 60L * 1000L; //min to ms
+ nntopReportingPeriodsMs[i] = Ints.checkedCast(
+ TimeUnit.MINUTES.toMillis(Integer.parseInt(periodsStr[i])));
}
- for (long aPeriodMs: nntopReportingPeriodsMs) {
- Preconditions.checkArgument(aPeriodMs >= 60L * 1000L,
+ for (int aPeriodMs: nntopReportingPeriodsMs) {
+ Preconditions.checkArgument(aPeriodMs >= TimeUnit.MINUTES.toMillis(1),
"minimum reporting period is 1 min!");
}
}
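
A small illustration of the bounds this constructor enforces (a sketch with invented values): Ints.checkedCast rejects any window whose millisecond length no longer fits in an int, i.e. anything over roughly 35,791 minutes (about 24.8 days), while the Preconditions check rejects anything under one minute.

import java.util.concurrent.TimeUnit;
import com.google.common.primitives.Ints;

public class WindowBoundsSketch {
  public static void main(String[] args) {
    // 25 minutes fits comfortably: 1,500,000 ms.
    System.out.println(Ints.checkedCast(TimeUnit.MINUTES.toMillis(25)));
    try {
      // 40,000 minutes is 2,400,000,000 ms, which overflows an int.
      Ints.checkedCast(TimeUnit.MINUTES.toMillis(40000));
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
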
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index e8a4e23fdf085..ab55392886278 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -17,67 +17,50 @@
*/
package org.apache.hadoop.hdfs.server.namenode.top.metrics;
-import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
-import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
-import static org.apache.hadoop.metrics2.lib.Interns.info;
-
import java.net.InetAddress;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
-import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.MetricValueMap;
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
-/***
- * The interface to the top metrics
+/**
+ * The interface to the top metrics.
+ *
+ * Metrics are collected by a custom audit logger, {@link org.apache.hadoop
+ * .hdfs.server.namenode.top.TopAuditLogger}, which calls TopMetrics to
+ * increment per-operation, per-user counts on every audit log call. These
+ * counts are used to show the top users by NameNode operation as well as
+ * across all operations.
+ *
+ * TopMetrics maintains these counts for a configurable number of time
+ * intervals, e.g. 1min, 5min, 25min. Each interval is tracked by a
+ * RollingWindowManager.
*
- * The producers use the {@link #report} method to report events and the
- * consumers use {@link #getMetrics(MetricsCollector, boolean)} to retrieve the
- * current top metrics. The default consumer is JMX but it could be any other
- * user interface.
+ * These metrics are published as a JSON string via {@link org.apache.hadoop
+ * .hdfs.server .namenode.metrics.FSNamesystemMBean#getTopWindows}. This is
+ * done by calling {@link org.apache.hadoop.hdfs.server.namenode.top.window
+ * .RollingWindowManager#snapshot} on each RollingWindowManager.
*
* Thread-safe: relies on thread-safety of RollingWindowManager
*/
@InterfaceAudience.Private
-public class TopMetrics implements MetricsSource {
+public class TopMetrics {
public static final Logger LOG = LoggerFactory.getLogger(TopMetrics.class);
- enum Singleton {
- INSTANCE;
-
- volatile TopMetrics impl = null;
-
- synchronized TopMetrics init(Configuration conf, String processName,
- String sessionId, long[] reportingPeriods) {
- if (impl == null) {
- impl =
- create(conf, processName, sessionId, reportingPeriods,
- DefaultMetricsSystem.instance());
- }
- logConf(conf);
- return impl;
- }
- }
-
private static void logConf(Configuration conf) {
LOG.info("NNTop conf: " + DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY +
" = " + conf.get(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY));
@@ -87,128 +70,35 @@ private static void logConf(Configuration conf) {
" = " + conf.get(DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY));
}
- /**
- * Return only the shortest periods for default
- * TODO: make it configurable
- */
- final boolean smallestOnlyDefault = true;
-
- /**
- * The smallest of reporting periods
- */
- long smallestPeriod = Long.MAX_VALUE;
-
- /**
- * processName and sessionId might later be leveraged later when we aggregate
- * report from multiple federated name nodes
- */
- final String processName, sessionId;
-
/**
* A map from reporting periods to WindowManager. Thread-safety is provided by
* the fact that the mapping is not changed after construction.
*/
- final Map<Long, RollingWindowManager> rollingWindowManagers =
- new HashMap<Long, RollingWindowManager>();
+ final Map<Integer, RollingWindowManager> rollingWindowManagers =
+ new HashMap<Integer, RollingWindowManager>();
- TopMetrics(Configuration conf, String processName, String sessionId,
- long[] reportingPeriods) {
- this.processName = processName;
- this.sessionId = sessionId;
+ public TopMetrics(Configuration conf, int[] reportingPeriods) {
+ logConf(conf);
for (int i = 0; i < reportingPeriods.length; i++) {
- smallestPeriod = Math.min(smallestPeriod, reportingPeriods[i]);
rollingWindowManagers.put(reportingPeriods[i], new RollingWindowManager(
conf, reportingPeriods[i]));
}
}
- public static TopMetrics create(Configuration conf, String processName,
- String sessionId, long[] reportingPeriods, MetricsSystem ms) {
- return ms.register(TopConf.TOP_METRICS_REGISTRATION_NAME,
- "top metrics of the namenode in a last period of time", new TopMetrics(
- conf, processName, sessionId, reportingPeriods));
- }
-
- public static TopMetrics initSingleton(Configuration conf,
- String processName, String sessionId, long[] reportingPeriods) {
- return Singleton.INSTANCE.init(conf, processName, sessionId,
- reportingPeriods);
- }
-
- public static TopMetrics getInstance() {
- TopMetrics topMetrics = Singleton.INSTANCE.impl;
- Preconditions.checkArgument(topMetrics != null,
- "The TopMetric singleton instance is not initialized."
- + " Have you called initSingleton first?");
- return topMetrics;
- }
-
/**
- * In testing, the previous initialization should be reset if the entire
- * metric system is reinitialized
+ * Get a list of the current TopWindow statistics, one TopWindow per tracked
+ * time interval.
*/
- @VisibleForTesting
- public static void reset() {
- Singleton.INSTANCE.impl = null;
- }
-
- @Override
- public void getMetrics(MetricsCollector collector, boolean all) {
- long realTime = Time.monotonicNow();
- getMetrics(smallestOnlyDefault, realTime, collector, all);
- }
-
- public void getMetrics(boolean smallestOnly, long currTime,
- MetricsCollector collector, boolean all) {
- for (Entry<Long, RollingWindowManager> entry : rollingWindowManagers
+ public List<TopWindow> getTopWindows() {
+ long monoTime = Time.monotonicNow();
+ List<TopWindow> windows = Lists.newArrayListWithCapacity
+ (rollingWindowManagers.size());
+ for (Entry<Integer, RollingWindowManager> entry : rollingWindowManagers
.entrySet()) {
- if (!smallestOnly || smallestPeriod == entry.getKey()) {
- getMetrics(currTime, collector, entry.getKey(), entry.getValue(), all);
- }
- }
- }
-
- /**
- * Get metrics for a particular recording period and its corresponding
- * {@link RollingWindowManager}
- *
- *
- * @param collector the metric collector
- * @param period the reporting period
- * @param rollingWindowManager the window manager corresponding to the
- * reporting period
- * @param all currently ignored
- */
- void getMetrics(long currTime, MetricsCollector collector, Long period,
- RollingWindowManager rollingWindowManager, boolean all) {
- MetricsRecordBuilder rb =
- collector.addRecord(createTopMetricsRecordName(period))
- .setContext("namenode").tag(ProcessName, processName)
- .tag(SessionId, sessionId);
-
- MetricValueMap snapshotMetrics = rollingWindowManager.snapshot(currTime);
- LOG.debug("calling snapshot, result size is: " + snapshotMetrics.size());
- for (Map.Entry<String, Number> entry : snapshotMetrics.entrySet()) {
- String key = entry.getKey();
- Number value = entry.getValue();
- LOG.debug("checking an entry: key: {} value: {}", key, value);
- long min = period / 1000L / 60L; //ms -> min
- String desc = "top user of name node in the past " + min + " minutes";
-
- if (value instanceof Integer) {
- rb.addGauge(info(key, desc), (Integer) value);
- } else if (value instanceof Long) {
- rb.addGauge(info(key, desc), (Long) value);
- } else if (value instanceof Float) {
- rb.addGauge(info(key, desc), (Float) value);
- } else if (value instanceof Double) {
- rb.addGauge(info(key, desc), (Double) value);
- } else {
- LOG.warn("Unsupported metric type: " + value.getClass());
- }
+ TopWindow window = entry.getValue().snapshot(monoTime);
+ windows.add(window);
}
- LOG.debug("END iterating over metrics, result size is: {}",
- snapshotMetrics.size());
+ return windows;
}
/**
@@ -216,18 +106,10 @@ void getMetrics(long currTime, MetricsCollector collector, Long period,
* log file. This is to be consistent when {@link TopMetrics} is charged with
* data read back from log files instead of being invoked directly by the
* FsNamesystem
- *
- * @param succeeded
- * @param userName
- * @param addr
- * @param cmd
- * @param src
- * @param dst
- * @param status
*/
public void report(boolean succeeded, String userName, InetAddress addr,
String cmd, String src, String dst, FileStatus status) {
- //currently we nntop makes use of only the username and the command
+ // currently nntop only makes use of the username and the command
report(userName, cmd);
}
@@ -239,27 +121,11 @@ public void report(String userName, String cmd) {
public void report(long currTime, String userName, String cmd) {
LOG.debug("a metric is reported: cmd: {} user: {}", cmd, userName);
userName = UserGroupInformation.trimLoginMethod(userName);
- try {
- for (RollingWindowManager rollingWindowManager : rollingWindowManagers
- .values()) {
- rollingWindowManager.recordMetric(currTime, cmd, userName, 1);
- rollingWindowManager.recordMetric(currTime,
- TopConf.CMD_TOTAL, userName, 1);
- }
- } catch (Throwable t) {
- LOG.error("An error occurred while reflecting the event in top service, "
- + "event: (time,cmd,userName)=(" + currTime + "," + cmd + ","
- + userName);
+ for (RollingWindowManager rollingWindowManager : rollingWindowManagers
+ .values()) {
+ rollingWindowManager.recordMetric(currTime, cmd, userName, 1);
+ rollingWindowManager.recordMetric(currTime,
+ TopConf.ALL_CMDS, userName, 1);
}
}
-
- /***
- *
- * @param period the reporting period length in ms
- * @return
- */
- public static String createTopMetricsRecordName(Long period) {
- return TopConf.TOP_METRICS_RECORD_NAME + "-" + period;
- }
-
}
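
Putting the pieces of the TopMetrics class Javadoc above together, a rough end-to-end sketch (not part of the patch; default configuration, invented user names and paths) of how audit events flow into the rolling windows and back out as TopWindow snapshots.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;
import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;

public class NNTopFlowSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    TopConf topConf = new TopConf(conf);
    TopMetrics topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
    AuditLogger auditLogger = new TopAuditLogger(topMetrics);

    // Each audited call bumps two counters in every window: one for the
    // command itself and one for the "*" meta-command (TopConf.ALL_CMDS).
    auditLogger.logAuditEvent(true, "alice", null, "listStatus", "/", null, null);
    auditLogger.logAuditEvent(true, "bob", null, "listStatus", "/tmp", null, null);

    // FSNamesystem#getTopUserOpCounts serializes exactly this structure.
    for (TopWindow window : topMetrics.getTopWindows()) {
      System.out.println(window.getWindowLenMs() + " ms window tracks "
          + window.getOps().size() + " op types");
    }
  }
}
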
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index d818cce26efd3..00e708766e0d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -17,21 +17,22 @@
*/
package org.apache.hadoop.hdfs.server.namenode.top.window;
-import java.util.HashMap;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;
+import java.util.Stack;
import java.util.concurrent.ConcurrentHashMap;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.primitives.Ints;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
/**
* A class to manage the set of {@link RollingWindow}s. This class is the
@@ -46,25 +47,93 @@ public class RollingWindowManager {
public static final Logger LOG = LoggerFactory.getLogger(
RollingWindowManager.class);
- private int windowLenMs;
- private int bucketsPerWindow; // e.g., 10 buckets per minute
- private int topUsersCnt; // e.g., report top 10 metrics
+ private final int windowLenMs;
+ private final int bucketsPerWindow; // e.g., 10 buckets per minute
+ private final int topUsersCnt; // e.g., report top 10 metrics
+
+ static private class RollingWindowMap extends
+ ConcurrentHashMap<String, RollingWindow> {
+ private static final long serialVersionUID = -6785807073237052051L;
+ }
/**
- * Create a metric name composed of the command and user
- *
- * @param command the command executed
- * @param user the user
- * @return a composed metric name
+ * Represents a snapshot of the rolling window. It contains one Op per
+ * operation in the window, with ranked users for each Op.
*/
- @VisibleForTesting
- public static String createMetricName(String command, String user) {
- return command + "." + user;
+ public static class TopWindow {
+ private final int windowMillis;
+ private final List<Op> top;
+
+ public TopWindow(int windowMillis) {
+ this.windowMillis = windowMillis;
+ this.top = Lists.newArrayList();
+ }
+
+ public void addOp(Op op) {
+ top.add(op);
+ }
+
+ public int getWindowLenMs() {
+ return windowMillis;
+ }
+
+ public List<Op> getOps() {
+ return top;
+ }
}
- static private class RollingWindowMap extends
- ConcurrentHashMap<String, RollingWindow> {
- private static final long serialVersionUID = -6785807073237052051L;
+ /**
+ * Represents an operation within a TopWindow. It contains a ranked
+ * set of the top users for the operation.
+ */
+ public static class Op {
+ private final String opType;
+ private final List<User> topUsers;
+ private final long totalCount;
+
+ public Op(String opType, long totalCount) {
+ this.opType = opType;
+ this.topUsers = Lists.newArrayList();
+ this.totalCount = totalCount;
+ }
+
+ public void addUser(User u) {
+ topUsers.add(u);
+ }
+
+ public String getOpType() {
+ return opType;
+ }
+
+ public List<User> getTopUsers() {
+ return topUsers;
+ }
+
+ public long getTotalCount() {
+ return totalCount;
+ }
+ }
+
+ /**
+ * Represents a user who called an Op within a TopWindow. Specifies the
+ * user and the number of times the user called the operation.
+ */
+ public static class User {
+ private final String user;
+ private final long count;
+
+ public User(String user, long count) {
+ this.user = user;
+ this.count = count;
+ }
+
+ public String getUser() {
+ return user;
+ }
+
+ public long getCount() {
+ return count;
+ }
}
/**
@@ -75,8 +144,9 @@ static private class RollingWindowMap extends
public ConcurrentHashMap<String, RollingWindowMap> metricMap =
new ConcurrentHashMap<String, RollingWindowMap>();
- public RollingWindowManager(Configuration conf, long reportingPeriodMs) {
- windowLenMs = (int) reportingPeriodMs;
+ public RollingWindowManager(Configuration conf, int reportingPeriodMs) {
+
+ windowLenMs = reportingPeriodMs;
bucketsPerWindow =
conf.getInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY,
DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_DEFAULT);
@@ -112,53 +182,71 @@ public void recordMetric(long time, String command, String user, long delta) {
* Take a snapshot of current top users in the past period.
*
* @param time the current time
- * @return a map between the top metrics and their values. The user is encoded
- * in the metric name. Refer to {@link RollingWindowManager#createMetricName} for
- * the actual format.
+ * @return a TopWindow describing the top users for each metric in the
+ * window.
*/
- public MetricValueMap snapshot(long time) {
- MetricValueMap map = new MetricValueMap();
- Set<String> metricNames = metricMap.keySet();
- LOG.debug("iterating in reported metrics, size={} values={}",
- metricNames.size(), metricNames);
- for (Map.Entry<String, RollingWindowMap> rwEntry: metricMap.entrySet()) {
- String metricName = rwEntry.getKey();
- RollingWindowMap rollingWindows = rwEntry.getValue();
- TopN topN = new TopN(topUsersCnt);
- Iterator<Map.Entry<String, RollingWindow>> iterator =
- rollingWindows.entrySet().iterator();
- while (iterator.hasNext()) {
- Map.Entry<String, RollingWindow> entry = iterator.next();
- String userName = entry.getKey();
- RollingWindow aWindow = entry.getValue();
- long windowSum = aWindow.getSum(time);
- // do the gc here
- if (windowSum == 0) {
- LOG.debug("gc window of metric: {} userName: {}",
- metricName, userName);
- iterator.remove();
- continue;
- }
- LOG.debug("offer window of metric: {} userName: {} sum: {}",
- metricName, userName, windowSum);
- topN.offer(new NameValuePair(userName, windowSum));
- }
- int n = topN.size();
- LOG.info("topN size for command " + metricName + " is: " + n);
- if (n == 0) {
+ public TopWindow snapshot(long time) {
+ TopWindow window = new TopWindow(windowLenMs);
+ if (LOG.isDebugEnabled()) {
+ Set<String> metricNames = metricMap.keySet();
+ LOG.debug("iterating in reported metrics, size={} values={}",
+ metricNames.size(), metricNames);
+ }
+ for (Map.Entry<String, RollingWindowMap> entry : metricMap.entrySet()) {
+ String metricName = entry.getKey();
+ RollingWindowMap rollingWindows = entry.getValue();
+ TopN topN = getTopUsersForMetric(time, metricName, rollingWindows);
+ final int size = topN.size();
+ if (size == 0) {
continue;
}
- String allMetricName =
- createMetricName(metricName, TopConf.ALL_USERS);
- map.put(allMetricName, Long.valueOf(topN.total));
- for (int i = 0; i < n; i++) {
- NameValuePair userEntry = topN.poll();
- String userMetricName =
- createMetricName(metricName, userEntry.name);
- map.put(userMetricName, Long.valueOf(userEntry.value));
+ Op op = new Op(metricName, topN.getTotal());
+ window.addOp(op);
+ // Reverse the users from the TopUsers using a stack,
+ // since we'd like them sorted in descending rather than ascending order
+ Stack<NameValuePair> reverse = new Stack<NameValuePair>();
+ for (int i = 0; i < size; i++) {
+ reverse.push(topN.poll());
}
+ for (int i = 0; i < size; i++) {
+ NameValuePair userEntry = reverse.pop();
+ User user = new User(userEntry.name, Long.valueOf(userEntry.value));
+ op.addUser(user);
+ }
+ }
+ return window;
+ }
+
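
The stack dance above exists because TopN (a bounded PriorityQueue defined later in this file) is a min-heap on the count: polling it yields the retained users in ascending order. A tiny standalone sketch of the same idea, with a plain PriorityQueue and made-up counts standing in for TopN/NameValuePair.

import java.util.Comparator;
import java.util.PriorityQueue;
import java.util.Stack;

public class TopNReversalSketch {
  public static void main(String[] args) {
    // Min-heap on the count; the head is always the smallest retained entry.
    PriorityQueue<long[]> topN = new PriorityQueue<long[]>(3,
        new Comparator<long[]>() {
          @Override
          public int compare(long[] a, long[] b) {
            return Long.compare(a[1], b[1]);
          }
        });
    topN.offer(new long[]{1, 7});    // {userId, count}
    topN.offer(new long[]{2, 42});
    topN.offer(new long[]{3, 13});

    // poll() drains ascending (7, 13, 42); the Stack flips it to descending.
    Stack<long[]> reverse = new Stack<long[]>();
    while (!topN.isEmpty()) {
      reverse.push(topN.poll());
    }
    while (!reverse.isEmpty()) {
      System.out.println("count=" + reverse.pop()[1]);   // 42, 13, 7
    }
  }
}
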
+ /**
+ * Calculates the top N users over a time interval.
+ *
+ * @param time the current time
+ * @param metricName Name of metric
+ * @return a TopN of the top users for this metric
+ */
+ private TopN getTopUsersForMetric(long time, String metricName,
+ RollingWindowMap rollingWindows) {
+ TopN topN = new TopN(topUsersCnt);
+ Iterator<Map.Entry<String, RollingWindow>> iterator =
+ rollingWindows.entrySet().iterator();
+ while (iterator.hasNext()) {
+ Map.Entry<String, RollingWindow> entry = iterator.next();
+ String userName = entry.getKey();
+ RollingWindow aWindow = entry.getValue();
+ long windowSum = aWindow.getSum(time);
+ // do the gc here
+ if (windowSum == 0) {
+ LOG.debug("gc window of metric: {} userName: {}",
+ metricName, userName);
+ iterator.remove();
+ continue;
+ }
+ LOG.debug("offer window of metric: {} userName: {} sum: {}",
+ metricName, userName, windowSum);
+ topN.offer(new NameValuePair(userName, windowSum));
}
- return map;
+ LOG.info("topN size for command {} is: {}", metricName, topN.size());
+ return topN;
}
/**
@@ -190,7 +278,8 @@ private RollingWindow getRollingWindow(String metric, String user) {
}
/**
- * A pair of a name and its corresponding value
+ * A pair of a name and its corresponding value. Defines a custom
+ * comparator so the TopN PriorityQueue sorts based on the count.
*/
static private class NameValuePair implements Comparable<NameValuePair> {
String name;
@@ -254,12 +343,4 @@ public long getTotal() {
return total;
}
}
-
- /**
- * A mapping from metric names to their absolute values and their percentage
- */
- @InterfaceAudience.Private
- public static class MetricValueMap extends HashMap<String, Number> {
- private static final long serialVersionUID = 8936732010242400171L;
- }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
index 39e1165359d54..3703c2dcba42c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
@@ -96,6 +96,8 @@ public void run() {
"MaxObjects"));
Integer numStaleStorages = (Integer) (mbs.getAttribute(
mxbeanNameFsns, "NumStaleStorages"));
+ String topUsers =
+ (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
// Metrics that belong to "NameNodeInfo".
// These are metrics that FSNamesystem registers directly with MBeanServer.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 03ade90b4159b..c649621db73a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -26,9 +26,12 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.util.VersionInfo;
+import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
@@ -38,10 +41,15 @@
import java.lang.management.ManagementFactory;
import java.net.URI;
import java.util.Collection;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
@@ -257,4 +265,112 @@ public void testLastContactTime() throws Exception {
}
}
}
+
+ @Test(timeout=120000)
+ @SuppressWarnings("unchecked")
+ public void testTopUsers() throws Exception {
+ final Configuration conf = new Configuration();
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ cluster.waitActive();
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanNameFsns = new ObjectName(
+ "Hadoop:service=NameNode,name=FSNamesystemState");
+ FileSystem fs = cluster.getFileSystem();
+ final Path path = new Path("/");
+ final int NUM_OPS = 10;
+ for (int i=0; i< NUM_OPS; i++) {
+ fs.listStatus(path);
+ fs.setTimes(path, 0, 1);
+ }
+ String topUsers =
+ (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
+ ObjectMapper mapper = new ObjectMapper();
+ Map<String, Object> map = mapper.readValue(topUsers, Map.class);
+ assertTrue("Could not find map key timestamp",
+ map.containsKey("timestamp"));
+ assertTrue("Could not find map key windows", map.containsKey("windows"));
+ List