From 989158ab65079d530aa83e5c9b8fab5f0873e818 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Wed, 22 Jul 2020 21:33:29 +0530 Subject: [PATCH 1/9] HDFS-15488. Add a command to list all snapshots for a snapshottable root with snapshot Ids. --- .../org/apache/hadoop/hdfs/DFSClient.java | 19 ++ .../hadoop/hdfs/DistributedFileSystem.java | 10 + .../hadoop/hdfs/protocol/ClientProtocol.java | 12 + .../hadoop/hdfs/protocol/SnapshotStatus.java | 218 ++++++++++++++++++ .../ClientNamenodeProtocolTranslatorPB.java | 22 ++ .../hdfs/protocolPB/PBHelperClient.java | 75 ++++++ .../main/proto/ClientNamenodeProtocol.proto | 10 + .../src/main/proto/hdfs.proto | 21 ++ .../router/RouterClientProtocol.java | 7 + .../federation/router/RouterRpcServer.java | 7 + .../federation/router/RouterSnapshot.java | 17 ++ .../hadoop-hdfs/src/main/bin/hdfs | 4 + .../hadoop-hdfs/src/main/bin/hdfs.cmd | 8 +- ...amenodeProtocolServerSideTranslatorPB.java | 24 ++ .../hdfs/server/namenode/FSDirSnapshotOp.java | 18 ++ .../hdfs/server/namenode/FSNamesystem.java | 29 ++- .../server/namenode/NameNodeRpcServer.java | 11 + .../namenode/metrics/NameNodeMetrics.java | 7 + .../namenode/snapshot/SnapshotManager.java | 38 ++- .../hdfs/tools/snapshot/LsSnapshot.java | 74 ++++++ .../namenode/snapshot/TestListSnapshot.java | 132 +++++++++++ 21 files changed, 760 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshot.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 5a6a0f65f12f6..7df03807e8c00 100755 --- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -150,6 +150,7 @@ import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; @@ -2190,6 +2191,24 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() } } + /** + * Get listing of all the snapshots for a snapshottable directory. + * + * @return Information about all the snapshots for a snapshottable directory + * @throws IOException If an I/O error occurred + * @see ClientProtocol#getSnapshotListing(String) + */ + public SnapshotStatus[] getSnapshotListing(String snapshotRoot) + throws IOException { + checkOpen(); + try (TraceScope ignored = tracer.newScope("getSnapshotListing")) { + return namenode.getSnapshotListing(snapshotRoot); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + + /** + * Allow snapshot on a directory. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 450862b777078..01a80d15275ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -109,6 +109,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing.DiffReportListingEntry; import org.apache.hadoop.hdfs.client.impl.SnapshotDiffReportGenerator; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; @@ -2148,6 +2149,15 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() return dfs.getSnapshottableDirListing(); } + /** + * @return all the snapshots for a snapshottable directory + * @throws IOException + */ + public SnapshotStatus[] getSnapshotListing(Path snapshotRoot) + throws IOException { + return dfs.getSnapshotListing(getPathName(snapshotRoot)); + } + @Override public void deleteSnapshot(final Path snapshotDir, final String snapshotName) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 2f4dfb9b46cc1..c8cecbfbf932d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -727,6 +727,18 @@ BatchedDirectoryListing getBatchedListing( 
SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws IOException; + /** + * Get listing of all the snapshots for a snapshottable directory + * + * @return Information about all the snapshots for a snapshottable directory + * @throws IOException If an I/O error occurred + */ + @Idempotent + @ReadOnly(isCoordinated = true) + SnapshotStatus[] getSnapshotListing(String snapshotRoot) + throws IOException; + + /////////////////////////////////////// // System issues and management /////////////////////////////////////// diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java new file mode 100644 index 0000000000000..72bb05f14b4ac --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java @@ -0,0 +1,218 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.PrintStream; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.EnumSet; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSUtilClient; + +/** + * Metadata about a snapshottable directory + */ +public class SnapshotStatus { + /** + * Basic information of the snapshot directory + */ + private final HdfsFileStatus dirStatus; + + /** + * Snapshot ID for the snapshot + */ + private final int snapshotID; + + /** + * Full path of the parent. + */ + private final byte[] parentFullPath; + + public SnapshotStatus(long modification_time, long access_time, + FsPermission permission, + EnumSet flags, + String owner, String group, byte[] localName, + long inodeId, int childrenNum, int snapshotID, + byte[] parentFullPath) { + this.dirStatus = new HdfsFileStatus.Builder() + .isdir(true) + .mtime(modification_time) + .atime(access_time) + .perm(permission) + .flags(flags) + .owner(owner) + .group(group) + .path(localName) + .fileId(inodeId) + .children(childrenNum) + .build(); + this.snapshotID = snapshotID; + this.parentFullPath = parentFullPath; + } + + public SnapshotStatus(HdfsFileStatus dirStatus, + int snapshotNumber, byte[] parentFullPath) { + this.dirStatus = dirStatus; + this.snapshotID = snapshotNumber; + this.parentFullPath = parentFullPath; + } + + /** + * @return snapshot id for the snapshot + */ + public int getSnapshotID() { + return snapshotID; + } + + /** + * @return The basic information of the directory + */ + public HdfsFileStatus getDirStatus() { + return dirStatus; + } + + /** 
+ * @return Full path of the file + */ + public byte[] getParentFullPath() { + return parentFullPath; + } + + /** + * @return Full path of the snapshot + */ + public Path getFullPath() { + String parentFullPathStr = + (parentFullPath == null || parentFullPath.length == 0) ? + "/" : DFSUtilClient.bytes2String(parentFullPath); + return new Path(getSnapshotPath(parentFullPathStr, + dirStatus.getLocalName())); + } + + /** + * Print a list of {@link SnapshotStatus} out to a given stream. + * + * @param stats The list of {@link SnapshotStatus} + * @param out The given stream for printing. + */ + public static void print(SnapshotStatus[] stats, + PrintStream out) { + if (stats == null || stats.length == 0) { + out.println(); + return; + } + int maxRepl = 0, maxLen = 0, maxOwner = 0, maxGroup = 0; + int maxSnapshotID = 0; + for (SnapshotStatus status : stats) { + maxRepl = maxLength(maxRepl, status.dirStatus.getReplication()); + maxLen = maxLength(maxLen, status.dirStatus.getLen()); + maxOwner = maxLength(maxOwner, status.dirStatus.getOwner()); + maxGroup = maxLength(maxGroup, status.dirStatus.getGroup()); + maxSnapshotID = maxLength(maxSnapshotID, status.snapshotID); + } + + String lineFormat = "%s%s " // permission string + + "%" + maxRepl + "s " + + (maxOwner > 0 ? "%-" + maxOwner + "s " : "%s") + + (maxGroup > 0 ? 
"%-" + maxGroup + "s " : "%s") + + "%" + maxLen + "s " + + "%s " // mod time + + "%" + maxSnapshotID + "s " + + "%s"; // path + SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm"); + + for (SnapshotStatus status : stats) { + String line = String.format(lineFormat, "d", + status.dirStatus.getPermission(), + status.dirStatus.getReplication(), + status.dirStatus.getOwner(), + status.dirStatus.getGroup(), + String.valueOf(status.dirStatus.getLen()), + dateFormat.format(new Date(status.dirStatus.getModificationTime())), + status.snapshotID, + getSnapshotPath(DFSUtilClient.bytes2String(status.parentFullPath), + status.dirStatus.getLocalName()) + ); + out.println(line); + } + } + + private static int maxLength(int n, Object value) { + return Math.max(n, String.valueOf(value).length()); + } + + public static class Bean { + private final String path; + private final int snapshotID; + private final long modificationTime; + private final short permission; + private final String owner; + private final String group; + + public Bean(String path, int snapshotID, long + modificationTime, short permission, String owner, String group) { + this.path = path; + this.snapshotID = snapshotID; + this.modificationTime = modificationTime; + this.permission = permission; + this.owner = owner; + this.group = group; + } + + public String getPath() { + return path; + } + + public int getSnapshotID() { + return snapshotID; + } + + public long getModificationTime() { + return modificationTime; + } + + public short getPermission() { + return permission; + } + + public String getOwner() { + return owner; + } + + public String getGroup() { + return group; + } + } + + static String getSnapshotPath(String snapshottableDir, + String snapshotRelativePath) { + String parentFullPathStr = + snapshottableDir == null || snapshottableDir.isEmpty() ? 
+ "/" : snapshottableDir; + final StringBuilder b = new StringBuilder(parentFullPathStr); + if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) { + b.append(Path.SEPARATOR); + } + return b.append(HdfsConstants.DOT_SNAPSHOT_DIR) + .append(Path.SEPARATOR) + .append(snapshotRelativePath) + .toString(); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 7e41460ca4c63..0674cefe2e2df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -89,6 +89,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto; @@ -150,6 +151,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingRequestProto; +import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto; @@ -1299,6 +1302,25 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() } } + @Override + public SnapshotStatus[] getSnapshotListing(String path) + throws IOException { + GetSnapshotListingRequestProto req = + GetSnapshotListingRequestProto.newBuilder() + .setSnapshotRoot(path).build(); + try { + GetSnapshotListingResponseProto result = rpcProxy + .getSnapshotListing(null, req); + + if (result.hasSnapshotList()) { + return PBHelperClient.convert(result.getSnapshotList()); + } + return null; + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + @Override public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String fromSnapshot, String toSnapshot) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index 9fc302464271d..96d84646513c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -114,6 +114,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto; @@ -184,6 +185,8 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto; @@ -1669,6 +1672,47 @@ public static SnapshottableDirectoryStatus convert( sdirStatusProto.getParentFullpath().toByteArray()); } + public static SnapshotStatus[] convert( + HdfsProtos.SnapshotListingProto sdlp) { + if (sdlp == null) + return null; + List list = sdlp + .getSnapshotListingList(); + if (list.isEmpty()) { + return new SnapshotStatus[0]; + } else { + SnapshotStatus[] result = + new SnapshotStatus[list.size()]; + for (int i = 0; i < list.size(); i++) { + result[i] = convert(list.get(i)); + } + return result; + } + } + + public static SnapshotStatus convert( + HdfsProtos.SnapshotStatusProto sdirStatusProto) { + if (sdirStatusProto == null) { + return null; + } + final HdfsFileStatusProto status = sdirStatusProto.getDirStatus(); + EnumSet flags = status.hasFlags() + ? 
convertFlags(status.getFlags()) + : convertFlags(status.getPermission()); + return new SnapshotStatus( + status.getModificationTime(), + status.getAccessTime(), + convert(status.getPermission()), + flags, + status.getOwner(), + status.getGroup(), + status.getPath().toByteArray(), + status.getFileId(), + status.getChildrenNum(), + sdirStatusProto.getSnapshotID(), + sdirStatusProto.getParentFullpath().toByteArray()); + } + // DataEncryptionKey public static DataEncryptionKey convert(DataEncryptionKeyProto bet) { String encryptionAlgorithm = bet.getEncryptionAlgorithm(); @@ -2367,6 +2411,23 @@ public static SnapshottableDirectoryStatusProto convert( return builder.build(); } + public static HdfsProtos.SnapshotStatusProto convert(SnapshotStatus status) { + if (status == null) { + return null; + } + byte[] parentFullPath = status.getParentFullPath(); + ByteString parentFullPathBytes = getByteString( + parentFullPath == null ? DFSUtilClient.EMPTY_BYTES : parentFullPath); + HdfsFileStatusProto fs = convert(status.getDirStatus()); + HdfsProtos.SnapshotStatusProto.Builder builder = + HdfsProtos.SnapshotStatusProto + .newBuilder() + .setSnapshotID(status.getSnapshotID()) + .setParentFullpath(parentFullPathBytes) + .setDirStatus(fs); + return builder.build(); + } + public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) { if (fs == null) return null; final int len = fs.length; @@ -2649,6 +2710,20 @@ public static SnapshottableDirectoryListingProto convert( .addAllSnapshottableDirListing(protoList).build(); } + public static HdfsProtos.SnapshotListingProto convert( + SnapshotStatus[] status) { + if (status == null) + return null; + HdfsProtos.SnapshotStatusProto[] protos = + new HdfsProtos.SnapshotStatusProto[status.length]; + for (int i = 0; i < status.length; i++) { + protos[i] = convert(status[i]); + } + List protoList = Arrays.asList(protos); + return SnapshotListingProto.newBuilder() + .addAllSnapshotListing(protoList).build(); + } + public static 
SnapshotDiffReportEntryProto convert(DiffReportEntry entry) { if (entry == null) { return null; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index 3fb57bc02d0ac..20967cc13ab86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -303,6 +303,14 @@ message GetSnapshottableDirListingResponseProto { optional SnapshottableDirectoryListingProto snapshottableDirList = 1; } +message GetSnapshotListingRequestProto { + required string snapshotRoot = 1; +} + +message GetSnapshotListingResponseProto { + optional SnapshotListingProto snapshotList = 1; +} + message GetSnapshotDiffReportRequestProto { required string snapshotRoot = 1; required string fromSnapshot = 2; @@ -986,6 +994,8 @@ service ClientNamenodeProtocol { returns(DisallowSnapshotResponseProto); rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto) returns(GetSnapshottableDirListingResponseProto); + rpc getSnapshotListing(GetSnapshotListingRequestProto) + returns(GetSnapshotListingResponseProto); rpc deleteSnapshot(DeleteSnapshotRequestProto) returns(DeleteSnapshotResponseProto); rpc getSnapshotDiffReport(GetSnapshotDiffReportRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto index 82fe329c9ce5e..3e24d73ce2d26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto @@ -563,6 +563,20 @@ message SnapshottableDirectoryStatusProto { required bytes parent_fullpath = 4; } +/** + * Status of a snapshot directory: besides the normal information for + * a directory status, also include snapshot ID, and + * the full path of the parent directory. 
+ */ +message SnapshotStatusProto { + required HdfsFileStatusProto dirStatus = 1; + + // Fields specific for snapshot directory + required uint32 snapshotID = 2; + required bytes parent_fullpath = 3; +} + + /** * Snapshottable directory listing */ @@ -570,6 +584,13 @@ message SnapshottableDirectoryListingProto { repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1; } +/** + * Snapshot listing + */ +message SnapshotListingProto { + repeated SnapshotStatusProto snapshotListing = 1; +} + /** * Snapshot diff report entry */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 680cdc93250ec..e2ec0303333f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; @@ -1314,6 +1315,12 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() return snapshotProto.getSnapshottableDirListing(); } + @Override + public SnapshotStatus[] getSnapshotListing(String snapshotRoot) + throws IOException { + return snapshotProto.getSnapshotListing(snapshotRoot); + } + @Override public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String earlierSnapshotName, String 
laterSnapshotName) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 5905a1dbbd370..97b146c947442 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -112,6 +112,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; @@ -1130,6 +1131,12 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() return clientProto.getSnapshottableDirListing(); } + @Override // ClientProtocol + public SnapshotStatus[] getSnapshotListing(String snapshotRoot) + throws IOException { + return clientProto.getSnapshotListing(snapshotRoot); + } + @Override // ClientProtocol public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String earlierSnapshotName, String laterSnapshotName) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java index 7b08092d6431a..63c7514efa824 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; @@ -157,6 +158,22 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() return RouterRpcServer.merge(ret, SnapshottableDirectoryStatus.class); } + public SnapshotStatus[] getSnapshotListing(String snapshotRoot) + throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.READ); + final List locations = + rpcServer.getLocationsForPath(snapshotRoot, true, false); + RemoteMethod method = new RemoteMethod("getSnapshotListing", + new Class[] {String.class}, + new RemoteParam()); + Set nss = namenodeResolver.getNamespaces(); + Map ret = + rpcClient.invokeConcurrent( + nss, method, true, false, SnapshotStatus[].class); + + return RouterRpcServer.merge(ret, SnapshotStatus.class); + } + public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String earlierSnapshotName, String laterSnapshotName) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index 7a8bf8dbe0deb..fa933540735ca 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -54,6 +54,7 @@ function hadoop_usage hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or 
DataNode." hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode" hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable dirs owned by the current user" + hadoop_add_subcommand "lsSnapshot" client "list all snapshots for a snapshottable directory" hadoop_add_subcommand "mover" daemon "run a utility to move block replicas across storage types" hadoop_add_subcommand "namenode" daemon "run the DFS namenode" hadoop_add_subcommand "nfs3" daemon "run an NFS version 3 gateway" @@ -166,6 +167,9 @@ function hdfscmd_case lsSnapshottableDir) HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir ;; + lsSnapshot) + HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshot + ;; mover) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.mover.Mover diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd index 23d6a5aa1c301..65d341cf20855 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd @@ -59,7 +59,7 @@ if "%1" == "--loglevel" ( ) ) - set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck fsImageValidation balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto dfsrouter dfsrouteradmin debug + set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck fsImageValidation balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir lsSnapshot cacheadmin mover storagepolicies classpath crypto dfsrouter dfsrouteradmin debug for %%i in ( %hdfscommands% ) do ( if %hdfs-command% == %%i set hdfscommand=true ) @@ -167,6 +167,10 @@ goto :eof set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir goto :eof +:lsSnapshot + set 
CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshot + goto :eof + :cacheadmin set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin goto :eof @@ -253,6 +257,8 @@ goto :eof @echo current directory contents with a snapshot @echo lsSnapshottableDir list all snapshottable dirs owned by the current user @echo Use -help to see options + @echo lsSnapshot list all snapshots for a snapshottable dir + @echo Use -help to see options @echo cacheadmin configure the HDFS cache @echo crypto configure HDFS encryption zones @echo mover run a utility to move block replicas across storage types diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index e0afe006a2f9a..5132afaa4b15c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto; @@ -161,6 +162,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto; @@ -325,6 +328,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements static final GetSnapshottableDirListingResponseProto NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE = GetSnapshottableDirListingResponseProto.newBuilder().build(); + static final GetSnapshotListingResponseProto + NULL_GET_SNAPSHOT_LISTING_RESPONSE = + GetSnapshotListingResponseProto.newBuilder().build(); static final SetStoragePolicyResponseProto VOID_SET_STORAGE_POLICY_RESPONSE = SetStoragePolicyResponseProto.newBuilder().build(); static final UnsetStoragePolicyResponseProto @@ -1349,6 +1355,24 @@ public GetSnapshottableDirListingResponseProto getSnapshottableDirListing( } } + @Override + public GetSnapshotListingResponseProto getSnapshotListing( + RpcController controller, GetSnapshotListingRequestProto request) + throws ServiceException { + try { + SnapshotStatus[] result = server + .getSnapshotListing(request.getSnapshotRoot()); + if (result != null) { + return GetSnapshotListingResponseProto.newBuilder(). 
+ setSnapshotList(PBHelperClient.convert(result)).build(); + } else { + return NULL_GET_SNAPSHOT_LISTING_RESPONSE; + } + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public GetSnapshotDiffReportResponseProto getSnapshotDiffReport( RpcController controller, GetSnapshotDiffReportRequestProto request) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java index 923c6a88b0318..1eee20d166742 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; @@ -154,6 +155,23 @@ static SnapshottableDirectoryStatus[] getSnapshottableDirListing( } } + static SnapshotStatus[] getSnapshotListing( + FSDirectory fsd, SnapshotManager snapshotManager, String path) + throws IOException { + FSPermissionChecker pc = fsd.getPermissionChecker(); + fsd.readLock(); + try { + INodesInPath iip = fsd.getINodesInPath(path, DirOp.READ); + if (fsd.isPermissionEnabled()) { + fsd.checkPermission(pc, iip, false, null, null, FsAction.READ, + FsAction.READ); + } + return snapshotManager.getSnapshotListing(iip); + } finally { + fsd.readUnlock(); + } + } + static SnapshotDiffReport getSnapshotDiffReport(FSDirectory fsd, FSPermissionChecker pc, SnapshotManager 
snapshotManager, String path, String fromSnapshot, String toSnapshot) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index fe39b071e207c..34f4bcd63bf86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -99,6 +99,7 @@ import org.apache.commons.text.CaseUtils; import org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY; import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*; import static org.apache.hadoop.ha.HAServiceProtocol.HAServiceState.ACTIVE; @@ -7001,7 +7002,33 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() logAuditEvent(true, operationName, null, null, null); return status; } - + + /** + * Get the list of snapshots for a given snapshottable directory. 
+ * + * @return The list of all the snapshots for a snapshottable directory + * @throws IOException + */ + public SnapshotStatus[] getSnapshotListing(String snapshotRoot) + throws IOException { + SnapshotStatus[] status = null; + checkOperation(OperationCategory.READ); + boolean success = false; + readLock(); + try { + checkOperation(OperationCategory.READ); + status = FSDirSnapshotOp.getSnapshotListing(dir, snapshotManager, + snapshotRoot); + success = true; + } catch (AccessControlException ace) { + logAuditEvent(success, "listSnapshots", null, null, null); + throw ace; + } finally { + readUnlock(); + } + logAuditEvent(success, "listSnapshots", null, null, null); + return status; + } /** * Get the difference between two snapshots (or between a snapshot and the * current status) of a snapshottable directory. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 230e4020117f0..c0161dc11fa11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -132,6 +132,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; @@ -2004,6 +2005,16 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() return status; } + @Override // Client Protocol + public SnapshotStatus[] 
getSnapshotListing(String path) + throws IOException { + checkNNStartup(); + SnapshotStatus[] status = namesystem + .getSnapshotListing(path); + metrics.incrListSnapshotsOps(); + return status; + } + @Override // ClientProtocol public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String earlierSnapshotName, String laterSnapshotName) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java index c15cdbdd48e4e..de99ddfaa92d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java @@ -77,6 +77,8 @@ public class NameNodeMetrics { MutableCounterLong renameSnapshotOps; @Metric("Number of listSnapshottableDirectory operations") MutableCounterLong listSnapshottableDirOps; + @Metric("Number of listSnapshots operations") + MutableCounterLong listSnapshotOps; @Metric("Number of snapshotDiffReport operations") MutableCounterLong snapshotDiffReportOps; @Metric("Number of blockReceivedAndDeleted calls") @@ -106,6 +108,7 @@ public long totalFileOps(){ disallowSnapshotOps.value() + renameSnapshotOps.value() + listSnapshottableDirOps.value() + + listSnapshotOps.value() + createSymlinkOps.value() + snapshotDiffReportOps.value(); } @@ -319,6 +322,10 @@ public void incrRenameSnapshotOps() { public void incrListSnapshottableDirOps() { listSnapshottableDirOps.incr(); } + + public void incrListSnapshotsOps() { + listSnapshotOps.incr(); + } public void incrSnapshotDiffReportOps() { snapshotDiffReportOps.incr(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index 9ace8a97b8641..2bcf7671a2da6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -50,10 +50,18 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.protocol.SnapshotInfo; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.*; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; +import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; +import org.apache.hadoop.hdfs.server.namenode.INodesInPath; +import org.apache.hadoop.hdfs.server.namenode.LeaseManager; +import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.metrics2.util.MBeans; import com.google.common.base.Preconditions; @@ -501,7 +509,35 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing( return statusList.toArray( new SnapshottableDirectoryStatus[statusList.size()]); } - + + /** + * List all the snapshots under a snapshottable directory. + */ + public SnapshotStatus[] getSnapshotListing(INodesInPath iip) + throws IOException { + INodeDirectory srcRoot = getSnapshottableRoot(iip); + ReadOnlyList snapshotList = srcRoot.getDirectorySnapshottableFeature(). 
+ getSnapshotList(); + if (snapshotList.isEmpty()) { + return null; + } + List statusList = + new ArrayList<>(); + for (Snapshot s : snapshotList) { + Snapshot.Root dir = s.getRoot(); + SnapshotStatus status = new SnapshotStatus(dir.getModificationTime() + , dir.getAccessTime(), dir.getFsPermission(), + EnumSet.noneOf(HdfsFileStatus.Flags.class), + dir.getUserName(), dir.getGroupName(), + dir.getLocalNameBytes(), dir.getId(), + dir.getChildrenNum(Snapshot.CURRENT_STATE_ID), + s.getId(), DFSUtil.string2Bytes(dir.getParent().getFullPathName())); + statusList.add(status); + } + return statusList.toArray( + new SnapshotStatus[statusList.size()]); + } + /** * Compute the difference between two snapshots of a directory, or between a * snapshot of the directory and its current tree. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshot.java new file mode 100644 index 0000000000000..f089f9550bbd0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshot.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools.snapshot; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +/** + * A tool used to list all snapshottable directories that are owned by the + * current user. The tool returns all the snapshottable directories if the user + * is a super user. + */ +@InterfaceAudience.Private +public class LsSnapshot extends Configured implements Tool { + @Override + public int run(String[] argv) throws Exception { + String description = "hdfs lsSnapshot : \n" + + "\tGet the list of snapshots for a snapshottable directory.\n"; + + if(argv.length != 1) { + System.err.println("Usage: \n" + description); + return 1; + } + + FileSystem fs = FileSystem.get(getConf()); + if (! 
(fs instanceof DistributedFileSystem)) { + System.err.println( + "lsSnapshot can only be used in DistributedFileSystem"); + return 1; + } + DistributedFileSystem dfs = (DistributedFileSystem) fs; + Path snapshotRoot = new Path(argv[0]); + + try { + SnapshotStatus[] stats = dfs.getSnapshotListing(snapshotRoot); + SnapshotStatus.print(stats, System.out); + } catch (IOException e) { + String[] content = e.getLocalizedMessage().split("\n"); + System.err.println("lsSnapshot: " + content[0]); + e.printStackTrace(System.err); + return 1; + } + return 0; + } + public static void main(String[] argv) throws Exception { + int rc = ToolRunner.run(new LsSnapshot(), argv); + System.exit(rc); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java new file mode 100644 index 0000000000000..9f73202f24266 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode.snapshot; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertEquals; + +public class TestListSnapshot { + + static final short REPLICATION = 3; + + private final Path dir1 = new Path("/TestSnapshot1"); + + Configuration conf; + MiniDFSCluster cluster; + FSNamesystem fsn; + DistributedFileSystem hdfs; + + @Before + public void setUp() throws Exception { + conf = new Configuration(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION) + .build(); + cluster.waitActive(); + fsn = cluster.getNamesystem(); + hdfs = cluster.getFileSystem(); + hdfs.mkdirs(dir1); + } + + @After + public void tearDown() throws Exception { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + /** + * Test listing all the snapshottable directories + */ + @Test(timeout = 60000) + public void testListSnapshot() throws Exception { + cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true); + + // Initially there is no snapshottable directories in the system + SnapshotStatus[] snapshotStatuses = null; + SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing(); + assertNull(dirs); + try { + hdfs.getSnapshotListing(dir1); + } catch (Exception e) { + assertTrue(e.getMessage().contains( + "Directory is not a snapshottable directory")); + } + // Make root as snapshottable + final Path root = new Path("/"); + hdfs.allowSnapshot(root); + dirs = 
hdfs.getSnapshottableDirListing(); + assertEquals(1, dirs.length); + assertEquals("", dirs[0].getDirStatus().getLocalName()); + assertEquals(root, dirs[0].getFullPath()); + snapshotStatuses = hdfs.getSnapshotListing(root); + assertNull(snapshotStatuses); + // Make root non-snaphsottable + hdfs.disallowSnapshot(root); + dirs = hdfs.getSnapshottableDirListing(); + assertNull(dirs); + snapshotStatuses = hdfs.getSnapshotListing(root); + assertNull(snapshotStatuses); + + // Make dir1 as snapshottable + hdfs.allowSnapshot(dir1); + hdfs.createSnapshot(dir1, "s0"); + snapshotStatuses = hdfs.getSnapshotListing(dir1); + assertEquals(1, snapshotStatuses.length); + assertEquals("s0", snapshotStatuses[0].getDirStatus(). + getLocalName()); + assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s0"), + snapshotStatuses[0].getFullPath()); + // snapshot id is zero + assertEquals(0, snapshotStatuses[0].getSnapshotID()); + // Create a snapshot for dir2 + hdfs.createSnapshot(dir1, "s1"); + hdfs.createSnapshot(dir1, "s2"); + snapshotStatuses = hdfs.getSnapshotListing(dir1); + // There are now 3 snapshots for dir1 + assertEquals(3, snapshotStatuses.length); + assertEquals("s0", snapshotStatuses[0].getDirStatus(). + getLocalName()); + assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s0"), + snapshotStatuses[0].getFullPath()); + assertEquals("s1", snapshotStatuses[1].getDirStatus(). + getLocalName()); + assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s1"), + snapshotStatuses[1].getFullPath()); + assertEquals("s2", snapshotStatuses[2].getDirStatus(). 
+ getLocalName()); + assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s2"), + snapshotStatuses[2].getFullPath()); + hdfs.deleteSnapshot(dir1, "s2"); + snapshotStatuses = hdfs.getSnapshotListing(dir1); + // There are now 2 snapshots for dir1 + assertEquals(2, snapshotStatuses.length); + } +} \ No newline at end of file From 04f02d2d9820ce67abc3fe3b918f7dd637a56c51 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Mon, 27 Jul 2020 12:01:07 +0530 Subject: [PATCH 2/9] Addressed checkstyle/findbug as well as review comments. --- .../dev-support/findbugsExcludeFile.xml | 2 ++ .../org/apache/hadoop/hdfs/DFSClient.java | 6 ++-- .../hadoop/hdfs/DFSOpsCountStatistics.java | 1 + .../hadoop/hdfs/DistributedFileSystem.java | 6 +++- .../hadoop/hdfs/protocol/ClientProtocol.java | 2 +- .../hadoop/hdfs/protocol/SnapshotStatus.java | 18 +++++----- .../hdfs/protocolPB/PBHelperClient.java | 6 ++-- .../federation/router/RouterSnapshot.java | 18 +++++----- .../federation/router/TestRouterRpc.java | 33 +++++++------------ .../hdfs/server/namenode/FSDirSnapshotOp.java | 7 ++-- .../hdfs/server/namenode/FSNamesystem.java | 25 ++++++++------ .../server/namenode/NameNodeRpcServer.java | 4 +-- .../namenode/snapshot/SnapshotManager.java | 20 +++++------ .../apache/hadoop/hdfs/tools/AdminHelper.java | 2 +- .../hdfs/tools/snapshot/LsSnapshot.java | 19 +++-------- .../namenode/snapshot/TestListSnapshot.java | 26 +++++++-------- 16 files changed, 93 insertions(+), 102 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml index 278d01dc22d0f..012136be9c824 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml @@ -22,6 +22,8 @@ + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 7df03807e8c00..d781dd9ac45db 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2192,16 +2192,16 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() } /** - * Get listing of all the snapshots for a snapshottable directory + * Get listing of all the snapshots for a snapshottable directory. * * @return Information about all the snapshots for a snapshottable directory * @throws IOException If an I/O error occurred - * @see ClientProtocol#getSnapshotListing() + * @see ClientProtocol#getSnapshotListing(String) */ public SnapshotStatus[] getSnapshotListing(String snapshotRoot) throws IOException { checkOpen(); - try (TraceScope ignored = tracer.newScope("getSnapshottableDirListing")) { + try (TraceScope ignored = tracer.newScope("getSnapshotListing")) { return namenode.getSnapshotListing(snapshotRoot); } catch (RemoteException re) { throw re.unwrapRemoteException(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java index 2113ae5c63544..fdd0072905fd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java @@ -111,6 +111,7 @@ public enum OpType { SET_XATTR("op_set_xattr"), GET_SNAPSHOT_DIFF("op_get_snapshot_diff"), GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"), + GET_SNAPSHOT_LIST("op_get_snapshot_list"), TRUNCATE(CommonStatisticNames.OP_TRUNCATE), UNSET_EC_POLICY("op_unset_ec_policy"), UNSET_STORAGE_POLICY("op_unset_storage_policy"); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 01a80d15275ca..37d0226a3a326 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -2155,7 +2155,11 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() */ public SnapshotStatus[] getSnapshotListing(Path snapshotRoot) throws IOException { - return dfs.getSnapshotListing(getPathName(snapshotRoot)); + Path absF = fixRelativePart(snapshotRoot); + statistics.incrementReadOps(1); + storageStatistics + .incrementOpCounter(OpType.GET_SNAPSHOT_LIST); + return dfs.getSnapshotListing(getPathName(absF)); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index c8cecbfbf932d..ea90645ca082b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -728,7 +728,7 @@ SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws IOException; /** - * Get listing of all the snapshots for a snapshottable directory + * Get listing of all the snapshots for a snapshottable directory. 
* * @return Information about all the snapshots for a snapshottable directory * @throws IOException If an I/O error occurred diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java index 72bb05f14b4ac..42953fb23976f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java @@ -27,16 +27,16 @@ import org.apache.hadoop.hdfs.DFSUtilClient; /** - * Metadata about a snapshottable directory + * Metadata about a snapshottable directory. */ public class SnapshotStatus { /** - * Basic information of the snapshot directory + * Basic information of the snapshot directory. */ private final HdfsFileStatus dirStatus; /** - * Snapshot ID for the snapshot + * Snapshot ID for the snapshot. */ private final int snapshotID; @@ -45,7 +45,7 @@ public class SnapshotStatus { */ private final byte[] parentFullPath; - public SnapshotStatus(long modification_time, long access_time, + public SnapshotStatus(long modificationTime, long accessTime, FsPermission permission, EnumSet flags, String owner, String group, byte[] localName, @@ -53,8 +53,8 @@ public SnapshotStatus(long modification_time, long access_time, byte[] parentFullPath) { this.dirStatus = new HdfsFileStatus.Builder() .isdir(true) - .mtime(modification_time) - .atime(access_time) + .mtime(modificationTime) + .atime(accessTime) .perm(permission) .flags(flags) .owner(owner) @@ -102,9 +102,9 @@ public Path getFullPath() { String parentFullPathStr = (parentFullPath == null || parentFullPath.length == 0) ? 
"/" : DFSUtilClient.bytes2String(parentFullPath); - return new Path(getSnapshotPath(parentFullPathStr, - dirStatus.getLocalName())); - } + return new Path(getSnapshotPath(parentFullPathStr, + dirStatus.getLocalName())); + } /** * Print a list of {@link SnapshotStatus} out to a given stream. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index 96d84646513c7..efe020a3acf9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -1674,8 +1674,9 @@ public static SnapshottableDirectoryStatus convert( public static SnapshotStatus[] convert( HdfsProtos.SnapshotListingProto sdlp) { - if (sdlp == null) + if (sdlp == null) { return null; + } List list = sdlp .getSnapshotListingList(); if (list.isEmpty()) { @@ -2712,8 +2713,9 @@ public static SnapshottableDirectoryListingProto convert( public static HdfsProtos.SnapshotListingProto convert( SnapshotStatus[] status) { - if (status == null) + if (status == null) { return null; + } HdfsProtos.SnapshotStatusProto[] protos = new HdfsProtos.SnapshotStatusProto[status.length]; for (int i = 0; i < status.length; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java index 63c7514efa824..056f92a77bfb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java @@ -163,15 +163,17 @@ public 
SnapshotStatus[] getSnapshotListing(String snapshotRoot) rpcServer.checkOperation(NameNode.OperationCategory.READ); final List locations = rpcServer.getLocationsForPath(snapshotRoot, true, false); - RemoteMethod method = new RemoteMethod("getSnapshotListing", - new Class[] {String.class}, + RemoteMethod remoteMethod = new RemoteMethod("getSnapshotListing", + new Class[]{String.class}, new RemoteParam()); - Set nss = namenodeResolver.getNamespaces(); - Map ret = - rpcClient.invokeConcurrent( - nss, method, true, false, SnapshotStatus[].class); - - return RouterRpcServer.merge(ret, SnapshotStatus.class); + if (rpcServer.isInvokeConcurrent(snapshotRoot)) { + Map ret = rpcClient.invokeConcurrent( + locations, remoteMethod, true, false, SnapshotStatus[].class); + return ret.values().iterator().next(); + } else { + return rpcClient.invokeSequential( + locations, remoteMethod, SnapshotStatus[].class, null); + } } public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index b9b1212333eea..e4ccb82bff4da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -71,29 +71,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; -import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; -import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; -import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; -import org.apache.hadoop.hdfs.protocol.CachePoolEntry; -import 
org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.ClientProtocol; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats; -import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; -import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; -import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats; -import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; -import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; -import org.apache.hadoop.hdfs.protocol.SnapshotException; -import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; @@ -110,6 +90,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; @@ -926,6 +907,16 @@ public void testGetSnapshotListing() throws IOException { 
SnapshottableDirectoryStatus snapshotDir0 = dirList[0]; assertEquals(snapshotPath, snapshotDir0.getFullPath().toString()); + // check for snapshot listing through the Router + SnapshotStatus[] snapshots = routerProtocol. + getSnapshotListing(snapshotPath); + assertEquals(2, snapshots.length); + assertEquals(SnapshotTestHelper.getSnapshotRoot + (new Path(snapshotPath), snapshot1), + snapshots[0].getFullPath()); + assertEquals(SnapshotTestHelper.getSnapshotRoot + (new Path(snapshotPath), snapshot2), + snapshots[1].getFullPath()); // Check for difference report in two snapshot SnapshotDiffReport diffReport = routerProtocol.getSnapshotDiffReport( snapshotPath, snapshot1, snapshot2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java index 1eee20d166742..f264dc34063f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java @@ -156,15 +156,14 @@ static SnapshottableDirectoryStatus[] getSnapshottableDirListing( } static SnapshotStatus[] getSnapshotListing( - FSDirectory fsd, SnapshotManager snapshotManager, String path) + FSDirectory fsd, FSPermissionChecker pc, SnapshotManager snapshotManager, + String path) throws IOException { - FSPermissionChecker pc = fsd.getPermissionChecker(); fsd.readLock(); try { INodesInPath iip = fsd.getINodesInPath(path, DirOp.READ); if (fsd.isPermissionEnabled()) { - fsd.checkPermission(pc, iip, false, null, null, FsAction.READ, - FsAction.READ); + fsd.checkPathAccess(pc, iip, FsAction.READ); } return snapshotManager.getSnapshotListing(iip); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 34f4bcd63bf86..9efcab2872748 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -7011,22 +7011,27 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() */ public SnapshotStatus[] getSnapshotListing(String snapshotRoot) throws IOException { - SnapshotStatus[] status = null; + final String operationName = "listSnapshotDirectory"; + SnapshotStatus[] status; checkOperation(OperationCategory.READ); boolean success = false; - readLock(); + final FSPermissionChecker pc = getPermissionChecker(); + FSPermissionChecker.setOperationType(operationName); try { - checkOperation(OperationCategory.READ); - status = FSDirSnapshotOp.getSnapshotListing(dir, snapshotManager, - snapshotRoot); - success = true; + readLock(); + try { + checkOperation(OperationCategory.READ); + status = FSDirSnapshotOp.getSnapshotListing(dir, pc, snapshotManager, + snapshotRoot); + success = true; + } finally { + readUnlock(operationName, getLockReportInfoSupplier(null)); + } } catch (AccessControlException ace) { - logAuditEvent(success, "listSnapshots", null, null, null); + logAuditEvent(success, "listSnapshots", snapshotRoot); throw ace; - } finally { - readUnlock(); } - logAuditEvent(success, "listSnapshots", null, null, null); + logAuditEvent(success, "listSnapshots", snapshotRoot); return status; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index c0161dc11fa11..b42252d9aa9d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -2006,11 +2006,11 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() } @Override // Client Protocol - public SnapshotStatus[] getSnapshotListing(String path) + public SnapshotStatus[] getSnapshotListing(String snapshotRoot) throws IOException { checkNNStartup(); SnapshotStatus[] status = namesystem - .getSnapshotListing(path); + .getSnapshotListing(snapshotRoot); metrics.incrListSnapshotsOps(); return status; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index 2bcf7671a2da6..7aaae3cfa6e49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -516,26 +516,22 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing( public SnapshotStatus[] getSnapshotListing(INodesInPath iip) throws IOException { INodeDirectory srcRoot = getSnapshottableRoot(iip); - ReadOnlyList snapshotList = srcRoot.getDirectorySnapshottableFeature(). - getSnapshotList(); - if (snapshotList.isEmpty()) { - return null; - } - List statusList = - new ArrayList<>(); - for (Snapshot s : snapshotList) { + ReadOnlyList snapshotList = srcRoot. 
+ getDirectorySnapshottableFeature().getSnapshotList(); + SnapshotStatus[] statuses = new SnapshotStatus[snapshotList.size()]; + for (int count = 0; count < snapshotList.size(); count++) { + Snapshot s = snapshotList.get(count); Snapshot.Root dir = s.getRoot(); - SnapshotStatus status = new SnapshotStatus(dir.getModificationTime() + statuses[count] = new SnapshotStatus(dir.getModificationTime() , dir.getAccessTime(), dir.getFsPermission(), EnumSet.noneOf(HdfsFileStatus.Flags.class), dir.getUserName(), dir.getGroupName(), dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(Snapshot.CURRENT_STATE_ID), s.getId(), DFSUtil.string2Bytes(dir.getParent().getFullPathName())); - statusList.add(status); + } - return statusList.toArray( - new SnapshotStatus[statusList.size()]); + return statuses; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java index 27cdf70279087..59a6c8deed9e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java @@ -43,7 +43,7 @@ public class AdminHelper { static final int MAX_LINE_WIDTH = 80; static final String HELP_COMMAND_NAME = "-help"; - static DistributedFileSystem getDFS(Configuration conf) + public static DistributedFileSystem getDFS(Configuration conf) throws IOException { FileSystem fs = FileSystem.get(conf); return checkAndGetDFS(fs, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshot.java index f089f9550bbd0..65b38afe416eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshot.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshot.java @@ -17,16 +17,13 @@ */ package org.apache.hadoop.hdfs.tools.snapshot; -import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.SnapshotStatus; -import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.tools.AdminHelper; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -43,26 +40,18 @@ public int run(String[] argv) throws Exception { "\tGet the list of snapshots for a snapshottable directory.\n"; if(argv.length != 1) { + System.err.println("Invalid no of arguments"); System.err.println("Usage: \n" + description); return 1; } - - FileSystem fs = FileSystem.get(getConf()); - if (! 
(fs instanceof DistributedFileSystem)) { - System.err.println( - "lsSnapshot can only be used in DistributedFileSystem"); - return 1; - } - DistributedFileSystem dfs = (DistributedFileSystem) fs; Path snapshotRoot = new Path(argv[0]); - try { + DistributedFileSystem dfs = AdminHelper.getDFS(getConf()); SnapshotStatus[] stats = dfs.getSnapshotListing(snapshotRoot); SnapshotStatus.print(stats, System.out); - } catch (IOException e) { + } catch (Exception e) { String[] content = e.getLocalizedMessage().split("\n"); System.err.println("lsSnapshot: " + content[0]); - e.printStackTrace(System.err); return 1; } return 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java index 9f73202f24266..ed942884a4eb4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java @@ -21,17 +21,20 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertEquals; +import static org.junit.Assert.*; +/** + * Tests listSnapshot. 
+ */ public class TestListSnapshot { static final short REPLICATION = 3; @@ -63,22 +66,19 @@ public void tearDown() throws Exception { } /** - * Test listing all the snapshottable directories + * Test listing all the snapshottable directories. */ @Test(timeout = 60000) public void testListSnapshot() throws Exception { - cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true); + fsn.getSnapshotManager().setAllowNestedSnapshots(true); // Initially there is no snapshottable directories in the system SnapshotStatus[] snapshotStatuses = null; SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing(); assertNull(dirs); - try { - hdfs.getSnapshotListing(dir1); - } catch (Exception e) { - assertTrue(e.getMessage().contains( - "Directory is not a snapshottable directory")); - } + LambdaTestUtils.intercept(SnapshotException.class, + "Directory is not a " + "snapshottable directory", + () -> hdfs.getSnapshotListing(dir1)); // Make root as snapshottable final Path root = new Path("/"); hdfs.allowSnapshot(root); @@ -87,13 +87,13 @@ public void testListSnapshot() throws Exception { assertEquals("", dirs[0].getDirStatus().getLocalName()); assertEquals(root, dirs[0].getFullPath()); snapshotStatuses = hdfs.getSnapshotListing(root); - assertNull(snapshotStatuses); + assertTrue(snapshotStatuses.length == 0); // Make root non-snaphsottable hdfs.disallowSnapshot(root); dirs = hdfs.getSnapshottableDirListing(); assertNull(dirs); snapshotStatuses = hdfs.getSnapshotListing(root); - assertNull(snapshotStatuses); + assertTrue(snapshotStatuses.length == 0); // Make dir1 as snapshottable hdfs.allowSnapshot(dir1); From 69902c9e9bfb6adc16d5976f101877f2a3f2ff9a Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Mon, 27 Jul 2020 12:19:17 +0530 Subject: [PATCH 3/9] Added documentaion. 
--- .../src/site/markdown/HDFSCommands.md | 10 ++++++++++ .../src/site/markdown/HdfsSnapshots.md | 17 +++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index d199c06afb740..4b7a7a751049c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -158,6 +158,16 @@ Usage: `hdfs lsSnapshottableDir [-help]` Get the list of snapshottable directories. When this is run as a super user, it returns all snapshottable directories. Otherwise it returns those directories that are owned by the current user. +### `lsSnapshot` + +Usage: `hdfs lsSnapshot [-help]` + +| COMMAND\_OPTION | Description | +|:---- |:---- | +| `-help` | print help | + +Get the list of snapshots for a snapshottable directory. + ### `jmxget` Usage: `hdfs jmxget [-localVM ConnectorURL | -port port | -server mbeanserver | -service service]` diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md index af55f33b2640c..1b9b573886b0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md @@ -236,6 +236,23 @@ See also the corresponding Java API `SnapshottableDirectoryStatus[] getSnapshottableDirectoryListing()` in `DistributedFileSystem`. +#### Get Snapshot Listing + +Get all the snapshots for a snapshottable directory. + +* Command: + + hdfs lsSnapshot + +* Arguments: + + | --- | --- | + | path | The path of the snapshottable directory. | + +See also the corresponding Java API +`SnapshotStatus[] getSnapshotListing()` +in `DistributedFileSystem`. 
+ #### Get Snapshots Difference Report From 5bc83f1f685017c7182366e1c3ed3dd8cfa38bd6 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Mon, 27 Jul 2020 18:09:00 +0530 Subject: [PATCH 4/9] Fixed xml error. --- .../hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml | 1 - .../hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml index 012136be9c824..c96b3a99bd1c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml @@ -24,7 +24,6 @@ - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index 7aaae3cfa6e49..919754ae5367c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -522,8 +522,8 @@ public SnapshotStatus[] getSnapshotListing(INodesInPath iip) for (int count = 0; count < snapshotList.size(); count++) { Snapshot s = snapshotList.get(count); Snapshot.Root dir = s.getRoot(); - statuses[count] = new SnapshotStatus(dir.getModificationTime() - , dir.getAccessTime(), dir.getFsPermission(), + statuses[count] = new SnapshotStatus(dir.getModificationTime(), + dir.getAccessTime(), dir.getFsPermission(), EnumSet.noneOf(HdfsFileStatus.Flags.class), dir.getUserName(), dir.getGroupName(), dir.getLocalNameBytes(), dir.getId(), From 972ab57f8c6769f4291456eb37b880b836d29ce2 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Tue, 28 Jul 2020 
11:08:41 +0530 Subject: [PATCH 5/9] Addressed review comments. --- .../hadoop/hdfs/protocol/SnapshotStatus.java | 10 ++++++++- .../federation/router/RouterSnapshot.java | 21 +++++++++++++++++-- .../hadoop-hdfs/src/main/bin/hdfs.cmd | 2 +- 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java index 42953fb23976f..6a938a9e4b17b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java @@ -43,7 +43,7 @@ public class SnapshotStatus { /** * Full path of the parent. */ - private final byte[] parentFullPath; + private byte[] parentFullPath; public SnapshotStatus(long modificationTime, long accessTime, FsPermission permission, @@ -74,6 +74,14 @@ public SnapshotStatus(HdfsFileStatus dirStatus, this.parentFullPath = parentFullPath; } + /** + * Sets the parent path name.
+ * @param path parent path + */ + public void setParentFullPath(byte[] path) { + parentFullPath = path; + } + /** * @return snapshot id for the snapshot */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java index 056f92a77bfb3..e7ab64369ab94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java @@ -24,6 +24,7 @@ import java.util.Map.Entry; import java.util.Set; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; @@ -166,14 +167,30 @@ public SnapshotStatus[] getSnapshotListing(String snapshotRoot) RemoteMethod remoteMethod = new RemoteMethod("getSnapshotListing", new Class[]{String.class}, new RemoteParam()); + SnapshotStatus[] response; if (rpcServer.isInvokeConcurrent(snapshotRoot)) { Map ret = rpcClient.invokeConcurrent( locations, remoteMethod, true, false, SnapshotStatus[].class); - return ret.values().iterator().next(); + response = ret.values().iterator().next(); + String src = ret.keySet().iterator().next().getSrc(); + String dst = ret.keySet().iterator().next().getDest(); + for (SnapshotStatus s : response) { + String mountPath = + new String(s.getParentFullPath()).replaceFirst(src, dst); + s.setParentFullPath(mountPath.getBytes()); + } } else { - return rpcClient.invokeSequential( + response = rpcClient.invokeSequential( locations, remoteMethod, SnapshotStatus[].class, null); + RemoteLocation loc = locations.get(0); + for (SnapshotStatus s : response) { + String mountPath = + new 
String(s.getParentFullPath()).replaceFirst(loc.getDest(), + loc.getSrc()); + s.setParentFullPath(mountPath.getBytes()); + } } + return response; } public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd index 65d341cf20855..21d4de75cc8b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd @@ -258,7 +258,7 @@ goto :eof @echo lsSnapshottableDir list all snapshottable dirs owned by the current user @echo Use -help to see options @echo lsSnapshot list all snapshots for a snapshottable dir - @echo Use -help to see options + @echo Use -help to see options @echo cacheadmin configure the HDFS cache @echo crypto configure HDFS encryption zones @echo mover run a utility to move block replicas across storage types From 81f9119d464e633bbf53681858ae6e2daf4bef04 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Tue, 28 Jul 2020 19:56:05 +0530 Subject: [PATCH 6/9] Addressed Review comments. 
--- .../federation/router/RouterSnapshot.java | 4 ++-- .../federation/router/TestRouterRpc.java | 23 ++++++++++++++++++- .../namenode/snapshot/SnapshotManager.java | 5 ++++ .../namenode/snapshot/TestListSnapshot.java | 6 +++-- 4 files changed, 33 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java index e7ab64369ab94..be28a90d3989d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java @@ -177,7 +177,7 @@ public SnapshotStatus[] getSnapshotListing(String snapshotRoot) for (SnapshotStatus s : response) { String mountPath = new String(s.getParentFullPath()).replaceFirst(src, dst); - s.setParentFullPath(mountPath.getBytes()); + s.setParentFullPath(DFSUtil.string2Bytes(mountPath)); } } else { response = rpcClient.invokeSequential( @@ -187,7 +187,7 @@ public SnapshotStatus[] getSnapshotListing(String snapshotRoot) String mountPath = new String(s.getParentFullPath()).replaceFirst(loc.getDest(), loc.getSrc()); - s.setParentFullPath(mountPath.getBytes()); + s.setParentFullPath(DFSUtil.string2Bytes(mountPath)); } } return response; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index e4ccb82bff4da..f4cfd7c7e0baa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -71,7 +71,28 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; -import org.apache.hadoop.hdfs.protocol.*; +import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; +import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; +import org.apache.hadoop.hdfs.protocol.SnapshotException; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.SnapshotStatus; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index 919754ae5367c..e9729abd42347 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -527,6 +527,11 @@ public SnapshotStatus[] getSnapshotListing(INodesInPath iip) EnumSet.noneOf(HdfsFileStatus.Flags.class), dir.getUserName(), dir.getGroupName(), dir.getLocalNameBytes(), dir.getId(), + // the children number is the same as the + // live fs as the children count is not cached per snapshot. + // It is just used here to construct the HdfsFileStatus object. + // It is expensive to build the snapshot tree for the directory + // and determine the child count. dir.getChildrenNum(Snapshot.CURRENT_STATE_ID), s.getId(), DFSUtil.string2Bytes(dir.getParent().getFullPathName())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java index ed942884a4eb4..dcff2f7f7c69c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java @@ -30,7 +30,9 @@ import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; /** * Tests listSnapshot.
@@ -106,7 +108,7 @@ public void testListSnapshot() throws Exception { snapshotStatuses[0].getFullPath()); // snapshot id is zero assertEquals(0, snapshotStatuses[0].getSnapshotID()); - // Create a snapshot for dir2 + // Create a snapshot for dir1 hdfs.createSnapshot(dir1, "s1"); hdfs.createSnapshot(dir1, "s2"); snapshotStatuses = hdfs.getSnapshotListing(dir1); From 7f4911ce1f7994a262f924a8a1c37ee565bd4953 Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Wed, 29 Jul 2020 07:35:15 +0530 Subject: [PATCH 7/9] Addressed checkstyle issue. --- .../hdfs/server/federation/router/RouterSnapshot.java | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java index be28a90d3989d..824275d4ee54e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java @@ -175,8 +175,8 @@ public SnapshotStatus[] getSnapshotListing(String snapshotRoot) String src = ret.keySet().iterator().next().getSrc(); String dst = ret.keySet().iterator().next().getDest(); for (SnapshotStatus s : response) { - String mountPath = - new String(s.getParentFullPath()).replaceFirst(src, dst); + String mountPath = DFSUtil.bytes2String(s.getParentFullPath()). 
+ replaceFirst(src, dst); s.setParentFullPath(DFSUtil.string2Bytes(mountPath)); } } else { @@ -184,9 +184,8 @@ public SnapshotStatus[] getSnapshotListing(String snapshotRoot) locations, remoteMethod, SnapshotStatus[].class, null); RemoteLocation loc = locations.get(0); for (SnapshotStatus s : response) { - String mountPath = - new String(s.getParentFullPath()).replaceFirst(loc.getDest(), - loc.getSrc()); + String mountPath = DFSUtil.bytes2String(s.getParentFullPath()). + replaceFirst(loc.getDest(),loc.getSrc()); s.setParentFullPath(DFSUtil.string2Bytes(mountPath)); } } From 722869410635a0572781488e9dd51b064654013a Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Wed, 29 Jul 2020 12:17:56 +0530 Subject: [PATCH 8/9] Addressed test failure. --- .../test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java index 41069b439784f..4e6f4e3f4ba38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java @@ -41,6 +41,7 @@ public class TestReadOnly { "getListing", "getBatchedListing", "getSnapshottableDirListing", + "getSnapshotListing", "getPreferredBlockSize", "listCorruptFileBlocks", "getFileInfo", From dbfcdafd8cde2ec182572cb7e51d1be5dd74fd3e Mon Sep 17 00:00:00 2001 From: Shashikant Banerjee Date: Wed, 29 Jul 2020 21:28:51 +0530 Subject: [PATCH 9/9] Addressed few checkstyle issues. 
--- .../hdfs/server/federation/router/RouterSnapshot.java | 2 +- .../hdfs/server/federation/router/TestRouterRpc.java | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java index 824275d4ee54e..3f4f4cb46bd82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSnapshot.java @@ -185,7 +185,7 @@ public SnapshotStatus[] getSnapshotListing(String snapshotRoot) RemoteLocation loc = locations.get(0); for (SnapshotStatus s : response) { String mountPath = DFSUtil.bytes2String(s.getParentFullPath()). - replaceFirst(loc.getDest(),loc.getSrc()); + replaceFirst(loc.getDest(), loc.getSrc()); s.setParentFullPath(DFSUtil.string2Bytes(mountPath)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index f4cfd7c7e0baa..db118f56234d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -932,11 +932,11 @@ public void testGetSnapshotListing() throws IOException { SnapshotStatus[] snapshots = routerProtocol. 
getSnapshotListing(snapshotPath); assertEquals(2, snapshots.length); - assertEquals(SnapshotTestHelper.getSnapshotRoot - (new Path(snapshotPath), snapshot1), + assertEquals(SnapshotTestHelper.getSnapshotRoot( + new Path(snapshotPath), snapshot1), snapshots[0].getFullPath()); - assertEquals(SnapshotTestHelper.getSnapshotRoot - (new Path(snapshotPath), snapshot2), + assertEquals(SnapshotTestHelper.getSnapshotRoot( + new Path(snapshotPath), snapshot2), snapshots[1].getFullPath()); // Check for difference report in two snapshot SnapshotDiffReport diffReport = routerProtocol.getSnapshotDiffReport(