@@ -22,6 +22,7 @@
<Class name="org.apache.hadoop.hdfs.util.StripedBlockUtil$ChunkByteArray"/>
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing$DiffReportListingEntry"/>
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing"/>
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotStatus"/>
</Or>
<Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
</Match>
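(Note: SnapshotStatus joins this exclusion because getParentFullPath() and setParentFullPath() hand out the internal parentFullPath byte array directly, which SpotBugs would otherwise flag as EI_EXPOSE_REP/EI_EXPOSE_REP2.)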
@@ -150,6 +150,7 @@
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2190,6 +2191,24 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
}
}

/**
* Get listing of all the snapshots for a snapshottable directory.
*
* @param snapshotRoot The snapshottable directory to list snapshots for
* @return Information about all the snapshots for a snapshottable directory
* @throws IOException If an I/O error occurred
* @see ClientProtocol#getSnapshotListing(String)
*/
public SnapshotStatus[] getSnapshotListing(String snapshotRoot)
throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getSnapshotListing")) {
return namenode.getSnapshotListing(snapshotRoot);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}


/**
* Allow snapshot on a directory.
*
@@ -111,6 +111,7 @@ public enum OpType {
SET_XATTR("op_set_xattr"),
GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
GET_SNAPSHOT_LIST("op_get_snapshot_list"),
TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
UNSET_EC_POLICY("op_unset_ec_policy"),
UNSET_STORAGE_POLICY("op_unset_storage_policy");
@@ -109,6 +109,7 @@
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing.DiffReportListingEntry;
import org.apache.hadoop.hdfs.client.impl.SnapshotDiffReportGenerator;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
@@ -2148,6 +2149,19 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
return dfs.getSnapshottableDirListing();
}

/**
* Get listing of all the snapshots for a snapshottable directory.
*
* @param snapshotRoot The snapshottable directory to list snapshots for
* @return Information about all the snapshots for a snapshottable directory
* @throws IOException If an I/O error occurred
*/
public SnapshotStatus[] getSnapshotListing(Path snapshotRoot)
throws IOException {
Path absF = fixRelativePart(snapshotRoot);
statistics.incrementReadOps(1);
storageStatistics
.incrementOpCounter(OpType.GET_SNAPSHOT_LIST);
return dfs.getSnapshotListing(getPathName(absF));
}
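(For context, a minimal usage sketch of this new public API; the class and path names below are illustrative, and it assumes a running HDFS cluster with a snapshottable directory /data that has at least one snapshot:)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    // Pick up the cluster configuration (fs.defaultFS) from the classpath.
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // List every snapshot under the snapshottable directory /data.
      SnapshotStatus[] snapshots = dfs.getSnapshotListing(new Path("/data"));
      if (snapshots != null) {
        SnapshotStatus.print(snapshots, System.out);
      }
    }
  }
}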

@Override
public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
throws IOException {
@@ -727,6 +727,18 @@ BatchedDirectoryListing getBatchedListing(
SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException;

/**
* Get listing of all the snapshots for a snapshottable directory.
*
* @param snapshotRoot The snapshottable directory to list snapshots for
* @return Information about all the snapshots for a snapshottable directory
* @throws IOException If an I/O error occurred
*/
@Idempotent
@ReadOnly(isCoordinated = true)
SnapshotStatus[] getSnapshotListing(String snapshotRoot)
throws IOException;
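(For context: @Idempotent marks the RPC as safe to retry, for example across NameNode failover, and @ReadOnly(isCoordinated = true) makes the call eligible to be served by Observer NameNodes under coordinated, consistent reads.)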


///////////////////////////////////////
// System issues and management
///////////////////////////////////////
@@ -0,0 +1,226 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;

import java.io.PrintStream;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.EnumSet;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtilClient;

/**
* Metadata about a snapshot of a snapshottable directory.
*/
public class SnapshotStatus {
/**
* Basic information of the snapshot directory.
*/
private final HdfsFileStatus dirStatus;

/**
* Snapshot ID for the snapshot.
*/
private final int snapshotID;

/**
* Full path of the parent.
*/
private byte[] parentFullPath;

public SnapshotStatus(long modificationTime, long accessTime,
FsPermission permission,
EnumSet<HdfsFileStatus.Flags> flags,
String owner, String group, byte[] localName,
long inodeId, int childrenNum, int snapshotID,
byte[] parentFullPath) {
this.dirStatus = new HdfsFileStatus.Builder()
.isdir(true)
.mtime(modificationTime)
.atime(accessTime)
.perm(permission)
.flags(flags)
.owner(owner)
.group(group)
.path(localName)
.fileId(inodeId)
.children(childrenNum)
.build();
this.snapshotID = snapshotID;
this.parentFullPath = parentFullPath;
}

public SnapshotStatus(HdfsFileStatus dirStatus,
int snapshotNumber, byte[] parentFullPath) {
this.dirStatus = dirStatus;
this.snapshotID = snapshotNumber;
this.parentFullPath = parentFullPath;
}
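(As a rough illustration, a SnapshotStatus could be assembled via the first constructor as follows; all values are hypothetical, and in practice PBHelperClient decodes these fields from the NameNode response:)

SnapshotStatus s = new SnapshotStatus(
    System.currentTimeMillis(),                  // modification time
    System.currentTimeMillis(),                  // access time
    FsPermission.getDirDefault(),                // default directory permission
    EnumSet.noneOf(HdfsFileStatus.Flags.class),  // no ACL/encryption/EC flags
    "alice", "supergroup",                       // owner and group
    DFSUtilClient.string2Bytes("s0"),            // snapshot's local name
    16386L,                                      // inode id (hypothetical)
    0,                                           // children count
    0,                                           // snapshot id
    DFSUtilClient.string2Bytes("/data"));        // snapshottable parent dir
s.getFullPath();                                 // -> /data/.snapshot/s0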

/**
* Sets the parent path name.
* @param path parent path
*/
public void setParentFullPath(byte[] path) {
parentFullPath = path;
}

/**
* @return snapshot id for the snapshot
*/
public int getSnapshotID() {
return snapshotID;
}

/**
* @return The basic information of the directory
*/
public HdfsFileStatus getDirStatus() {
return dirStatus;
}

/**
* @return Full path of the parent snapshottable directory
*/
public byte[] getParentFullPath() {
return parentFullPath;
}

/**
* @return Full path of the snapshot
*/
public Path getFullPath() {
String parentFullPathStr =
(parentFullPath == null || parentFullPath.length == 0) ?
"/" : DFSUtilClient.bytes2String(parentFullPath);
return new Path(getSnapshotPath(parentFullPathStr,
dirStatus.getLocalName()));
}

/**
* Print a list of {@link SnapshotStatus} out to a given stream.
*
* @param stats The list of {@link SnapshotStatus}
* @param out The given stream for printing.
*/
public static void print(SnapshotStatus[] stats,
PrintStream out) {
if (stats == null || stats.length == 0) {
out.println();
return;
}
int maxRepl = 0, maxLen = 0, maxOwner = 0, maxGroup = 0;
int maxSnapshotID = 0;
for (SnapshotStatus status : stats) {
maxRepl = maxLength(maxRepl, status.dirStatus.getReplication());
maxLen = maxLength(maxLen, status.dirStatus.getLen());
maxOwner = maxLength(maxOwner, status.dirStatus.getOwner());
maxGroup = maxLength(maxGroup, status.dirStatus.getGroup());
maxSnapshotID = maxLength(maxSnapshotID, status.snapshotID);
}

String lineFormat = "%s%s " // permission string
+ "%" + maxRepl + "s "
+ (maxOwner > 0 ? "%-" + maxOwner + "s " : "%s")
+ (maxGroup > 0 ? "%-" + maxGroup + "s " : "%s")
+ "%" + maxLen + "s "
+ "%s " // mod time
+ "%" + maxSnapshotID + "s "
+ "%s"; // path
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");

for (SnapshotStatus status : stats) {
String line = String.format(lineFormat, "d",
status.dirStatus.getPermission(),
status.dirStatus.getReplication(),
status.dirStatus.getOwner(),
status.dirStatus.getGroup(),
String.valueOf(status.dirStatus.getLen()),
dateFormat.format(new Date(status.dirStatus.getModificationTime())),
status.snapshotID,
getSnapshotPath(DFSUtilClient.bytes2String(status.parentFullPath),
status.dirStatus.getLocalName())
);
out.println(line);
}
}
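(For illustration, each row printed by the format above is ls-style; with hypothetical values a line would look like:)

drwxr-xr-x 0 alice supergroup 0 2021-08-04 18:35 2 /data/.snapshot/s2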

private static int maxLength(int n, Object value) {
return Math.max(n, String.valueOf(value).length());
}

public static class Bean {
private final String path;
private final int snapshotID;
private final long modificationTime;
private final short permission;
private final String owner;
private final String group;

public Bean(String path, int snapshotID, long
modificationTime, short permission, String owner, String group) {
this.path = path;
this.snapshotID = snapshotID;
this.modificationTime = modificationTime;
this.permission = permission;
this.owner = owner;
this.group = group;
}

public String getPath() {
return path;
}

public int getSnapshotID() {
return snapshotID;
}

public long getModificationTime() {
return modificationTime;
}

public short getPermission() {
return permission;
}

public String getOwner() {
return owner;
}

public String getGroup() {
return group;
}
}

static String getSnapshotPath(String snapshottableDir,
String snapshotRelativePath) {
String parentFullPathStr =
snapshottableDir == null || snapshottableDir.isEmpty() ?
"/" : snapshottableDir;
final StringBuilder b = new StringBuilder(parentFullPathStr);
if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
b.append(Path.SEPARATOR);
}
return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
.append(Path.SEPARATOR)
.append(snapshotRelativePath)
.toString();
}
}
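(A quick sanity check of the getSnapshotPath(...) helper above, with hypothetical inputs; since the method is package-private, this would run from a test in the same package:)

// "/user/alice/data" + "s0" -> "/user/alice/data/.snapshot/s0"
assert SnapshotStatus.getSnapshotPath("/user/alice/data", "s0")
    .equals("/user/alice/data/.snapshot/s0");
// An empty parent falls back to the root directory.
assert SnapshotStatus.getSnapshotPath("", "s1").equals("/.snapshot/s1");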
@@ -89,6 +89,7 @@
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
@@ -150,6 +151,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
@@ -1299,6 +1302,25 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
}
}

@Override
public SnapshotStatus[] getSnapshotListing(String snapshotRoot)
throws IOException {
GetSnapshotListingRequestProto req =
GetSnapshotListingRequestProto.newBuilder()
.setSnapshotRoot(snapshotRoot).build();
try {
GetSnapshotListingResponseProto result = rpcProxy
.getSnapshotListing(null, req);

if (result.hasSnapshotList()) {
return PBHelperClient.convert(result.getSnapshotList());
}
return null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
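(Note that this translator returns null, not an empty array, when the response carries no snapshot list, so callers going through ClientProtocol directly must be prepared for a null result.)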

@Override
public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
String fromSnapshot, String toSnapshot) throws IOException {