diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaRemoteFSMismatchException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaRemoteFSMismatchException.java
new file mode 100644
index 0000000000000..197606592a165
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaRemoteFSMismatchException.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.IOException;
+
+/**
+ * Exception indicating that the replica path prefix does not match the path
+ * prefix of the remote FS.
+ */
+public class ReplicaRemoteFSMismatchException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public ReplicaRemoteFSMismatchException() {
+    super();
+  }
+
+  public ReplicaRemoteFSMismatchException(String msg) {
+    super(msg);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
index f58447fc1d937..df122d7d9fca0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java
@@ -179,7 +179,7 @@ private static URI getAbsoluteURI(URI uri) {
    * @return true if the block URI is contained within the volume URI.
    */
   @VisibleForTesting
-  static boolean containsBlock(URI volumeURI, URI blockURI) {
+  public static boolean containsBlock(URI volumeURI, URI blockURI) {
     if (volumeURI == null && blockURI == null){
       return true;
     }
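Note on the hunk above: `containsBlock` is widened to `public` so the new check in `ProvidedVolumeReplicaMap` (further down in this patch) can call it from another package. For readers unfamiliar with the method, a minimal illustrative sketch of the containment semantics callers rely on follows; the name and the prefix check are hypothetical stand-ins, not the actual implementation in `ProvidedReplica.java`:

    // Hypothetical sketch of the semantics of ProvidedReplica.containsBlock:
    // a block URI is "contained" in a volume URI when the volume URI is a
    // prefix of it, e.g. volume s3a://bucket/data contains the block at
    // s3a://bucket/data/dir0/blk_001.
    static boolean containsBlockSketch(java.net.URI volumeURI,
        java.net.URI blockURI) {
      if (volumeURI == null && blockURI == null) {
        return true;  // matches the context lines in the hunk above
      }
      if (volumeURI == null || blockURI == null) {
        return false;
      }
      return blockURI.toString().startsWith(volumeURI.toString());
    }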
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 1fb757f3aaf31..f3760da7bddeb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -626,16 +626,15 @@ private void addVolume(final StorageLocation location, final String storageId,
     StorageType storageType = location.getStorageType();
     final FsVolumeImpl fsVolume =
         createFsVolume(sd.getStorageUuid(), sd, config);
-    VolumeReplicaMap tempVolumeMap = new VolumeReplicaMap(
-        new AutoCloseableLock());
+    VolumeReplicaMap tempVolumeMap = null;
     ArrayList<IOException> exceptions = Lists.newArrayList();
 
     for (final NamespaceInfo nsInfo : nsInfos) {
       String bpid = nsInfo.getBlockPoolID();
       try {
         fsVolume.addBlockPool(bpid, config, this.timer);
-        tempVolumeMap.addAll(
-            fsVolume.getVolumeMap(bpid, fsVolume, ramDiskReplicaTracker));
+        tempVolumeMap =
+            fsVolume.getVolumeMap(bpid, fsVolume, ramDiskReplicaTracker);
       } catch (IOException e) {
         LOG.warn("Caught exception when adding " + fsVolume +
             ". Will throw later.", e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeReplicaMap.java
index e23f95f762755..3d58ef93d79a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeReplicaMap.java
@@ -29,13 +29,16 @@
 import com.google.common.cache.LoadingCache;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
+import org.apache.hadoop.hdfs.server.datanode.ProvidedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaRemoteFSMismatchException;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.slf4j.Logger;
@@ -126,8 +129,16 @@ ReplicaInfo get(String bpid, long blockId) {
       }
       return cache.get(blockId);
     } catch (ExecutionException e) {
-      LOG.warn("Exception in retrieving ReplicaInfo for block id {}:\n{}",
-          blockId, e.getMessage());
+      Throwable cause = e.getCause();
+      Throwable nestedCause = cause == null ? null : cause.getCause();
+      if (nestedCause != null &&
+          (nestedCause instanceof ReplicaNotFoundException ||
+          nestedCause instanceof ReplicaRemoteFSMismatchException)) {
+        LOG.debug(e.getMessage());
+      } else {
+        LOG.warn("Exception in retrieving ReplicaInfo for block id {}:\n{}",
+            blockId, e.getMessage());
+      }
     }
     return null;
   }
@@ -138,6 +149,12 @@ private ReplicaInfo getReplicaFromAliasMap(long blockId) throws IOException {
     Optional<FileRegion> region =
         (Optional<FileRegion>) aliasMapReader.resolve(blockId);
     if (region.isPresent()) {
+      Path path = region.get().getProvidedStorageLocation().getPath();
+      if (remoteFS != null &&
+          !ProvidedReplica.containsBlock(remoteFS.getUri(), path.toUri())) {
+        throw new ReplicaRemoteFSMismatchException();
+      }
+
       return new ReplicaBuilder(
           HdfsServerConstants.ReplicaState.FINALIZED)
           .setFileRegion(region.get())
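Note on the `catch (ExecutionException e)` hunk above: Guava's `LoadingCache.get()` reports a loader failure as an `ExecutionException` whose cause is the exception the loader threw; the additional `cause.getCause()` here assumes that exception in turn wraps the interesting `ReplicaNotFoundException` or `ReplicaRemoteFSMismatchException`. A self-contained sketch of that unwrapping pattern, with plain JDK exceptions standing in for the HDFS-specific ones and a hypothetical class name:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.util.concurrent.ExecutionException;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class NestedCauseDemo {
      public static void main(String[] args) {
        LoadingCache<Long, String> cache = CacheBuilder.newBuilder()
            .build(new CacheLoader<Long, String>() {
              @Override
              public String load(Long blockId) throws IOException {
                // Hypothetical loader: an intermediate layer wraps the real
                // failure in an IOException, producing the nested cause.
                throw new IOException("lookup failed",
                    new FileNotFoundException("block " + blockId));
              }
            });
        try {
          cache.get(42L);
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();  // the loader's IOException
          Throwable nested = cause == null ? null : cause.getCause();
          // nested is the FileNotFoundException: an "expected" miss can be
          // logged at debug level, anything else as a warning.
          System.out.println("nested cause: " + nested);
        }
      }
    }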
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index e3c03bea1b212..83f51518c8f5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -27,6 +27,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -108,16 +109,34 @@ ReplicaInfo get(String bpid, long blockId) {
     checkBlockPool(bpid);
     try (AutoCloseableLock l = lock.acquire()) {
       // check inner-maps; each of them will have their own synchronization.
-      for (VolumeReplicaMap inner : innerReplicaMaps.values()) {
-        ReplicaInfo info = inner.get(bpid, blockId);
-        if (info != null) {
-          return info;
+      for (FsVolumeImpl fsVolume : innerReplicaMaps.keySet()) {
+        if (!isProvidedVolume(fsVolume)) {
+          // only search non-provided volumes first
+          VolumeReplicaMap volReplicaMap = innerReplicaMaps.get(fsVolume);
+          ReplicaInfo info = volReplicaMap.get(bpid, blockId);
+          if (info != null) {
+            return info;
+          }
+        }
+      }
+      for (FsVolumeImpl fsVolume : innerReplicaMaps.keySet()) {
+        if (isProvidedVolume(fsVolume)) {
+          VolumeReplicaMap volReplicaMap = innerReplicaMaps.get(fsVolume);
+          ReplicaInfo info = volReplicaMap.get(bpid, blockId);
+          if (info != null) {
+            return info;
+          }
         }
       }
     }
     return null;
   }
 
+  private boolean isProvidedVolume(FsVolumeImpl volume) {
+    StorageType storageType = volume.getStorageType();
+    return storageType != null && storageType.equals(StorageType.PROVIDED);
+  }
+
   /**
    * Add a replica's meta information into the map
    *
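Note on the rewritten `ReplicaMap.get()`: the two passes encode a priority, not just an iteration order. Replicas on local (non-PROVIDED) volumes are returned if present, and PROVIDED volumes are consulted only on a local miss, since a PROVIDED lookup may go through the alias map and the remote store. A generic, self-contained sketch of the same two-pass pattern, with hypothetical names rather than the HDFS types:

    import java.util.Map;
    import java.util.function.Predicate;

    public final class TwoPassLookup {
      private TwoPassLookup() {
      }

      // Search the "cheap" inner maps first and the "expensive" ones only on
      // a miss -- the ordering ReplicaMap.get() imposes on non-PROVIDED vs.
      // PROVIDED volumes in the hunk above.
      static <M, K, V> V lookup(Map<M, Map<K, V>> inner,
          Predicate<M> expensive, K key) {
        for (Map.Entry<M, Map<K, V>> e : inner.entrySet()) {
          if (!expensive.test(e.getKey())) {
            V v = e.getValue().get(key);
            if (v != null) {
              return v;
            }
          }
        }
        for (Map.Entry<M, Map<K, V>> e : inner.entrySet()) {
          if (expensive.test(e.getKey())) {
            V v = e.getValue().get(key);
            if (v != null) {
              return v;
            }
          }
        }
        return null;
      }
    }

As the sketch suggests, iterating `innerReplicaMaps.entrySet()` instead of `keySet()` followed by `innerReplicaMaps.get(fsVolume)` would avoid a second hash lookup per volume in the patched loops.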