Commit 7dcad84

HDFS-7228. Add an SSD policy into the default BlockStoragePolicySuite. Contributed by Jing Zhao.
1 parent 5faaba0 commit 7dcad84

7 files changed: +86 -77 lines

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -676,6 +676,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7195. Update user doc of secure mode about Datanodes don't require root
     or jsvc. (cnauroth)
 
+    HDFS-7228. Add an SSD policy into the default BlockStoragePolicySuite.
+    (jing9)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

Lines changed: 2 additions & 2 deletions
@@ -455,8 +455,8 @@ private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
   }
 
   private boolean initLazyPersist(HdfsFileStatus stat) {
-    final BlockStoragePolicy lpPolicy =
-        blockStoragePolicySuite.getPolicy("LAZY_PERSIST");
+    final BlockStoragePolicy lpPolicy = blockStoragePolicySuite
+        .getPolicy(HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
     return lpPolicy != null &&
         stat.getStoragePolicy() == lpPolicy.getId();
   }

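The rewritten lookup resolves the in-memory policy through HdfsConstants.MEMORY_STORAGE_POLICY_NAME instead of a bare "LAZY_PERSIST" literal, so this client-side check can no longer drift from the name registered in the suite. A minimal sketch of the same check outside DFSOutputStream (illustrative only; assumes stat is an HdfsFileStatus already in scope):

// Sketch: resolve the LAZY_PERSIST policy by its symbolic name and
// compare its id against the file's policy id, as initLazyPersist does.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy lpPolicy =
    suite.getPolicy(HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
boolean isLazyPersist =
    lpPolicy != null && stat.getStoragePolicy() == lpPolicy.getId();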
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

Lines changed: 14 additions & 0 deletions
@@ -164,4 +164,18 @@ public static enum DatanodeReportType {
 
   public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
       = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;
+
+  public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
+  public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
+  public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
+  public static final String HOT_STORAGE_POLICY_NAME = "HOT";
+  public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final String COLD_STORAGE_POLICY_NAME = "COLD";
+
+  public static final byte MEMORY_STORAGE_POLICY_ID = 15;
+  public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
+  public static final byte ONESSD_STORAGE_POLICY_ID = 10;
+  public static final byte HOT_STORAGE_POLICY_ID = 7;
+  public static final byte WARM_STORAGE_POLICY_ID = 5;
+  public static final byte COLD_STORAGE_POLICY_ID = 2;
 }

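Note that the IDs are renumbered rather than merely appended: HOT moves from 12 to 7, WARM from 8 to 5, and COLD from 4 to 2, presumably to leave room between tiers for ONE_SSD (10), ALL_SSD (12), and future policies. A minimal sketch of pairing a policy name with its id via the new constants (illustrative only):

// Sketch: the constants replace magic strings and numbers at call sites.
String name = HdfsConstants.ONESSD_STORAGE_POLICY_NAME; // "ONE_SSD"
byte id = HdfsConstants.ONESSD_STORAGE_POLICY_ID;       // 10
System.out.println(name + " -> " + id);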
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java

Lines changed: 25 additions & 8 deletions
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,23 +45,39 @@ public class BlockStoragePolicySuite {
   public static BlockStoragePolicySuite createDefaultSuite() {
     final BlockStoragePolicy[] policies =
         new BlockStoragePolicy[1 << ID_BIT_LENGTH];
-    final byte lazyPersistId = 15;
-    policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, "LAZY_PERSIST",
+    final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
+    policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId,
+        HdfsConstants.MEMORY_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
         new StorageType[]{StorageType.DISK},
         new StorageType[]{StorageType.DISK},
         true);    // Cannot be changed on regular files, but inherited.
-    final byte hotId = 12;
-    policies[hotId] = new BlockStoragePolicy(hotId, "HOT",
+    final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
+    policies[allssdId] = new BlockStoragePolicy(allssdId,
+        HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
+        new StorageType[]{StorageType.SSD},
+        new StorageType[]{StorageType.DISK},
+        new StorageType[]{StorageType.DISK});
+    final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
+    policies[onessdId] = new BlockStoragePolicy(onessdId,
+        HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
+        new StorageType[]{StorageType.SSD, StorageType.DISK},
+        new StorageType[]{StorageType.SSD, StorageType.DISK},
+        new StorageType[]{StorageType.SSD, StorageType.DISK});
+    final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
+    policies[hotId] = new BlockStoragePolicy(hotId,
+        HdfsConstants.HOT_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
         new StorageType[]{StorageType.ARCHIVE});
-    final byte warmId = 8;
-    policies[warmId] = new BlockStoragePolicy(warmId, "WARM",
+    final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
+    policies[warmId] = new BlockStoragePolicy(warmId,
+        HdfsConstants.WARM_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
-    final byte coldId = 4;
-    policies[coldId] = new BlockStoragePolicy(coldId, "COLD",
+    final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
+    policies[coldId] = new BlockStoragePolicy(coldId,
+        HdfsConstants.COLD_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,
         StorageType.EMPTY_ARRAY);
     return new BlockStoragePolicySuite(hotId, policies);

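The two new policies differ only in placement: ALL_SSD targets SSD for every replica (falling back to DISK on creation and replication), while ONE_SSD targets SSD for the first replica and DISK for the rest. A minimal sketch of the resulting placement for a three-replica block (illustrative; relies on the existing BlockStoragePolicy#chooseStorageTypes(short), and the expected output follows from the storage-type lists above):

// Sketch: ask each new policy where a 3-replica block would be stored.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy oneSsd =
    suite.getPolicy(HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
BlockStoragePolicy allSsd =
    suite.getPolicy(HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);
System.out.println(oneSsd.chooseStorageTypes((short) 3)); // [SSD, DISK, DISK]
System.out.println(allSsd.chooseStorageTypes((short) 3)); // [SSD, SSD, SSD]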
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

Lines changed: 2 additions & 1 deletion
@@ -1133,7 +1133,8 @@ public static void runOperations(MiniDFSCluster cluster,
     // OP_CLOSE 9
     s.close();
     // OP_SET_STORAGE_POLICY 45
-    filesystem.setStoragePolicy(pathFileCreate, "HOT");
+    filesystem.setStoragePolicy(pathFileCreate,
+        HdfsConstants.HOT_STORAGE_POLICY_NAME);
     // OP_RENAME_OLD 1
     final Path pathFileMoved = new Path("/file_moved");
     filesystem.rename(pathFileCreate, pathFileMoved);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

Lines changed: 34 additions & 22 deletions
@@ -66,10 +66,12 @@ public class TestBlockStoragePolicy {
   static final long FILE_LEN = 1024;
   static final short REPLICATION = 3;
 
-  static final byte COLD = (byte) 4;
-  static final byte WARM = (byte) 8;
-  static final byte HOT = (byte) 12;
-  static final byte LAZY_PERSIST = (byte) 15;
+  static final byte COLD = HdfsConstants.COLD_STORAGE_POLICY_ID;
+  static final byte WARM = HdfsConstants.WARM_STORAGE_POLICY_ID;
+  static final byte HOT = HdfsConstants.HOT_STORAGE_POLICY_ID;
+  static final byte ONESSD = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
+  static final byte ALLSSD = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
+  static final byte LAZY_PERSIST = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
 
   @Test (timeout=300000)
   public void testConfigKeyEnabled() throws IOException {
@@ -79,7 +81,8 @@ public void testConfigKeyEnabled() throws IOException {
         .numDataNodes(1).build();
     try {
       cluster.waitActive();
-      cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD");
+      cluster.getFileSystem().setStoragePolicy(new Path("/"),
+          HdfsConstants.COLD_STORAGE_POLICY_NAME);
     } finally {
       cluster.shutdown();
     }
@@ -98,7 +101,8 @@ public void testConfigKeyDisabled() throws IOException {
         .numDataNodes(1).build();
     try {
       cluster.waitActive();
-      cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD");
+      cluster.getFileSystem().setStoragePolicy(new Path("/"),
+          HdfsConstants.COLD_STORAGE_POLICY_NAME);
     } finally {
       cluster.shutdown();
     }
@@ -108,17 +112,25 @@ public void testConfigKeyDisabled() throws IOException {
   public void testDefaultPolicies() {
     final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
     expectedPolicyStrings.put(COLD,
-        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], " +
+        "BlockStoragePolicy{COLD:" + COLD + ", storageTypes=[ARCHIVE], " +
         "creationFallbacks=[], replicationFallbacks=[]}");
     expectedPolicyStrings.put(WARM,
-        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], " +
-        "creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}");
+        "BlockStoragePolicy{WARM:" + WARM + ", storageTypes=[DISK, ARCHIVE], " +
+        "creationFallbacks=[DISK, ARCHIVE], " +
+        "replicationFallbacks=[DISK, ARCHIVE]}");
     expectedPolicyStrings.put(HOT,
-        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
+        "BlockStoragePolicy{HOT:" + HOT + ", storageTypes=[DISK], " +
         "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
     expectedPolicyStrings.put(LAZY_PERSIST,
-        "BlockStoragePolicy{LAZY_PERSIST:15, storageTypes=[RAM_DISK, DISK], " +
+        "BlockStoragePolicy{LAZY_PERSIST:" + LAZY_PERSIST +
+        ", storageTypes=[RAM_DISK, DISK], " +
         "creationFallbacks=[DISK], replicationFallbacks=[DISK]}");
+    expectedPolicyStrings.put(ONESSD, "BlockStoragePolicy{ONE_SSD:" + ONESSD +
+        ", storageTypes=[SSD, DISK], creationFallbacks=[SSD, DISK], " +
+        "replicationFallbacks=[SSD, DISK]}");
+    expectedPolicyStrings.put(ALLSSD, "BlockStoragePolicy{ALL_SSD:" + ALLSSD +
+        ", storageTypes=[SSD], creationFallbacks=[DISK], " +
+        "replicationFallbacks=[DISK]}");
 
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -845,15 +857,15 @@ public void testSetStoragePolicy() throws Exception {
 
     final Path invalidPath = new Path("/invalidPath");
     try {
-      fs.setStoragePolicy(invalidPath, "WARM");
+      fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
       Assert.fail("Should throw a FileNotFoundException");
     } catch (FileNotFoundException e) {
       GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
     }
 
-    fs.setStoragePolicy(fooFile, "COLD");
-    fs.setStoragePolicy(barDir, "WARM");
-    fs.setStoragePolicy(barFile2, "HOT");
+    fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
+    fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
+    fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
 
     dirList = fs.getClient().listPaths(dir.toString(),
         HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -901,7 +913,7 @@ public void testSetStoragePolicyWithSnapshot() throws Exception {
     DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
     DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);
 
-    fs.setStoragePolicy(fooDir, "WARM");
+    fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
 
     HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
         HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
@@ -913,7 +925,7 @@ public void testSetStoragePolicyWithSnapshot() throws Exception {
     // take snapshot
     SnapshotTestHelper.createSnapshot(fs, dir, "s1");
     // change the storage policy of fooFile1
-    fs.setStoragePolicy(fooFile1, "COLD");
+    fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);
 
     fooList = fs.getClient().listPaths(fooDir.toString(),
         HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -936,7 +948,7 @@ public void testSetStoragePolicyWithSnapshot() throws Exception {
         HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);
 
     // change the storage policy of foo dir
-    fs.setStoragePolicy(fooDir, "HOT");
+    fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
     // /dir/foo is now hot
     dirList = fs.getClient().listPaths(dir.toString(),
         HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
@@ -1053,7 +1065,7 @@ private void testChangeFileRep(String policyName, byte policyId,
    */
   @Test
   public void testChangeHotFileRep() throws Exception {
-    testChangeFileRep("HOT", HOT,
+    testChangeFileRep(HdfsConstants.HOT_STORAGE_POLICY_NAME, HOT,
         new StorageType[]{StorageType.DISK, StorageType.DISK,
             StorageType.DISK},
         new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK,
@@ -1067,7 +1079,7 @@ public void testChangeHotFileRep() throws Exception {
    */
   @Test
   public void testChangeWarmRep() throws Exception {
-    testChangeFileRep("WARM", WARM,
+    testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM,
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
            StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
@@ -1080,7 +1092,7 @@ public void testChangeWarmRep() throws Exception {
    */
   @Test
   public void testChangeColdRep() throws Exception {
-    testChangeFileRep("COLD", COLD,
+    testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
         new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
            StorageType.ARCHIVE},
         new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
@@ -1144,7 +1156,7 @@ public void testGetAllStoragePolicies() throws Exception {
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       BlockStoragePolicy[] policies = fs.getStoragePolicies();
-      Assert.assertEquals(4, policies.length);
+      Assert.assertEquals(6, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());
       Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),

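With ONE_SSD and ALL_SSD registered, clients now see six default policies in ascending-id order, which is why the assertion changes from 4 to 6. A minimal sketch mirroring testGetAllStoragePolicies (illustrative; assumes a running MiniDFSCluster and its DistributedFileSystem fs, as in the test):

// Sketch: list the six default policies, lowest id first.
BlockStoragePolicy[] policies = fs.getStoragePolicies();
for (BlockStoragePolicy policy : policies) {
  System.out.println(policy); // COLD, WARM, HOT, ONE_SSD, ALL_SSD, LAZY_PERSIST
}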
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java

Lines changed: 6 additions & 44 deletions
@@ -32,7 +32,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -44,6 +43,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -67,8 +67,6 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC;
-
 /**
  * Test the data migration tool (for Archival Storage)
  */
@@ -100,9 +98,9 @@ public class TestStorageMover {
     DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
 
     DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
-    HOT = DEFAULT_POLICIES.getPolicy("HOT");
-    WARM = DEFAULT_POLICIES.getPolicy("WARM");
-    COLD = DEFAULT_POLICIES.getPolicy("COLD");
+    HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);
+    WARM = DEFAULT_POLICIES.getPolicy(HdfsConstants.WARM_STORAGE_POLICY_NAME);
+    COLD = DEFAULT_POLICIES.getPolicy(HdfsConstants.COLD_STORAGE_POLICY_NAME);
     TestBalancer.initTestSetup();
     Dispatcher.setDelayAfterErrors(1000L);
   }
@@ -201,14 +199,6 @@ class MigrationTest {
       this.policies = DEFAULT_POLICIES;
     }
 
-    MigrationTest(ClusterScheme cScheme, NamespaceScheme nsScheme,
-        BlockStoragePolicySuite policies) {
-      this.clusterScheme = cScheme;
-      this.nsScheme = nsScheme;
-      this.conf = clusterScheme.conf;
-      this.policies = policies;
-    }
-
     /**
      * Set up the cluster and start NameNode and DataNodes according to the
      * corresponding scheme.
@@ -273,9 +263,6 @@ void verify(boolean verifyAll) throws Exception {
       }
       if (verifyAll) {
         verifyNamespace();
-      } else {
-        // TODO verify according to the given path list
-
       }
     }
 
@@ -413,11 +400,6 @@ private static StorageType[][] genStorageTypes(int numDataNodes) {
     return genStorageTypes(numDataNodes, 0, 0, 0);
   }
 
-  private static StorageType[][] genStorageTypes(int numDataNodes,
-      int numAllDisk, int numAllArchive) {
-    return genStorageTypes(numDataNodes, numAllDisk, numAllArchive, 0);
-  }
-
   private static StorageType[][] genStorageTypes(int numDataNodes,
       int numAllDisk, int numAllArchive, int numRamDisk) {
     Preconditions.checkArgument(
@@ -441,26 +423,6 @@ private static StorageType[][] genStorageTypes(int numDataNodes,
     return types;
   }
 
-  private static long[][] genCapacities(int nDatanodes, int numAllDisk,
-      int numAllArchive, int numRamDisk, long diskCapacity,
-      long archiveCapacity, long ramDiskCapacity) {
-    final long[][] capacities = new long[nDatanodes][];
-    int i = 0;
-    for (; i < numRamDisk; i++) {
-      capacities[i] = new long[]{ramDiskCapacity, diskCapacity};
-    }
-    for (; i < numRamDisk + numAllDisk; i++) {
-      capacities[i] = new long[]{diskCapacity, diskCapacity};
-    }
-    for (; i < numRamDisk + numAllDisk + numAllArchive; i++) {
-      capacities[i] = new long[]{archiveCapacity, archiveCapacity};
-    }
-    for(; i < capacities.length; i++) {
-      capacities[i] = new long[]{diskCapacity, archiveCapacity};
-    }
-    return capacities;
-  }
-
   private static class PathPolicyMap {
     final Map<Path, BlockStoragePolicy> map = Maps.newHashMap();
     final Path hot = new Path("/hot");
@@ -666,8 +628,8 @@ private void waitForAllReplicas(int expectedReplicaNum, Path file,
 
   private void setVolumeFull(DataNode dn, StorageType type) {
     List<? extends FsVolumeSpi> volumes = dn.getFSDataset().getVolumes();
-    for (int j = 0; j < volumes.size(); ++j) {
-      FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
+    for (FsVolumeSpi v : volumes) {
+      FsVolumeImpl volume = (FsVolumeImpl) v;
       if (volume.getStorageType() == type) {
         LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
             + volume.getStorageID());

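The static initializer above now resolves HOT, WARM and COLD through the shared name constants. Since BlockStoragePolicySuite#getPolicy is overloaded for both names and ids (both overloads are exercised elsewhere in this patch), the two lookups agree; a minimal sketch (illustrative only):

// Sketch: name-based and id-based lookups return the same policy object.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy byName =
    suite.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);
BlockStoragePolicy byId =
    suite.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_ID);
assert byName == byId; // both resolve to policies[7] in the default suite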