Skip to content

Commit c7d5e95

Browse files
committed
HBASE-27148 Move minimum hadoop 3 support version to 3.2.3
1 parent acfbc3f commit c7d5e95

File tree

7 files changed

+155
-165
lines changed

7 files changed

+155
-165
lines changed

hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java

Lines changed: 63 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,8 @@
143143
import org.apache.hadoop.hdfs.DFSClient;
144144
import org.apache.hadoop.hdfs.DistributedFileSystem;
145145
import org.apache.hadoop.hdfs.MiniDFSCluster;
146+
import org.apache.hadoop.hdfs.server.datanode.DataNode;
147+
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
146148
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
147149
import org.apache.hadoop.mapred.JobConf;
148150
import org.apache.hadoop.mapred.MiniMRCluster;
@@ -202,6 +204,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
202204
public static final boolean PRESPLIT_TEST_TABLE = true;
203205

204206
private MiniDFSCluster dfsCluster = null;
207+
private FsDatasetAsyncDiskServiceFixer dfsClusterFixer = null;
205208

206209
private volatile HBaseClusterInterface hbaseCluster = null;
207210
private MiniMRCluster mrCluster = null;
@@ -571,6 +574,56 @@ private void setFs() throws IOException {
571574
conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
572575
}
573576

577+
// Workaround to avoid IllegalThreadStateException
578+
// See HBASE-27148 for more details
579+
private static final class FsDatasetAsyncDiskServiceFixer extends Thread {
580+
581+
private volatile boolean stopped = false;
582+
583+
private final MiniDFSCluster cluster;
584+
585+
FsDatasetAsyncDiskServiceFixer(MiniDFSCluster cluster) {
586+
super("FsDatasetAsyncDiskServiceFixer");
587+
setDaemon(true);
588+
this.cluster = cluster;
589+
}
590+
591+
@Override
592+
public void run() {
593+
while (!stopped) {
594+
try {
595+
Thread.sleep(30000);
596+
} catch (InterruptedException e) {
597+
Thread.currentThread().interrupt();
598+
continue;
599+
}
600+
// we could add new datanodes during tests, so here we will check every 30 seconds, as the
601+
// timeout of the thread pool executor is 60 seconds by default.
602+
try {
603+
for (DataNode dn : cluster.getDataNodes()) {
604+
FsDatasetSpi<?> dataset = dn.getFSDataset();
605+
Field service = dataset.getClass().getDeclaredField("asyncDiskService");
606+
service.setAccessible(true);
607+
Object asyncDiskService = service.get(dataset);
608+
Field group = asyncDiskService.getClass().getDeclaredField("threadGroup");
609+
group.setAccessible(true);
610+
ThreadGroup threadGroup = (ThreadGroup) group.get(asyncDiskService);
611+
if (threadGroup.isDaemon()) {
612+
threadGroup.setDaemon(false);
613+
}
614+
}
615+
} catch (Exception e) {
616+
LOG.warn("failed to reset thread pool timeout for FsDatasetAsyncDiskService", e);
617+
}
618+
}
619+
}
620+
621+
void shutdown() {
622+
stopped = true;
623+
interrupt();
624+
}
625+
}
626+
574627
public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts)
575628
throws Exception {
576629
createDirsAndSetProperties();
@@ -582,7 +635,8 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, Str
582635
"ERROR");
583636
this.dfsCluster =
584637
new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
585-
638+
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
639+
this.dfsClusterFixer.start();
586640
// Set this just-started cluster as our filesystem.
587641
setFs();
588642

@@ -606,6 +660,8 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
606660
"ERROR");
607661
dfsCluster =
608662
new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
663+
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
664+
this.dfsClusterFixer.start();
609665
return dfsCluster;
610666
}
611667

@@ -728,6 +784,12 @@ public void shutdownMiniDFSCluster() throws IOException {
728784
// The below throws an exception per dn, AsynchronousCloseException.
729785
this.dfsCluster.shutdown();
730786
dfsCluster = null;
787+
// It is possible that the dfs cluster is set through setDFSCluster method, where we will not
788+
// have a fixer
789+
if (dfsClusterFixer != null) {
790+
this.dfsClusterFixer.shutdown();
791+
dfsClusterFixer = null;
792+
}
731793
dataTestDirOnTestFS = null;
732794
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
733795
}

hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml

Lines changed: 0 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -81,38 +81,6 @@
8181
<artifactId>hadoop-common</artifactId>
8282
<scope>provided</scope>
8383
</dependency>
84-
<dependency>
85-
<groupId>org.codehaus.jackson</groupId>
86-
<artifactId>jackson-jaxrs</artifactId>
87-
<version>1.9.13</version>
88-
<scope>provided</scope>
89-
<exclusions>
90-
<exclusion>
91-
<groupId>org.codehaus.jackson</groupId>
92-
<artifactId>jackson-mapper-asl</artifactId>
93-
</exclusion>
94-
<exclusion>
95-
<groupId>org.codehaus.jackson</groupId>
96-
<artifactId>jackson-core-asl</artifactId>
97-
</exclusion>
98-
</exclusions>
99-
</dependency>
100-
<dependency>
101-
<groupId>org.codehaus.jackson</groupId>
102-
<artifactId>jackson-xc</artifactId>
103-
<version>1.9.13</version>
104-
<scope>provided</scope>
105-
<exclusions>
106-
<exclusion>
107-
<groupId>org.codehaus.jackson</groupId>
108-
<artifactId>jackson-mapper-asl</artifactId>
109-
</exclusion>
110-
<exclusion>
111-
<groupId>org.codehaus.jackson</groupId>
112-
<artifactId>jackson-core-asl</artifactId>
113-
</exclusion>
114-
</exclusions>
115-
</dependency>
11684
</dependencies>
11785
</profile>
11886
</profiles>

hbase-shaded/hbase-shaded-mapreduce/pom.xml

Lines changed: 0 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -203,52 +203,6 @@
203203
<groupId>org.apache.hadoop</groupId>
204204
<artifactId>hadoop-mapreduce-client-core</artifactId>
205205
<scope>provided</scope>
206-
<exclusions>
207-
<exclusion>
208-
<groupId>com.google.guava</groupId>
209-
<artifactId>guava</artifactId>
210-
</exclusion>
211-
<exclusion>
212-
<groupId>javax.xml.bind</groupId>
213-
<artifactId>jaxb-api</artifactId>
214-
</exclusion>
215-
<exclusion>
216-
<groupId>javax.ws.rs</groupId>
217-
<artifactId>jsr311-api</artifactId>
218-
</exclusion>
219-
</exclusions>
220-
</dependency>
221-
<dependency>
222-
<groupId>org.codehaus.jackson</groupId>
223-
<artifactId>jackson-jaxrs</artifactId>
224-
<version>1.9.13</version>
225-
<scope>provided</scope>
226-
<exclusions>
227-
<exclusion>
228-
<groupId>org.codehaus.jackson</groupId>
229-
<artifactId>jackson-mapper-asl</artifactId>
230-
</exclusion>
231-
<exclusion>
232-
<groupId>org.codehaus.jackson</groupId>
233-
<artifactId>jackson-core-asl</artifactId>
234-
</exclusion>
235-
</exclusions>
236-
</dependency>
237-
<dependency>
238-
<groupId>org.codehaus.jackson</groupId>
239-
<artifactId>jackson-xc</artifactId>
240-
<version>1.9.13</version>
241-
<scope>provided</scope>
242-
<exclusions>
243-
<exclusion>
244-
<groupId>org.codehaus.jackson</groupId>
245-
<artifactId>jackson-mapper-asl</artifactId>
246-
</exclusion>
247-
<exclusion>
248-
<groupId>org.codehaus.jackson</groupId>
249-
<artifactId>jackson-core-asl</artifactId>
250-
</exclusion>
251-
</exclusions>
252206
</dependency>
253207
</dependencies>
254208
</profile>

hbase-shaded/hbase-shaded-testing-util-tester/pom.xml

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -84,12 +84,6 @@
8484
<version>${project.version}</version>
8585
<scope>test</scope>
8686
</dependency>
87-
<dependency>
88-
<groupId>org.codehaus.jackson</groupId>
89-
<artifactId>jackson-mapper-asl</artifactId>
90-
<version>1.9.13</version>
91-
<scope>test</scope>
92-
</dependency>
9387
</dependencies>
9488

9589
</project>

hbase-shaded/hbase-shaded-testing-util/pom.xml

Lines changed: 0 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -38,36 +38,6 @@
3838
<version>${hadoop.version}</version>
3939
<type>test-jar</type>
4040
<scope>compile</scope>
41-
<exclusions>
42-
<exclusion>
43-
<groupId>javax.servlet.jsp</groupId>
44-
<artifactId>jsp-api</artifactId>
45-
</exclusion>
46-
<exclusion>
47-
<groupId>org.codehaus.jackson</groupId>
48-
<artifactId>jackson-mapper-asl</artifactId>
49-
</exclusion>
50-
<exclusion>
51-
<groupId>org.codehaus.jackson</groupId>
52-
<artifactId>jackson-core-asl</artifactId>
53-
</exclusion>
54-
<exclusion>
55-
<groupId>org.codehaus.jackson</groupId>
56-
<artifactId>jackson-jaxrs</artifactId>
57-
</exclusion>
58-
<exclusion>
59-
<groupId>org.codehaus.jackson</groupId>
60-
<artifactId>jackson-xc</artifactId>
61-
</exclusion>
62-
<exclusion>
63-
<groupId>javax.xml.bind</groupId>
64-
<artifactId>jaxb-api</artifactId>
65-
</exclusion>
66-
<exclusion>
67-
<groupId>javax.ws.rs</groupId>
68-
<artifactId>jsr311-api</artifactId>
69-
</exclusion>
70-
</exclusions>
7141
</dependency>
7242
<dependency>
7343
<groupId>org.apache.hadoop</groupId>
@@ -123,12 +93,6 @@
12393
<type>test-jar</type>
12494
<scope>compile</scope>
12595
</dependency>
126-
<dependency>
127-
<groupId>org.codehaus.jackson</groupId>
128-
<artifactId>jackson-jaxrs</artifactId>
129-
<version>1.9.13</version>
130-
<scope>compile</scope>
131-
</dependency>
13296
<dependency>
13397
<groupId>org.apache.hbase</groupId>
13498
<artifactId>hbase-testing-util</artifactId>

hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java

Lines changed: 63 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,6 +135,8 @@
135135
import org.apache.hadoop.hdfs.DFSClient;
136136
import org.apache.hadoop.hdfs.DistributedFileSystem;
137137
import org.apache.hadoop.hdfs.MiniDFSCluster;
138+
import org.apache.hadoop.hdfs.server.datanode.DataNode;
139+
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
138140
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
139141
import org.apache.hadoop.mapred.JobConf;
140142
import org.apache.hadoop.mapred.MiniMRCluster;
@@ -189,6 +191,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
189191
public static final boolean PRESPLIT_TEST_TABLE = true;
190192

191193
private MiniDFSCluster dfsCluster = null;
194+
private FsDatasetAsyncDiskServiceFixer dfsClusterFixer = null;
192195

193196
private volatile HBaseCluster hbaseCluster = null;
194197
private MiniMRCluster mrCluster = null;
@@ -509,6 +512,56 @@ public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException
509512
return getTestFileSystem().delete(cpath, true);
510513
}
511514

515+
// Workaround to avoid IllegalThreadStateException
516+
// See HBASE-27148 for more details
517+
private static final class FsDatasetAsyncDiskServiceFixer extends Thread {
518+
519+
private volatile boolean stopped = false;
520+
521+
private final MiniDFSCluster cluster;
522+
523+
FsDatasetAsyncDiskServiceFixer(MiniDFSCluster cluster) {
524+
super("FsDatasetAsyncDiskServiceFixer");
525+
setDaemon(true);
526+
this.cluster = cluster;
527+
}
528+
529+
@Override
530+
public void run() {
531+
while (!stopped) {
532+
try {
533+
Thread.sleep(30000);
534+
} catch (InterruptedException e) {
535+
Thread.currentThread().interrupt();
536+
continue;
537+
}
538+
// we could add new datanodes during tests, so here we will check every 30 seconds, as the
539+
// timeout of the thread pool executor is 60 seconds by default.
540+
try {
541+
for (DataNode dn : cluster.getDataNodes()) {
542+
FsDatasetSpi<?> dataset = dn.getFSDataset();
543+
Field service = dataset.getClass().getDeclaredField("asyncDiskService");
544+
service.setAccessible(true);
545+
Object asyncDiskService = service.get(dataset);
546+
Field group = asyncDiskService.getClass().getDeclaredField("threadGroup");
547+
group.setAccessible(true);
548+
ThreadGroup threadGroup = (ThreadGroup) group.get(asyncDiskService);
549+
if (threadGroup.isDaemon()) {
550+
threadGroup.setDaemon(false);
551+
}
552+
}
553+
} catch (Exception e) {
554+
LOG.warn("failed to reset thread pool timeout for FsDatasetAsyncDiskService", e);
555+
}
556+
}
557+
}
558+
559+
void shutdown() {
560+
stopped = true;
561+
interrupt();
562+
}
563+
}
564+
512565
/**
513566
* Start a minidfscluster.
514567
* @param servers How many DNs to start.
* @see #shutdownMiniDFSCluster()
@@ -567,7 +620,8 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], Str
567620

568621
this.dfsCluster =
569622
new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
570-
623+
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
624+
this.dfsClusterFixer.start();
571625
// Set this just-started cluster as our filesystem.
572626
setFs();
573627

@@ -591,6 +645,8 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
591645
"ERROR");
592646
dfsCluster =
593647
new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
648+
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
649+
this.dfsClusterFixer.start();
594650
return dfsCluster;
595651
}
596652

@@ -713,6 +769,12 @@ public void shutdownMiniDFSCluster() throws IOException {
713769
// The below throws an exception per dn, AsynchronousCloseException.
714770
this.dfsCluster.shutdown();
715771
dfsCluster = null;
772+
// It is possible that the dfs cluster is set through setDFSCluster method, where we will not
773+
// have a fixer
774+
if (dfsClusterFixer != null) {
775+
this.dfsClusterFixer.shutdown();
776+
dfsClusterFixer = null;
777+
}
716778
dataTestDirOnTestFS = null;
717779
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
718780
}

0 commit comments

Comments
 (0)