diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index ef3d9b27aad79..d0e0d20df30af 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -2023,7 +2023,8 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
   }
 
   test("SPARK-21721: Clear FileSystem deleterOnExit cache if path is successfully removed") {
-    withTable("test21721") {
+    val table = "test21721"
+    withTable(table) {
       val deleteOnExitField = classOf[FileSystem].getDeclaredField("deleteOnExit")
       deleteOnExitField.setAccessible(true)
 
@@ -2031,10 +2032,10 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
       val setOfPath = deleteOnExitField.get(fs).asInstanceOf[Set[Path]]
       val testData = sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString)).toDF()
-      sql("CREATE TABLE test21721 (key INT, value STRING)")
+      sql(s"CREATE TABLE $table (key INT, value STRING)")
       val pathSizeToDeleteOnExit = setOfPath.size()
 
-      (0 to 10).foreach(_ => testData.write.mode(SaveMode.Append).insertInto("test1"))
+      (0 to 10).foreach(_ => testData.write.mode(SaveMode.Append).insertInto(table))
       assert(setOfPath.size() == pathSizeToDeleteOnExit)
     }
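
For context, a minimal sketch of how the whole test reads after this patch (not a verbatim copy): the hunks above do not show how `fs` is obtained or which imports the enclosing suite uses, so those parts are assumed and marked as such in the comments.

    // Assumed imports from the enclosing SQLQuerySuite (not shown in the hunks):
    //   import java.util.Set
    //   import org.apache.hadoop.fs.{FileSystem, Path}
    //   import org.apache.spark.sql.SaveMode
    test("SPARK-21721: Clear FileSystem deleterOnExit cache if path is successfully removed") {
      val table = "test21721"
      withTable(table) {
        // Reflectively expose FileSystem's private deleteOnExit set so the test
        // can observe whether staging paths linger after a successful removal.
        val deleteOnExitField = classOf[FileSystem].getDeclaredField("deleteOnExit")
        deleteOnExitField.setAccessible(true)

        // Assumed: obtain the FileSystem instance the session writes through.
        val fs = FileSystem.get(spark.sessionState.newHadoopConf())
        val setOfPath = deleteOnExitField.get(fs).asInstanceOf[Set[Path]]

        val testData = sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString)).toDF()
        sql(s"CREATE TABLE $table (key INT, value STRING)")
        val pathSizeToDeleteOnExit = setOfPath.size()

        // Repeated appends into the (now correctly referenced) table must not
        // grow the deleteOnExit cache.
        (0 to 10).foreach(_ => testData.write.mode(SaveMode.Append).insertInto(table))
        assert(setOfPath.size() == pathSizeToDeleteOnExit)
      }
    }

The point of the patch itself is small but real: the table name is factored into a single `table` value, so the `insertInto` call can no longer target a mistyped name ("test1") that differs from the table the test actually creates.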