@@ -31,7 +31,7 @@ import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, TableAlreadyExistsException}
 import org.apache.spark.sql.catalyst.catalog._
 import org.apache.spark.sql.execution.command.{DDLSuite, DDLUtils}
-import org.apache.spark.sql.hive.HiveExternalCatalog
+import org.apache.spark.sql.hive.{HiveExternalCatalog, HiveUtils}
 import org.apache.spark.sql.hive.orc.OrcFileOperator
 import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
@@ -1438,39 +1438,44 @@ class HiveDDLSuite
   }
 
   test("create hive serde table with new syntax") {
-    withTable("t", "t2", "t3") {
-      withTempPath { path =>
-        sql(
-          s"""
-            |CREATE TABLE t(id int) USING hive
-            |OPTIONS(fileFormat 'orc', compression 'Zlib')
-            |LOCATION '${path.toURI}'
-          """.stripMargin)
-        val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
-        assert(DDLUtils.isHiveTable(table))
-        assert(table.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde"))
-        assert(table.storage.properties.get("compression") == Some("Zlib"))
-        assert(spark.table("t").collect().isEmpty)
-
-        sql("INSERT INTO t SELECT 1")
-        checkAnswer(spark.table("t"), Row(1))
-        // Check if this is compressed as ZLIB.
-        val maybeOrcFile = path.listFiles().find(!_.getName.endsWith(".crc"))
-        assert(maybeOrcFile.isDefined)
-        val orcFilePath = maybeOrcFile.get.toPath.toString
-        val expectedCompressionKind =
-          OrcFileOperator.getFileReader(orcFilePath).get.getCompression
-        assert("ZLIB" === expectedCompressionKind.name())
-
-        sql("CREATE TABLE t2 USING HIVE AS SELECT 1 AS c1, 'a' AS c2")
-        val table2 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t2"))
-        assert(DDLUtils.isHiveTable(table2))
-        assert(table2.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
-        checkAnswer(spark.table("t2"), Row(1, "a"))
-
-        sql("CREATE TABLE t3(a int, p int) USING hive PARTITIONED BY (p)")
-        sql("INSERT INTO t3 PARTITION(p=1) SELECT 0")
-        checkAnswer(spark.table("t3"), Row(0, 1))
+    Seq("true", "false").foreach { value =>
+      withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> value) {
+        withTable("t", "t2", "t3") {
+          withTempPath { path =>
+            sql(
+              s"""
+                |CREATE TABLE t(id int) USING hive
+                |OPTIONS(fileFormat 'orc', compression 'Zlib')
+                |LOCATION '${path.toURI}'
+              """.stripMargin)
+            val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
+            assert(DDLUtils.isHiveTable(table))
+            assert(table.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde"))
+            assert(table.storage.properties.get("compression") == Some("Zlib"))
+            assert(spark.table("t").collect().isEmpty)
+
+            sql("INSERT INTO t SELECT 1")
+            checkAnswer(spark.table("t"), Row(1))
+            // Check if this is compressed as ZLIB.
+            val maybeOrcFile = path.listFiles().find(_.getName.startsWith("part"))
+            assert(maybeOrcFile.isDefined)
+            val orcFilePath = maybeOrcFile.get.toPath.toString
+            val expectedCompressionKind =
+              OrcFileOperator.getFileReader(orcFilePath).get.getCompression
+            assert("ZLIB" === expectedCompressionKind.name())
+
+            sql("CREATE TABLE t2 USING HIVE AS SELECT 1 AS c1, 'a' AS c2")
+            val table2 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t2"))
+            assert(DDLUtils.isHiveTable(table2))
+            assert(
+              table2.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
+            checkAnswer(spark.table("t2"), Row(1, "a"))
+
+            sql("CREATE TABLE t3(a int, p int) USING hive PARTITIONED BY (p)")
+            sql("INSERT INTO t3 PARTITION(p=1) SELECT 0")
+            checkAnswer(spark.table("t3"), Row(0, 1))
+          }
+        }
       }
     }
   }