@@ -25,7 +25,6 @@ import java.util.TimeZone
 import org.apache.hadoop.fs.{FileSystem, Path}
 import org.apache.parquet.hadoop.ParquetFileReader
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName
-import org.scalatest.BeforeAndAfterEach
 
 import org.apache.spark.sql.{AnalysisException, Dataset, Row, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
@@ -207,10 +206,10 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
       val defaultTz = None
       // check that created tables have correct TBLPROPERTIES
       val tblProperties = explicitTz.map {
-        tz => raw"""TBLPROPERTIES ($key="$tz")"""
+        tz => s"""TBLPROPERTIES ($key="$tz")"""
       }.getOrElse("")
       spark.sql(
-        raw"""CREATE TABLE $baseTable (
+        s"""CREATE TABLE $baseTable (
           | x int
           | )
           | STORED AS PARQUET
@@ -219,7 +218,7 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
       val expectedTableTz = explicitTz.orElse(defaultTz)
       checkHasTz(spark, baseTable, expectedTableTz)
       spark.sql(
-        raw"""CREATE TABLE partitioned_$baseTable (
+        s"""CREATE TABLE partitioned_$baseTable (
           | x int
           | )
           | PARTITIONED BY (y int)
@@ -230,7 +229,7 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
       spark.sql(s"CREATE TABLE like_$baseTable LIKE $baseTable")
       checkHasTz(spark, s"like_$baseTable", expectedTableTz)
       spark.sql(
-        raw"""CREATE TABLE select_$baseTable
+        s"""CREATE TABLE select_$baseTable
           | STORED AS PARQUET
           | AS
           | SELECT * from $baseTable
@@ -239,14 +238,14 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
 
       // check alter table, setting, unsetting, resetting the property
       spark.sql(
-        raw"""ALTER TABLE $baseTable SET TBLPROPERTIES ($key="America/Los_Angeles")""")
+        s"""ALTER TABLE $baseTable SET TBLPROPERTIES ($key="America/Los_Angeles")""")
       checkHasTz(spark, baseTable, Some("America/Los_Angeles"))
-      spark.sql(raw"""ALTER TABLE $baseTable SET TBLPROPERTIES ($key="UTC")""")
+      spark.sql(s"""ALTER TABLE $baseTable SET TBLPROPERTIES ($key="UTC")""")
       checkHasTz(spark, baseTable, Some("UTC"))
-      spark.sql(raw"""ALTER TABLE $baseTable UNSET TBLPROPERTIES ($key)""")
+      spark.sql(s"""ALTER TABLE $baseTable UNSET TBLPROPERTIES ($key)""")
       checkHasTz(spark, baseTable, None)
       explicitTz.foreach { tz =>
-        spark.sql(raw"""ALTER TABLE $baseTable SET TBLPROPERTIES ($key="$tz")""")
+        spark.sql(s"""ALTER TABLE $baseTable SET TBLPROPERTIES ($key="$tz")""")
         checkHasTz(spark, baseTable, expectedTableTz)
       }
     }
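Note on the `raw` to `s` swap above: both Scala interpolators substitute `$`-expressions, so the generated SQL is identical; they differ only in escape handling (`s` processes sequences such as `\t`, `raw` leaves them literal). Since none of these SQL strings contain escapes, the change is purely stylistic. A minimal sketch of the distinction, using a hypothetical stand-in for the property key:

```scala
object InterpolatorDemo extends App {
  val key = "example.timezone.property" // hypothetical stand-in key
  val tz = "America/Los_Angeles"

  // Both interpolators substitute $-expressions identically:
  println(s"""TBLPROPERTIES ($key="$tz")""")
  println(raw"""TBLPROPERTIES ($key="$tz")""")

  // They differ only in escape processing:
  println(s"a\tb")   // prints a, a real tab, then b
  println(raw"a\tb") // prints the four characters a \ t b
}
```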
@@ -267,16 +266,13 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
   val timestampTimezoneToMillis = {
     val originalTz = TimeZone.getDefault
     try {
-      (for {
-        timestampString <- desiredTimestampStrings
-        timezone <- Seq("America/Los_Angeles", "Europe/Berlin", "UTC").map {
-          TimeZone.getTimeZone(_)
+      desiredTimestampStrings.flatMap { timestampString =>
+        Seq("America/Los_Angeles", "Europe/Berlin", "UTC").map { tzId =>
+          TimeZone.setDefault(TimeZone.getTimeZone(tzId))
+          val timestamp = Timestamp.valueOf(timestampString)
+          (timestampString, tzId) -> timestamp.getTime()
         }
-      } yield {
-        TimeZone.setDefault(timezone)
-        val timestamp = Timestamp.valueOf(timestampString)
-        (timestampString, timezone.getID()) -> timestamp.getTime()
-      }).toMap
+      }.toMap
     } finally {
       TimeZone.setDefault(originalTz)
     }
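The hunk above replaces the for-comprehension with explicit `flatMap`/`map`; behavior is unchanged, since both versions set the JVM default zone before `Timestamp.valueOf` parses each string (`valueOf` interprets its argument in the default zone, so the same string yields different epoch millis per zone). A self-contained sketch of the new shape, with illustrative timestamp strings (the suite's actual `desiredTimestampStrings` are defined elsewhere in the file):

```scala
import java.sql.Timestamp
import java.util.TimeZone

object TimestampMillisDemo extends App {
  // Illustrative inputs; the suite defines its own desiredTimestampStrings.
  val desiredTimestampStrings = Seq("2015-12-31 23:50:59.123", "2016-01-01 10:11:12.0")

  val originalTz = TimeZone.getDefault
  val timestampTimezoneToMillis =
    try {
      desiredTimestampStrings.flatMap { timestampString =>
        Seq("America/Los_Angeles", "Europe/Berlin", "UTC").map { tzId =>
          // valueOf parses in the JVM default zone, so set it first.
          TimeZone.setDefault(TimeZone.getTimeZone(tzId))
          val timestamp = Timestamp.valueOf(timestampString)
          (timestampString, tzId) -> timestamp.getTime()
        }
      }.toMap
    } finally {
      TimeZone.setDefault(originalTz) // always restore the original default
    }

  timestampTimezoneToMillis.foreach(println)
}
```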
@@ -303,18 +299,17 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
       val sessionTzId = sessionTzOpt.getOrElse(TimeZone.getDefault().getID())
       // check that created tables have correct TBLPROPERTIES
       val tblProperties = explicitTz.map {
-        tz => raw"""TBLPROPERTIES ($key="$tz")"""
+        tz => s"""TBLPROPERTIES ($key="$tz")"""
       }.getOrElse("")
 
-
       val rawData = createRawData(spark)
       // Check writing data out.
       // We write data into our tables, and then check the raw parquet files to see whether
       // the correct conversion was applied.
       rawData.write.saveAsTable(s"saveAsTable_$baseTable")
       checkHasTz(spark, s"saveAsTable_$baseTable", None)
       spark.sql(
-        raw"""CREATE TABLE insert_$baseTable (
+        s"""CREATE TABLE insert_$baseTable (
           | display string,
           | ts timestamp
           | )
@@ -370,7 +365,7 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
       baseTable: String,
       explicitTz: Option[String],
       sessionTzOpt: Option[String]): Unit = {
-    val key = ParquetFileFormat.PARQUET_TIMEZONE_TABLE_PROPERTY 
+    val key = ParquetFileFormat.PARQUET_TIMEZONE_TABLE_PROPERTY
     test(s"SPARK-12297: Read from Parquet tables with Timestamps; explicitTz = $explicitTz; " +
       s"sessionTzOpt = $sessionTzOpt") {
       withTable(s"external_$baseTable", s"partitioned_$baseTable") {
@@ -412,8 +407,8 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
         .parquet(partitionedPath.getCanonicalPath)
       // unfortunately, catalog.createTable() doesn't let us specify partitioning, so just use
       // a "CREATE TABLE" stmt.
-      val tblOpts = explicitTz.map { tz => raw"""TBLPROPERTIES ($key="$tz")""" }.getOrElse("")
-      spark.sql(raw"""CREATE EXTERNAL TABLE partitioned_$baseTable (
+      val tblOpts = explicitTz.map { tz => s"""TBLPROPERTIES ($key="$tz")""" }.getOrElse("")
+      spark.sql(s"""CREATE EXTERNAL TABLE partitioned_$baseTable (
         | display string,
         | ts timestamp
         |)
@@ -512,7 +507,7 @@ class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHi
     val key = ParquetFileFormat.PARQUET_TIMEZONE_TABLE_PROPERTY
     val badTzException = intercept[AnalysisException] {
       spark.sql(
-        raw"""CREATE TABLE bad_tz_table (
+        s"""CREATE TABLE bad_tz_table (
           | x int
           | )
           | STORED AS PARQUET
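The last hunk touches the negative test: creating a table whose timezone property is not a valid ID should raise an `AnalysisException`. One subtlety is that `java.util.TimeZone.getTimeZone` never fails, it silently falls back to GMT for unknown IDs, so any validation has to test the ID explicitly. A minimal sketch of that kind of check (an assumption about the shape of the validation, not Spark's actual code; `IllegalArgumentException` stands in for `AnalysisException` to stay dependency-free):

```scala
import java.util.TimeZone

object TimezoneIdCheckDemo extends App {
  // Hypothetical validator: TimeZone.getTimeZone("No/Such_Zone") would
  // silently return GMT, so membership in the known IDs is tested instead.
  def requireValidTzId(tz: String): Unit =
    if (!TimeZone.getAvailableIDs.contains(tz)) {
      throw new IllegalArgumentException(s"Invalid timezone id: $tz")
    }

  requireValidTzId("UTC") // passes
  try requireValidTzId("No/Such_Zone")
  catch { case e: IllegalArgumentException => println(e.getMessage) }
}
```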