@@ -118,21 +118,21 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
    new UDT.MyDenseVectorUDT()
  ).filter(supportsDataType)

-  private val parquetDictionaryEncodingEnabledConfs = if (isParquetDataSource) {
-    // Run with/without Parquet dictionary encoding enabled for Parquet data source.
-    Seq(true, false)
-  } else {
-    Seq(false)
-  }
-
-  for (dataType <- supportedDataTypes) {
-    for (parquetDictionaryEncodingEnabled <- parquetDictionaryEncodingEnabledConfs) {
-      val extraMessage = if (isParquetDataSource) {
-        s" with parquet.enable.dictionary = $parquetDictionaryEncodingEnabled"
-      } else {
-        ""
-      }
-      test(s"test all data types - $dataType$extraMessage") {
+  test(s"test all data types") {
+    val parquetDictionaryEncodingEnabledConfs = if (isParquetDataSource) {
+      // Run with/without Parquet dictionary encoding enabled for Parquet data source.
+      Seq(true, false)
+    } else {
+      Seq(false)
+    }
+    for (dataType <- supportedDataTypes) {
+      for (parquetDictionaryEncodingEnabled <- parquetDictionaryEncodingEnabledConfs) {
+        val extraMessage = if (isParquetDataSource) {
+          s" with parquet.enable.dictionary = $parquetDictionaryEncodingEnabled"
+        } else {
+          ""
+        }
+        logInfo(s"Testing $dataType data type$extraMessage")

        val extraOptions = Map[String, String](
          "parquet.enable.dictionary" -> parquetDictionaryEncodingEnabled.toString
@@ -769,37 +769,6 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
    }
  }

-  // NOTE: This test suite is not super deterministic. On nodes with only relatively few cores
-  // (4 or even 1), it's hard to reproduce the data loss issue. But on nodes with for example 8 or
-  // more cores, the issue can be reproduced steadily. Fortunately our Jenkins builder meets this
-  // requirement. We probably want to move this test case to spark-integration-tests or spark-perf
-  // later.
-  test("SPARK-8406: Avoids name collision while writing files") {
-    // The following test is slow. As now all the file format data sources are using common code
-    // for creating result files, we can test one data source (Parquet) only to reduce test time.
-    if (isParquetDataSource) {
-      withTempPath { dir =>
-        val path = dir.getCanonicalPath
-        spark
-          .range(10000)
-          .repartition(250)
-          .write
-          .mode(SaveMode.Overwrite)
-          .format(dataSourceName)
-          .save(path)
-
-        assertResult(10000) {
-          spark
-            .read
-            .format(dataSourceName)
-            .option("dataSchema", StructType(StructField("id", LongType) :: Nil).json)
-            .load(path)
-            .count()
-        }
-      }
-    }
-  }
-
  test("SPARK-8887: Explicitly define which data types can be used as dynamic partition columns") {
    val df = Seq(
      (1, "v1", Array(1, 2, 3), Map("k1" -> "v1"), Tuple2(1, "4")),
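
For context on the first hunk: the old code registered one ScalaTest case per data type at suite-construction time, while the new code registers a single test that loops over the types and logs each one. Below is a minimal sketch of the two patterns, assuming ScalaTest's AnyFunSuite; the type names and the roundTrip placeholder are illustrative and not from the patch.

import org.scalatest.funsuite.AnyFunSuite

// Before: the loop runs while the suite is being constructed, so each
// element registers its own named test case.
class PerTypeSuite extends AnyFunSuite {
  private val types = Seq("IntegerType", "StringType", "ArrayType")

  for (t <- types) {
    test(s"test all data types - $t") {
      assert(roundTrip(t))
    }
  }

  // Placeholder standing in for the real write/read round-trip check.
  private def roundTrip(t: String): Boolean = t.nonEmpty
}

// After: a single registered test case; per-type progress goes to the
// runner's output (the patch uses logInfo) rather than into test names.
class SingleTestSuite extends AnyFunSuite {
  private val types = Seq("IntegerType", "StringType", "ArrayType")

  test("test all data types") {
    for (t <- types) {
      info(s"Testing $t data type")
      assert(roundTrip(t))
    }
  }

  private def roundTrip(t: String): Boolean = t.nonEmpty
}

The trade-off is visibility versus overhead: the per-type variant reports each data type under its own test name but inflates the number of registered tests, while the single-test variant is cheaper to run and list but stops at the first failing type, leaving the log to identify it.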
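Since the SPARK-8406 regression test is removed here rather than moved, a standalone version may still be handy for ad hoc verification. This is a sketch only, assuming a local SparkSession, Parquet as the one exercised format (as the removed comment suggests), and an illustrative scratch path in place of withTempPath:

import org.apache.spark.sql.{SaveMode, SparkSession}

// Many concurrent write tasks (250 partitions) stress the output-file
// naming logic; if two tasks ever chose colliding file names, rows would
// be silently lost and the final count would fall short of 10000.
object NameCollisionCheck {
  def main(args: Array[String]): Unit = {
    // local[8]: the removed NOTE says 8+ cores reproduce the issue steadily.
    val spark = SparkSession.builder()
      .appName("name-collision-check")
      .master("local[8]")
      .getOrCreate()
    val path = "/tmp/name-collision-check" // illustrative scratch path

    spark.range(10000)
      .repartition(250)
      .write
      .mode(SaveMode.Overwrite)
      .parquet(path)

    val count = spark.read.parquet(path).count()
    assert(count == 10000L, s"expected 10000 rows, got $count")
    spark.stop()
  }
}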