Skip to content

Commit 13b70f0

Browse files
committed
add checkValue for the conf
1 parent ec0afac commit 13b70f0

File tree

3 files changed

+10
-5
lines changed

3 files changed

+10
-5
lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileIndex.scala

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -300,8 +300,7 @@ object PartitioningAwareFileIndex extends Logging {
300300
sparkSession: SparkSession): Seq[(Path, Seq[FileStatus])] = {
301301

302302
// Short-circuits parallel listing when serial listing is likely to be faster.
303-
if (paths.isEmpty ||
304-
paths.size < sparkSession.sessionState.conf.parallelPartitionDiscoveryThreshold) {
303+
if (paths.size <= sparkSession.sessionState.conf.parallelPartitionDiscoveryThreshold) {
305304
return paths.map { path =>
306305
(path, listLeafFiles(path, hadoopConf, filter, Some(sparkSession)))
307306
}

sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -407,6 +407,8 @@ object SQLConf {
407407
"files with another Spark distributed job. This applies to Parquet, ORC, CSV, JSON and " +
408408
"LibSVM data sources.")
409409
.intConf
410+
.checkValue(parallel => parallel >= 0, "The maximum number of files allowed for listing " +
411+
"files at driver side must not be negative")
410412
.createWithDefault(32)
411413

412414
val PARALLEL_PARTITION_DISCOVERY_PARALLELISM =

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileIndexSuite.scala

Lines changed: 7 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -186,9 +186,13 @@ class FileIndexSuite extends SharedSQLContext {
186186
new InMemoryFileIndex(spark, Seq.empty, Map.empty, None)
187187
}
188188

189-
withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "-1") {
190-
new InMemoryFileIndex(spark, Seq.empty, Map.empty, None)
191-
}
189+
val e = intercept[IllegalArgumentException] {
190+
withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "-1") {
191+
new InMemoryFileIndex(spark, Seq.empty, Map.empty, None)
192+
}
193+
}.getMessage
194+
assert(e.contains("The maximum number of files allowed for listing files at " +
195+
"driver side must not be negative"))
192196
}
193197

194198
test("refresh for InMemoryFileIndex with FileStatusCache") {

0 commit comments

Comments (0)