diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 2cc937034651..dfba12a5856e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -213,9 +213,8 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
       val objectMapper = new ObjectMapper()
       Some("paths" -> objectMapper.writeValueAsString(paths.toArray))
     }
-    // TODO SPARK-27113: remove this option.
-    val checkFilesExistsOpt = "check_files_exist" -> "true"
-    val finalOptions = sessionOptions ++ extraOptions.toMap ++ pathsOption + checkFilesExistsOpt
+
+    val finalOptions = sessionOptions ++ extraOptions.toMap ++ pathsOption
     val dsOptions = new CaseInsensitiveStringMap(finalOptions.asJava)
     val table = userSpecifiedSchema match {
       case Some(schema) => provider.getTable(dsOptions, schema)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index e58225e0f58a..3c51edd8ab60 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -261,10 +261,9 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
       val provider = cls.getConstructor().newInstance().asInstanceOf[TableProvider]
       val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
         provider, session.sessionState.conf)
-      // TODO SPARK-27113: remove this option.
-      val checkFilesExistsOption = "check_files_exist" -> "false"
-      val options = sessionOptions ++ extraOptions + checkFilesExistsOption
+      val options = sessionOptions ++ extraOptions
       val dsOptions = new CaseInsensitiveStringMap(options.asJava)
+
       provider.getTable(dsOptions) match {
         case table: SupportsBatchWrite =>
           lazy val relation = DataSourceV2Relation.create(table, dsOptions)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala
index 08873a3b5a64..21fb6fda98a9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala
@@ -36,10 +36,8 @@ abstract class FileTable(
   lazy val fileIndex: PartitioningAwareFileIndex = {
     val scalaMap = options.asScala.toMap
     val hadoopConf = sparkSession.sessionState.newHadoopConfWithOptions(scalaMap)
-    // This is an internal config so must be present.
-    val checkFilesExist = options.get("check_files_exist").toBoolean
     val rootPathsSpecified = DataSource.checkAndGlobPathIfNecessary(paths, hadoopConf,
-      checkEmptyGlobPath = true, checkFilesExist = checkFilesExist)
+      checkEmptyGlobPath = true, checkFilesExist = true)
     val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
     new InMemoryFileIndex(
       sparkSession, rootPathsSpecified, scalaMap, userSpecifiedSchema, fileStatusCache)
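
For context, the reader-side option handling after this change can be sketched as below. The helper name `buildReaderOptions` and the sample option values are illustrative only; `sessionOptions`, `extraOptions`, and `pathsOption` correspond to the values assembled in `DataFrameReader.load` in the diff above. The point is that the internal `check_files_exist` entry no longer appears in the map handed to `TableProvider.getTable`, since `FileTable` now always calls `DataSource.checkAndGlobPathIfNecessary` with `checkFilesExist = true`.

```scala
import scala.collection.JavaConverters._
import org.apache.spark.sql.util.CaseInsensitiveStringMap

// Hypothetical helper mirroring the post-patch DataFrameReader logic:
// session configs, user options, and the JSON-encoded paths are merged
// without the internal "check_files_exist" flag.
def buildReaderOptions(
    sessionOptions: Map[String, String],
    extraOptions: Map[String, String],
    pathsOption: Option[(String, String)]): CaseInsensitiveStringMap = {
  val finalOptions = sessionOptions ++ extraOptions ++ pathsOption
  new CaseInsensitiveStringMap(finalOptions.asJava)
}

// Illustrative values; real keys depend on the data source being used.
val dsOptions = buildReaderOptions(
  sessionOptions = Map("someSessionConfig" -> "value"),
  extraOptions = Map("mergeSchema" -> "true"),
  pathsOption = Some("paths" -> """["/data/events"]"""))

// The internal flag is gone from the options map; path existence checking
// now happens unconditionally when FileTable builds its file index.
assert(!dsOptions.containsKey("check_files_exist"))
```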