diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
index d04481105221..f65c15463054 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
@@ -108,7 +108,7 @@ class HadoopTableReader(
     val broadcastedHadoopConf = _broadcastedHadoopConf
 
     val tablePath = hiveTable.getPath
-    val inputPathStr = applyFilterIfNeeded(tablePath, filterOpt)
+    val inputPathStr = tablePath.toString
 
     // logDebug("Table input: %s".format(tablePath))
     val ifc = hiveTable.getInputFormatClass
@@ -190,7 +190,7 @@ class HadoopTableReader(
       .map { case (partition, partDeserializer) =>
         val partDesc = Utilities.getPartitionDesc(partition)
         val partPath = partition.getDataLocation
-        val inputPathStr = applyFilterIfNeeded(partPath, filterOpt)
+        val inputPathStr = partPath.toString
         val ifc = partDesc.getInputFileFormatClass
           .asInstanceOf[java.lang.Class[InputFormat[Writable, Writable]]]
         // Get partition field info
@@ -252,20 +252,6 @@ class HadoopTableReader(
     }
   }
 
-  /**
-   * If `filterOpt` is defined, then it will be used to filter files from `path`. These files are
-   * returned in a single, comma-separated string.
-   */
-  private def applyFilterIfNeeded(path: Path, filterOpt: Option[PathFilter]): String = {
-    filterOpt match {
-      case Some(filter) =>
-        val fs = path.getFileSystem(hadoopConf)
-        val filteredFiles = fs.listStatus(path, filter).map(_.getPath.toString)
-        filteredFiles.mkString(",")
-      case None => path.toString
-    }
-  }
-
   /**
    * Creates a HadoopRDD based on the broadcasted HiveConf and other job properties that will be
    * applied locally on each slave.
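
Note (not part of the patch): in the `None` case, `applyFilterIfNeeded` never touched the `FileSystem` and simply stringified the path, so inlining `path.toString` at both call sites is behavior-preserving exactly when `filterOpt` is `None` there, which this removal evidently assumes. A minimal sketch of that equivalence, with the helper isolated for illustration and a hypothetical fresh `Configuration` standing in for the reader's `hadoopConf`:

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, PathFilter}

// The removed helper, isolated. Only the Some branch ever lists files; the
// None branch is a plain path-to-string conversion, which is exactly what
// the patch inlines at both call sites.
def applyFilterIfNeeded(path: Path, filterOpt: Option[PathFilter]): String =
  filterOpt match {
    case Some(filter) =>
      // Eagerly lists the files under `path` that match the filter and joins
      // them into a single comma-separated input string.
      val fs = path.getFileSystem(new Configuration()) // stand-in for hadoopConf
      fs.listStatus(path, filter).map(_.getPath.toString).mkString(",")
    case None => path.toString
  }

// Equivalence in the None case:
// applyFilterIfNeeded(new Path("/warehouse/db.db/t"), None) == "/warehouse/db.db/t"
```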