From 4beea546a4b9e5e5ce1ed1f7edc1b52ad7bf8318 Mon Sep 17 00:00:00 2001
From: zsxwing
Date: Wed, 13 May 2015 16:53:34 -0700
Subject: [PATCH] Use 'new Job' in fsBasedParquet.scala

---
 .../scala/org/apache/spark/sql/parquet/fsBasedParquet.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala
index d810d6a028c5..c83a9c35dbdd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/fsBasedParquet.scala
@@ -231,7 +231,7 @@ private[sql] class FSBasedParquetRelation(
       filters: Array[Filter],
       inputPaths: Array[String]): RDD[Row] = {
 
-    val job = Job.getInstance(SparkHadoopUtil.get.conf)
+    val job = new Job(SparkHadoopUtil.get.conf)
     val conf = ContextUtil.getConfiguration(job)
 
     ParquetInputFormat.setReadSupportClass(job, classOf[RowReadSupport])
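
Note on the change: Job.getInstance(Configuration) was introduced in the Hadoop 2.x
mapreduce API, while the Job(Configuration) constructor, although deprecated in 2.x,
is available in both Hadoop 1.x and 2.x. Switching to the constructor presumably keeps
this code compiling and running against Hadoop 1 builds. A minimal sketch of the
pattern follows; the HadoopJobCompat object and newJob helper are hypothetical names
used only for illustration, not part of this patch:

    import org.apache.hadoop.conf.Configuration
    import org.apache.hadoop.mapreduce.Job

    object HadoopJobCompat {
      // Job.getInstance(conf) does not exist in Hadoop 1.x, so use the
      // constructor instead. It is deprecated in Hadoop 2.x but still
      // present there, so this one call site works against both lines.
      def newJob(conf: Configuration): Job = new Job(conf)
    }

    // Example usage:
    //   val job = HadoopJobCompat.newJob(new Configuration())

In Scala the deprecation only surfaces as a compiler warning, which makes the
constructor a low-cost way to stay source-compatible with both Hadoop versions.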