diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index b0d862d006e49..69161e0d61f44 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -348,6 +348,9 @@ object MimaExcludes {
     ) ++ Seq(
       // [SPARK-13686][MLLIB][STREAMING] Add a constructor parameter `regParam` to (Streaming)LinearRegressionWithSGD
       ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.mllib.regression.LinearRegressionWithSGD.this")
+    ) ++ Seq(
+      // SPARK-15250 Remove deprecated json API in DataFrameReader
+      ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.sql.DataFrameReader.json")
     ) ++ Seq(
       // SPARK-13920: MIMA checks should apply to @Experimental and @DeveloperAPI APIs
       ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.Aggregator.combineCombinersByKey"),
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index e2ee9db049489..4d551f8a11fe1 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -241,8 +241,8 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
         if columnNameOfCorruptRecord is not None:
             self.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
         if isinstance(path, basestring):
-            return self._df(self._jreader.json(path))
-        elif type(path) == list:
+            path = [path]
+        if type(path) == list:
             return self._df(self._jreader.json(self._sqlContext._sc._jvm.PythonUtils.toSeq(path)))
         elif isinstance(path, RDD):
             def func(iterator):
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 5bf696c1c3927..c0fbed4931bb9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -275,38 +275,6 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
     sparkSession.baseRelationToDataFrame(relation)
   }
 
-  /**
-   * Loads a JSON file (one object per line) and returns the result as a [[DataFrame]].
-   *
-   * This function goes through the input once to determine the input schema. If you know the
-   * schema in advance, use the version that specifies the schema to avoid the extra scan.
-   *
-   * You can set the following JSON-specific options to deal with non-standard JSON files:
-   * <li>`primitivesAsString` (default `false`): infers all primitive values as a string type
-   * </li>
-   * <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records
-   * </li>
-   * <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names
-   * </li>
-   * <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes
-   * </li>
-   * <li>`allowNumericLeadingZeros` (default `false`): allows leading zeros in numbers
-   * (e.g. 00012)
-   * </li>
-   * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
-   * during parsing.
-   * </li>
-   *
-   * <li>`columnNameOfCorruptRecord` (default `_corrupt_record`): allows renaming the new field
-   * having malformed string created by `PERMISSIVE` mode. This overrides
-   * `spark.sql.columnNameOfCorruptRecord`.
-   * </li>
-   *
-   * @since 1.4.0
-   */
-  // TODO: Remove this one in Spark 2.0.
-  def json(path: String): DataFrame = format("json").load(path)
-
   /**
    * Loads a JSON file (one object per line) and returns the result as a [[DataFrame]].
    *
@@ -340,6 +308,7 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    *
    * @since 1.6.0
    */
+  @scala.annotation.varargs
   def json(paths: String*): DataFrame = format("json").load(paths : _*)
 
   /**
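
Usage sketch (not part of the patch): after this change, `json(paths: String*)` is the only path-based JSON entry point, so single-path Scala callers keep compiling, and the added `@scala.annotation.varargs` keeps a `json(String...)` overload visible to Java callers. A minimal sketch, assuming an existing `SparkSession` and placeholder input paths:

// Sketch only: the SparkSession and the paths below are assumptions, not from the patch.
import org.apache.spark.sql.{DataFrame, SparkSession}

object JsonReaderSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("json-reader-sketch").getOrCreate()

    // A single path still resolves against the varargs overload.
    val single: DataFrame = spark.read.json("/tmp/events.json")

    // Multiple paths go through the same method.
    val merged: DataFrame = spark.read.json("/tmp/2016-01.json", "/tmp/2016-02.json")

    // The JSON options documented above are passed via option(); `FAILFAST`
    // makes corrupt records raise an error instead of landing in `_corrupt_record`.
    val strict: DataFrame = spark.read
      .option("mode", "FAILFAST")
      .option("primitivesAsString", "true")
      .json("/tmp/events.json")

    spark.stop()
  }
}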