@@ -269,7 +269,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    *
    * @since 1.4.0
    */
-  def json(path: String): DataFrame = json(Seq(path): _*)
+  def json(path: String): DataFrame = {
+    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
+    json(Seq(path): _*)
+  }
 
   /**
    * Loads a JSON file (one object per line) and returns the result as a [[DataFrame]].
@@ -404,7 +407,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    *
    * @since 2.0.0
    */
-  def csv(path: String): DataFrame = csv(Seq(path): _*)
+  def csv(path: String): DataFrame = {
+    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
+    csv(Seq(path): _*)
+  }
 
   /**
    * Loads a CSV file and returns the result as a [[DataFrame]].
@@ -472,7 +478,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    *
    * @since 2.0.0
    */
-  def parquet(path: String): DataFrame = parquet(Seq(path): _*)
+  def parquet(path: String): DataFrame = {
+    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
+    parquet(Seq(path): _*)
+  }
 
   /**
    * Loads a Parquet file, returning the result as a [[DataFrame]]. This function returns an empty
@@ -497,7 +506,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    * @since 1.5.0
    * @note Currently, this method can only be used after enabling Hive support.
    */
-  def orc(path: String): DataFrame = orc(Seq(path): _*)
+  def orc(path: String): DataFrame = {
+    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
+    orc(Seq(path): _*)
+  }
 
   /**
    * Loads an ORC file and returns the result as a [[DataFrame]].
@@ -536,7 +548,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    * @param path input path
    * @since 2.0.0
    */
-  def text(path: String): DataFrame = text(Seq(path): _*)
+  def text(path: String): DataFrame = {
+    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
+    text(Seq(path): _*)
+  }
 
   /**
    * Loads text files and returns a [[DataFrame]] whose schema starts with a string column named
@@ -576,7 +591,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    * @param path input path
    * @since 2.0.0
    */
-  def textFile(path: String): Dataset[String] = textFile(Seq(path): _*)
+  def textFile(path: String): Dataset[String] = {
+    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
+    textFile(Seq(path): _*)
+  }
 
   /**
    * Loads text files and returns a [[Dataset]] of String. The underlying schema of the Dataset
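
Each hunk above keeps a dedicated single-String overload alongside the varargs reader method. A minimal sketch of why that can matter, assuming (as the SPARK-16009 reference suggests) that some call sites need an exact one-argument signature, for example when a reader method is passed around as a String => DataFrame function; the object name and input paths below are hypothetical:

// Sketch only, not part of the patch. With only the varargs overload
// (e.g. json(paths: String*)), eta-expansion yields Seq[String] => DataFrame,
// so the method cannot be passed where String => DataFrame is expected.
// The explicit single-argument overload lets the map call below resolve.
import org.apache.spark.sql.{DataFrame, SparkSession}

object SinglePathOverloadSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("single-path-overload").getOrCreate()

    // Hypothetical input paths, used only for illustration.
    val paths = Seq("/data/day1.json", "/data/day2.json")

    // Resolves against json(path: String); each path is read into its own DataFrame.
    val frames: Seq[DataFrame] = paths.map(spark.read.json)

    frames.foreach(_.printSchema())
    spark.stop()
  }
}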