@@ -37,37 +37,6 @@ getInternalType <- function(x) {
          stop(paste("Unsupported type for SparkDataFrame:", class(x))))
 }
 
-#' Temporary function to reroute old S3 Method call to new
-#' This function is specifically implemented to remove SQLContext from the parameter list.
-#' It determines the target to route the call by checking the parent of this callsite (say 'func').
-#' The target should be called 'func.default'.
-#' We need to check the class of x to ensure it is SQLContext/HiveContext before dispatching.
-#' @param newFuncSig name of the function the user should call instead in the deprecation message
-#' @param x the first parameter of the original call
-#' @param ... the rest of parameter to pass along
-#' @return whatever the target returns
-#' @noRd
-dispatchFunc <- function(newFuncSig, x, ...) {
-  # When called with SparkR::createDataFrame, sys.call()[[1]] returns c(::, SparkR, createDataFrame)
-  callsite <- as.character(sys.call(sys.parent())[[1]])
-  funcName <- callsite[[length(callsite)]]
-  f <- get(paste0(funcName, ".default"))
-  # Strip sqlContext from list of parameters and then pass the rest along.
-  contextNames <- c("org.apache.spark.sql.SQLContext",
-                    "org.apache.spark.sql.hive.HiveContext",
-                    "org.apache.spark.sql.hive.test.TestHiveContext",
-                    "org.apache.spark.sql.SparkSession")
-  if (missing(x) && length(list(...)) == 0) {
-    f()
-  } else if (class(x) == "jobj" &&
-             any(grepl(paste(contextNames, collapse = "|"), getClassName.jobj(x)))) {
-    .Deprecated(newFuncSig, old = paste0(funcName, "(sqlContext...)"))
-    f(...)
-  } else {
-    f(x, ...)
-  }
-}
-
 #' return the SparkSession
 #' @noRd
 getSparkSession <- function() {
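
The removed dispatchFunc leaned on R's call-stack introspection: sys.call(sys.parent()) recovers the call to the wrapper that invoked it, the wrapper's bare name is taken from that expression, and the arguments are forwarded to the matching "<name>.default" function. A minimal standalone sketch of that trick follows; the toy names dispatchToDefault and greet are hypothetical, not Spark code.

```r
# Toy re-creation of the routing trick used by the removed dispatchFunc.
dispatchToDefault <- function(...) {
  # sys.call(sys.parent()) is the call to our caller, e.g. greet("SparkR");
  # for pkg::greet("SparkR") its head deparses to c("::", "pkg", "greet").
  callsite <- as.character(sys.call(sys.parent())[[1]])
  funcName <- callsite[[length(callsite)]]  # last element is the bare name
  f <- get(paste0(funcName, ".default"))    # resolve greet.default
  f(...)
}
greet.default <- function(name) paste("Hello,", name)
greet <- function(...) dispatchToDefault(...)
greet("SparkR")  # "Hello, SparkR"
```
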
@@ -198,11 +167,10 @@ getDefaultSqlSource <- function() {
 #' df4 <- createDataFrame(cars, numPartitions = 2)
 #' }
 #' @name createDataFrame
-#' @method createDataFrame default
 #' @note createDataFrame since 1.4.0
 # TODO(davies): support sampling and infer type from NA
-createDataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0,
-                                    numPartitions = NULL) {
+createDataFrame <- function(data, schema = NULL, samplingRatio = 1.0,
+                            numPartitions = NULL) {
   sparkSession <- getSparkSession()
 
   if (is.data.frame(data)) {
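
With the shim gone, createDataFrame is an ordinary function again: the leading sqlContext argument is no longer accepted, and the session is resolved internally via getSparkSession(). A before/after sketch, assuming an active session started with sparkR.session():

```r
# Old, deprecated form handled by the removed dispatchFunc:
#   df <- createDataFrame(sqlContext, faithful)
# New form -- data comes first, the SparkSession is implicit:
df <- createDataFrame(faithful, numPartitions = 2)
```
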
@@ -285,31 +253,18 @@ createDataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0,
   dataFrame(sdf)
 }
 
-createDataFrame <- function(x, ...) {
-  dispatchFunc("createDataFrame(data, schema = NULL)", x, ...)
-}
-
 #' @rdname createDataFrame
 #' @aliases createDataFrame
-#' @method as.DataFrame default
 #' @note as.DataFrame since 1.6.0
-as.DataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0, numPartitions = NULL) {
+as.DataFrame <- function(data, schema = NULL, samplingRatio = 1.0, numPartitions = NULL) {
   createDataFrame(data, schema, samplingRatio, numPartitions)
 }
 
-#' @param ... additional argument(s).
-#' @rdname createDataFrame
-#' @aliases as.DataFrame
-as.DataFrame <- function(data, ...) {
-  dispatchFunc("as.DataFrame(data, schema = NULL)", data, ...)
-}
-
 #' toDF
 #'
 #' Converts an RDD to a SparkDataFrame by infer the types.
 #'
 #' @param x An RDD
-#'
 #' @rdname SparkDataFrame
 #' @noRd
 #' @examples
@@ -345,9 +300,8 @@ setMethod("toDF", signature(x = "RDD"),
 #' df <- read.json(path, multiLine = TRUE)
 #' }
 #' @name read.json
-#' @method read.json default
 #' @note read.json since 1.6.0
-read.json.default <- function(path, ...) {
+read.json <- function(path, ...) {
   sparkSession <- getSparkSession()
   options <- varargsToStrEnv(...)
   # Allow the user to have a more flexible definition of the text file path
@@ -358,10 +312,6 @@ read.json.default <- function(path, ...) {
   dataFrame(sdf)
 }
 
-read.json <- function(x, ...) {
-  dispatchFunc("read.json(path)", x, ...)
-}
-
 #' Create a SparkDataFrame from an ORC file.
 #'
 #' Loads an ORC file, returning the result as a SparkDataFrame.
@@ -388,13 +338,12 @@ read.orc <- function(path, ...) {
 #' Loads a Parquet file, returning the result as a SparkDataFrame.
 #'
 #' @param path path of file to read. A vector of multiple paths is allowed.
-#' @param ... additional external data source specific named properties.
+#' @param ... additional data source specific named properties.
 #' @return SparkDataFrame
 #' @rdname read.parquet
 #' @name read.parquet
-#' @method read.parquet default
 #' @note read.parquet since 1.6.0
-read.parquet.default <- function(path, ...) {
+read.parquet <- function(path, ...) {
   sparkSession <- getSparkSession()
   options <- varargsToStrEnv(...)
   # Allow the user to have a more flexible definition of the Parquet file path
@@ -405,10 +354,6 @@ read.parquet.default <- function(path, ...) {
   dataFrame(sdf)
 }
 
-read.parquet <- function(x, ...) {
-  dispatchFunc("read.parquet(...)", x, ...)
-}
-
 #' Create a SparkDataFrame from a text file.
 #'
 #' Loads text files and returns a SparkDataFrame whose schema starts with
@@ -428,9 +373,8 @@ read.parquet <- function(x, ...) {
 #' df <- read.text(path)
 #' }
 #' @name read.text
-#' @method read.text default
 #' @note read.text since 1.6.1
-read.text.default <- function(path, ...) {
+read.text <- function(path, ...) {
   sparkSession <- getSparkSession()
   options <- varargsToStrEnv(...)
   # Allow the user to have a more flexible definition of the text file path
@@ -441,10 +385,6 @@ read.text.default <- function(path, ...) {
   dataFrame(sdf)
 }
 
-read.text <- function(x, ...) {
-  dispatchFunc("read.text(path)", x, ...)
-}
-
 #' SQL Query
 #'
 #' Executes a SQL query using Spark, returning the result as a SparkDataFrame.
@@ -461,18 +401,13 @@ read.text <- function(x, ...) {
 #' new_df <- sql("SELECT * FROM table")
 #' }
 #' @name sql
-#' @method sql default
 #' @note sql since 1.4.0
-sql.default <- function(sqlQuery) {
+sql <- function(sqlQuery) {
   sparkSession <- getSparkSession()
   sdf <- callJMethod(sparkSession, "sql", sqlQuery)
   dataFrame(sdf)
 }
 
-sql <- function(x, ...) {
-  dispatchFunc("sql(sqlQuery)", x, ...)
-}
-
 #' Create a SparkDataFrame from a SparkSQL table or view
 #'
 #' Returns the specified table or view as a SparkDataFrame. The table or view must already exist or
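
sql() follows the same pattern: the sqlContext-first variant that routed through dispatchFunc is gone, leaving only the single-argument form. A short usage sketch, assuming a temporary view named "people" already exists:

```r
# The active SparkSession is picked up internally; only the query is passed.
adults <- sql("SELECT name, age FROM people WHERE age > 21")
head(adults)
```
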
@@ -531,9 +466,8 @@ tableToDF <- function(tableName) {
 #' df4 <- read.df(mapTypeJsonPath, "json", stringSchema, multiLine = TRUE)
 #' }
 #' @name read.df
-#' @method read.df default
 #' @note read.df since 1.4.0
-read.df.default <- function(path = NULL, source = NULL, schema = NULL, na.strings = "NA", ...) {
+read.df <- function(path = NULL, source = NULL, schema = NULL, na.strings = "NA", ...) {
   if (!is.null(path) && !is.character(path)) {
     stop("path should be character, NULL or omitted.")
   }
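
read.df keeps its keyword-style signature, so calls that never passed a sqlContext are unchanged. A hedged usage sketch; the file path below is a placeholder, and "json" is a built-in source name:

```r
# Illustrative path; schema inference applies when schema = NULL.
df <- read.df("examples/src/main/resources/people.json", source = "json")
```
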
@@ -568,22 +502,13 @@ read.df.default <- function(path = NULL, source = NULL, schema = NULL, na.string
   dataFrame(sdf)
 }
 
-read.df <- function(x = NULL, ...) {
-  dispatchFunc("read.df(path = NULL, source = NULL, schema = NULL, ...)", x, ...)
-}
-
 #' @rdname read.df
 #' @name loadDF
-#' @method loadDF default
 #' @note loadDF since 1.6.0
-loadDF.default <- function(path = NULL, source = NULL, schema = NULL, ...) {
+loadDF <- function(path = NULL, source = NULL, schema = NULL, ...) {
   read.df(path, source, schema, ...)
 }
 
-loadDF <- function(x = NULL, ...) {
-  dispatchFunc("loadDF(path = NULL, source = NULL, schema = NULL, ...)", x, ...)
-}
-
 #' Create a SparkDataFrame representing the database table accessible via JDBC URL
 #'
 #' Additional JDBC database connection properties can be set (...)
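
The hunk ends where the read.jdbc documentation begins. For context, a typical call to read.jdbc as documented in SparkR looks like the sketch below; the URL, table name, and credentials are placeholders:

```r
# Placeholder connection details -- substitute a real JDBC URL and table.
df <- read.jdbc("jdbc:postgresql://localhost:5432/testdb", "tablename",
                user = "username", password = "password")
```
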