@@ -165,9 +165,9 @@ getDefaultSqlSource <- function() {
165165# '
166166# ' Converts R data.frame or list into SparkDataFrame.
167167# '
168- # ' @param data An RDD or list or data.frame
169- # ' @param schema a list of column names or named list (StructType), optional
170- # ' @return a SparkDataFrame
168+ # ' @param data an RDD or list or data.frame.
169+ # ' @param schema a list of column names or named list (StructType), optional.
170+ # ' @return A SparkDataFrame.
171171# ' @rdname createDataFrame
172172# ' @export
173173# ' @examples
@@ -257,23 +257,25 @@ createDataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0) {
257257}
258258
259259createDataFrame <- function (x , ... ) {
260- dispatchFunc(" createDataFrame(data, schema = NULL, samplingRatio = 1.0 )" , x , ... )
260+ dispatchFunc(" createDataFrame(data, schema = NULL)" , x , ... )
261261}
262262
263+ # ' @param samplingRatio Currently not used.
263264# ' @rdname createDataFrame
264265# ' @aliases createDataFrame
265266# ' @export
266267# ' @method as.DataFrame default
267268# ' @note as.DataFrame since 1.6.0
268269as.DataFrame.default <- function (data , schema = NULL , samplingRatio = 1.0 ) {
269- createDataFrame(data , schema , samplingRatio )
270+ createDataFrame(data , schema )
270271}
271272
273+ # ' @param ... additional argument(s).
272274# ' @rdname createDataFrame
273275# ' @aliases as.DataFrame
274276# ' @export
275- as.DataFrame <- function (x , ... ) {
276- dispatchFunc(" as.DataFrame(data, schema = NULL, samplingRatio = 1.0 )" , x , ... )
277+ as.DataFrame <- function (data , ... ) {
278+ dispatchFunc(" as.DataFrame(data, schema = NULL)" , data , ... )
277279}
278280
279281# ' toDF
@@ -398,7 +400,7 @@ read.orc <- function(path) {
398400# '
399401# ' Loads a Parquet file, returning the result as a SparkDataFrame.
400402# '
401- # ' @param path Path of file to read. A vector of multiple paths is allowed.
403+ # ' @param path path of file to read. A vector of multiple paths is allowed.
402404# ' @return SparkDataFrame
403405# ' @rdname read.parquet
404406# ' @export
@@ -418,6 +420,7 @@ read.parquet <- function(x, ...) {
418420 dispatchFunc(" read.parquet(...)" , x , ... )
419421}
420422
423+ # ' @param ... argument(s) passed to the method.
421424# ' @rdname read.parquet
422425# ' @name parquetFile
423426# ' @export
@@ -727,6 +730,7 @@ dropTempView <- function(viewName) {
727730# ' @param source The name of external data source
728731# ' @param schema The data schema defined in structType
729732# ' @param na.strings Default string value for NA when source is "csv"
733+ # ' @param ... additional external data source specific named properties.
730734# ' @return SparkDataFrame
731735# ' @rdname read.df
732736# ' @name read.df
@@ -791,10 +795,11 @@ loadDF <- function(x, ...) {
791795# ' If `source` is not specified, the default data source configured by
792796# ' "spark.sql.sources.default" will be used.
793797# '
794- # ' @param tableName A name of the table
795- # ' @param path The path of files to load
796- # ' @param source the name of external data source
797- # ' @return SparkDataFrame
798+ # ' @param tableName a name of the table.
799+ # ' @param path the path of files to load.
800+ # ' @param source the name of external data source.
801+ # ' @param ... additional argument(s) passed to the method.
802+ # ' @return A SparkDataFrame.
798803# ' @rdname createExternalTable
799804# ' @export
800805# ' @examples
@@ -840,6 +845,7 @@ createExternalTable <- function(x, ...) {
840845# ' clause expressions used to split the column `partitionColumn` evenly.
841846# ' This defaults to SparkContext.defaultParallelism when unset.
842847# ' @param predicates a list of conditions in the where clause; each one defines one partition
848+ # ' @param ... additional JDBC database connection named properties.
843849# ' @return SparkDataFrame
844850# ' @rdname read.jdbc
845851# ' @name read.jdbc