@@ -463,6 +463,7 @@ setMethod("createOrReplaceTempView",
463463 })
464464
465465# ' (Deprecated) Register Temporary Table
466+ # '
466467# ' Registers a SparkDataFrame as a Temporary Table in the SQLContext
467468# ' @param x A SparkDataFrame
468469# ' @param tableName A character vector containing the name of the table
@@ -606,10 +607,10 @@ setMethod("unpersist",
606607# '
607608# ' The following options for repartition are possible:
608609# ' \itemize{
609- # ' \item{"Option 1" } {Return a new SparkDataFrame partitioned by
610+ # ' \item{1. } {Return a new SparkDataFrame partitioned by
610611# ' the given columns into `numPartitions`.}
611- # ' \item{"Option 2" } {Return a new SparkDataFrame that has exactly `numPartitions`.}
612- # ' \item{"Option 3" } {Return a new SparkDataFrame partitioned by the given column(s),
612+ # ' \item{2. } {Return a new SparkDataFrame that has exactly `numPartitions`.}
613+ # ' \item{3. } {Return a new SparkDataFrame partitioned by the given column(s),
613614# ' using `spark.sql.shuffle.partitions` as number of partitions.}
614615# '}
615616# ' @param x A SparkDataFrame
@@ -1053,7 +1054,7 @@ setMethod("limit",
10531054 dataFrame(res )
10541055 })
10551056
1056- # ' Take the first NUM rows of a SparkDataFrame and return a the results as a data.frame
1057+ # ' Take the first NUM rows of a SparkDataFrame and return the results as an R data.frame
10571058# '
10581059# ' @family SparkDataFrame functions
10591060# ' @rdname take
@@ -1076,7 +1077,7 @@ setMethod("take",
10761077
10771078# ' Head
10781079# '
1079- # ' Return the first NUM rows of a SparkDataFrame as a data.frame. If NUM is NULL,
1080+ # ' Return the first NUM rows of a SparkDataFrame as an R data.frame. If NUM is NULL,
10801081# ' then head() returns the first 6 rows in keeping with the current data.frame
10811082# ' convention in R.
10821083# '
@@ -1157,7 +1158,6 @@ setMethod("toRDD",
11571158# '
11581159# ' @param x a SparkDataFrame
11591160# ' @return a GroupedData
1160- # ' @seealso GroupedData
11611161# ' @family SparkDataFrame functions
11621162# ' @rdname groupBy
11631163# ' @name groupBy
@@ -1242,9 +1242,9 @@ dapplyInternal <- function(x, func, schema) {
12421242# '
12431243# ' @param x A SparkDataFrame
12441244# ' @param func A function to be applied to each partition of the SparkDataFrame.
1245- # ' func should have only one parameter, to which a data.frame corresponds
1245+ # ' func should have only one parameter, to which an R data.frame corresponds
12461246# ' to each partition will be passed.
1247- # ' The output of func should be a data.frame.
1247+ # ' The output of func should be an R data.frame.
12481248# ' @param schema The schema of the resulting SparkDataFrame after the function is applied.
12491249# ' It must match the output of func.
12501250# ' @family SparkDataFrame functions
@@ -1290,9 +1290,9 @@ setMethod("dapply",
12901290# '
12911291# ' @param x A SparkDataFrame
12921292# ' @param func A function to be applied to each partition of the SparkDataFrame.
1293- # ' func should have only one parameter, to which a data.frame corresponds
1293+ # ' func should have only one parameter, to which an R data.frame corresponds
12941294# ' to each partition will be passed.
1295- # ' The output of func should be a data.frame.
1295+ # ' The output of func should be an R data.frame.
12961296# ' @family SparkDataFrame functions
12971297# ' @rdname dapply
12981298# ' @name dapplyCollect
@@ -1639,7 +1639,6 @@ setMethod("select", signature(x = "SparkDataFrame", col = "character"),
16391639 }
16401640 })
16411641
1642- # ' @family SparkDataFrame functions
16431642# ' @rdname select
16441643# ' @export
16451644# ' @note select(SparkDataFrame, Column) since 1.4.0
@@ -1652,7 +1651,6 @@ setMethod("select", signature(x = "SparkDataFrame", col = "Column"),
16521651 dataFrame(sdf )
16531652 })
16541653
1655- # ' @family SparkDataFrame functions
16561654# ' @rdname select
16571655# ' @export
16581656# ' @note select(SparkDataFrame, list) since 1.4.0
@@ -1999,7 +1997,6 @@ setMethod("filter",
19991997 dataFrame(sdf )
20001998 })
20011999
2002- # ' @family SparkDataFrame functions
20032000# ' @rdname filter
20042001# ' @name where
20052002# ' @note where since 1.4.0
@@ -2220,11 +2217,13 @@ setMethod("merge",
22202217 joinRes
22212218 })
22222219
2220+ # ' Creates a list of columns by replacing the intersected ones with aliases
2221+ # '
22232222# ' Creates a list of columns by replacing the intersected ones with aliases.
22242223# ' The name of the alias column is formed by concatanating the original column name and a suffix.
22252224# '
2226- # ' @param x a SparkDataFrame on which the
2227- # ' @param intersectedColNames a list of intersected column names
2225+ # ' @param x a SparkDataFrame
2226+ # ' @param intersectedColNames a list of intersected column names of the SparkDataFrame
22282227# ' @param suffix a suffix for the column name
22292228# ' @return list of columns
22302229# '
@@ -2511,9 +2510,9 @@ setMethod("summary",
25112510 })
25122511
25132512
2514- # ' dropna
2513+ # ' A set of SparkDataFrame functions working with NA values
25152514# '
2516- # ' Returns a new SparkDataFrame omitting rows with null values.
2515+ # ' dropna, na.omit - Returns a new SparkDataFrame omitting rows with null values.
25172516# '
25182517# ' @param x A SparkDataFrame.
25192518# ' @param how "any" or "all".
@@ -2565,9 +2564,7 @@ setMethod("na.omit",
25652564 dropna(object , how , minNonNulls , cols )
25662565 })
25672566
2568- # ' fillna
2569- # '
2570- # ' Replace null values.
2567+ # ' fillna - Replace null values.
25712568# '
25722569# ' @param x A SparkDataFrame.
25732570# ' @param value Value to replace null values with.
@@ -2638,7 +2635,7 @@ setMethod("fillna",
26382635 dataFrame(sdf )
26392636 })
26402637
2641- # ' Download data from a SparkDataFrame into a data.frame
2638+ # ' Download data from a SparkDataFrame into an R data.frame
26422639# '
26432640# ' This function downloads the contents of a SparkDataFrame into an R's data.frame.
26442641# ' Since data.frames are held in memory, ensure that you have enough memory
0 commit comments