@@ -225,6 +225,37 @@ setCheckpointDir <- function(sc, dirName) {
225225 invisible (callJMethod(sc , " setCheckpointDir" , suppressWarnings(normalizePath(dirName ))))
226226}
227227
#' Add a file or directory to be downloaded with this Spark job on every node.
#'
#' The path passed can be either a local file, a file in HDFS (or other Hadoop-supported
#' filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
#' use sparkFiles.get(fileName) to find its download location.
#'
#' A directory can be given if the recursive option is set to true.
#' Currently directories are only supported for Hadoop-supported filesystems.
#'
#' @param sc Spark Context to use
#' @param path The path of the file to be added
#' @param recursive Recursive or not if the path is directory. Default is FALSE.
#' @noRd
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' addFile(sc, "myfile")
#'}
addFile <- function(sc, path, recursive = FALSE) {
  # normalizePath warns when the path does not exist locally (e.g. an HDFS or
  # HTTP URI); suppress that and let the JVM side resolve/validate the path.
  invisible(callJMethod(sc, "addFile",
                        suppressWarnings(normalizePath(path)),
                        recursive))
}
248+
#' Get the root directory that contains files added through addFile.
#'
#' @return The absolute path of the root directory on this node that holds
#'         files downloaded via addFile.
#' @noRd
sparkFiles.getRootDirectory <- function() {
  callJStatic("org.apache.spark.SparkFiles", "getRootDirectory")
}
253+
#' Get the absolute path of a file added through addFile.
#'
#' @param fileName The name of the file added through addFile
#' @return The absolute path of the downloaded copy of the file on this node.
#' @noRd
sparkFiles.get <- function(fileName) {
  # Coerce to character so non-string inputs (e.g. factors) map cleanly to
  # the JVM String parameter.
  callJStatic("org.apache.spark.SparkFiles", "get", as.character(fileName))
}
258+
#' Run a function over a list of elements, distributing the computations with Spark
#'
#' Run a function over a list of elements, distributing the computations with Spark. Applies a
0 commit comments