Skip to content

Commit b3ea579

Browse files
committed
[SPARK-14831][.2][ML][R] rename ml.save/ml.load to write.ml/read.ml
## What changes were proposed in this pull request? Continue the work of #12789 to rename ml.save/ml.load to write.ml/read.ml, which are more consistent with read.df/write.df and other methods in SparkR. I didn't rename `data` to `df` because we still use `predict` for prediction, which uses `newData` to match the signature in R. ## How was this patch tested? Existing unit tests. cc: yanboliang thunterdb Author: Xiangrui Meng <[email protected]> Closes #12807 from mengxr/SPARK-14831.
1 parent 7fbe1bb commit b3ea579

File tree

4 files changed

+44
-44
lines changed

4 files changed

+44
-44
lines changed

R/pkg/NAMESPACE

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ exportMethods("arrange",
110110
"write.json",
111111
"write.parquet",
112112
"write.text",
113-
"ml.save")
113+
"write.ml")
114114

115115
exportClasses("Column")
116116

@@ -305,7 +305,7 @@ export("as.DataFrame",
305305
"tables",
306306
"uncacheTable",
307307
"print.summary.GeneralizedLinearRegressionModel",
308-
"ml.load")
308+
"read.ml")
309309

310310
export("structField",
311311
"structField.jobj",

R/pkg/R/generics.R

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1213,6 +1213,6 @@ setGeneric("spark.naiveBayes", function(data, formula, ...) { standardGeneric("s
12131213
#' @export
12141214
setGeneric("spark.survreg", function(data, formula, ...) { standardGeneric("spark.survreg") })
12151215

1216-
#' @rdname ml.save
1216+
#' @rdname write.ml
12171217
#' @export
1218-
setGeneric("ml.save", function(object, path, ...) { standardGeneric("ml.save") })
1218+
setGeneric("write.ml", function(object, path, ...) { standardGeneric("write.ml") })

R/pkg/R/mllib.R

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -403,17 +403,17 @@ setMethod("spark.naiveBayes", signature(data = "SparkDataFrame", formula = "form
403403
#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE
404404
#' which means throw exception if the output path exists.
405405
#'
406-
#' @rdname ml.save
407-
#' @name ml.save
406+
#' @rdname write.ml
407+
#' @name write.ml
408408
#' @export
409409
#' @examples
410410
#' \dontrun{
411411
#' df <- createDataFrame(sqlContext, infert)
412412
#' model <- spark.naiveBayes(education ~ ., df, laplace = 0)
413413
#' path <- "path/to/model"
414-
#' ml.save(model, path)
414+
#' write.ml(model, path)
415415
#' }
416-
setMethod("ml.save", signature(object = "NaiveBayesModel", path = "character"),
416+
setMethod("write.ml", signature(object = "NaiveBayesModel", path = "character"),
417417
function(object, path, overwrite = FALSE) {
418418
writer <- callJMethod(object@jobj, "write")
419419
if (overwrite) {
@@ -429,16 +429,16 @@ setMethod("ml.save", signature(object = "NaiveBayesModel", path = "character"),
429429
#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE
430430
#' which means throw exception if the output path exists.
431431
#'
432-
#' @rdname ml.save
433-
#' @name ml.save
432+
#' @rdname write.ml
433+
#' @name write.ml
434434
#' @export
435435
#' @examples
436436
#' \dontrun{
437437
#' model <- spark.survreg(trainingData, Surv(futime, fustat) ~ ecog_ps + rx)
438438
#' path <- "path/to/model"
439-
#' ml.save(model, path)
439+
#' write.ml(model, path)
440440
#' }
441-
setMethod("ml.save", signature(object = "AFTSurvivalRegressionModel", path = "character"),
441+
setMethod("write.ml", signature(object = "AFTSurvivalRegressionModel", path = "character"),
442442
function(object, path, overwrite = FALSE) {
443443
writer <- callJMethod(object@jobj, "write")
444444
if (overwrite) {
@@ -454,16 +454,16 @@ setMethod("ml.save", signature(object = "AFTSurvivalRegressionModel", path = "ch
454454
#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE
455455
#' which means throw exception if the output path exists.
456456
#'
457-
#' @rdname ml.save
458-
#' @name ml.save
457+
#' @rdname write.ml
458+
#' @name write.ml
459459
#' @export
460460
#' @examples
461461
#' \dontrun{
462462
#' model <- glm(y ~ x, trainingData)
463463
#' path <- "path/to/model"
464-
#' ml.save(model, path)
464+
#' write.ml(model, path)
465465
#' }
466-
setMethod("ml.save", signature(object = "GeneralizedLinearRegressionModel", path = "character"),
466+
setMethod("write.ml", signature(object = "GeneralizedLinearRegressionModel", path = "character"),
467467
function(object, path, overwrite = FALSE) {
468468
writer <- callJMethod(object@jobj, "write")
469469
if (overwrite) {
@@ -479,16 +479,16 @@ setMethod("ml.save", signature(object = "GeneralizedLinearRegressionModel", path
479479
#' @param overwrite Overwrites or not if the output path already exists. Default is FALSE
480480
#' which means throw exception if the output path exists.
481481
#'
482-
#' @rdname ml.save
483-
#' @name ml.save
482+
#' @rdname write.ml
483+
#' @name write.ml
484484
#' @export
485485
#' @examples
486486
#' \dontrun{
487487
#' model <- spark.kmeans(x, k = 2, initializationMode="random")
488488
#' path <- "path/to/model"
489-
#' ml.save(model, path)
489+
#' write.ml(model, path)
490490
#' }
491-
setMethod("ml.save", signature(object = "KMeansModel", path = "character"),
491+
setMethod("write.ml", signature(object = "KMeansModel", path = "character"),
492492
function(object, path, overwrite = FALSE) {
493493
writer <- callJMethod(object@jobj, "write")
494494
if (overwrite) {
@@ -501,15 +501,15 @@ setMethod("ml.save", signature(object = "KMeansModel", path = "character"),
501501
#'
502502
#' @param path Path of the model to read.
503503
#' @return a fitted MLlib model
504-
#' @rdname ml.load
505-
#' @name ml.load
504+
#' @rdname read.ml
505+
#' @name read.ml
506506
#' @export
507507
#' @examples
508508
#' \dontrun{
509509
#' path <- "path/to/model"
510-
#' model <- ml.load(path)
510+
#' model <- read.ml(path)
511511
#' }
512-
ml.load <- function(path) {
512+
read.ml <- function(path) {
513513
path <- suppressWarnings(normalizePath(path))
514514
jobj <- callJStatic("org.apache.spark.ml.r.RWrappers", "load", path)
515515
if (isInstanceOf(jobj, "org.apache.spark.ml.r.NaiveBayesWrapper")) {

R/pkg/inst/tests/testthat/test_mllib.R

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -133,10 +133,10 @@ test_that("spark.glm save/load", {
133133
s <- summary(m)
134134

135135
modelPath <- tempfile(pattern = "glm", fileext = ".tmp")
136-
ml.save(m, modelPath)
137-
expect_error(ml.save(m, modelPath))
138-
ml.save(m, modelPath, overwrite = TRUE)
139-
m2 <- ml.load(modelPath)
136+
write.ml(m, modelPath)
137+
expect_error(write.ml(m, modelPath))
138+
write.ml(m, modelPath, overwrite = TRUE)
139+
m2 <- read.ml(modelPath)
140140
s2 <- summary(m2)
141141

142142
expect_equal(s$coefficients, s2$coefficients)
@@ -263,10 +263,10 @@ test_that("glm save/load", {
263263
s <- summary(m)
264264

265265
modelPath <- tempfile(pattern = "glm", fileext = ".tmp")
266-
ml.save(m, modelPath)
267-
expect_error(ml.save(m, modelPath))
268-
ml.save(m, modelPath, overwrite = TRUE)
269-
m2 <- ml.load(modelPath)
266+
write.ml(m, modelPath)
267+
expect_error(write.ml(m, modelPath))
268+
write.ml(m, modelPath, overwrite = TRUE)
269+
m2 <- read.ml(modelPath)
270270
s2 <- summary(m2)
271271

272272
expect_equal(s$coefficients, s2$coefficients)
@@ -311,10 +311,10 @@ test_that("spark.kmeans", {
311311

312312
# Test model save/load
313313
modelPath <- tempfile(pattern = "kmeans", fileext = ".tmp")
314-
ml.save(model, modelPath)
315-
expect_error(ml.save(model, modelPath))
316-
ml.save(model, modelPath, overwrite = TRUE)
317-
model2 <- ml.load(modelPath)
314+
write.ml(model, modelPath)
315+
expect_error(write.ml(model, modelPath))
316+
write.ml(model, modelPath, overwrite = TRUE)
317+
model2 <- read.ml(modelPath)
318318
summary2 <- summary(model2)
319319
expect_equal(sort(unlist(summary.model$size)), sort(unlist(summary2$size)))
320320
expect_equal(summary.model$coefficients, summary2$coefficients)
@@ -378,10 +378,10 @@ test_that("naiveBayes", {
378378

379379
# Test model save/load
380380
modelPath <- tempfile(pattern = "naiveBayes", fileext = ".tmp")
381-
ml.save(m, modelPath)
382-
expect_error(ml.save(m, modelPath))
383-
ml.save(m, modelPath, overwrite = TRUE)
384-
m2 <- ml.load(modelPath)
381+
write.ml(m, modelPath)
382+
expect_error(write.ml(m, modelPath))
383+
write.ml(m, modelPath, overwrite = TRUE)
384+
m2 <- read.ml(modelPath)
385385
s2 <- summary(m2)
386386
expect_equal(s$apriori, s2$apriori)
387387
expect_equal(s$tables, s2$tables)
@@ -435,10 +435,10 @@ test_that("spark.survreg", {
435435

436436
# Test model save/load
437437
modelPath <- tempfile(pattern = "survreg", fileext = ".tmp")
438-
ml.save(model, modelPath)
439-
expect_error(ml.save(model, modelPath))
440-
ml.save(model, modelPath, overwrite = TRUE)
441-
model2 <- ml.load(modelPath)
438+
write.ml(model, modelPath)
439+
expect_error(write.ml(model, modelPath))
440+
write.ml(model, modelPath, overwrite = TRUE)
441+
model2 <- read.ml(modelPath)
442442
stats2 <- summary(model2)
443443
coefs2 <- as.vector(stats2$coefficients[, 1])
444444
expect_equal(coefs, coefs2)

0 commit comments

Comments
 (0)