Commit cb6d263

Michael Chirico committed:

    Merge branch 'master' into r-regex-fixed

2 parents 14652d1 + 2d3e960

File tree: 26 files changed (+311, -103 lines)

.github/workflows/master.yml
Lines changed: 10 additions & 10 deletions

@@ -103,12 +103,12 @@ jobs:
     - uses: actions/setup-java@v1
       with:
         java-version: '11'
-    - name: install R
+    - uses: r-lib/actions/setup-r@v1
+      with:
+        r-version: '3.6.2'
+    - name: Install lib
       run: |
-        echo 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran35/' | sudo tee -a /etc/apt/sources.list
-        curl -sL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xE298A3A825C0D65DFD57CBB651716619E084DAB9" | sudo apt-key add
-        sudo apt-get update
-        sudo apt-get install -y r-base r-base-dev libcurl4-openssl-dev
+        sudo apt-get install -y libcurl4-openssl-dev
     - name: install R packages
       run: |
         sudo Rscript -e "install.packages(c('curl', 'xml2', 'httr', 'devtools', 'testthat', 'knitr', 'rmarkdown', 'roxygen2', 'e1071', 'survival'), repos='https://cloud.r-project.org/')"
@@ -139,12 +139,12 @@ jobs:
     - uses: actions/setup-ruby@v1
       with:
         ruby-version: '2.7'
-    - name: Install R
+    - uses: r-lib/actions/setup-r@v1
+      with:
+        r-version: '3.6.2'
+    - name: Install lib and pandoc
       run: |
-        echo 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran35/' | sudo tee -a /etc/apt/sources.list
-        curl -sL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xE298A3A825C0D65DFD57CBB651716619E084DAB9" | sudo apt-key add
-        sudo apt-get update
-        sudo apt-get install -y r-base r-base-dev libcurl4-openssl-dev pandoc
+        sudo apt-get install -y libcurl4-openssl-dev pandoc
     - name: Install packages
       run: |
         pip install sphinx mkdocs numpy
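
Note: the workflow now installs R via the r-lib/actions/setup-r action instead of adding the CRAN apt repository by hand, so only libcurl needs apt. As a minimal sketch (not part of this commit), a later step could assert the pinned interpreter from R itself:

    # Hypothetical CI sanity check: fail the job if the R picked up on PATH
    # does not match the r-version pinned in the workflow above.
    stopifnot(getRversion() == "3.6.2")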

R/pkg/R/DataFrame.R
Lines changed: 5 additions & 5 deletions

@@ -271,7 +271,7 @@ setMethod("show", "SparkDataFrame",
               paste(l, collapse = ":")
             })
             s <- paste(cols, collapse = ", ")
-            cat(paste(class(object), "[", s, "]\n", sep = ""))
+            cat(paste0(class(object), "[", s, "]\n"))
           }
         })

@@ -2755,10 +2755,10 @@ setMethod("merge",
             colY <- joinY[[i]]

             if (colX %in% by) {
-              colX <- paste(colX, suffixes[1], sep = "")
+              colX <- paste0(colX, suffixes[1])
             }
             if (colY %in% by) {
-              colY <- paste(colY, suffixes[2], sep = "")
+              colY <- paste0(colY, suffixes[2])
             }

             colX <- getColumn(xsel, colX)
@@ -2773,7 +2773,7 @@ setMethod("merge",

             # sorts the result by 'by' columns if sort = TRUE
             if (sort && length(by) > 0) {
-              colNameWithSuffix <- paste(by, suffixes[2], sep = "")
+              colNameWithSuffix <- paste0(by, suffixes[2])
               joinRes <- do.call("arrange", c(joinRes, colNameWithSuffix, decreasing = FALSE))
             }

@@ -2796,7 +2796,7 @@ genAliasesForIntersectedCols <- function(x, intersectedColNames, suffix) {
   cols <- lapply(allColNames, function(colName) {
     col <- getColumn(x, colName)
     if (colName %in% intersectedColNames) {
-      newJoin <- paste(colName, suffix, sep = "")
+      newJoin <- paste0(colName, suffix)
       if (newJoin %in% allColNames) {
         stop("The following column name: ", newJoin, " occurs more than once in the 'DataFrame'.",
              "Please use different suffixes for the intersected columns.")

R/pkg/R/RDD.R
Lines changed: 1 addition & 1 deletion

@@ -69,7 +69,7 @@ setMethod("initialize", "RDD", function(.Object, jrdd, serializedMode,

 setMethod("showRDD", "RDD",
           function(object) {
-            cat(paste(callJMethod(getJRDD(object), "toString"), "\n", sep = ""))
+            cat(paste0(callJMethod(getJRDD(object), "toString"), "\n"))
           })

 setMethod("initialize", "PipelinedRDD", function(.Object, prev, func, jrdd_val) {
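
The paste-to-paste0 swap here and in DataFrame.R above is behavior-preserving: paste0(...) is exactly paste(..., sep = ""). A quick standalone check (the strings are made up for illustration):

    # Both spellings build "SparkDataFrame[age:int, name:string]\n".
    s <- "age:int, name:string"
    identical(paste("SparkDataFrame", "[", s, "]\n", sep = ""),
              paste0("SparkDataFrame", "[", s, "]\n"))  # TRUE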

R/pkg/R/mllib_classification.R
Lines changed: 1 addition & 1 deletion

@@ -338,7 +338,7 @@ setMethod("spark.logit", signature(data = "SparkDataFrame", formula = "formula")
           if (!is.null(lowerBoundsOnCoefficients) && (row != nrow(upperBoundsOnCoefficients)
               || col != ncol(upperBoundsOnCoefficients))) {
             stop(paste0("dimension of upperBoundsOnCoefficients ",
-                        "is not the same as lowerBoundsOnCoefficients", sep = ""))
+                        "is not the same as lowerBoundsOnCoefficients"))
           }

           if (is.null(lowerBoundsOnCoefficients)) {
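
Unlike the purely stylistic swaps elsewhere in this commit, the line above removes a latent mistake: paste0() takes no sep argument (only ... and collapse), so sep = "" was silently treated as one more string to concatenate. It was harmless here only because the stray string was empty:

    paste0("a", "b", sep = "-")  # "ab-" -- the "-" is appended, not used as a separator
    paste0("a", "b", sep = "")   # "ab"  -- empty string, so no visible damage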

R/pkg/R/schema.R
Lines changed: 3 additions & 4 deletions

@@ -99,10 +99,9 @@ print.structType <- function(x, ...) {
   cat("StructType\n",
       sapply(x$fields(),
              function(field) {
-               paste("|-", "name = \"", field$name(),
-                     "\", type = \"", field$dataType.toString(),
-                     "\", nullable = ", field$nullable(), "\n",
-                     sep = "")
+               paste0("|-", "name = \"", field$name(),
+                      "\", type = \"", field$dataType.toString(),
+                      "\", nullable = ", field$nullable(), "\n")
             }),
       sep = "")
 }

R/pkg/R/sparkR.R
Lines changed: 1 addition & 1 deletion

@@ -244,7 +244,7 @@ sparkR.sparkContext <- function(
     uriSep <- "////"
   }
   localJarPaths <- lapply(jars,
-                          function(j) { utils::URLencode(paste("file:", uriSep, j, sep = "")) })
+                          function(j) { utils::URLencode(paste0("file:", uriSep, j)) })

   # Set the start time to identify jobjs
   # Seconds resolution is good enough for this purpose, so use ints

R/pkg/R/types.R
Lines changed: 6 additions & 11 deletions

@@ -94,27 +94,22 @@ checkSchemaInArrow <- function(schema) {
   }

   # Both cases below produce a corrupt value for unknown reason. It needs to be investigated.
-  if (any(sapply(schema$fields(), function(x) x$dataType.toString() == "FloatType"))) {
+  field_strings <- sapply(schema$fields(), function(x) x$dataType.toString())
+  if (any(field_strings == "FloatType")) {
     stop("Arrow optimization in R does not support float type yet.")
   }
-  if (any(sapply(schema$fields(), function(x) x$dataType.toString() == "BinaryType"))) {
+  if (any(field_strings == "BinaryType")) {
     stop("Arrow optimization in R does not support binary type yet.")
   }
-  if (any(sapply(schema$fields(),
-                 function(x) startsWith(x$dataType.toString(),
-                                        "ArrayType")))) {
+  if (any(startsWith(field_strings, "ArrayType"))) {
     stop("Arrow optimization in R does not support array type yet.")
   }

   # Arrow optimization in Spark does not yet support both cases below.
-  if (any(sapply(schema$fields(),
-                 function(x) startsWith(x$dataType.toString(),
-                                        "StructType")))) {
+  if (any(startsWith(field_strings, "StructType"))) {
     stop("Arrow optimization in R does not support nested struct type yet.")
   }
-  if (any(sapply(schema$fields(),
-                 function(x) startsWith(x$dataType.toString(),
-                                        "MapType")))) {
+  if (any(startsWith(field_strings, "MapType"))) {
     stop("Arrow optimization in R does not support map type yet.")
   }
 }
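
This refactor computes the sapply() once into field_strings and then relies on == and startsWith() being vectorized over character vectors. A standalone sketch with made-up type strings:

    field_strings <- c("IntegerType", "ArrayType(StringType,true)", "FloatType")
    any(field_strings == "FloatType")            # TRUE: exact, element-wise match
    any(startsWith(field_strings, "ArrayType"))  # TRUE: vectorized prefix match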

R/pkg/R/utils.R
Lines changed: 1 addition & 1 deletion

@@ -137,7 +137,7 @@ hashCode <- function(key) {
       as.integer(hashC)
     }
   } else {
-    warning(paste("Could not hash object, returning 0", sep = ""))
+    warning("Could not hash object, returning 0")
     as.integer(0)
   }
 }

R/pkg/tests/fulltests/test_sparkSQL.R
Lines changed: 16 additions & 16 deletions

@@ -2593,8 +2593,8 @@ test_that("join(), crossJoin() and merge() on a DataFrame", {
   writeLines(mockLines3, jsonPath3)
   df3 <- read.json(jsonPath3)
   expect_error(merge(df, df3),
-               paste("The following column name: name_y occurs more than once in the 'DataFrame'.",
-                     "Please use different suffixes for the intersected columns.", sep = ""))
+               paste0("The following column name: name_y occurs more than once in the 'DataFrame'.",
+                      "Please use different suffixes for the intersected columns."))

   unlink(jsonPath2)
   unlink(jsonPath3)
@@ -2637,20 +2637,20 @@ test_that("toJSON() on DataFrame", {

 test_that("showDF()", {
   df <- read.json(jsonPath)
-  expected <- paste("+----+-------+\n",
-                    "| age| name|\n",
-                    "+----+-------+\n",
-                    "|null|Michael|\n",
-                    "| 30| Andy|\n",
-                    "| 19| Justin|\n",
-                    "+----+-------+\n", sep = "")
-  expected2 <- paste("+---+----+\n",
-                     "|age|name|\n",
-                     "+---+----+\n",
-                     "|nul| Mic|\n",
-                     "| 30| And|\n",
-                     "| 19| Jus|\n",
-                     "+---+----+\n", sep = "")
+  expected <- paste("+----+-------+",
+                    "| age| name|",
+                    "+----+-------+",
+                    "|null|Michael|",
+                    "| 30| Andy|",
+                    "| 19| Justin|",
+                    "+----+-------+\n", sep = "\n")
+  expected2 <- paste("+---+----+",
+                     "|age|name|",
+                     "+---+----+",
+                     "|nul| Mic|",
+                     "| 30| And|",
+                     "| 19| Jus|",
+                     "+---+----+\n", sep = "\n")
   expect_output(showDF(df), expected)
   expect_output(showDF(df, truncate = 3), expected2)
 })
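
The rewritten expectations join the rows with sep = "\n" instead of embedding "\n" in every element; both spellings build the identical string, including the trailing newline kept on the final row:

    a <- paste("+---+\n", "|foo|\n", "+---+\n", sep = "")
    b <- paste("+---+", "|foo|", "+---+\n", sep = "\n")
    identical(a, b)  # TRUE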

dev/deps/spark-deps-hadoop-2.7-hive-1.2
Lines changed: 3 additions & 3 deletions

@@ -160,9 +160,9 @@ objenesis/2.5.1//objenesis-2.5.1.jar
 okhttp/3.12.6//okhttp-3.12.6.jar
 okio/1.15.0//okio-1.15.0.jar
 opencsv/2.3//opencsv-2.3.jar
-orc-core/1.5.9/nohive/orc-core-1.5.9-nohive.jar
-orc-mapreduce/1.5.9/nohive/orc-mapreduce-1.5.9-nohive.jar
-orc-shims/1.5.9//orc-shims-1.5.9.jar
+orc-core/1.5.10/nohive/orc-core-1.5.10-nohive.jar
+orc-mapreduce/1.5.10/nohive/orc-mapreduce-1.5.10-nohive.jar
+orc-shims/1.5.10//orc-shims-1.5.10.jar
 oro/2.0.8//oro-2.0.8.jar
 osgi-resource-locator/1.0.3//osgi-resource-locator-1.0.3.jar
 paranamer/2.8//paranamer-2.8.jar
