Commit 4c14278 (2 parents: 66ab632 + 0923c4f)

Merge remote-tracking branch 'upstream/master' into vectorized-orc-reader3

Conflicts:
    sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala

437 files changed: 12,823 additions and 4,988 deletions


.gitignore

Lines changed: 1 addition & 0 deletions
@@ -77,3 +77,4 @@ spark-warehouse/
 # For R session data
 .RData
 .RHistory
+.Rhistory

R/pkg/NAMESPACE

Lines changed: 16 additions & 3 deletions
@@ -6,10 +6,16 @@ importFrom(methods, setGeneric, setMethod, setOldClass)
 #useDynLib(SparkR, stringHashCode)
 
 # S3 methods exported
+export("sparkR.session")
 export("sparkR.init")
 export("sparkR.stop")
+export("sparkR.session.stop")
+export("sparkR.conf")
 export("print.jobj")
 
+export("sparkRSQL.init",
+       "sparkRHive.init")
+
 # MLlib integration
 exportMethods("glm",
               "spark.glm",
@@ -62,6 +68,7 @@ exportMethods("arrange",
               "filter",
               "first",
               "freqItems",
+              "gapply",
               "group_by",
               "groupBy",
               "head",
@@ -80,7 +87,9 @@ exportMethods("arrange",
               "orderBy",
               "persist",
               "printSchema",
+              "randomSplit",
               "rbind",
+              "registerTempTable",
               "rename",
               "repartition",
               "sample",
@@ -99,6 +108,7 @@ exportMethods("arrange",
               "summary",
               "take",
               "transform",
+              "union",
               "unionAll",
               "unique",
               "unpersist",
@@ -109,6 +119,7 @@ exportMethods("arrange",
               "write.df",
               "write.jdbc",
               "write.json",
+              "write.orc",
               "write.parquet",
               "write.text",
               "write.ml")
@@ -210,6 +221,7 @@ exportMethods("%in%",
               "mean",
               "min",
               "minute",
+              "monotonically_increasing_id",
               "month",
               "months_between",
               "n",
@@ -250,6 +262,7 @@ exportMethods("%in%",
               "skewness",
               "sort_array",
               "soundex",
+              "spark_partition_id",
               "stddev",
               "stddev_pop",
               "stddev_samp",
@@ -283,22 +296,22 @@ exportMethods("%in%",
 
 exportClasses("GroupedData")
 exportMethods("agg")
-
-export("sparkRSQL.init",
-       "sparkRHive.init")
+exportMethods("pivot")
 
 export("as.DataFrame",
        "cacheTable",
        "clearCache",
        "createDataFrame",
        "createExternalTable",
        "dropTempTable",
+       "dropTempView",
        "jsonFile",
        "loadDF",
        "parquetFile",
        "read.df",
        "read.jdbc",
        "read.json",
+       "read.orc",
        "read.parquet",
        "read.text",
        "spark.lapply",
