1 file changed: +3 −4 lines
examples/src/main/scala/org/apache/spark/examples/sql

19 19   package org.apache.spark.examples.sql
20 20
21 21   import org.apache.spark.{SparkConf, SparkContext}
22    - import org.apache.spark.sql.SQLContext
23    - import org.apache.spark.sql.functions._
   22 + import org.apache.spark.sql.{SaveMode, SQLContext}
24 23
25 24   // One method for defining the schema of an RDD is to make a case class with the desired column
26 25   // names and types.
@@ -58,8 +57,8 @@ object RDDRelation {
58 57   // Queries can also be written using a LINQ-like Scala DSL.
59 58   df.where($"key" === 1).orderBy($"value".asc).select($"key").collect().foreach(println)
60 59
61    - // Write out an RDD as a parquet file.
62    - df.write.parquet("pair.parquet")
   60 + // Write out an RDD as a parquet file with overwrite mode.
   61 + df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")
63 62
64 63   // Read in parquet file. Parquet files are self-describing so the schema is preserved.
65 64   val parquetFile = sqlContext.read.parquet("pair.parquet")
You can’t perform that action at this time.
0 commit comments