sql/hive/src/main/scala/org/apache/spark/sql/hive (1 file changed, +3 -3 lines)

@@ -27,9 +27,10 @@ import org.apache.hadoop.hive.ql.{ErrorMsg, Context}
 import org.apache.hadoop.hive.ql.exec.{FunctionRegistry, FunctionInfo}
 import org.apache.hadoop.hive.ql.lib.Node
 import org.apache.hadoop.hive.ql.parse._
+import org.apache.hadoop.hive.ql.plan.PlanUtils
 import org.apache.hadoop.hive.ql.session.SessionState

-import org.apache.spark.sql.{AnalysisException, SparkSQLParser}
+import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans._

@@ -611,8 +612,7 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
        hiveConf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT))) {
        tableDesc.copy(
          inputFormat = Option("org.apache.hadoop.mapred.SequenceFileInputFormat"),
-         outputFormat = Option("org.apache.hadoop.mapred.SequenceFileOutputFormat"),
-         serde = Option("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
+         outputFormat = Option("org.apache.hadoop.mapred.SequenceFileOutputFormat"))
      } else if ("RCFile".equalsIgnoreCase(
        hiveConf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT))) {
        tableDesc.copy(
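
For context, this hunk sits in the logic that fills in a table descriptor from the HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT setting: after the change, the SequenceFile branch sets only the input and output formats and no longer hard-codes LazySimpleSerDe, so the serde falls back to the default (the newly imported PlanUtils presumably supplies that default elsewhere in the file). Below is a minimal, hypothetical Scala sketch of the dispatch pattern; TableDesc and applyDefaultFormat are simplified stand-ins, not the real Spark API.

// Hypothetical sketch, not the actual Spark HiveQl code: shows how the
// configured default file format decides which format classes get copied
// into the table descriptor, and why an unset serde stays at the default.
object DefaultFormatSketch {
  // Simplified stand-in for the real table descriptor.
  case class TableDesc(
      inputFormat: Option[String] = None,
      outputFormat: Option[String] = None,
      serde: Option[String] = None)

  def applyDefaultFormat(tableDesc: TableDesc, defaultFormat: String): TableDesc =
    if ("SequenceFile".equalsIgnoreCase(defaultFormat)) {
      // After this change the SequenceFile branch no longer pins a serde,
      // so whatever default serde the surrounding code selects is kept.
      tableDesc.copy(
        inputFormat = Option("org.apache.hadoop.mapred.SequenceFileInputFormat"),
        outputFormat = Option("org.apache.hadoop.mapred.SequenceFileOutputFormat"))
    } else if ("RCFile".equalsIgnoreCase(defaultFormat)) {
      // RCFile branch elided; the real code copies RCFile format classes here.
      tableDesc
    } else {
      tableDesc
    }

  def main(args: Array[String]): Unit = {
    // Example: with a SequenceFile default, serde remains None.
    println(applyDefaultFormat(TableDesc(), "SequenceFile"))
  }
}

The sketch only illustrates the behavioral point of the diff: leaving serde unset in the SequenceFile branch lets the default serde apply instead of forcing LazySimpleSerDe.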