Commit f4e243f

remove the serde setting for SequenceFile
1 parent d166afa commit f4e243f

File tree

1 file changed: +3 -3 lines changed
  • sql/hive/src/main/scala/org/apache/spark/sql/hive

sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala

Lines changed: 3 additions & 3 deletions
@@ -27,9 +27,10 @@ import org.apache.hadoop.hive.ql.{ErrorMsg, Context}
 import org.apache.hadoop.hive.ql.exec.{FunctionRegistry, FunctionInfo}
 import org.apache.hadoop.hive.ql.lib.Node
 import org.apache.hadoop.hive.ql.parse._
+import org.apache.hadoop.hive.ql.plan.PlanUtils
 import org.apache.hadoop.hive.ql.session.SessionState
 
-import org.apache.spark.sql.{AnalysisException, SparkSQLParser}
+import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans._
@@ -611,8 +612,7 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
           hiveConf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT))) {
         tableDesc.copy(
           inputFormat = Option("org.apache.hadoop.mapred.SequenceFileInputFormat"),
-          outputFormat = Option("org.apache.hadoop.mapred.SequenceFileOutputFormat"),
-          serde = Option("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
+          outputFormat = Option("org.apache.hadoop.mapred.SequenceFileOutputFormat"))
       } else if ("RCFile".equalsIgnoreCase(
           hiveConf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT))) {
         tableDesc.copy(
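The change leaves serde unset when the default file format is SequenceFile, so the table descriptor no longer pins LazySimpleSerDe and the downstream layer falls back to its own default serde. Below is a minimal standalone Scala sketch of that pattern; TableDesc and applySequenceFileDefaults are simplified stand-ins for illustration, not Spark's actual HiveQl types.

// Simplified stand-in for a table descriptor: each field is optional and
// defaults to None, mirroring "leave it unset unless we force a value".
case class TableDesc(
    inputFormat: Option[String] = None,
    outputFormat: Option[String] = None,
    serde: Option[String] = None)

object DefaultFileFormat {
  // Force only the input/output formats for SequenceFile and leave serde as
  // None, so whatever consumes the descriptor applies its default serde.
  def applySequenceFileDefaults(tableDesc: TableDesc, defaultFileFormat: String): TableDesc =
    if ("SequenceFile".equalsIgnoreCase(defaultFileFormat)) {
      tableDesc.copy(
        inputFormat = Option("org.apache.hadoop.mapred.SequenceFileInputFormat"),
        outputFormat = Option("org.apache.hadoop.mapred.SequenceFileOutputFormat"))
    } else {
      tableDesc
    }

  def main(args: Array[String]): Unit = {
    val desc = applySequenceFileDefaults(TableDesc(), "sequencefile")
    // Prints TableDesc(Some(...InputFormat),Some(...OutputFormat),None):
    // the serde field stays None rather than being hard-coded.
    println(desc)
  }
}

Keeping serde as None means the serde choice is deferred rather than duplicated in the query planner, which is the effect of dropping the LazySimpleSerDe line in the diff above.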

0 commit comments