@@ -24,24 +24,13 @@ import org.apache.hadoop.conf.Configuration
 
 import org.apache.spark.annotation.{AlphaComponent, DeveloperApi, Experimental}
 import org.apache.spark.rdd.RDD
-<<<<<<< HEAD
-import org.apache.spark.sql.catalyst.analysis._
-import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.dsl.ExpressionConversions
-import org.apache.spark.sql.catalyst.optimizer.Optimizer
-import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
-import org.apache.spark.sql.catalyst.rules.RuleExecutor
-import org.apache.spark.sql.catalyst.ScalaReflection
-=======
 import org.apache.spark.sql.catalyst.ScalaReflection
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.dsl.ExpressionConversions
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.optimizer.Optimizer
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.catalyst.rules.RuleExecutor
-import org.apache.spark.sql.catalyst.types._
->>>>>>> upstream/master
 import org.apache.spark.sql.columnar.InMemoryRelation
 import org.apache.spark.sql.execution._
 import org.apache.spark.sql.execution.SparkStrategies
@@ -477,6 +466,8 @@ class SQLContext(@transient val sparkContext: SparkContext)
   private[sql] def applySchemaToPythonRDD(
       rdd: RDD[Map[String, _]],
       schema: StructType): SchemaRDD = {
+    // TODO: We should have a better implementation once we do not turn a Python side record
+    // to a Map.
     import scala.collection.JavaConversions._
     import scala.collection.convert.Wrappers.{JListWrapper, JMapWrapper}
 
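For context, the TODO added in the second hunk refers to how `applySchemaToPythonRDD` currently receives each Python-side record as a `Map[String, _]` and must rebuild a row field-by-field from the `StructType`. The sketch below illustrates that per-record conversion only; it is not code from this commit. It assumes the Spark 1.x Catalyst internals visible in this file (`StructType` in `org.apache.spark.sql.catalyst.types`, `GenericRow` in `org.apache.spark.sql.catalyst.expressions`), and `recordToRow` is a name invented here for illustration.

```scala
import org.apache.spark.sql.catalyst.expressions.{GenericRow, Row}
import org.apache.spark.sql.catalyst.types.StructType

// Hypothetical helper, not part of this commit: build a Row by looking up
// each schema field by name in the Python-side record. Values are pulled
// out in schema order so the Row layout matches the schema; a missing key
// becomes null.
def recordToRow(record: Map[String, Any], schema: StructType): Row =
  new GenericRow(schema.fields.map(f => record.getOrElse(f.name, null)).toArray)
```

A Map-keyed record pays a hash lookup per field per row, which is part of why the TODO anticipates a better implementation once Python records stop arriving as Maps.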