10 changes: 3 additions & 7 deletions python/pyspark/sql/context.py
@@ -18,6 +18,7 @@
 import sys
 import warnings
 import json
+from functools import reduce

 if sys.version >= '3':
     basestring = unicode = str
@@ -236,14 +237,9 @@ def _inferSchemaFromList(self, data):
         if type(first) is dict:
             warnings.warn("inferring schema from dict is deprecated,"
                           "please use pyspark.sql.Row instead")
-        schema = _infer_schema(first)
+        schema = reduce(_merge_type, map(_infer_schema, data))
         if _has_nulltype(schema):
-            for r in data:
-                schema = _merge_type(schema, _infer_schema(r))
-                if not _has_nulltype(schema):
-                    break
-            else:
-                raise ValueError("Some of types cannot be determined after inferring")
+            raise ValueError("Some of types cannot be determined after inferring")
         return schema

     def _inferSchema(self, rdd, samplingRatio=None):
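The change folds `_merge_type` over the schema inferred from every element of the local list, instead of inferring only from the first element and merging lazily until no NullType remains. Below is a minimal, self-contained sketch of why the reduce-based merge picks up fields the first element alone would miss; `infer_fields` and `merge_fields` are plain-Python stand-ins for the real `pyspark.sql.types._infer_schema` and `_merge_type` helpers, not their actual implementations.

```python
from functools import reduce

def infer_fields(row):
    # Stand-in for pyspark.sql.types._infer_schema: map field name -> Python type.
    return {name: type(value) for name, value in row.items()}

def merge_fields(a, b):
    # Stand-in for pyspark.sql.types._merge_type: union the fields of two schemas.
    # The real helper also reconciles conflicting or NullType fields.
    merged = dict(a)
    merged.update(b)
    return merged

data = [{"a": 1}, {"b": "coffee"}]

# Old behaviour: when the first element's schema has no NullType, only that
# element is inspected, so the "b" field never makes it into the schema.
first_only = infer_fields(data[0])                       # {'a': int}

# New behaviour: every element contributes via reduce(_merge_type, map(_infer_schema, data)).
merged = reduce(merge_fields, map(infer_fields, data))   # {'a': int, 'b': str}

print(first_only)   # {'a': <class 'int'>}
print(merged)       # {'a': <class 'int'>, 'b': <class 'str'>}
```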
11 changes: 11 additions & 0 deletions python/pyspark/sql/tests.py
@@ -353,6 +353,17 @@ def test_apply_schema_to_row(self):
         df3 = self.sqlCtx.createDataFrame(rdd, df.schema)
         self.assertEqual(10, df3.count())

+    def test_infer_schema_to_local(self):
+        input = [{"a": 1}, {"b": "coffee"}]
+        rdd = self.sc.parallelize(input)
+        df = self.sqlCtx.createDataFrame(input)
+        df2 = self.sqlCtx.createDataFrame(rdd, samplingRatio=1.0)
+        self.assertEqual(df.schema, df2.schema)
+
+        rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
+        df3 = self.sqlCtx.createDataFrame(rdd, df.schema)
+        self.assertEqual(10, df3.count())
+
     def test_serialize_nested_array_and_map(self):
         d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
         rdd = self.sc.parallelize(d)
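For reference, a hedged usage sketch of the behaviour the new test exercises. It assumes a live SparkContext/SQLContext of this API era; the names `sc` and `sqlCtx` are illustrative, not part of the patch.

```python
from pyspark import SparkContext
from pyspark.sql import SQLContext

sc = SparkContext(appName="infer-schema-sketch")
sqlCtx = SQLContext(sc)

# Creating a DataFrame from a list of dicts still emits the deprecation
# warning seen in context.py, but with this patch the inferred schema now
# covers fields from every element, not just the first one.
df = sqlCtx.createDataFrame([{"a": 1}, {"b": "coffee"}])
df.printSchema()
# Expected (roughly):
# root
#  |-- a: long (nullable = true)
#  |-- b: string (nullable = true)

sc.stop()
```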