
Commit d632706

Fix python style checking
1 parent 77112ef commit d632706

3 files changed: +10 -6 lines


python/pyspark/sql/readwriter.py

Lines changed: 2 additions & 0 deletions
@@ -239,6 +239,7 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
                     including tab and line feed characters) or not.
         :param lineSep: defines the line separator that should be used for parsing. If None is
                         set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
+
         >>> df1 = spark.read.json('python/test_support/sql/people.json')
         >>> df1.dtypes
         [('age', 'bigint'), ('name', 'string')]
@@ -774,6 +775,7 @@ def json(self, path, mode=None, compression=None, dateFormat=None, timestampForm
                             default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
         :param lineSep: defines the line separator that should be used for writing. If None is
                         set, it uses the default value, ``\\n``.
+
         >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
         """
         self.mode(mode)
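For reference, the ``lineSep`` option documented above can be exercised from PySpark roughly as follows. This is a minimal sketch, not part of the commit; the SparkSession setup, the file paths, and the choice of ``","`` as a separator are illustrative assumptions.

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()

    # Reading: when lineSep is None the reader covers \r, \r\n and \n;
    # an explicit separator such as "," can be passed instead (illustrative path).
    df = spark.read.json("/tmp/people.json", lineSep=",")

    # Writing: when lineSep is None the writer defaults to \n.
    df.write.json("/tmp/people_out", lineSep="\n")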

python/pyspark/sql/streaming.py

Lines changed: 2 additions & 0 deletions
@@ -470,6 +470,8 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
         :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control
                                           characters (ASCII characters with value less than 32,
                                           including tab and line feed characters) or not.
+        :param lineSep: defines the line separator that should be used for parsing. If None is
+                        set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.

         >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
         >>> json_sdf.isStreaming
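The streaming reader documents the same option. A minimal sketch, reusing the ``spark`` session from the previous sketch; the schema and input directory are illustrative assumptions, not part of this commit.

    from pyspark.sql.types import StructType, StructField, StringType

    schema = StructType([StructField("name", StringType())])

    # Streaming JSON source; with lineSep unset it splits records on \r, \r\n and \n.
    json_sdf = spark.readStream.json("/tmp/json_input", schema=schema, lineSep="\n")
    assert json_sdf.isStreaming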

python/pyspark/sql/tests.py

Lines changed: 6 additions & 6 deletions
@@ -670,6 +670,12 @@ def test_linesep_text(self):
         finally:
             shutil.rmtree(tpath)

+    def test_multiline_json(self):
+        people1 = self.spark.read.json("python/test_support/sql/people.json")
+        people_array = self.spark.read.json("python/test_support/sql/people_array.json",
+                                            multiLine=True)
+        self.assertEqual(people1.collect(), people_array.collect())
+
     def test_linesep_json(self):
         df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
         expected = [Row(_corrupt_record=None, name=u'Michael'),
@@ -687,12 +693,6 @@ def test_linesep_json(self):
         finally:
             shutil.rmtree(tpath)

-    def test_multiline_json(self):
-        people1 = self.spark.read.json("python/test_support/sql/people.json")
-        people_array = self.spark.read.json("python/test_support/sql/people_array.json",
-                                            multiLine=True)
-        self.assertEqual(people1.collect(), people_array.collect())
-
     def test_multiline_csv(self):
         ages_newlines = self.spark.read.csv(
             "python/test_support/sql/ages_newlines.csv", multiLine=True)
