@@ -832,8 +832,7 @@ def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPar
         """
         if properties is None:
             properties = dict()
-        jprop = JavaClass("java.util.Properties",
-                          self._spark._sc._gateway._gateway_client)()
+        jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
         for k in properties:
             jprop.setProperty(k, properties[k])
         if column is not None:
@@ -845,8 +844,7 @@ def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPar
                                                int(numPartitions), jprop))
         if predicates is not None:
             gateway = self._spark._sc._gateway
-            jpredicates = utils.toJArray(
-                gateway, gateway.jvm.java.lang.String, predicates)
+            jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
             return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
         return self._df(self._jreader.jdbc(url, table, jprop))

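For context, a minimal sketch of the three reader paths this hunk touches: the plain call, the column-partitioned call, and the predicate call. The JDBC URL, table name, and credentials below are placeholders, not values from this patch:

    props = {"user": "spark", "password": "secret"}

    # Single-partition read.
    df = spark.read.jdbc("jdbc:postgresql://db:5432/mydb", "logs", properties=props)

    # Column-partitioned read: exercises the `column is not None` branch.
    df = spark.read.jdbc("jdbc:postgresql://db:5432/mydb", "logs",
                         column="id", lowerBound=0, upperBound=1000000,
                         numPartitions=8, properties=props)

    # Predicate read: exercises the `predicates is not None` branch,
    # producing one partition per WHERE clause.
    df = spark.read.jdbc("jdbc:postgresql://db:5432/mydb", "logs",
                         predicates=["year = 2017", "year = 2018"], properties=props)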
@@ -859,7 +857,6 @@ class DataFrameWriter(OptionUtils):
 
     .. versionadded:: 1.4
     """
-
     def __init__(self, df):
         self._df = df
         self._spark = df.sql_ctx
@@ -1001,21 +998,18 @@ def bucketBy(self, numBuckets, col, *cols):
         ...     .saveAsTable('bucketed_table'))
         """
         if not isinstance(numBuckets, int):
-            raise TypeError(
-                "numBuckets should be an int, got {0}.".format(type(numBuckets)))
+            raise TypeError("numBuckets should be an int, got {0}.".format(type(numBuckets)))
 
         if isinstance(col, (list, tuple)):
             if cols:
-                raise ValueError(
-                    "col is a {0} but cols are not empty".format(type(col)))
+                raise ValueError("col is a {0} but cols are not empty".format(type(col)))
 
             col, cols = col[0], col[1:]
 
         if not all(isinstance(c, str) for c in cols) or not(isinstance(col, str)):
             raise TypeError("all names should be `str`")
 
-        self._jwrite = self._jwrite.bucketBy(
-            numBuckets, col, _to_seq(self._spark._sc, cols))
+        self._jwrite = self._jwrite.bucketBy(numBuckets, col, _to_seq(self._spark._sc, cols))
         return self
 
     def sortBy(self, col, *cols):
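A hypothetical call hitting the validation above: numBuckets must be an int, and col may be a single name, several names, or a list/tuple (in which case cols must stay empty):

    (df.write.format("parquet")
        .bucketBy(4, "year", "month")    # or .bucketBy(4, ["year", "month"])
        .mode("overwrite")
        .saveAsTable("bucketed_table"))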
@@ -1040,8 +1034,7 @@ def sortBy(self, col, *cols):
         """
         if isinstance(col, (list, tuple)):
             if cols:
-                raise ValueError(
-                    "col is a {0} but cols are not empty".format(type(col)))
+                raise ValueError("col is a {0} but cols are not empty".format(type(col)))
 
             col, cols = col[0], col[1:]
 
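sortBy shares the same col/cols normalization; a hedged sketch (table name is a placeholder), noting that sortBy only takes effect together with bucketBy when writing to a table:

    (df.write.format("parquet")
        .bucketBy(4, "year")
        .sortBy(["month", "day"])    # list form: col becomes col[0], cols the rest
        .mode("overwrite")
        .saveAsTable("sorted_bucketed_table"))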
@@ -1423,8 +1416,7 @@ def jdbc(self, url, table, mode=None, properties=None):
         """
         if properties is None:
             properties = dict()
-        jprop = JavaClass("java.util.Properties",
-                          self._spark._sc._gateway._gateway_client)()
+        jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
         for k in properties:
             jprop.setProperty(k, properties[k])
         self.mode(mode)._jwrite.jdbc(url, table, jprop)
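The writer-side counterpart, mirroring the reader sketch above; the URL and connection properties are again placeholders:

    df.write.jdbc("jdbc:postgresql://db:5432/mydb", "logs_copy",
                  mode="append", properties={"user": "spark", "password": "secret"})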
@@ -1590,8 +1582,7 @@ def _test():
     globs['os'] = os
     globs['sc'] = sc
     globs['spark'] = spark
-    globs['df'] = spark.read.parquet(
-        'python/test_support/sql/parquet_partitioned')
+    globs['df'] = spark.read.parquet('python/test_support/sql/parquet_partitioned')
     (failure_count, test_count) = doctest.testmod(
         pyspark.sql.readwriter, globs=globs,
         optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
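The same doctest harness pattern in miniature, for a hypothetical module mymod: globs seeds the namespace the doctests run in, and the option flags relax output matching:

    import doctest
    import sys
    import mymod

    # Run mymod's doctests with `x` pre-bound in their namespace.
    failure_count, test_count = doctest.testmod(
        mymod, globs={"x": 42},
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    if failure_count:
        sys.exit(-1)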