Skip to content

Commit 2fd8b3b

Browse files
committed
style fix
1 parent f6bd4ec commit 2fd8b3b

File tree

3 files changed

+1
-13
lines changed

3 files changed

+1
-13
lines changed

python/pyspark/sql/functions.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -975,7 +975,7 @@ def date_format(date, format):
975975
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
976976
pattern letters of `datetime pattern`_. can be used.
977977
978-
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
978+
.. _datetime pattern: /docs/latest/sql-ref-datetime-pattern.html
979979
.. note:: Use when ever possible specialized functions like `year`. These benefit from a
980980
specialized implementation.
981981
@@ -1196,8 +1196,6 @@ def to_date(col, format=None):
11961196
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
11971197
is omitted. Equivalent to ``col.cast("date")``.
11981198
1199-
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
1200-
12011199
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
12021200
>>> df.select(to_date(df.t).alias('date')).collect()
12031201
[Row(date=datetime.date(1997, 2, 28))]
@@ -1221,8 +1219,6 @@ def to_timestamp(col, format=None):
12211219
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
12221220
is omitted. Equivalent to ``col.cast("timestamp")``.
12231221
1224-
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
1225-
12261222
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
12271223
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
12281224
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]

python/pyspark/sql/readwriter.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -490,8 +490,6 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non
490490
:param recursiveFileLookup: recursively scan a directory for files. Using this option
491491
disables `partition discovery`_.
492492
493-
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
494-
495493
>>> df = spark.read.csv('python/test_support/sql/ages.csv')
496494
>>> df.dtypes
497495
[('_c0', 'string'), ('_c1', 'string')]
@@ -865,8 +863,6 @@ def json(self, path, mode=None, compression=None, dateFormat=None, timestampForm
865863
:param ignoreNullFields: Whether to ignore null fields when generating JSON objects.
866864
If None is set, it uses the default value, ``true``.
867865
868-
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
869-
870866
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
871867
"""
872868
self.mode(mode)
@@ -981,8 +977,6 @@ def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=No
981977
:param lineSep: defines the line separator that should be used for writing. If None is
982978
set, it uses the default value, ``\\n``. Maximum length is 1 character.
983979
984-
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
985-
986980
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
987981
"""
988982
self.mode(mode)

python/pyspark/sql/streaming.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -725,8 +725,6 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non
725725
:param recursiveFileLookup: recursively scan a directory for files. Using this option
726726
disables `partition discovery`_.
727727
728-
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
729-
730728
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema)
731729
>>> csv_sdf.isStreaming
732730
True

0 commit comments

Comments (0)