diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py
index 9f8714fd8579..e089963a7fd5 100644
--- a/python/pyspark/sql/functions.py
+++ b/python/pyspark/sql/functions.py
@@ -1196,8 +1196,6 @@ def to_date(col, format=None):
     By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
     is omitted. Equivalent to ``col.cast("date")``.
 
-    .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
-
     >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
     >>> df.select(to_date(df.t).alias('date')).collect()
     [Row(date=datetime.date(1997, 2, 28))]
@@ -1221,8 +1219,6 @@ def to_timestamp(col, format=None):
     By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
     is omitted. Equivalent to ``col.cast("timestamp")``.
 
-    .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
-
     >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
     >>> df.select(to_timestamp(df.t).alias('dt')).collect()
     [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index e7ecb3ba9fc7..817978498fcf 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -253,7 +253,8 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
         :param recursiveFileLookup: recursively scan a directory for files. Using this option
                                     disables `partition discovery`_.
 
-        .. _partition discovery: /sql-data-sources-parquet.html#partition-discovery
+        .. _partition discovery:
+            https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery
         .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
 
         >>> df1 = spark.read.json('python/test_support/sql/people.json')
@@ -490,8 +491,6 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non
         :param recursiveFileLookup: recursively scan a directory for files. Using this option
                                     disables `partition discovery`_.
 
-        .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
-
         >>> df = spark.read.csv('python/test_support/sql/ages.csv')
         >>> df.dtypes
         [('_c0', 'string'), ('_c1', 'string')]
@@ -865,8 +864,6 @@ def json(self, path, mode=None, compression=None, dateFormat=None, timestampForm
         :param ignoreNullFields: Whether to ignore null fields when generating JSON objects.
                                  If None is set, it uses the default value, ``true``.
 
-        .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
-
         >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
         """
         self.mode(mode)
@@ -981,8 +978,6 @@ def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=No
         :param lineSep: defines the line separator that should be used for writing. If None is
                         set, it uses the default value, ``\\n``. Maximum length is 1 character.
 
-        .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
-
         >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
         """
         self.mode(mode)
diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py
index a83167882a8d..a5e86466579c 100644
--- a/python/pyspark/sql/streaming.py
+++ b/python/pyspark/sql/streaming.py
@@ -489,7 +489,8 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
         :param recursiveFileLookup: recursively scan a directory for files. Using this option
                                     disables `partition discovery`_.
 
-        .. _partition discovery: /sql-data-sources-parquet.html#partition-discovery
+        .. _partition discovery:
+            https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery
         .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
 
         >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
@@ -725,8 +726,6 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non
         :param recursiveFileLookup: recursively scan a directory for files. Using this option
                                     disables `partition discovery`_.
 
-        .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
-
         >>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema)
         >>> csv_sdf.isStreaming
         True