@@ -465,7 +465,8 @@ def json(self, path, mode=None, compression=None):
465465 * ``ignore``: Silently ignore this operation if data already exists.
466466 * ``error`` (default case): Throw an exception if data already exists.
467467 :param compression: compression codec to use when saving to file. This can be one of the
468- known case-insensitive shorten names (bzip2, gzip, lz4, and snappy).
468+        known case-insensitive short names (none, bzip2, gzip, lz4,
469+ snappy and deflate).
469470
470471 >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
471472 """
@@ -487,8 +488,8 @@ def parquet(self, path, mode=None, partitionBy=None, compression=None):
487488 * ``error`` (default case): Throw an exception if data already exists.
488489 :param partitionBy: names of partitioning columns
489490 :param compression: compression codec to use when saving to file. This can be one of the
490- known case-insensitive shorten names (uncompressed , snappy, gzip, and
491- lzo). This will overwrite ``spark.sql.parquet.compression.codec``.
491+        known case-insensitive short names (none, snappy, gzip, and lzo).
492+ This will overwrite ``spark.sql.parquet.compression.codec``.
492493
493494 >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
494495 """
@@ -505,7 +506,8 @@ def text(self, path, compression=None):
505506
506507 :param path: the path in any Hadoop supported file system
507508 :param compression: compression codec to use when saving to file. This can be one of the
508- known case-insensitive shorten names (bzip2, gzip, lz4, and snappy).
509+        known case-insensitive short names (none, bzip2, gzip, lz4,
510+ snappy and deflate).
509511
510512 The DataFrame must have only one column that is of string type.
511513 Each row becomes a new line in the output file.
@@ -527,7 +529,8 @@ def csv(self, path, mode=None, compression=None):
527529 * ``error`` (default case): Throw an exception if data already exists.
528530
529531 :param compression: compression codec to use when saving to file. This can be one of the
530- known case-insensitive shorten names (bzip2, gzip, lz4, and snappy).
532+        known case-insensitive short names (none, bzip2, gzip, lz4,
533+ snappy and deflate).
531534
532535 >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
533536 """
@@ -552,8 +555,8 @@ def orc(self, path, mode=None, partitionBy=None, compression=None):
552555 * ``error`` (default case): Throw an exception if data already exists.
553556 :param partitionBy: names of partitioning columns
554557 :param compression: compression codec to use when saving to file. This can be one of the
555- known case-insensitive shorten names (uncompressed , snappy, zlib, and
556- lzo). This will overwrite ``orc.compress``.
558+        known case-insensitive short names (none, snappy, zlib, and lzo).
559+ This will overwrite ``orc.compress``.
557560
558561 >>> orc_df = hiveContext.read.orc('python/test_support/sql/orc_partitioned')
559562 >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
0 commit comments