@@ -61,7 +61,7 @@ class SparkContext(object):
 
 """
 Main entry point for Spark functionality. A SparkContext represents the
-connection to a Spark cluster, and can be used to create L{RDD} and
+connection to a Spark cluster, and can be used to create :class:`RDD` and
 broadcast variables on that cluster.
 
 .. note:: Only one :class:`SparkContext` should be active per JVM. You must `stop()`
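
To make the entry-point description above concrete, here is a minimal, illustrative sketch (not part of the patch) that creates an RDD and a broadcast variable from one context; reusing an active context via `getOrCreate` and the sample data are assumptions.

    from pyspark import SparkContext

    # Reuse an active context if there is one; otherwise this assumes a master
    # has already been configured (e.g. through spark-submit or spark-defaults).
    sc = SparkContext.getOrCreate()

    rdd = sc.parallelize([1, 2, 3, 4])           # an RDD built from driver data
    lookup = sc.broadcast({1: "a", 2: "b"})      # a read-only broadcast variable

    # Tasks read the broadcast value through .value; unknown keys map to "?".
    print(rdd.map(lambda x: lookup.value.get(x, "?")).collect())
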
@@ -86,7 +86,7 @@ def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
 gateway=None, jsc=None, profiler_cls=BasicProfiler):
 """
 Create a new SparkContext. At least the master and app name should be set,
-either through the named parameters here or through C{conf}.
+either through the named parameters here or through `conf`.
 
 :param master: Cluster URL to connect to
 (e.g. mesos://host:port, spark://host:port, local[4]).
@@ -102,7 +102,7 @@ def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
 the batch size based on object sizes, or -1 to use an unlimited
 batch size
 :param serializer: The serializer for RDDs.
-:param conf: A L{SparkConf} object setting Spark properties.
+:param conf: A :class:`SparkConf` object setting Spark properties.
 :param gateway: Use an existing gateway and JVM, otherwise a new JVM
 will be instantiated.
 :param jsc: The JavaSparkContext instance (optional).
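
For reference, a hedged sketch of how the `conf` parameter above is typically used; the app name and `local[4]` master below are placeholders, not values from the patch.

    from pyspark import SparkConf, SparkContext

    # Build the configuration object and pass it instead of the named
    # master/appName parameters.
    conf = SparkConf().setMaster("local[4]").setAppName("conf-example")
    sc = SparkContext(conf=conf)

    print(sc.master, sc.appName)
    sc.stop()   # only one SparkContext may be active per JVM
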
@@ -576,7 +576,7 @@ def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
 
 def pickleFile(self, name, minPartitions=None):
 """
-Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
+Load an RDD previously saved using :meth:`RDD.saveAsPickleFile` method.
 
 >>> tmpFile = NamedTemporaryFile(delete=True)
 >>> tmpFile.close()
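
The doctest above is truncated in this hunk; as a rough stand-alone sketch of the same round trip (the temporary path and batch sizes are assumptions):

    import shutil, tempfile
    from pyspark import SparkContext

    sc = SparkContext.getOrCreate()  # assumes a master is already configured
    path = tempfile.mkdtemp()
    shutil.rmtree(path)              # saveAsPickleFile expects a fresh directory

    # Save pickled partitions, then load them back with pickleFile.
    sc.parallelize(range(10)).saveAsPickleFile(path, 3)
    print(sorted(sc.pickleFile(path, 3).collect()))
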
@@ -624,20 +624,24 @@ def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
 as `utf-8`), which is faster and smaller than unicode. (Added in
 Spark 1.2)
 
-For example, if you have the following files::
+For example, if you have the following files:
 
-hdfs://a-hdfs-path/part-00000
-hdfs://a-hdfs-path/part-00001
-...
-hdfs://a-hdfs-path/part-nnnnn
+.. code-block:: text
 
-Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
-then C{rdd} contains::
+hdfs://a-hdfs-path/part-00000
+hdfs://a-hdfs-path/part-00001
+...
+hdfs://a-hdfs-path/part-nnnnn
+
+Do ``rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")``,
+then ``rdd`` contains:
 
-(a-hdfs-path/part-00000, its content)
-(a-hdfs-path/part-00001, its content)
-...
-(a-hdfs-path/part-nnnnn, its content)
+.. code-block:: text
+
+(a-hdfs-path/part-00000, its content)
+(a-hdfs-path/part-00001, its content)
+...
+(a-hdfs-path/part-nnnnn, its content)
 
 .. note:: Small files are preferred, as each file will be loaded
 fully in memory.
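
A small local-filesystem sketch of the behaviour the docstring describes; the temporary directory stands in for the hdfs://a-hdfs-path example and is an assumption.

    import os, tempfile
    from pyspark import SparkContext

    sc = SparkContext.getOrCreate()  # assumes a master is already configured

    # Write two small files into a temporary directory playing the role of
    # the hdfs://a-hdfs-path directory from the docstring.
    d = tempfile.mkdtemp()
    for name, body in [("part-00000", "first file"), ("part-00001", "second file")]:
        with open(os.path.join(d, name), "w") as f:
            f.write(body)

    # Each record is a (full path, whole file content) pair.
    for path, content in sorted(sc.wholeTextFiles(d).collect()):
        print(path, "->", content)
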
@@ -705,7 +709,7 @@ def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
 and value Writable classes
 2. Serialization is attempted via Pyrolite pickling
 3. If this fails, the fallback is to call 'toString' on each key and value
-4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
+4. :class:`PickleSerializer` is used to deserialize pickled objects on the Python side
 
 :param path: path to sequencefile
 :param keyClass: fully qualified classname of key Writable class
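
A rough round-trip sketch of the conversion steps listed above, assuming the Hadoop classes bundled with Spark are available; the temporary path is made up, and the key/value Writable classes could also be given explicitly (e.g. org.apache.hadoop.io.Text).

    import shutil, tempfile
    from pyspark import SparkContext

    sc = SparkContext.getOrCreate()  # assumes a master is already configured
    path = tempfile.mkdtemp()
    shutil.rmtree(path)              # the output directory must not exist yet

    # Write a pair RDD as a Hadoop SequenceFile, then read it back; the
    # Writable classes are inferred from the file here.
    sc.parallelize([(1, "a"), (2, "b"), (3, "c")]).saveAsSequenceFile(path)
    print(sorted(sc.sequenceFile(path).collect()))
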
@@ -872,17 +876,16 @@ def union(self, rdds):
 
 def broadcast(self, value):
 """
-Broadcast a read-only variable to the cluster, returning a
-L{Broadcast<pyspark.broadcast.Broadcast>}
+Broadcast a read-only variable to the cluster, returning a :class:`Broadcast`
 object for reading it in distributed functions. The variable will
 be sent to each cluster only once.
 """
 return Broadcast(self, value, self._pickled_broadcast_vars)
 
 def accumulator(self, value, accum_param=None):
 """
-Create an L{Accumulator} with the given initial value, using a given
-L{AccumulatorParam} helper object to define how to add values of the
+Create an :class:`Accumulator` with the given initial value, using a given
+:class:`AccumulatorParam` helper object to define how to add values of the
 data type if provided. Default AccumulatorParams are used for integers
 and floating-point numbers if you do not provide one. For other types,
 a custom AccumulatorParam can be used.
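
A combined sketch of the two helpers documented in this hunk; the lookup table and word data are invented for illustration.

    from pyspark import SparkContext

    sc = SparkContext.getOrCreate()  # assumes a master is already configured

    # A broadcast variable ships one read-only copy of the lookup table to
    # each executor; an accumulator aggregates values written by tasks.
    lookup = sc.broadcast({"a": 1, "b": 2})
    hits = sc.accumulator(0)

    def score(word):
        if word in lookup.value:
            hits.add(1)              # accumulators support add() / +=
        return lookup.value.get(word, 0)

    total = sc.parallelize(["a", "b", "c", "a"]).map(score).sum()
    print(total, hits.value)         # read accumulator values on the driver only
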
@@ -902,12 +905,11 @@ def accumulator(self, value, accum_param=None):
 def addFile(self, path, recursive=False):
 """
 Add a file to be downloaded with this Spark job on every node.
-The C{path} passed can be either a local file, a file in HDFS
+The `path` passed can be either a local file, a file in HDFS
 (or other Hadoop-supported filesystems), or an HTTP, HTTPS or
 FTP URI.
 
-To access the file in Spark jobs, use
-L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
+To access the file in Spark jobs, use :meth:`SparkFiles.get` with the
 filename to find its download location.
 
 A directory can be given if the recursive option is set to True.
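
A minimal sketch of the addFile / SparkFiles.get pattern referenced above, assuming a throwaway local file named numbers.txt.

    import os, tempfile
    from pyspark import SparkContext, SparkFiles

    sc = SparkContext.getOrCreate()  # assumes a master is already configured

    # Create a small local file and register it for distribution to every node.
    path = os.path.join(tempfile.mkdtemp(), "numbers.txt")
    with open(path, "w") as f:
        f.write("100")

    sc.addFile(path)

    def add_from_file(x):
        # On each executor, SparkFiles.get resolves the local download location.
        with open(SparkFiles.get("numbers.txt")) as f:
            return x + int(f.read())

    print(sc.parallelize([1, 2, 3]).map(add_from_file).collect())
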
@@ -932,7 +934,7 @@ def addFile(self, path, recursive=False):
 
 def addPyFile(self, path):
 """
 Add a .py or .zip dependency for all tasks to be executed on this
-SparkContext in the future. The C{path} passed can be either a local
+SparkContext in the future. The `path` passed can be either a local
 file, a file in HDFS (or other Hadoop-supported filesystems), or an
 HTTP, HTTPS or FTP URI.
 
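
A hedged sketch of shipping a dependency with addPyFile; the helper_mod module and its triple function are invented for the example.

    import os, tempfile
    from pyspark import SparkContext

    sc = SparkContext.getOrCreate()  # assumes a master is already configured

    # Write a throwaway helper module and ship it so tasks can import it.
    mod_path = os.path.join(tempfile.mkdtemp(), "helper_mod.py")
    with open(mod_path, "w") as f:
        f.write("def triple(x):\n    return 3 * x\n")

    sc.addPyFile(mod_path)

    def use_helper(x):
        import helper_mod           # resolvable on executors after addPyFile
        return helper_mod.triple(x)

    print(sc.parallelize([1, 2, 3]).map(use_helper).collect())
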
@@ -978,7 +980,7 @@ def setJobGroup(self, groupId, description, interruptOnCancel=False):
 Application programmers can use this method to group all those jobs together and give a
 group description. Once set, the Spark web UI will associate such jobs with this group.
 
-The application can use L{SparkContext.cancelJobGroup} to cancel all
+The application can use :meth:`SparkContext.cancelJobGroup` to cancel all
 running jobs in this group.
 
 >>> import threading
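
The docstring's doctest continues beyond this hunk; condensed into a plain sketch, the pattern looks roughly like the following (the group id, description, and timings are assumptions).

    import threading, time
    from pyspark import SparkContext

    sc = SparkContext.getOrCreate()  # assumes a master is already configured

    def slow_job():
        # Tag every job started from this thread with the group "sketch_group".
        sc.setJobGroup("sketch_group", "illustrative long-running job")
        try:
            sc.parallelize(range(4)).map(lambda x: time.sleep(10) or x).count()
        except Exception:
            print("job was cancelled")

    t = threading.Thread(target=slow_job)
    t.start()
    time.sleep(3)                        # give the job time to start
    sc.cancelJobGroup("sketch_group")    # cancels all running jobs in that group
    t.join()
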
@@ -1023,7 +1025,7 @@ def setLocalProperty(self, key, value):
 def getLocalProperty(self, key):
 """
 Get a local property set in this thread, or null if it is missing. See
-L{setLocalProperty}
+:meth:`setLocalProperty`.
 """
 return self._jsc.getLocalProperty(key)
 
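
A small sketch pairing setLocalProperty with getLocalProperty; the spark.scheduler.pool key is just one common use, and the pool name here is made up.

    from pyspark import SparkContext

    sc = SparkContext.getOrCreate()  # assumes a master is already configured

    # Local properties are per-thread and are propagated to the jobs that
    # this thread submits; a common use is the scheduler pool name.
    sc.setLocalProperty("spark.scheduler.pool", "reporting")
    print(sc.getLocalProperty("spark.scheduler.pool"))   # -> 'reporting'
    print(sc.getLocalProperty("not.set"))                # -> None
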
@@ -1041,7 +1043,7 @@ def sparkUser(self):
 
 def cancelJobGroup(self, groupId):
 """
-Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
+Cancel active jobs for the specified group. See :meth:`SparkContext.setJobGroup`
 for more information.
 """
 self._jsc.sc().cancelJobGroup(groupId)