From c277a2cd755df908b8d0adc9863a3a30eb94784c Mon Sep 17 00:00:00 2001
From: Yanbo Liang
Date: Sun, 18 Sep 2016 07:28:01 -0700
Subject: [PATCH 1/2] PySpark SparkContext.addFile supports adding files
 recursively

---
 .../spark/api/java/JavaSparkContext.scala          | 13 +++++++++++++
 python/pyspark/context.py                          |  7 +++++--
 python/pyspark/tests.py                            | 19 ++++++++++++++-----
 python/test_support/{ => hello}/hello.txt          |  0
 .../hello/sub_hello/sub_hello.txt                  |  1 +
 5 files changed, 33 insertions(+), 7 deletions(-)
 rename python/test_support/{ => hello}/hello.txt (100%)
 create mode 100644 python/test_support/hello/sub_hello/sub_hello.txt

diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
index 131f36f5470f..4e50c2686dd5 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
@@ -669,6 +669,19 @@ class JavaSparkContext(val sc: SparkContext)
     sc.addFile(path)
   }
 
+  /**
+   * Add a file to be downloaded with this Spark job on every node.
+   * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
+   * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
+   * use `SparkFiles.get(fileName)` to find its download location.
+   *
+   * A directory can be given if the recursive option is set to true.
+   * Currently directories are only supported for Hadoop-supported filesystems.
+   */
+  def addFile(path: String, recursive: Boolean): Unit = {
+    sc.addFile(path, recursive)
+  }
+
   /**
    * Adds a JAR dependency for all tasks to be executed on this SparkContext in the future.
    * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
diff --git a/python/pyspark/context.py b/python/pyspark/context.py
index 2744bb9ec04e..96d90f7514c0 100644
--- a/python/pyspark/context.py
+++ b/python/pyspark/context.py
@@ -762,7 +762,7 @@ def accumulator(self, value, accum_param=None):
         SparkContext._next_accum_id += 1
         return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
 
-    def addFile(self, path):
+    def addFile(self, path, recursive=False):
         """
         Add a file to be downloaded with this Spark job on every node.
         The C{path} passed can be either a local file, a file in HDFS
@@ -773,6 +773,9 @@ def addFile(self, path):
         L{SparkFiles.get(fileName)} with the filename to find its
         download location.
 
+        A directory can be given if the recursive option is set to True.
+        Currently directories are only supported for Hadoop-supported filesystems.
+
         >>> from pyspark import SparkFiles
         >>> path = os.path.join(tempdir, "test.txt")
         >>> with open(path, "w") as testFile:
@@ -785,7 +788,7 @@ def addFile(self, path):
         >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
         [100, 200, 300, 400]
         """
-        self._jsc.sc().addFile(path)
+        self._jsc.sc().addFile(path, recursive)
 
     def addPyFile(self, path):
         """
diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index 0a029b6e7441..888bd8999988 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -409,13 +409,22 @@ def func(x):
         self.assertEqual("Hello World!", res)
 
     def test_add_file_locally(self):
-        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
+        path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
         self.sc.addFile(path)
         download_path = SparkFiles.get("hello.txt")
         self.assertNotEqual(path, download_path)
         with open(download_path) as test_file:
             self.assertEqual("Hello World!\n", test_file.readline())
 
+        path = os.path.join(SPARK_HOME, "python/test_support/hello")
+        self.sc.addFile(path, True)
+        download_path = SparkFiles.get("hello")
+        self.assertNotEqual(path, download_path)
+        with open(download_path + "/hello.txt") as test_file:
+            self.assertEqual("Hello World!\n", test_file.readline())
+        with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
+            self.assertEqual("Sub Hello World!\n", test_file.readline())
+
     def test_add_py_file_locally(self):
         # To ensure that we're actually testing addPyFile's effects, check that
         # this fails due to `userlibrary` not being on the Python path:
@@ -514,7 +523,7 @@ def test_transforming_pickle_file(self):
 
     def test_cartesian_on_textfile(self):
         # Regression test for
-        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
+        path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
         a = self.sc.textFile(path)
         result = a.cartesian(a).collect()
         (x, y) = result[0]
@@ -751,7 +760,7 @@ def test_zip_with_different_serializers(self):
         b = b._reserialize(MarshalSerializer())
         self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
         # regression test for SPARK-4841
-        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
+        path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
         t = self.sc.textFile(path)
         cnt = t.count()
         self.assertEqual(cnt, t.zip(t).count())
@@ -1214,7 +1223,7 @@ def test_oldhadoop(self):
         ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
         self.assertEqual(ints, ei)
 
-        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
+        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
         oldconf = {"mapred.input.dir": hellopath}
         hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
                                   "org.apache.hadoop.io.LongWritable",
@@ -1233,7 +1242,7 @@ def test_newhadoop(self):
         ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
         self.assertEqual(ints, ei)
 
-        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
+        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
         newconf = {"mapred.input.dir": hellopath}
         hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
                                         "org.apache.hadoop.io.LongWritable",
diff --git a/python/test_support/hello.txt b/python/test_support/hello/hello.txt
similarity index 100%
rename from python/test_support/hello.txt
rename to python/test_support/hello/hello.txt
diff --git a/python/test_support/hello/sub_hello/sub_hello.txt b/python/test_support/hello/sub_hello/sub_hello.txt
new file mode 100644
index 000000000000..ce2d435b8c45
--- /dev/null
+++ b/python/test_support/hello/sub_hello/sub_hello.txt
@@ -0,0 +1 @@
+Sub Hello World!

From 6174f0d77f1fafc8e7499a037a2185cf95e27957 Mon Sep 17 00:00:00 2001
From: Yanbo Liang
Date: Tue, 20 Sep 2016 06:22:40 -0700
Subject: [PATCH 2/2] Make the recursive test a separate test case.

---
 python/pyspark/tests.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index 888bd8999988..b0756911bfc1 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -416,6 +416,7 @@ def test_add_file_locally(self):
         with open(download_path) as test_file:
             self.assertEqual("Hello World!\n", test_file.readline())
 
+    def test_add_file_recursively_locally(self):
         path = os.path.join(SPARK_HOME, "python/test_support/hello")
         self.sc.addFile(path, True)
         download_path = SparkFiles.get("hello")
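
Usage note (not part of either patch): a minimal sketch of how the new
recursive option can be exercised from PySpark once these patches are
applied. The app name, the "mydir" directory, and the "data.txt" file are
illustrative assumptions, not names taken from the patches.

    import os
    from pyspark import SparkContext, SparkFiles

    sc = SparkContext(appName="addFileRecursiveDemo")

    # Ship a whole directory to every node; recursive=True is required
    # when the path is a directory (hypothetical layout: mydir/data.txt).
    sc.addFile("mydir", recursive=True)

    def first_lines(_):
        # On an executor, resolve the downloaded copy of the directory by
        # its name, then open files relative to the returned root path.
        root = SparkFiles.get("mydir")
        with open(os.path.join(root, "data.txt")) as f:
            yield f.readline()

    print(sc.parallelize([1], 1).mapPartitions(first_lines).collect())

As in test_add_file_recursively_locally above, SparkFiles.get is called with
the directory name, and individual files are resolved relative to the path
it returns.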