Skip to content

Commit 271e152

Browse files
committed
[SPARK-23853][PYSPARK][TEST] Run Hive-related PySpark tests only for -Phive
1 parent bd14da6 commit 271e152

File tree

2 files changed

+21
-1
lines changed

2 files changed

+21
-1
lines changed

python/pyspark/sql/readwriter.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -976,7 +976,7 @@ def _test():
976976
globs = pyspark.sql.readwriter.__dict__.copy()
977977
sc = SparkContext('local[4]', 'PythonTest')
978978
try:
979-
spark = SparkSession.builder.enableHiveSupport().getOrCreate()
979+
spark = SparkSession.builder.getOrCreate()
980980
except py4j.protocol.Py4JError:
981981
spark = SparkSession(sc)
982982

python/pyspark/sql/tests.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3036,6 +3036,26 @@ def test_json_sampling_ratio(self):
30363036

30373037
class HiveSparkSubmitTests(SparkSubmitTests):
30383038

3039+
@classmethod
3040+
def setUpClass(cls):
3041+
# get a SparkContext to check for availability of Hive
3042+
sc = SparkContext('local[4]', cls.__name__)
3043+
cls.hive_available = True
3044+
try:
3045+
sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
3046+
except py4j.protocol.Py4JError:
3047+
cls.hive_available = False
3048+
except TypeError:
3049+
cls.hive_available = False
3050+
finally:
3051+
# we don't need this SparkContext for the test
3052+
sc.stop()
3053+
3054+
def setUp(self):
3055+
super(HiveSparkSubmitTests, self).setUp()
3056+
if not self.hive_available:
3057+
self.skipTest("Hive is not available.")
3058+
30393059
def test_hivecontext(self):
30403060
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
30413061
# It sets a metastore url and checks if there is a derby dir created by

0 commit comments

Comments (0)