Commit 201df0d

[MINOR][PYTHON][TESTS] Move a test out of parity tests
### What changes were proposed in this pull request?
Move a test out of the parity tests.

### Why are the changes needed?
The test is not run against Spark Classic, so it is not a parity test.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
CI.

### Was this patch authored or co-authored using generative AI tooling?
No.

Closes #46914 from zhengruifeng/move_a_non_parity_test.

Authored-by: Ruifeng Zheng <[email protected]>
Signed-off-by: Ruifeng Zheng <[email protected]>
1 parent: 8911d59 · commit: 201df0d

2 files changed, +23 −24 lines

python/pyspark/sql/tests/connect/test_connect_dataframe_property.py

Lines changed: 23 additions & 0 deletions
@@ -37,6 +37,29 @@
 
 
 class SparkConnectDataFramePropertyTests(SparkConnectSQLTestCase):
+    def test_cached_property_is_copied(self):
+        schema = StructType(
+            [
+                StructField("id", IntegerType(), True),
+                StructField("name", StringType(), True),
+                StructField("age", IntegerType(), True),
+                StructField("city", StringType(), True),
+            ]
+        )
+        # Create some dummy data
+        data = [
+            (1, "Alice", 30, "New York"),
+            (2, "Bob", 25, "San Francisco"),
+            (3, "Cathy", 29, "Los Angeles"),
+            (4, "David", 35, "Chicago"),
+        ]
+        df = self.spark.createDataFrame(data, schema)
+        df_columns = df.columns
+        assert len(df.columns) == 4
+        for col in ["id", "name"]:
+            df_columns.remove(col)
+        assert len(df.columns) == 4
+
     def test_cached_schema_to(self):
         cdf = self.connect.read.table(self.tbl_name)
         sdf = self.spark.read.table(self.tbl_name)

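For context, the moved test asserts that mutating the list returned by df.columns does not affect later reads of df.columns, i.e. the cached column metadata is handed out as a copy. The snippet below is a minimal, hypothetical sketch of that copy-on-access pattern using functools.cached_property; the class and attribute names are illustrative only and are not Spark's actual implementation.

    from functools import cached_property


    class Relation:
        """Toy stand-in for a DataFrame-like object with a cached column list (not Spark code)."""

        @cached_property
        def _columns(self):
            # Expensive schema resolution would happen here; computed once, then cached.
            return ["id", "name", "age", "city"]

        @property
        def columns(self):
            # Hand callers a copy so mutating the returned list cannot corrupt the cache.
            return list(self._columns)


    rel = Relation()
    cols = rel.columns
    cols.remove("id")
    cols.remove("name")
    assert len(rel.columns) == 4  # the cached value is untouched

The test exercises the same behavior through self.spark.createDataFrame and df.columns, which is why it lives with the Spark Connect property tests rather than the parity suite.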
python/pyspark/sql/tests/connect/test_parity_dataframe.py

Lines changed: 0 additions & 24 deletions
@@ -19,37 +19,13 @@
 
 from pyspark.sql.tests.test_dataframe import DataFrameTestsMixin
 from pyspark.testing.connectutils import ReusedConnectTestCase
-from pyspark.sql.types import StructType, StructField, IntegerType, StringType
 
 
 class DataFrameParityTests(DataFrameTestsMixin, ReusedConnectTestCase):
     def test_help_command(self):
         df = self.spark.createDataFrame(data=[{"foo": "bar"}, {"foo": "baz"}])
         super().check_help_command(df)
 
-    def test_cached_property_is_copied(self):
-        schema = StructType(
-            [
-                StructField("id", IntegerType(), True),
-                StructField("name", StringType(), True),
-                StructField("age", IntegerType(), True),
-                StructField("city", StringType(), True),
-            ]
-        )
-        # Create some dummy data
-        data = [
-            (1, "Alice", 30, "New York"),
-            (2, "Bob", 25, "San Francisco"),
-            (3, "Cathy", 29, "Los Angeles"),
-            (4, "David", 35, "Chicago"),
-        ]
-        df = self.spark.createDataFrame(data, schema)
-        df_columns = df.columns
-        assert len(df.columns) == 4
-        for col in ["id", "name"]:
-            df_columns.remove(col)
-        assert len(df.columns) == 4
-
     @unittest.skip("Spark Connect does not support RDD but the tests depend on them.")
     def test_toDF_with_schema_string(self):
         super().test_toDF_with_schema_string()
