
Commit 0ddca9d

Refactored the test code and moved it to the respective folders
1 parent c576110 commit 0ddca9d


46 files changed: +588 −633 lines

databricks_sql_connector_core/src/databricks_sql_connector_core/sql/client.py

Lines changed: 0 additions & 3 deletions
@@ -784,8 +784,6 @@ def execute(
                 parameters=prepared_params,
             )
 
-        # print("Line 781")
-        # print(execute_response)
         self.active_result_set = ResultSet(
             self.connection,
             execute_response,
@@ -1141,7 +1139,6 @@ def _fill_results_buffer(self):
     def _convert_columnar_table(self, table):
         column_names = [c[0] for c in self.description]
         ResultRow = Row(*column_names)
-        # print("Table\n",table)
         result = []
         for row_index in range(len(table[0])):
             curr_row = []
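
For context on the second hunk: _convert_columnar_table walks a column-major table and emits one row object per index. A minimal, self-contained sketch of that conversion, using namedtuple as a stand-in for the connector's Row helper; the sample table and description values are invented:

from collections import namedtuple

def convert_columnar_table(table, description):
    # table is column-major: table[i] holds every value of column i.
    # description is a DB-API cursor description; field 0 is the name.
    column_names = [col[0] for col in description]
    ResultRow = namedtuple("ResultRow", column_names)
    result = []
    for row_index in range(len(table[0])):  # all columns share one length
        result.append(ResultRow(*(column[row_index] for column in table)))
    return result

# Hypothetical two-column result with three rows.
rows = convert_columnar_table([[1, 2, 3], ["a", "b", "c"]],
                              [("id",), ("name",)])
assert rows[1].name == "b"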

databricks_sql_connector_core/src/databricks_sql_connector_core/sql/thrift_backend.py

Lines changed: 2 additions & 13 deletions
@@ -743,7 +743,6 @@ def _results_message_to_execute_response(self, resp, operation_state):
         else:
             t_result_set_metadata_resp = self._get_metadata_resp(resp.operationHandle)
 
-        # print(f"Line 739 - {t_result_set_metadata_resp.resultFormat}")
         if t_result_set_metadata_resp.resultFormat not in [
             ttypes.TSparkRowSetType.ARROW_BASED_SET,
             ttypes.TSparkRowSetType.COLUMN_BASED_SET,
@@ -880,18 +879,8 @@ def execute_command(
                 # We want to receive proper Timestamp arrow types.
                 "spark.thriftserver.arrowBasedRowSet.timestampAsString": "false"
             },
-            # useArrowNativeTypes=spark_arrow_types,
-            # canReadArrowResult=True,
-            # # canDecompressLZ4Result=lz4_compression,
-            # canDecompressLZ4Result=False,
-            # canDownloadResult=False,
-            # # confOverlay={
-            # #     # We want to receive proper Timestamp arrow types.
-            # #     "spark.thriftserver.arrowBasedRowSet.timestampAsString": "false"
-            # # },
-            # resultDataFormat=TDBSqlResultFormat(None,None,True),
-            # # useArrowNativeTypes=spark_arrow_types,
-            parameters=parameters,
+            useArrowNativeTypes=spark_arrow_types,
+            parameters=parameters,
         )
         resp = self.make_request(self._client.ExecuteStatement, req)
         return self._handle_execute_response(resp, cursor)
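
The second hunk collapses the commented-out experiments so the request keeps only the fields that matter. A hedged sketch of what survives; build_execute_kwargs is a hypothetical helper for illustration, not part of the connector:

def build_execute_kwargs(spark_arrow_types, parameters):
    # Only these survive the cleanup: the conf overlay, the
    # Arrow-native-types flag, and the statement parameters.
    return dict(
        confOverlay={
            # Receive proper Timestamp arrow types, not strings.
            "spark.thriftserver.arrowBasedRowSet.timestampAsString": "false",
        },
        useArrowNativeTypes=spark_arrow_types,
        parameters=parameters,
    )

kwargs = build_execute_kwargs(spark_arrow_types=True, parameters=[])
assert kwargs["useArrowNativeTypes"] is True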

databricks_sql_connector_core/src/databricks_sql_connector_core/sql/utils.py

Lines changed: 0 additions & 31 deletions
@@ -74,18 +74,6 @@ def build_queue(
         Returns:
             ResultSetQueue
         """
-
-        # def trow_to_json(trow):
-        #     # Step 1: Serialize TRow using Thrift's TJSONProtocol
-        #     transport = TTransport.TMemoryBuffer()
-        #     protocol = TJSONProtocol.TJSONProtocol(transport)
-        #     trow.write(protocol)
-        #
-        #     # Step 2: Extract JSON string from the transport
-        #     json_str = transport.getvalue().decode('utf-8')
-        #
-        #     return json_str
-
         if row_set_type == TSparkRowSetType.ARROW_BASED_SET:
             arrow_table, n_valid_rows = convert_arrow_based_set_to_arrow_table(
                 t_row_set.arrowBatches, lz4_compressed, arrow_schema_bytes
@@ -95,30 +83,11 @@ def build_queue(
             )
             return ArrowQueue(converted_arrow_table, n_valid_rows)
         elif row_set_type == TSparkRowSetType.COLUMN_BASED_SET:
-            # print("Lin 79 ")
-            # print(type(t_row_set))
-            # print(t_row_set)
-            # json_str = json.loads(trow_to_json(t_row_set))
-            # pretty_json = json.dumps(json_str, indent=2)
-            # print(pretty_json)
-
             converted_column_table, column_names = convert_column_based_set_to_column_table(
                 t_row_set.columns,
                 description)
-            # print(converted_column_table, column_names)
 
             return ColumnQueue(converted_column_table, column_names)
-
-            # print(columnQueue.next_n_rows(2))
-            # print(columnQueue.next_n_rows(2))
-            # print(columnQueue.remaining_rows())
-            # arrow_table, n_valid_rows = convert_column_based_set_to_arrow_table(
-            #     t_row_set.columns, description
-            # )
-            # converted_arrow_table = convert_decimals_in_arrow_table(
-            #     arrow_table, description
-            # )
-            # return ArrowQueue(converted_arrow_table, n_valid_rows)
         elif row_set_type == TSparkRowSetType.URL_BASED_SET:
             return CloudFetchQueue(
                 schema_bytes=arrow_schema_bytes,
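
The deleted comments referenced next_n_rows and remaining_rows on the column queue. A minimal sketch of that interface, assuming the real ColumnQueue slices its column-major table by a moving row cursor; everything beyond those two method names is invented for illustration:

class ColumnQueueSketch:
    def __init__(self, column_table, column_names):
        self.column_table = column_table      # column-major data
        self.column_names = column_names
        self.cur_row_index = 0
        self.n_rows = len(column_table[0]) if column_table else 0

    def next_n_rows(self, num_rows):
        # Slice the next num_rows rows out of every column.
        end = min(self.cur_row_index + num_rows, self.n_rows)
        sliced = [col[self.cur_row_index:end] for col in self.column_table]
        self.cur_row_index = end
        return sliced

    def remaining_rows(self):
        # Everything from the cursor to the end, in one slice.
        sliced = [col[self.cur_row_index:] for col in self.column_table]
        self.cur_row_index = self.n_rows
        return sliced

q = ColumnQueueSketch([[1, 2, 3], ["a", "b", "c"]], ["id", "name"])
assert q.next_n_rows(2) == [[1, 2], ["a", "b"]]
assert q.remaining_rows() == [[3], ["c"]]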
6 files renamed without changes.

tests/e2e/common/predicates.py renamed to databricks_sql_connector_core/tests/e2e/common/predicates.py

Lines changed: 4 additions & 4 deletions
@@ -8,13 +8,13 @@
 
 
 def pysql_supports_arrow():
-    """Import databricks.sql and test whether Cursor has fetchall_arrow."""
-    from databricks.sql import Cursor
+    """Import databricks_sql_connector_core.sql and test whether Cursor has fetchall_arrow."""
+    from databricks_sql_connector_core.sql.client import Cursor
     return hasattr(Cursor, 'fetchall_arrow')
 
 
 def pysql_has_version(compare, version):
-    """Import databricks.sql, and return compare_module_version(...).
+    """Import databricks_sql_connector_core.sql, and return compare_module_version(...).
 
     Expected use:
         from common.predicates import pysql_has_version
@@ -98,4 +98,4 @@ def validate_version(version):
 
     mod_version = validate_version(module.__version__)
     req_version = validate_version(version)
-    return compare_versions(compare, mod_version, req_version)
\ No newline at end of file
+    return compare_versions(compare, mod_version, req_version)
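
The docstring's "Expected use" points at pytest markers. A hedged usage sketch of the renamed predicates; the test name and the version threshold are illustrative only:

import pytest
from common.predicates import pysql_has_version, pysql_supports_arrow

@pytest.mark.skipif(not pysql_has_version('>=', '2'),
                    reason="needs connector version >= 2")
@pytest.mark.skipif(not pysql_supports_arrow(),
                    reason="Cursor.fetchall_arrow not available")
def test_fetchall_arrow_roundtrip():
    # Illustrative body; a real test would open a connection and
    # compare fetchall_arrow() output against known rows.
    ...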
