Skip to content

Commit 88e5d2e

Browse files
committed
Beautify code
1 parent 22b0c2a commit 88e5d2e

File tree

1 file changed

+26
-20
lines changed

1 file changed

+26
-20
lines changed

sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala

Lines changed: 26 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -26,26 +26,32 @@ private case object OracleDialect extends JdbcDialect {
2626

2727
/** An Oracle dialect applies to connection strings that use the Oracle JDBC scheme. */
override def canHandle(url: String): Boolean = {
  url.startsWith("jdbc:oracle")
}
2828

29-
override def getCatalystType(sqlType: Int, typeName: String, size: Int, md: MetadataBuilder):
30-
Option[DataType] = sqlType match {
31-
// Handle NUMBER fields that have no precision/scale in special way
32-
// because JDBC ResultSetMetaData converts this to 0 precision and -127 scale
33-
// For more details, please see
34-
// https://github.com/apache/spark/pull/8780#issuecomment-145598968
35-
// and
36-
// https://github.com/apache/spark/pull/8780#issuecomment-144541760
37-
case Types.NUMERIC if size == 0 => Option(DecimalType(DecimalType.MAX_PRECISION, 10))
38-
// Handle FLOAT fields in a special way because JDBC ResultSetMetaData converts
39-
// this to NUMERIC with -127 scale
40-
// Not sure if there is a more robust way to identify the field as a float (or other
41-
// numeric types that do not specify a scale).
42-
case Types.NUMERIC if md.build().getLong("scale") == -127 =>
43-
Option(DecimalType(DecimalType.MAX_PRECISION, 10))
44-
case Types.NUMERIC if size == 1 => Option(BooleanType)
45-
case Types.NUMERIC if size == 3 || size == 5 || size == 10 => Option(IntegerType)
46-
case Types.NUMERIC if size == 19 && md.build().getLong("scale") == 0L => Option(LongType)
47-
case Types.NUMERIC if size == 19 && md.build().getLong("scale") == 4L => Option(FloatType)
48-
case _ => None
29+
/**
 * Maps Oracle-specific JDBC column metadata onto a Catalyst type.
 *
 * Only `NUMERIC` columns get special handling here; every other JDBC type
 * returns `None` so the generic JDBC mapping applies.
 *
 * @param sqlType  JDBC type code from `java.sql.Types`
 * @param typeName database-specific type name (not consulted here)
 * @param size     reported column precision
 * @param md       column metadata builder; the `"scale"` entry is read for NUMERIC columns
 * @return the Catalyst type to use, or `None` to fall back to the default mapping
 */
override def getCatalystType(
    sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
  if (sqlType == Types.NUMERIC) {
    val scale = md.build().getLong("scale")
    size match {
      // Handle NUMBER fields that have no precision/scale in special way
      // because JDBC ResultSetMetaData converts this to 0 precision and -127 scale
      // For more details, please see
      // https://github.com/apache/spark/pull/8780#issuecomment-145598968
      // and
      // https://github.com/apache/spark/pull/8780#issuecomment-144541760
      case 0 => Option(DecimalType(DecimalType.MAX_PRECISION, 10))
      // Handle FLOAT fields in a special way because JDBC ResultSetMetaData converts
      // this to NUMERIC with -127 *scale* (not precision). The pre-refactor code
      // tested `scale == -127`; matching `case -127` against `size` here would test
      // the precision instead and the branch would never fire, so guard on `scale`.
      // Not sure if there is a more robust way to identify the field as a float (or
      // other numeric types that do not specify a scale).
      case _ if scale == -127L => Option(DecimalType(DecimalType.MAX_PRECISION, 10))
      case 1 => Option(BooleanType)
      case 3 | 5 | 10 => Option(IntegerType)
      case 19 if scale == 0L => Option(LongType)
      // NOTE(review): mapping NUMBER(19,4) to FloatType loses precision — TODO confirm
      // this is intended (preserved from the original behavior).
      case 19 if scale == 4L => Option(FloatType)
      case _ => None
    }
  } else {
    None
  }
}
5056

5157
override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {

0 commit comments

Comments
 (0)