
Commit cc764d4

initial attempt
1 parent a94671a commit cc764d4


sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala

Lines changed: 22 additions & 0 deletions
@@ -315,3 +315,25 @@ case object DerbyDialect extends JdbcDialect {
 
 }
 
+/**
+ * :: DeveloperApi ::
+ * Default Oracle dialect, mapping a nonspecific
+ * numeric type to a general decimal type.
+ * Solution by @bdolbeare (github.com)
+ */
+@DeveloperApi
+case object OracleDialect extends JdbcDialect {
+  override def getCatalystType(
+      sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
+    // Handle NUMBER fields that have no precision/scale in a special way,
+    // because JDBC ResultSetMetaData converts this to 0 precision and -127 scale.
+    if (sqlType == Types.NUMERIC && size == 0) {
+      // This is sub-optimal, as we have to pick a precision/scale in advance, whereas the data in Oracle is allowed
+      // to have a different precision/scale for each value. This conversion works in our domain for now, though we
+      // need a more durable solution. Look into changing JDBCRDD (line 406):
+      //   FROM: mutableRow.update(i, Decimal(decimalVal, p, s))
+      //   TO:   mutableRow.update(i, Decimal(decimalVal))
+      Some(DecimalType(DecimalType.MAX_PRECISION, 10))
+    } else None
+  }
+}
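One gap worth noting: JdbcDialect declares an abstract canHandle(url) method (DerbyDialect, just above this hunk, overrides it), and Spark only consults dialects registered with the JdbcDialects object, so this diff alone would neither compile nor be selected for Oracle connections. Below is a minimal self-contained sketch of the finished dialect; the URL-prefix check, the registerDialect call, and the object name OracleNumberDialect are assumptions modeled on the built-in dialects in JdbcDialects.scala, not part of this commit.

import java.sql.Types

import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}
import org.apache.spark.sql.types.{DataType, DecimalType, MetadataBuilder}

// Hypothetical standalone version of the dialect added in this commit.
case object OracleNumberDialect extends JdbcDialect {
  // Claim only Oracle JDBC URLs, so other dialects are unaffected (assumed
  // prefix, mirroring how the built-in dialects match their URLs).
  override def canHandle(url: String): Boolean = url.startsWith("jdbc:oracle")

  override def getCatalystType(
      sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
    // An unconstrained Oracle NUMBER is reported as precision 0 / scale -127,
    // which the default mapping cannot represent; pin it to a fixed decimal.
    if (sqlType == Types.NUMERIC && size == 0) {
      Some(DecimalType(DecimalType.MAX_PRECISION, 10))
    } else {
      None
    }
  }
}

// Registration is what makes Spark consult the dialect for matching URLs.
JdbcDialects.registerDialect(OracleNumberDialect)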
