
Commit df7041d

yhuaidavies authored and committed
[SPARK-10196] [SQL] Correctly saving decimals in internal rows to JSON.
https://issues.apache.org/jira/browse/SPARK-10196

Author: Yin Huai <[email protected]>

Closes #8408 from yhuai/DecimalJsonSPARK-10196.
1 parent f023aa2 commit df7041d

File tree

2 files changed: +28 −1 lines

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonGenerator.scala

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ private[sql] object JacksonGenerator {
     case (FloatType, v: Float) => gen.writeNumber(v)
     case (DoubleType, v: Double) => gen.writeNumber(v)
     case (LongType, v: Long) => gen.writeNumber(v)
-    case (DecimalType(), v: java.math.BigDecimal) => gen.writeNumber(v)
+    case (DecimalType(), v: Decimal) => gen.writeNumber(v.toJavaBigDecimal)
     case (ByteType, v: Byte) => gen.writeNumber(v.toInt)
     case (BinaryType, v: Array[Byte]) => gen.writeBinary(v)
     case (BooleanType, v: Boolean) => gen.writeBoolean(v)
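
For context on why the old case never matched: by the time values reach JacksonGenerator they are in Spark's internal row format, where decimals are represented as org.apache.spark.sql.types.Decimal rather than java.math.BigDecimal, so the generator has to match on Decimal and convert it before handing the value to Jackson. The snippet below is a minimal, self-contained sketch of that conversion, not the actual JacksonGenerator; the DecimalJsonSketch object and writeDecimal method are made-up names for illustration.

import java.io.StringWriter

import com.fasterxml.jackson.core.JsonFactory

import org.apache.spark.sql.types.{DataType, Decimal, DecimalType}

// Minimal sketch (hypothetical names): internal rows hold decimals as
// Spark's Decimal, so the JSON writer must match Decimal and convert it
// with toJavaBigDecimal before calling Jackson's writeNumber, which only
// understands java.math.BigDecimal (or primitive numeric types).
object DecimalJsonSketch {
  def writeDecimal(dataType: DataType, value: Any): String = {
    val writer = new StringWriter()
    val gen = new JsonFactory().createGenerator(writer)
    (dataType, value) match {
      // Mirrors the fixed case in JacksonGenerator above.
      case (DecimalType(), v: Decimal) => gen.writeNumber(v.toJavaBigDecimal)
      // Everything else is out of scope for this sketch.
      case _ => gen.writeNull()
    }
    gen.flush()
    writer.toString
  }
}

// Example: returns the JSON number literal "10.02".
// DecimalJsonSketch.writeDecimal(DecimalType(7, 2), Decimal("10.02"))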

sql/hive/src/test/scala/org/apache/spark/sql/sources/JsonHadoopFsRelationSuite.scala

Lines changed: 27 additions & 0 deletions
@@ -17,6 +17,8 @@

 package org.apache.spark.sql.sources

+import java.math.BigDecimal
+
 import org.apache.hadoop.fs.Path

 import org.apache.spark.deploy.SparkHadoopUtil
@@ -75,4 +77,29 @@ class JsonHadoopFsRelationSuite extends HadoopFsRelationTest {
       )
     }
   }
+
+  test("SPARK-10196: save decimal type to JSON") {
+    withTempDir { file =>
+      file.delete()
+
+      val schema =
+        new StructType()
+          .add("decimal", DecimalType(7, 2))
+
+      val data =
+        Row(new BigDecimal("10.02")) ::
+          Row(new BigDecimal("20000.99")) ::
+          Row(new BigDecimal("10000")) :: Nil
+      val df = createDataFrame(sparkContext.parallelize(data), schema)
+
+      // Write the data out.
+      df.write.format(dataSourceName).save(file.getCanonicalPath)
+
+      // Read it back and check the result.
+      checkAnswer(
+        read.format(dataSourceName).schema(schema).load(file.getCanonicalPath),
+        df
+      )
+    }
+  }
 }
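
As a rough companion to the test above, a spark-shell style round trip of the user-facing behavior this commit fixes might look like the following. The sc and sqlContext handles are the usual spark-shell conventions for this Spark version, and the output path is only an example; none of this comes from the commit itself.

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{DecimalType, StructType}

// Assumes the standard spark-shell handles `sc` and `sqlContext` and a
// writable example path. Before this fix, the JSON writer's old case
// expected java.math.BigDecimal and so never matched the Decimal values
// carried in internal rows.
val schema = new StructType().add("decimal", DecimalType(7, 2))
val rows = sc.parallelize(Seq(
  Row(new java.math.BigDecimal("10.02")),
  Row(new java.math.BigDecimal("20000.99"))))
val df = sqlContext.createDataFrame(rows, schema)

// Round-trip through JSON and inspect the values that come back.
df.write.json("/tmp/spark-10196-decimal")  // example output path
sqlContext.read.schema(schema).json("/tmp/spark-10196-decimal").show()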
