2 files changed (+28 -1 lines):
  core/src/main/scala/org/apache/spark/sql/execution/datasources/json
  hive/src/test/scala/org/apache/spark/sql/sources

core/src/main/scala/org/apache/spark/sql/execution/datasources/json

@@ -95,7 +95,7 @@ private[sql] object JacksonGenerator {
       case (FloatType, v: Float) => gen.writeNumber(v)
       case (DoubleType, v: Double) => gen.writeNumber(v)
       case (LongType, v: Long) => gen.writeNumber(v)
-      case (DecimalType(), v: java.math.BigDecimal) => gen.writeNumber(v)
+      case (DecimalType(), v: Decimal) => gen.writeNumber(v.toJavaBigDecimal)
       case (ByteType, v: Byte) => gen.writeNumber(v.toInt)
       case (BinaryType, v: Array[Byte]) => gen.writeBinary(v)
       case (BooleanType, v: Boolean) => gen.writeBoolean(v)
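The old pattern matched java.math.BigDecimal, but Spark SQL rows carry decimal column values as org.apache.spark.sql.types.Decimal internally, so decimal values never hit this case and saving to JSON failed (SPARK-10196). Below is a minimal, self-contained sketch of the conversion the new case performs, outside of Spark's generator; the object name, field name, and printed output are illustrative, not part of the PR.

import java.io.StringWriter

import com.fasterxml.jackson.core.JsonFactory

import org.apache.spark.sql.types.Decimal

object DecimalToJsonSketch {
  def main(args: Array[String]): Unit = {
    val out = new StringWriter()
    val gen = new JsonFactory().createGenerator(out)

    // Spark SQL's internal decimal representation.
    val v = Decimal("20000.99")

    gen.writeStartObject()
    gen.writeFieldName("decimal")
    // Jackson's writeNumber expects java.math.BigDecimal, so convert first.
    gen.writeNumber(v.toJavaBigDecimal)
    gen.writeEndObject()
    gen.close()

    println(out.toString)  // {"decimal":20000.99}
  }
}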
hive/src/test/scala/org/apache/spark/sql/sources

@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.sources
 
+import java.math.BigDecimal
+
 import org.apache.hadoop.fs.Path
 
 import org.apache.spark.deploy.SparkHadoopUtil
@@ -75,4 +77,29 @@ class JsonHadoopFsRelationSuite extends HadoopFsRelationTest {
       )
     }
   }
+
+  test("SPARK-10196: save decimal type to JSON") {
+    withTempDir { file =>
+      file.delete()
+
+      val schema =
+        new StructType()
+          .add("decimal", DecimalType(7, 2))
+
+      val data =
+        Row(new BigDecimal("10.02")) ::
+          Row(new BigDecimal("20000.99")) ::
+          Row(new BigDecimal("10000")) :: Nil
+      val df = createDataFrame(sparkContext.parallelize(data), schema)
+
+      // Write the data out.
+      df.write.format(dataSourceName).save(file.getCanonicalPath)
+
+      // Read it back and check the result.
+      checkAnswer(
+        read.format(dataSourceName).schema(schema).load(file.getCanonicalPath),
+        df
+      )
+    }
+  }
 }
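For reference, a hedged end-user sketch of the scenario this test exercises, as one might run it in a Spark 1.5 spark-shell (sc and sqlContext are provided by the shell; the output path is illustrative, not from the PR): writing a DataFrame with a decimal column to JSON and reading it back.

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{DecimalType, StructType}

val schema = new StructType().add("decimal", DecimalType(7, 2))
val data = Seq(
  Row(new java.math.BigDecimal("10.02")),
  Row(new java.math.BigDecimal("20000.99")))
val df = sqlContext.createDataFrame(sc.parallelize(data), schema)

// Before this change, saving a decimal column to JSON failed; with the fix it round-trips.
df.write.json("/tmp/decimal-json")
sqlContext.read.schema(schema).json("/tmp/decimal-json").show()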