@@ -61,7 +61,10 @@ private[json] object InferSchema {
StructType(Seq(StructField(columnNameOfCorruptRecords, StringType)))
}
}
}.treeAggregate[DataType](StructType(Seq()))(compatibleRootType, compatibleRootType)
}.treeAggregate[DataType](
StructType(Seq()))(
compatibleRootType(columnNameOfCorruptRecords),
compatibleRootType(columnNameOfCorruptRecords))

canonicalizeType(rootType) match {
case Some(st: StructType) => st
@@ -170,12 +173,38 @@ private[json] object InferSchema {
case other => Some(other)
}

private def withCorruptField(
struct: StructType,
columnNameOfCorruptRecords: String): StructType = {
if (!struct.fieldNames.contains(columnNameOfCorruptRecords)) {
// If this given struct does not have a column used for corrupt records,
// add this field.
struct.add(columnNameOfCorruptRecords, StringType, nullable = true)
} else {
// Otherwise, just return this struct.
struct
}
}

/**
* Remove top-level ArrayType wrappers and merge the remaining schemas
*/
private def compatibleRootType: (DataType, DataType) => DataType = {
case (ArrayType(ty1, _), ty2) => compatibleRootType(ty1, ty2)
case (ty1, ArrayType(ty2, _)) => compatibleRootType(ty1, ty2)
private def compatibleRootType(
columnNameOfCorruptRecords: String): (DataType, DataType) => DataType = {
// Since we support array of json objects at the top level,
// we need to check the element type and find the root level data type.
case (ArrayType(ty1, _), ty2) => compatibleRootType(columnNameOfCorruptRecords)(ty1, ty2)
case (ty1, ArrayType(ty2, _)) => compatibleRootType(columnNameOfCorruptRecords)(ty1, ty2)
// If we see any other data type at the root level, we get records that cannot be
// parsed. So, we use the struct as the data type and add the corrupt field to the schema.
case (struct: StructType, NullType) => struct
case (NullType, struct: StructType) => struct
case (struct: StructType, o) if !o.isInstanceOf[StructType] =>
withCorruptField(struct, columnNameOfCorruptRecords)
case (o, struct: StructType) if !o.isInstanceOf[StructType] =>
withCorruptField(struct, columnNameOfCorruptRecords)
// If we get anything else, we call compatibleType.
// Usually, when we reach here, ty1 and ty2 are two StructTypes.
case (ty1, ty2) => compatibleType(ty1, ty2)
}
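
As a minimal standalone illustration (not part of the patch) of the struct-versus-scalar cases above, the sketch below mirrors withCorruptField; "_corrupt_record" stands in for whatever columnNameOfCorruptRecords is configured to:

import org.apache.spark.sql.types._

object CompatibleRootTypeSketch {
  // Mirrors withCorruptField above: fold a non-struct root type into the struct schema
  // by adding the corrupt-record column instead of failing schema inference.
  private def withCorruptField(struct: StructType, corruptCol: String): StructType =
    if (struct.fieldNames.contains(corruptCol)) struct
    else struct.add(corruptCol, StringType, nullable = true)

  def main(args: Array[String]): Unit = {
    // A line such as {"dummy":"test"} infers to a struct, while a bare scalar line
    // such as 42 infers to LongType. Merging the two at the root level no longer
    // fails; the scalar side simply forces the corrupt-record column into the schema.
    val objectSchema = StructType(StructField("dummy", StringType) :: Nil)
    val merged = withCorruptField(objectSchema, "_corrupt_record")
    println(merged.fieldNames.mkString(", "))  // dummy, _corrupt_record
  }
}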

@@ -31,6 +31,8 @@ import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils

private[json] class SparkSQLJsonProcessingException(msg: String) extends Exception(msg)
Review comment (Contributor): this should technically extend RuntimeException
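In code, the suggested variant (a sketch of the reviewer's suggestion, not what this patch does) would be:

private[json] class SparkSQLJsonProcessingException(msg: String) extends RuntimeException(msg)

Since Scala does not enforce checked exceptions, the difference mainly matters as a convention for Java callers.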


object JacksonParser {

def parse(
@@ -110,7 +112,7 @@ object JacksonParser {
lowerCaseValue.equals("-inf")) {
value.toFloat
} else {
sys.error(s"Cannot parse $value as FloatType.")
throw new SparkSQLJsonProcessingException(s"Cannot parse $value as FloatType.")
}

case (VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT, DoubleType) =>
@@ -127,7 +129,7 @@ object JacksonParser {
lowerCaseValue.equals("-inf")) {
value.toDouble
} else {
sys.error(s"Cannot parse $value as DoubleType.")
throw new SparkSQLJsonProcessingException(s"Cannot parse $value as DoubleType.")
}

case (VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT, dt: DecimalType) =>
@@ -174,7 +176,11 @@ object JacksonParser {
convertField(factory, parser, udt.sqlType)

case (token, dataType) =>
sys.error(s"Failed to parse a value for data type $dataType (current token: $token).")
Review comment (Contributor): Should the other error cases be handled this way too? The String -> float/double conversions.
Reply (Contributor Author): yeah, it is a good point. I added the change.

// We cannot parse this token based on the given data type. So, we throw a
// SparkSQLJsonProcessingException and this exception will be caught by
// parseJson method.
throw new SparkSQLJsonProcessingException(
s"Failed to parse a value for data type $dataType (current token: $token).")
}
}

@@ -267,15 +273,14 @@ object JacksonParser {
array.toArray[InternalRow](schema)
}
case _ =>
sys.error(
s"Failed to parse record $record. Please make sure that each line of " +
"the file (or each string in the RDD) is a valid JSON object or " +
"an array of JSON objects.")
failedRecord(record)
}
}
} catch {
case _: JsonProcessingException =>
failedRecord(record)
case _: SparkSQLJsonProcessingException =>
failedRecord(record)
}
}
}
@@ -1427,4 +1427,41 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
}
}
}

test("SPARK-12057 additional corrupt records do not throw exceptions") {
// Test if we can query corrupt records.
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
withTempTable("jsonTable") {
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("dummy", StringType, true) :: Nil)

{
// We need to make sure we can infer the schema.
val jsonDF = sqlContext.read.json(additionalCorruptRecords)
assert(jsonDF.schema === schema)
}

{
val jsonDF = sqlContext.read.schema(schema).json(additionalCorruptRecords)
jsonDF.registerTempTable("jsonTable")

// In HiveContext, backticks should be used to access columns starting with an underscore.
checkAnswer(
sql(
"""
|SELECT dummy, _unparsed
|FROM jsonTable
""".stripMargin),
Row("test", null) ::
Row(null, """[1,2,3]""") ::
Row(null, """":"test", "a":1}""") ::
Row(null, """42""") ::
Row(null, """ ","ian":"test"}""") :: Nil
)
}
}
}
}

}
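
Outside the test harness, roughly the same check can be written against the DataFrame API directly; this is a sketch that assumes the schema, sqlContext, and additionalCorruptRecords values used in the test above, with spark.sql.columnNameOfCorruptRecord set to "_unparsed":

val jsonDF = sqlContext.read.schema(schema).json(additionalCorruptRecords)
// Valid rows keep their parsed columns; unparseable lines surface as raw strings
// in the configured corrupt-record column instead of failing the whole read.
jsonDF.select("dummy", "_unparsed").show(false)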
@@ -188,6 +188,14 @@ private[json] trait TestJsonData {
"""{"b":"str_b_4", "a":"str_a_4", "c":"str_c_4"}""" ::
"""]""" :: Nil)

def additionalCorruptRecords: RDD[String] =
sqlContext.sparkContext.parallelize(
"""{"dummy":"test"}""" ::
"""[1,2,3]""" ::
"""":"test", "a":1}""" ::
"""42""" ::
""" ","ian":"test"}""" :: Nil)

def emptyRecords: RDD[String] =
sqlContext.sparkContext.parallelize(
"""{""" ::
@@ -197,7 +205,6 @@ private[json] trait TestJsonData {
"""{"b": [{"c": {}}]}""" ::
"""]""" :: Nil)


lazy val singleRow: RDD[String] = sqlContext.sparkContext.parallelize("""{"a":123}""" :: Nil)

def empty: RDD[String] = sqlContext.sparkContext.parallelize(Seq[String]())