@@ -109,24 +109,32 @@ public ParquetVectorUpdater getUpdater(ColumnDescriptor descriptor, DataType sparkType) {
       // For unsigned int64, it stores as plain signed int64 in Parquet when dictionary
       // fallbacks. We read them as decimal values.
       return new UnsignedLongUpdater();
-    } else if (isTimestamp(sparkType) &&
-        isTimestampTypeMatched(LogicalTypeAnnotation.TimeUnit.MICROS)) {
-      validateTimestampType(sparkType);
+    } else if (sparkType == DataTypes.TimestampType &&
+        isTimestampTypeMatched(LogicalTypeAnnotation.TimeUnit.MICROS)) {
       if ("CORRECTED".equals(datetimeRebaseMode)) {
         return new LongUpdater();
       } else {
         boolean failIfRebase = "EXCEPTION".equals(datetimeRebaseMode);
         return new LongWithRebaseUpdater(failIfRebase, datetimeRebaseTz);
       }
-    } else if (isTimestamp(sparkType) &&
-        isTimestampTypeMatched(LogicalTypeAnnotation.TimeUnit.MILLIS)) {
-      validateTimestampType(sparkType);
+    } else if (sparkType == DataTypes.TimestampType &&
+        isTimestampTypeMatched(LogicalTypeAnnotation.TimeUnit.MILLIS)) {
       if ("CORRECTED".equals(datetimeRebaseMode)) {
         return new LongAsMicrosUpdater();
       } else {
         final boolean failIfRebase = "EXCEPTION".equals(datetimeRebaseMode);
         return new LongAsMicrosRebaseUpdater(failIfRebase, datetimeRebaseTz);
       }
+    } else if (sparkType == DataTypes.TimestampNTZType &&
+        isTimestampTypeMatched(LogicalTypeAnnotation.TimeUnit.MICROS)) {
+      validateTimestampNTZType();
+      // TIMESTAMP_NTZ is a new data type and has no legacy files that need to do rebase.
+      return new LongUpdater();
+    } else if (sparkType == DataTypes.TimestampNTZType &&
+        isTimestampTypeMatched(LogicalTypeAnnotation.TimeUnit.MILLIS)) {
+      validateTimestampNTZType();
+      // TIMESTAMP_NTZ is a new data type and has no legacy files that need to do rebase.
+      return new LongAsMicrosUpdater();
     } else if (sparkType instanceof DayTimeIntervalType) {
       return new LongUpdater();
     }
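For context: both `TimestampType` branches dispatch on `datetimeRebaseMode`, and the MILLIS variants additionally widen Parquet's millisecond longs to the microsecond precision Catalyst uses internally. A minimal standalone sketch of that widening, assuming a free-standing helper rather than Spark's actual `ParquetVectorUpdater` machinery:

```java
// Illustrative sketch only: Spark's LongAsMicrosUpdater performs this widening
// through its own updater interface; the helper below is a hypothetical stand-in.
final class MillisToMicrosSketch {
  private static final long MICROS_PER_MILLIS = 1_000L;

  // Widen a Parquet TIMESTAMP(MILLIS) value to Catalyst microseconds.
  // multiplyExact surfaces overflow instead of silently wrapping.
  static long millisToMicros(long millis) {
    return Math.multiplyExact(millis, MICROS_PER_MILLIS);
  }

  public static void main(String[] args) {
    System.out.println(millisToMicros(1_700_000_000_123L)); // 1700000000123000
  }
}
```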
@@ -196,12 +204,11 @@ boolean isTimestampTypeMatched(LogicalTypeAnnotation.TimeUnit unit) {
       ((TimestampLogicalTypeAnnotation) logicalTypeAnnotation).getUnit() == unit;
   }
 
-  void validateTimestampType(DataType sparkType) {
+  private void validateTimestampNTZType() {
     assert(logicalTypeAnnotation instanceof TimestampLogicalTypeAnnotation);
-    // Throw an exception if the Parquet type is TimestampLTZ and the Catalyst type is TimestampNTZ.
+    // Throw an exception if the Parquet type is TimestampLTZ as the Catalyst type is TimestampNTZ.
     // This is to avoid mistakes in reading the timestamp values.
-    if (((TimestampLogicalTypeAnnotation) logicalTypeAnnotation).isAdjustedToUTC() &&
-        sparkType == DataTypes.TimestampNTZType) {
+    if (((TimestampLogicalTypeAnnotation) logicalTypeAnnotation).isAdjustedToUTC()) {
       convertErrorForTimestampNTZ("int64 time(" + logicalTypeAnnotation + ")");
     }
   }
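Worth noting: Parquet's TIMESTAMP annotation carries an `isAdjustedToUTC` flag, and that flag is what separates instant semantics (Spark's `TimestampType`, i.e. LTZ) from local semantics (TIMESTAMP_NTZ). Because the renamed method is now only reached from the `TimestampNTZType` branches, the old `sparkType` guard was redundant. An illustrative sketch of the check against the real parquet-mr `LogicalTypeAnnotation` API (the `NtzCheckSketch` class and `requireNtz` helper are hypothetical):

```java
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.LogicalTypeAnnotation.TimeUnit;
import org.apache.parquet.schema.LogicalTypeAnnotation.TimestampLogicalTypeAnnotation;

public final class NtzCheckSketch {
  // Mirrors the shape of validateTimestampNTZType: reading an adjusted-to-UTC
  // column as TIMESTAMP_NTZ would silently reinterpret instants as local times.
  static void requireNtz(TimestampLogicalTypeAnnotation t) {
    if (t.isAdjustedToUTC()) {
      throw new IllegalStateException("Parquet column has LTZ semantics: " + t);
    }
  }

  public static void main(String[] args) {
    requireNtz(LogicalTypeAnnotation.timestampType(false, TimeUnit.MICROS)); // passes: NTZ
    requireNtz(LogicalTypeAnnotation.timestampType(true, TimeUnit.MICROS));  // throws: LTZ
  }
}
```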
@@ -1152,10 +1159,6 @@ private static boolean isLongDecimal(DataType dt) {
     return false;
   }
 
-  private static boolean isTimestamp(DataType dt) {
-    return dt == DataTypes.TimestampType || dt == DataTypes.TimestampNTZType;
-  }
-
   private static boolean isDecimalTypeMatched(ColumnDescriptor descriptor, DataType dt) {
     DecimalType d = (DecimalType) dt;
     LogicalTypeAnnotation typeAnnotation = descriptor.getPrimitiveType().getLogicalTypeAnnotation();
@@ -255,6 +255,18 @@ abstract class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSparkSession {
     }
   }
 
+  test("SPARK-46466: write and read TimestampNTZ with legacy rebase mode") {
+    withSQLConf(SQLConf.PARQUET_REBASE_MODE_IN_WRITE.key -> "LEGACY") {
+      withTable("ts") {
+        sql("create table ts (c1 timestamp_ntz) using parquet")
+        sql("insert into ts values (timestamp_ntz'0900-01-01 01:10:10')")
+        withAllParquetReaders {
+          checkAnswer(spark.table("ts"), sql("select timestamp_ntz'0900-01-01 01:10:10'"))
+        }
+      }
+    }
+  }
+
   test("Enabling/disabling merging partfiles when merging parquet schema") {
     def testSchemaMerging(expectedColumnNumber: Int): Unit = {
       withTempDir { dir =>
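The choice of 0900-01-01 is deliberate: it predates the 1582 Gregorian cutover, where the hybrid Julian/Gregorian calendar used by Spark 2.x and the proleptic Gregorian calendar used by Spark 3.x disagree. That disagreement is what the LEGACY rebase mode reconciles for `TimestampType`, and the test pins down that TIMESTAMP_NTZ values round-trip without any such rebase. A self-contained, JDK-only sketch of the calendar disagreement (class name is illustrative):

```java
import java.time.LocalDate;
import java.time.ZoneOffset;
import java.util.GregorianCalendar;
import java.util.TimeZone;

public final class CutoverSketch {
  public static void main(String[] args) {
    // Hybrid calendar (java.util.GregorianCalendar): Julian rules before 1582-10-15.
    GregorianCalendar hybrid = new GregorianCalendar(TimeZone.getTimeZone("UTC"));
    hybrid.clear();
    hybrid.set(900, 0, 1); // year 900, January 1 (months are 0-based)

    // Proleptic Gregorian calendar (java.time): Gregorian rules extended backwards.
    long proleptic =
        LocalDate.of(900, 1, 1).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli();

    // The same nominal date maps to instants a few days apart this far back.
    System.out.println((proleptic - hybrid.getTimeInMillis()) / 86_400_000L);
  }
}
```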