
Commit e0d924f

tdas authored and Robert Kruszewski committed
[SPARK-18776][SS] Make Offset for FileStreamSource correctly formatted in JSON
## What changes were proposed in this pull request?

- Changed FileStreamSource to use the new FileStreamSourceOffset rather than LongOffset. The field is named `logOffset` to make it clear that this is an offset in the file stream log.
- Fixed a bug in FileStreamSourceLog: the field endId in FileStreamSourceLog.get(startId, endId) was not being used at all. No earlier test caught it; only the updated tests do.

Other minor changes:
- Don't use batchId in the FileStreamSource, as calling it a batch id is extremely misleading. With multiple sources, a new batch may contain no new data from a file source, so the offset of FileStreamSource != batchId after that batch.

## How was this patch tested?

Updated unit test.

Author: Tathagata Das <[email protected]>

Closes apache#16205 from tdas/SPARK-18776.
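For context, a minimal sketch of the serialization difference (assuming the streaming classes touched by this patch; the value 345 mirrors the compatibility-test fixtures):

import org.apache.spark.sql.execution.streaming.{FileStreamSourceOffset, LongOffset}

// Before: the file source reported a LongOffset, whose json form is a bare number.
LongOffset(345).json                          // "345"

// After: FileStreamSourceOffset serializes through json4s, so the offset log
// records a self-describing JSON object instead.
FileStreamSourceOffset(logOffset = 345).json  // {"logOffset":345}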
1 parent a9207d4 commit e0d924f

File tree

10 files changed, +95 -33 lines changed


external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSourceOffsetSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -90,7 +90,7 @@ class KafkaSourceOffsetSuite extends OffsetSuite with SharedSQLContext {
     }
   }

-  test("read Spark 2.1.0 log format") {
+  test("read Spark 2.1.0 offset format") {
     val offset = readFromResource("kafka-source-offset-version-2.1.0.txt")
     assert(KafkaSourceOffset(offset) ===
       KafkaSourceOffset(("topic1", 0, 456L), ("topic1", 1, 789L), ("topic2", 0, 0L)))

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala

Lines changed: 15 additions & 17 deletions
@@ -57,7 +57,7 @@ class FileStreamSource(

   private val metadataLog =
     new FileStreamSourceLog(FileStreamSourceLog.VERSION, sparkSession, metadataPath)
-  private var maxBatchId = metadataLog.getLatest().map(_._1).getOrElse(-1L)
+  private var metadataLogCurrentOffset = metadataLog.getLatest().map(_._1).getOrElse(-1L)

   /** Maximum number of new files to be considered in each batch */
   private val maxFilesPerBatch = sourceOptions.maxFilesPerTrigger
@@ -79,7 +79,7 @@ class FileStreamSource(
    * `synchronized` on this method is for solving race conditions in tests. In the normal usage,
    * there is no race here, so the cost of `synchronized` should be rare.
    */
-  private def fetchMaxOffset(): LongOffset = synchronized {
+  private def fetchMaxOffset(): FileStreamSourceOffset = synchronized {
     // All the new files found - ignore aged files and files that we have seen.
     val newFiles = fetchAllFiles().filter {
       case (path, timestamp) => seenFiles.isNewFile(path, timestamp)
@@ -104,14 +104,14 @@ class FileStreamSource(
         """.stripMargin)

     if (batchFiles.nonEmpty) {
-      maxBatchId += 1
-      metadataLog.add(maxBatchId, batchFiles.map { case (path, timestamp) =>
-        FileEntry(path = path, timestamp = timestamp, batchId = maxBatchId)
+      metadataLogCurrentOffset += 1
+      metadataLog.add(metadataLogCurrentOffset, batchFiles.map { case (p, timestamp) =>
+        FileEntry(path = p, timestamp = timestamp, batchId = metadataLogCurrentOffset)
       }.toArray)
-      logInfo(s"Max batch id increased to $maxBatchId with ${batchFiles.size} new files")
+      logInfo(s"Log offset set to $metadataLogCurrentOffset with ${batchFiles.size} new files")
     }

-    new LongOffset(maxBatchId)
+    FileStreamSourceOffset(metadataLogCurrentOffset)
   }

   /**
@@ -122,21 +122,19 @@ class FileStreamSource(
     func
   }

-  /** Return the latest offset in the source */
-  def currentOffset: LongOffset = synchronized {
-    new LongOffset(maxBatchId)
-  }
+  /** Return the latest offset in the [[FileStreamSourceLog]] */
+  def currentLogOffset: Long = synchronized { metadataLogCurrentOffset }

   /**
    * Returns the data that is between the offsets (`start`, `end`].
    */
   override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
-    val startId = start.flatMap(LongOffset.convert(_)).getOrElse(LongOffset(-1L)).offset
-    val endId = LongOffset.convert(end).getOrElse(LongOffset(0)).offset
+    val startOffset = start.map(FileStreamSourceOffset(_).logOffset).getOrElse(-1L)
+    val endOffset = FileStreamSourceOffset(end).logOffset

-    assert(startId <= endId)
-    val files = metadataLog.get(Some(startId + 1), Some(endId)).flatMap(_._2)
-    logInfo(s"Processing ${files.length} files from ${startId + 1}:$endId")
+    assert(startOffset <= endOffset)
+    val files = metadataLog.get(Some(startOffset + 1), Some(endOffset)).flatMap(_._2)
+    logInfo(s"Processing ${files.length} files from ${startOffset + 1}:$endOffset")
     logTrace(s"Files are:\n\t" + files.mkString("\n\t"))
     val newDataSource =
       DataSource(
@@ -172,7 +170,7 @@ class FileStreamSource(
     files
   }

-  override def getOffset: Option[Offset] = Some(fetchMaxOffset()).filterNot(_.offset == -1)
+  override def getOffset: Option[Offset] = Some(fetchMaxOffset()).filterNot(_.logOffset == -1)

   override def toString: String = s"FileStreamSource[$qualifiedBasePath]"
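A hedged sketch of what the new getOffset contract means in practice (toSourceOffset is a hypothetical stand-in for the Some(fetchMaxOffset()).filterNot(...) expression above):

import org.apache.spark.sql.execution.streaming.FileStreamSourceOffset

// The source starts at logOffset -1 and only advances once a non-empty batch of
// files is committed to the metadata log, so -1 is filtered out as "no data yet".
def toSourceOffset(logOffset: Long): Option[FileStreamSourceOffset] =
  Some(FileStreamSourceOffset(logOffset)).filterNot(_.logOffset == -1)

toSourceOffset(-1L)  // None: the source has never seen a file
toSourceOffset(0L)   // Some(FileStreamSourceOffset(0)): first batch of files logged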

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceLog.scala

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ class FileStreamSourceLog(

   override def get(startId: Option[Long], endId: Option[Long]): Array[(Long, Array[FileEntry])] = {
     val startBatchId = startId.getOrElse(0L)
-    val endBatchId = getLatest().map(_._1).getOrElse(0L)
+    val endBatchId = endId.orElse(getLatest().map(_._1)).getOrElse(0L)

     val (existedBatches, removedBatches) = (startBatchId to endBatchId).map { id =>
       if (isCompactionBatch(id, compactInterval) && fileEntryCache.containsKey(id)) {
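The endId fix above is subtle; a minimal standalone sketch of the behavior change, with hypothetical batch ids:

// Suppose the log's latest batch is 5 but the caller asked for get(start, Some(2)).
val latestBatchId: Option[Long] = Some(5L)  // what getLatest().map(_._1) would yield
val endId: Option[Long] = Some(2L)          // the caller-supplied upper bound

// Before the fix: endId was silently ignored, so the scan always ran to the latest batch.
val endBefore = latestBatchId.getOrElse(0L)                // 5 -- wrong
// After the fix: the requested endId wins; getLatest() is only a fallback.
val endAfter = endId.orElse(latestBatchId).getOrElse(0L)   // 2 -- correct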
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceOffset.scala

Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.streaming
+
+import scala.util.control.Exception._
+
+import org.json4s.NoTypeHints
+import org.json4s.jackson.Serialization
+
+/**
+ * Offset for the [[FileStreamSource]].
+ * @param logOffset Position in the [[FileStreamSourceLog]]
+ */
+case class FileStreamSourceOffset(logOffset: Long) extends Offset {
+  override def json: String = {
+    Serialization.write(this)(FileStreamSourceOffset.format)
+  }
+}
+
+object FileStreamSourceOffset {
+  implicit val format = Serialization.formats(NoTypeHints)
+
+  def apply(offset: Offset): FileStreamSourceOffset = {
+    offset match {
+      case f: FileStreamSourceOffset => f
+      case SerializedOffset(str) =>
+        catching(classOf[NumberFormatException]).opt {
+          FileStreamSourceOffset(str.toLong)
+        }.getOrElse {
+          Serialization.read[FileStreamSourceOffset](str)
+        }
+      case _ =>
+        throw new IllegalArgumentException(
+          s"Invalid conversion from offset of ${offset.getClass} to FileStreamSourceOffset")
+    }
+  }
+}
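A quick usage sketch of the apply converter above, using the same value as the compatibility tests:

import org.apache.spark.sql.execution.streaming.{FileStreamSourceOffset, SerializedOffset}

// The legacy Spark 2.1.0 format (a bare long) is tried first...
FileStreamSourceOffset(SerializedOffset("345"))                    // FileStreamSourceOffset(345)
// ...and when str.toLong throws NumberFormatException, the converter falls back
// to json4s deserialization of the new JSON format.
FileStreamSourceOffset(SerializedOffset("""{"logOffset":345}"""))  // FileStreamSourceOffset(345)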
sql/core/src/test/resources/structured-streaming/file-source-offset-version-2.1.0-json.txt

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+{"logOffset":345}
sql/core/src/test/resources/structured-streaming/offset-log-version-2.1.0/0

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
 v1
 {"batchWatermarkMs":0,"batchTimestampMs":1480981499528}
-0
-{"topic-0":{"0":1}}
+{"logOffset":345}
+{"topic-0":{"0":1}}

sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -97,7 +97,7 @@ class FileStreamSourceSuite extends SparkFunSuite with SharedSQLContext {
       val newSource = new FileStreamSource(spark, s"$scheme:///", "parquet", StructType(Nil), Nil,
         dir.getAbsolutePath, Map.empty)
       // this method should throw an exception if `fs.exists` is called during resolveRelation
-      newSource.getBatch(None, LongOffset(1))
+      newSource.getBatch(None, FileStreamSourceOffset(1))
     }
   }
 }

sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/OffsetSeqLogSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ class OffsetSeqLogSuite extends SparkFunSuite with SharedSQLContext {
     val (batchId, offsetSeq) = readFromResource("offset-log-version-2.1.0")
     assert(batchId === 0)
     assert(offsetSeq.offsets === Seq(
-      Some(SerializedOffset("0")),
+      Some(SerializedOffset("""{"logOffset":345}""")),
       Some(SerializedOffset("""{"topic-0":{"0":1}}"""))
     ))
     assert(offsetSeq.metadata === Some(OffsetSeqMetadata(0L, 1480981499528L)))

sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala

Lines changed: 20 additions & 10 deletions
@@ -61,7 +61,7 @@ abstract class FileStreamSourceTest
     val source = sources.head
     val newOffset = source.withBatchingLocked {
       addData(source)
-      source.currentOffset + 1
+      new FileStreamSourceOffset(source.currentLogOffset + 1)
     }
     logInfo(s"Added file to $source at offset $newOffset")
     (source, newOffset)
@@ -987,12 +987,17 @@ class FileStreamSourceSuite extends FileStreamSourceTest {
         val _sources = PrivateMethod[Seq[Source]]('sources)
         val fileSource =
           (execution invokePrivate _sources()).head.asInstanceOf[FileStreamSource]
-        assert(fileSource.getBatch(None, LongOffset(2)).as[String].collect() ===
-          List("keep1", "keep2", "keep3"))
-        assert(fileSource.getBatch(Some(LongOffset(0)), LongOffset(2)).as[String].collect() ===
-          List("keep2", "keep3"))
-        assert(fileSource.getBatch(Some(LongOffset(1)), LongOffset(2)).as[String].collect() ===
-          List("keep3"))
+
+        def verify(startId: Option[Int], endId: Int, expected: String*): Unit = {
+          val start = startId.map(new FileStreamSourceOffset(_))
+          val end = FileStreamSourceOffset(endId)
+          assert(fileSource.getBatch(start, end).as[String].collect().toSeq === expected)
+        }
+
+        verify(startId = None, endId = 2, "keep1", "keep2", "keep3")
+        verify(startId = Some(0), endId = 1, "keep2")
+        verify(startId = Some(0), endId = 2, "keep2", "keep3")
+        verify(startId = Some(1), endId = 2, "keep3")
         true
       }
     )
@@ -1023,9 +1028,14 @@ class FileStreamSourceSuite extends FileStreamSourceTest {
     assert(options.maxFilesPerTrigger == Some(1))
   }

-  test("FileStreamSource offset - read Spark 2.1.0 log format") {
-    val offset = readOffsetFromResource("file-source-offset-version-2.1.0.txt")
-    assert(LongOffset.convert(offset) === Some(LongOffset(345)))
+  test("FileStreamSource offset - read Spark 2.1.0 offset json format") {
+    val offset = readOffsetFromResource("file-source-offset-version-2.1.0-json.txt")
+    assert(FileStreamSourceOffset(offset) === FileStreamSourceOffset(345))
+  }
+
+  test("FileStreamSource offset - read Spark 2.1.0 offset long format") {
+    val offset = readOffsetFromResource("file-source-offset-version-2.1.0-long.txt")
+    assert(FileStreamSourceOffset(offset) === FileStreamSourceOffset(345))
   }

   test("FileStreamSourceLog - read Spark 2.1.0 log format") {
