@@ -364,9 +364,9 @@ private[state] class HDFSBackedStateStoreProvider extends StateStoreProvider wit
364364 ByteStreams .readFully(input, valueRowBuffer, 0 , valueSize)
365365 val valueRow = new UnsafeRow (valueSchema.fields.length)
366366 // If valueSize in existing file is not multiple of 8, floor it to multiple of 8.
367- // This is work around for the following.
368- // Pre- Spark 2.3 mistakenly append 4 bytes to the value row in
369- // `FixedLengthRowBasedKeyValueBatch `, which gets persisted into the checkpoint data
367+ // This is a workaround for the following:
368+ // Versions prior to Spark 2.3 mistakenly appended 4 bytes to the value row in
369+ // `RowBasedKeyValueBatch`, which gets persisted into the checkpoint data
370370 valueRow.pointTo(valueRowBuffer, (valueSize / 8 ) * 8 )
371371 map.put(keyRow, valueRow)
372372 }
@@ -432,9 +432,9 @@ private[state] class HDFSBackedStateStoreProvider extends StateStoreProvider wit
432432 ByteStreams .readFully(input, valueRowBuffer, 0 , valueSize)
433433 val valueRow = new UnsafeRow (valueSchema.fields.length)
434434 // If valueSize in existing file is not multiple of 8, floor it to multiple of 8.
435- // This is work around for the following.
436- // Pre- Spark 2.3 mistakenly append 4 bytes to the value row in
437- // `FixedLengthRowBasedKeyValueBatch `, which gets persisted into the checkpoint data
435+ // This is a workaround for the following:
436+ // Versions prior to Spark 2.3 mistakenly appended 4 bytes to the value row in
437+ // `RowBasedKeyValueBatch`, which gets persisted into the checkpoint data
438438 valueRow.pointTo(valueRowBuffer, (valueSize / 8 ) * 8 )
439439 map.put(keyRow, valueRow)
440440 }
0 commit comments