Skip to content

Commit 2629cd7

Browse files
loneknightpy authored and zsxwing committed
[SPARK-17711][TEST-HADOOP2.2] Fix hadoop2.2 compilation error
## What changes were proposed in this pull request?

Fix hadoop2.2 compilation error.

## How was this patch tested?

Existing tests.

cc tdas zsxwing

Author: Yu Peng <[email protected]>

Closes #15537 from loneknightpy/fix-17711.
1 parent 5f20ae0 commit 2629cd7

File tree

1 file changed

+2
-3
lines changed

1 file changed

+2
-3
lines changed

core/src/main/scala/org/apache/spark/util/Utils.scala

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,6 @@ import scala.util.control.{ControlThrowable, NonFatal}
4242
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
4343
import com.google.common.io.{ByteStreams, Files => GFiles}
4444
import com.google.common.net.InetAddresses
45-
import org.apache.commons.io.IOUtils
4645
import org.apache.commons.lang3.SystemUtils
4746
import org.apache.hadoop.conf.Configuration
4847
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
@@ -1486,10 +1485,10 @@ private[spark] object Utils extends Logging {
14861485
val gzInputStream = new GZIPInputStream(new FileInputStream(file))
14871486
val bufSize = 1024
14881487
val buf = new Array[Byte](bufSize)
1489-
var numBytes = IOUtils.read(gzInputStream, buf)
1488+
var numBytes = ByteStreams.read(gzInputStream, buf, 0, bufSize)
14901489
while (numBytes > 0) {
14911490
fileSize += numBytes
1492-
numBytes = IOUtils.read(gzInputStream, buf)
1491+
numBytes = ByteStreams.read(gzInputStream, buf, 0, bufSize)
14931492
}
14941493
fileSize
14951494
} catch {

0 commit comments

Comments (0)