(first changed file: org.apache.spark.sql.json — DefaultSource / JSONRelation)

@@ -66,9 +66,23 @@ private[sql] class DefaultSource
     mode match {
       case SaveMode.Append =>
         sys.error(s"Append mode is not supported by ${this.getClass.getCanonicalName}")
-      case SaveMode.Overwrite =>
-        fs.delete(filesystemPath, true)
+      case SaveMode.Overwrite => {
+        var success: Boolean = false
+        try {
+          success = fs.delete(filesystemPath, true)
+        } catch {
+          case e: IOException =>
+            throw new IOException(
+              s"Unable to clear output directory ${filesystemPath.toString} prior"
+                + s" to writing to JSON table:\n${e.toString}")
+        }
+        if (!success) {
+          throw new IOException(
+            s"Unable to clear output directory ${filesystemPath.toString} prior"
+              + s" to writing to JSON table.")
+        }
yhuai (Contributor):
@yanbohappy It seems we just throw another error message here. Based on your JIRA description, I think you need to check whether delete returns true or false when the data already exists.

yanbohappy (Contributor, Author):
@yhuai Yes, this is expected. If no exception is thrown, the delete is guaranteed to have succeeded. I have investigated other places in Spark that call delete, and they do the same thing.
         true
+      }
       case SaveMode.ErrorIfExists =>
         sys.error(s"path $path already exists.")
       case SaveMode.Ignore => false
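
For reference, the contract being discussed in the thread above: Hadoop's FileSystem.delete(path, recursive) reports failure in two ways, by throwing an IOException or by returning false, so the patch checks both. A minimal standalone sketch of the same pattern (the helper name deleteOrFail and its message wording are illustrative, not part of the patch):

    import java.io.IOException

    import org.apache.hadoop.conf.Configuration
    import org.apache.hadoop.fs.{FileSystem, Path}

    // Recursively delete `dir`, turning both failure modes into an IOException.
    def deleteOrFail(dir: Path, conf: Configuration): Unit = {
      val fs: FileSystem = dir.getFileSystem(conf)
      if (fs.exists(dir)) {
        val deleted =
          try fs.delete(dir, true) // may throw, e.g. on a permission error
          catch {
            case e: IOException =>
              throw new IOException(s"Unable to clear output directory $dir:\n${e.toString}")
          }
        // delete can also signal failure through its return value alone
        if (!deleted) {
          throw new IOException(s"Unable to clear output directory $dir.")
        }
      }
    }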
@@ -109,13 +123,21 @@ private[sql] case class JSONRelation(
     val fs = filesystemPath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration)
 
     if (overwrite) {
-      try {
-        fs.delete(filesystemPath, true)
-      } catch {
-        case e: IOException =>
+      if (fs.exists(filesystemPath)) {
+        var success: Boolean = false
+        try {
+          success = fs.delete(filesystemPath, true)
+        } catch {
+          case e: IOException =>
+            throw new IOException(
+              s"Unable to clear output directory ${filesystemPath.toString} prior"
+                + s" to writing to JSON table:\n${e.toString}")
+        }
+        if (!success) {
           throw new IOException(
             s"Unable to clear output directory ${filesystemPath.toString} prior"
-              + s" to INSERT OVERWRITE a JSON table:\n${e.toString}")
+              + s" to writing to JSON table.")
+        }
       }
       // Write the data.
       data.toJSON.saveAsTextFile(path)
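
A hedged usage sketch of how this insert path is reached (assuming the jsonTable and jt temporary tables that the test suites below set up; the SQL string is illustrative):

    // INSERT OVERWRITE routes through JSONRelation.insert with overwrite = true.
    // With this patch, an undeletable output directory fails fast with
    //   java.io.IOException: Unable to clear output directory ... prior to writing to JSON table.
    // instead of the failed delete being silently ignored.
    sql("INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt")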
(second changed file: org.apache.spark.sql.sources.CreateTableAsSelectSuite)
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql.sources
 
-import java.io.File
+import java.io.{IOException, File}
 
 import org.scalatest.BeforeAndAfterAll
 
@@ -62,6 +62,29 @@ class CreateTableAsSelectSuite extends DataSourceTest with BeforeAndAfterAll {
dropTempTable("jsonTable")
}

test("CREATE TEMPORARY TABLE AS SELECT based on the file without write permission") {
val childPath = new File(path.toString, "child")
path.mkdir()
childPath.createNewFile()
path.setWritable(false)

val e = intercept[IOException] {
sql(
s"""
|CREATE TEMPORARY TABLE jsonTable
|USING org.apache.spark.sql.json.DefaultSource
|OPTIONS (
| path '${path.toString}'
|) AS
|SELECT a, b FROM jt
""".stripMargin)
sql("SELECT a, b FROM jsonTable").collect()
}
assert(e.getMessage().contains("Unable to clear output directory"))

path.setWritable(true)
}

test("create a table, drop it and create another one with the same name") {
sql(
s"""