Commit c662d2c

address comments.
1 parent f305c4c commit c662d2c

File tree: 7 files changed (+47, -58 lines)

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/NoSuchItemException.scala

Lines changed: 0 additions & 3 deletions
@@ -30,9 +30,6 @@ class NoSuchDatabaseException(db: String) extends AnalysisException(s"Database '
 class NoSuchTableException(db: String, table: String)
   extends AnalysisException(s"Table or view '$table' not found in database '$db'")
 
-class NoSuchTempViewException(table: String)
-  extends AnalysisException(s"Temporary view '$table' not found")
-
 class NoSuchPartitionException(
     db: String,
     table: String,

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala

Lines changed: 15 additions & 23 deletions
@@ -245,29 +245,6 @@ class SessionCatalog(
     externalCatalog.alterTable(newTableDefinition)
   }
 
-  /**
-   * Retrieve the metadata of an existing temporary view.
-   * If the temporary view does not exist, a [[NoSuchTempViewException]] is thrown.
-   */
-  def getTempViewMetadata(name: String): CatalogTable = {
-    getTempViewMetadataOption(name).getOrElse(throw new NoSuchTempViewException(name))
-  }
-
-  /**
-   * Retrieve the metadata of an existing temporary view.
-   * If the temporary view does not exist, return None.
-   */
-  def getTempViewMetadataOption(name: String): Option[CatalogTable] = synchronized {
-    val table = formatTableName(name)
-    getTempView(table).map { plan =>
-      CatalogTable(
-        identifier = TableIdentifier(table),
-        tableType = CatalogTableType.VIEW,
-        storage = CatalogStorageFormat.empty,
-        schema = plan.output.toStructType)
-    }
-  }
-
   /**
    * Retrieve the metadata of an existing permanent table/view. If no database is specified,
    * assume the table/view is in the current database. If the specified table/view is not found
@@ -369,6 +346,21 @@ class SessionCatalog(
     tempTables.remove(formatTableName(name))
   }
 
+  /**
+   * Retrieve the metadata of an existing temporary view.
+   * If the temporary view does not exist, return None.
+   */
+  def getTempViewMetadataOption(name: String): Option[CatalogTable] = synchronized {
+    val table = formatTableName(name)
+    getTempView(table).map { plan =>
+      CatalogTable(
+        identifier = TableIdentifier(table),
+        tableType = CatalogTableType.VIEW,
+        storage = CatalogStorageFormat.empty,
+        schema = plan.output.toStructType)
+    }
+  }
+
   // -------------------------------------------------------------
   // | Methods that interact with temporary and metastore tables |
   // -------------------------------------------------------------
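
With NoSuchTempViewException removed, getTempViewMetadataOption signals a missing temporary view with None instead of throwing, so call sites branch on the returned Option. A minimal sketch of the two call-site patterns this commit relies on (the catalog handle and view name here are illustrative, not taken from the diff):

    val catalog = spark.sessionState.catalog

    // A missing view is now a None rather than an exception:
    catalog.getTempViewMetadataOption("view1") match {
      case Some(metadata) => println(s"temp view schema: ${metadata.schema}")
      case None => println("temporary view 'view1' does not exist")
    }

    // When the view is known to exist (e.g. it was just created), .get is used
    // directly, as in the updated HiveDDLSuite; .get on None throws
    // NoSuchElementException instead of the old NoSuchTempViewException.
    val sourceTable = catalog.getTempViewMetadataOption("view1").get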

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala

Lines changed: 5 additions & 5 deletions
@@ -444,7 +444,7 @@ class SessionCatalogSuite extends SparkFunSuite {
     assert(!catalog.tableExists(TableIdentifier("view1", Some("default"))))
   }
 
-  test("getTableMetadata and getTempViewMetadata on temporary views") {
+  test("getTableMetadata and getTempViewMetadataOption on temporary views") {
     val catalog = new SessionCatalog(newBasicCatalog())
     val tempTable = Range(1, 10, 2, 10)
     val m = intercept[AnalysisException] {
@@ -457,12 +457,12 @@ class SessionCatalogSuite extends SparkFunSuite {
     }.getMessage
     assert(m2.contains("Table or view 'view1' not found in database 'default'"))
 
-    intercept[NoSuchTempViewException] {
-      catalog.getTempViewMetadata("view1")
-    }.getMessage
+    assert(catalog.getTempViewMetadataOption("view1").isEmpty,
+      "the temporary view `view1` should not exist")
 
     catalog.createTempView("view1", tempTable, overrideIfExists = false)
-    assert(catalog.getTempViewMetadata("view1").identifier === TableIdentifier("view1"))
+    assert(catalog.getTempViewMetadataOption("view1").get.identifier === TableIdentifier("view1"),
+      "the temporary view `view1` should exist")
 
     intercept[NoSuchTableException] {
       catalog.getTableMetadata(TableIdentifier("view1"))

sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala

Lines changed: 2 additions & 2 deletions
@@ -39,8 +39,8 @@ case class AnalyzeTableCommand(tableName: String, noscan: Boolean = true) extend
     val sessionState = sparkSession.sessionState
     val tableIdent = sessionState.sqlParser.parseTableIdentifier(tableName)
     val db = tableIdent.database.getOrElse(sessionState.catalog.getCurrentDatabase)
-    val qualifiedName = TableIdentifier(tableIdent.table, Some(db))
-    val relation = EliminateSubqueryAliases(sessionState.catalog.lookupRelation(qualifiedName))
+    val tableIdentwithDB = TableIdentifier(tableIdent.table, Some(db))
+    val relation = EliminateSubqueryAliases(sessionState.catalog.lookupRelation(tableIdentwithDB))
 
     relation match {
       case relation: CatalogRelation =>

sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala

Lines changed: 5 additions & 5 deletions
@@ -472,19 +472,19 @@ case class AlterTableRecoverPartitionsCommand(
   override def run(spark: SparkSession): Seq[Row] = {
     val catalog = spark.sessionState.catalog
     val table = catalog.getTableMetadata(tableName)
-    val qualifiedName = table.identifier.quotedString
+    val tableIdentWithDB = table.identifier.quotedString
     DDLUtils.verifyAlterTableType(catalog, table, isView = false)
     if (DDLUtils.isDatasourceTable(table)) {
       throw new AnalysisException(
-        s"Operation not allowed: $cmd on datasource tables: $qualifiedName")
+        s"Operation not allowed: $cmd on datasource tables: $tableIdentWithDB")
     }
     if (table.partitionColumnNames.isEmpty) {
       throw new AnalysisException(
-        s"Operation not allowed: $cmd only works on partitioned tables: $qualifiedName")
+        s"Operation not allowed: $cmd only works on partitioned tables: $tableIdentWithDB")
     }
     if (table.storage.locationUri.isEmpty) {
-      throw new AnalysisException(
-        s"Operation not allowed: $cmd only works on table with location provided: $qualifiedName")
+      throw new AnalysisException(s"Operation not allowed: $cmd only works on table with " +
+        s"location provided: $tableIdentWithDB")
     }
 
     val root = new Path(table.storage.locationUri.get)
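
The renames from qualifiedName to tableIdentWithDB (and tableIdentwithDB elsewhere in this commit) describe the value more precisely: table.identifier.quotedString is a database-qualified, backtick-quoted identifier, not a bare table name. A small illustration with a made-up identifier (not taken from the diff):

    import org.apache.spark.sql.catalyst.TableIdentifier

    val ident = TableIdentifier("events", Some("default"))
    // quotedString prefixes the database and backtick-quotes each part
    println(ident.quotedString)   // `default`.`events`
    println(ident.unquotedString) // default.events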

sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala

Lines changed: 19 additions & 19 deletions
@@ -219,37 +219,37 @@ case class LoadDataCommand(
   override def run(sparkSession: SparkSession): Seq[Row] = {
     val catalog = sparkSession.sessionState.catalog
     val targetTable = catalog.getTableMetadata(table)
-    val qualifiedName = targetTable.identifier.quotedString
+    val tableIdentwithDB = targetTable.identifier.quotedString
 
     if (targetTable.tableType == CatalogTableType.VIEW) {
-      throw new AnalysisException(s"Target table in LOAD DATA cannot be a view: $qualifiedName")
+      throw new AnalysisException(s"Target table in LOAD DATA cannot be a view: $tableIdentwithDB")
     }
     if (DDLUtils.isDatasourceTable(targetTable)) {
       throw new AnalysisException(
-        s"LOAD DATA is not supported for datasource tables: $qualifiedName")
+        s"LOAD DATA is not supported for datasource tables: $tableIdentwithDB")
     }
     if (targetTable.partitionColumnNames.nonEmpty) {
       if (partition.isEmpty) {
-        throw new AnalysisException(s"LOAD DATA target table $qualifiedName is partitioned, " +
+        throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is partitioned, " +
           s"but no partition spec is provided")
       }
       if (targetTable.partitionColumnNames.size != partition.get.size) {
-        throw new AnalysisException(s"LOAD DATA target table $qualifiedName is partitioned, " +
+        throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is partitioned, " +
           s"but number of columns in provided partition spec (${partition.get.size}) " +
           s"do not match number of partitioned columns in table " +
           s"(s${targetTable.partitionColumnNames.size})")
       }
       partition.get.keys.foreach { colName =>
         if (!targetTable.partitionColumnNames.contains(colName)) {
-          throw new AnalysisException(s"LOAD DATA target table $qualifiedName is partitioned, " +
+          throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is partitioned, " +
             s"but the specified partition spec refers to a column that is not partitioned: " +
             s"'$colName'")
         }
       }
     } else {
       if (partition.nonEmpty) {
-        throw new AnalysisException(s"LOAD DATA target table $qualifiedName is not partitioned, " +
-          s"but a partition spec was provided.")
+        throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is not " +
+          s"partitioned, but a partition spec was provided.")
       }
     }
 
@@ -338,26 +338,26 @@ case class TruncateTableCommand(
   override def run(spark: SparkSession): Seq[Row] = {
     val catalog = spark.sessionState.catalog
     val table = catalog.getTableMetadata(tableName)
-    val qualifiedName = table.identifier.quotedString
+    val tableIdentwithDB = table.identifier.quotedString
 
     if (table.tableType == CatalogTableType.EXTERNAL) {
       throw new AnalysisException(
-        s"Operation not allowed: TRUNCATE TABLE on external tables: $qualifiedName")
+        s"Operation not allowed: TRUNCATE TABLE on external tables: $tableIdentwithDB")
     }
     if (table.tableType == CatalogTableType.VIEW) {
       throw new AnalysisException(
-        s"Operation not allowed: TRUNCATE TABLE on views: $qualifiedName")
+        s"Operation not allowed: TRUNCATE TABLE on views: $tableIdentwithDB")
     }
     val isDatasourceTable = DDLUtils.isDatasourceTable(table)
     if (isDatasourceTable && partitionSpec.isDefined) {
       throw new AnalysisException(
         s"Operation not allowed: TRUNCATE TABLE ... PARTITION is not supported " +
-          s"for tables created using the data sources API: $qualifiedName")
+          s"for tables created using the data sources API: $tableIdentwithDB")
     }
     if (table.partitionColumnNames.isEmpty && partitionSpec.isDefined) {
       throw new AnalysisException(
         s"Operation not allowed: TRUNCATE TABLE ... PARTITION is not supported " +
-          s"for tables that are not partitioned: $qualifiedName")
+          s"for tables that are not partitioned: $tableIdentwithDB")
     }
     val locations =
       if (isDatasourceTable) {
@@ -378,7 +378,7 @@ case class TruncateTableCommand(
       } catch {
         case NonFatal(e) =>
           throw new AnalysisException(
-            s"Failed to truncate table $qualifiedName when removing data of the path: $path " +
+            s"Failed to truncate table $tableIdentwithDB when removing data of the path: $path " +
              s"because of ${e.toString}")
       }
     }
@@ -391,7 +391,7 @@ case class TruncateTableCommand(
       spark.sharedState.cacheManager.uncacheQuery(spark.table(table.identifier))
     } catch {
       case NonFatal(e) =>
-        log.warn(s"Exception when attempting to uncache table $qualifiedName", e)
+        log.warn(s"Exception when attempting to uncache table $tableIdentwithDB", e)
     }
     Seq.empty[Row]
   }
@@ -646,7 +646,7 @@ case class ShowPartitionsCommand(
   override def run(sparkSession: SparkSession): Seq[Row] = {
     val catalog = sparkSession.sessionState.catalog
     val table = catalog.getTableMetadata(tableName)
-    val qualifiedName = table.identifier.quotedString
+    val tableIdentWithDB = table.identifier.quotedString
 
     /**
      * Validate and throws an [[AnalysisException]] exception under the following conditions:
@@ -655,17 +655,17 @@ case class ShowPartitionsCommand(
      * 3. If it is a view.
      */
     if (table.tableType == VIEW) {
-      throw new AnalysisException(s"SHOW PARTITIONS is not allowed on a view: $qualifiedName")
+      throw new AnalysisException(s"SHOW PARTITIONS is not allowed on a view: $tableIdentWithDB")
     }
 
     if (table.partitionColumnNames.isEmpty) {
       throw new AnalysisException(
-        s"SHOW PARTITIONS is not allowed on a table that is not partitioned: $qualifiedName")
+        s"SHOW PARTITIONS is not allowed on a table that is not partitioned: $tableIdentWithDB")
     }
 
     if (DDLUtils.isDatasourceTable(table)) {
       throw new AnalysisException(
-        s"SHOW PARTITIONS is not allowed on a datasource table: $qualifiedName")
+        s"SHOW PARTITIONS is not allowed on a datasource table: $tableIdentWithDB")
     }
 
     /**

sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -678,7 +678,7 @@ class HiveDDLSuite
       .createTempView(sourceViewName)
     sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName")
 
-    val sourceTable = spark.sessionState.catalog.getTempViewMetadata(sourceViewName)
+    val sourceTable = spark.sessionState.catalog.getTempViewMetadataOption(sourceViewName).get
     val targetTable = spark.sessionState.catalog.getTableMetadata(
       TableIdentifier(targetTabName, Some("default")))
 