diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala
index a469d4da8613..9509b66ffd39 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTableCommand.scala
@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.{FileSystem, Path}
 
 import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
 import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
-import org.apache.spark.sql.catalyst.catalog.{CatalogRelation, CatalogTable}
+import org.apache.spark.sql.catalyst.catalog.{CatalogRelation, CatalogTable, SimpleCatalogRelation}
 
 
 /**
@@ -41,7 +41,7 @@ case class AnalyzeTableCommand(tableName: String) extends RunnableCommand {
     val relation = EliminateSubqueryAliases(sessionState.catalog.lookupRelation(tableIdent))
 
     relation match {
-      case relation: CatalogRelation =>
+      case relation: CatalogRelation if !relation.isInstanceOf[SimpleCatalogRelation] =>
         val catalogTable: CatalogTable = relation.catalogTable
         // This method is mainly based on
         // org.apache.hadoop.hive.ql.stats.StatsUtils.getFileSizeForTable(HiveConf, Table)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index d70cae74bc6c..95654712fee5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -395,6 +395,17 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
     assert(catalog.getTableMetadata(tableIdent1) === expectedTable)
   }
 
+  test("Analyze in-memory cataloged tables(SimpleCatalogRelation)") {
+    withTable("tbl") {
+      sql("CREATE TABLE tbl(a INT, b INT) USING parquet")
+      val e = intercept[AnalysisException] {
+        sql("ANALYZE TABLE tbl COMPUTE STATISTICS")
+      }.getMessage
+      assert(e.contains("ANALYZE TABLE is only supported for Hive tables, " +
+        "but 'tbl' is a SimpleCatalogRelation"))
+    }
+  }
+
   test("create table using") {
     val catalog = spark.sessionState.catalog
     withTable("tbl") {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
index 92282420214d..df6cd568ac4f 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
@@ -594,6 +594,21 @@ class HiveDDLSuite
     }
   }
 
+  test("Analyze data source tables(LogicalRelation)") {
+    withTable("t1") {
+      withTempPath { dir =>
+        val path = dir.getCanonicalPath
+        spark.range(1).write.format("parquet").save(path)
+        sql(s"CREATE TABLE t1 USING parquet OPTIONS (PATH '$path')")
+        val e = intercept[AnalysisException] {
+          sql("ANALYZE TABLE t1 COMPUTE STATISTICS")
+        }.getMessage
+        assert(e.contains("ANALYZE TABLE is only supported for Hive tables, " +
+          "but 't1' is a LogicalRelation"))
+      }
+    }
+  }
+
   test("desc table for data source table") {
     withTable("tab1") {
       val tabName = "tab1"
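
Note: both new tests assert the message "ANALYZE TABLE is only supported for Hive tables, but '<table>' is a <nodeName>". That message comes from the fall-through case of the same match in AnalyzeTableCommand.run(), which lies outside the hunks above. Purely as an illustration of how the new guard routes SimpleCatalogRelation (and already-converted LogicalRelation) tables into that error, the match is assumed to end up roughly like the sketch below; the otherRelation case and its exact wording are not part of this patch.

    // Sketch only (not part of this diff): assumed shape of the match after the guard is added.
    relation match {
      // Hive tables keep the existing file-system based size computation.
      case relation: CatalogRelation if !relation.isInstanceOf[SimpleCatalogRelation] =>
        val catalogTable: CatalogTable = relation.catalogTable
        // ... existing totalSize / row-count computation ...

      // Everything else (SimpleCatalogRelation, LogicalRelation, ...) falls through here;
      // nodeName is the relation's class name, which is what the test assertions check.
      case otherRelation =>
        throw new AnalysisException("ANALYZE TABLE is only supported for Hive tables, " +
          s"but '${tableIdent.table}' is a ${otherRelation.nodeName}")
    }

Guarding the existing CatalogRelation case, rather than adding a dedicated SimpleCatalogRelation case, keeps the Hive code path untouched and lets in-memory catalog tables hit the same error path as unconverted data source tables.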