1 file changed: +13 -1

sql/hive/src/main/scala/org/apache/spark/sql/hive/execution

@@ -445,7 +445,19 @@ case class NativeCommand(
     if (sideEffectResult.size == 0) {
       context.emptyResult
     } else {
-      val rows = sideEffectResult.map(r => new GenericRow(Array[Any](r)))
+      // TODO: Need a better way to handle the result of a native command.
+      // We may want to consider using JsonMetaDataFormatter in Hive.
+      val isDescribe = sql.trim.startsWith("describe")
+      val rows = if (isDescribe) {
+        // TODO: If we upgrade Hive to 0.13, we need to check the result of
+        // context.sessionState.isHiveServerQuery() to determine how to split the result.
+        // This method is introduced by https://issues.apache.org/jira/browse/HIVE-4545.
+        // Right now, we split every string by any number of consecutive spaces.
+        sideEffectResult.map(
+          r => r.split("\\s+")).map(r => new GenericRow(r.asInstanceOf[Array[Any]]))
+      } else {
+        sideEffectResult.map(r => new GenericRow(Array[Any](r)))
+      }
       context.sparkContext.parallelize(rows, 1)
     }
   }
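
For context, here is a standalone sketch (not part of the patch) of how the "\\s+" split used above breaks a Hive `describe` output line into columns. The sample line and object name are hypothetical; real output depends on the Hive version and table definition.

object DescribeSplitSketch {
  def main(args: Array[String]): Unit = {
    // Hive prints `describe` results as whitespace-padded, tab-separated columns,
    // e.g. column name, type, and comment (hypothetical sample line):
    val line = "key                 \tint                 \tNone"
    // Splitting on runs of whitespace yields one entry per column,
    // mirroring the r.split("\\s+") call in the patch.
    val cols: Array[String] = line.split("\\s+")
    // Prints: key, int, None
    println(cols.mkString(", "))
  }
}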