diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index bd986d0138..276d5d29bf 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -825,8 +825,16 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
   def orc(paths: String*): DataFrame = format("orc").load(paths: _*)
 
   /**
-   * Returns the specified table as a `DataFrame`.
+   * Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch
+   * reading and the returned DataFrame is the batch scan query plan of this table. If it's a view,
+   * the returned DataFrame is simply the query plan of the view, which can either be a batch or
+   * streaming query plan.
    *
+   * @param tableName is either a qualified or unqualified name that designates a table or view.
+   *                  If a database is specified, it identifies the table/view from the database.
+   *                  Otherwise, it first attempts to find a temporary view with the given name
+   *                  and then match the table/view from the current database.
+   *                  Note that, the global temporary view database is also valid here.
    * @since 1.4.0
    */
   def table(tableName: String): DataFrame = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
index 592f209475..d738d617f2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
@@ -573,7 +573,10 @@ class SparkSession private(
   @transient lazy val catalog: Catalog = new CatalogImpl(self)
 
   /**
-   * Returns the specified table/view as a `DataFrame`.
+   * Returns the specified table/view as a `DataFrame`. If it's a table, it must support batch
+   * reading and the returned DataFrame is the batch scan query plan of this table. If it's a view,
+   * the returned DataFrame is simply the query plan of the view, which can either be a batch or
+   * streaming query plan.
    *
    * @param tableName is either a qualified or unqualified name that designates a table or view.
    *                  If a database is specified, it identifies the table/view from the database.
@@ -583,11 +586,7 @@ class SparkSession private(
    * @since 2.0.0
    */
   def table(tableName: String): DataFrame = {
-    table(sessionState.sqlParser.parseMultipartIdentifier(tableName))
-  }
-
-  private[sql] def table(multipartIdentifier: Seq[String]): DataFrame = {
-    Dataset.ofRows(self, UnresolvedRelation(multipartIdentifier))
+    read.table(tableName)
   }
 
   private[sql] def table(tableIdent: TableIdentifier): DataFrame = {
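
After this change, SparkSession.table simply delegates to DataFrameReader.table, so both entry points resolve names the same way (temporary views first, then the current database). Below is a minimal usage sketch, not part of the patch; the view name "people" and the local-mode setup are assumptions for illustration only.

import org.apache.spark.sql.SparkSession

object TableReadExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("TableReadExample")
      .getOrCreate()
    import spark.implicits._

    // Register a temporary view; an unqualified name resolves to it before
    // any table/view in the current database.
    Seq((1, "a"), (2, "b")).toDF("id", "value").createOrReplaceTempView("people")

    // Both calls go through the same resolution path after this patch.
    val viaSession = spark.table("people")       // SparkSession.table
    val viaReader  = spark.read.table("people")  // DataFrameReader.table
    viaSession.show()
    viaReader.show()

    spark.stop()
  }
}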