[SPARK-7927] whitespace fixes for Hive and ThriftServer.

So we can enable a whitespace enforcement rule in the style checker to save code review time.

Author: Reynold Xin <rxin@databricks.com>

Closes #6478 from rxin/whitespace-hive and squashes the following commits:

e01b0e0 [Reynold Xin] Fixed tests.
a3bba22 [Reynold Xin] [SPARK-7927] whitespace fixes for Hive and ThriftServer.

(cherry picked from commit ee6a0e12fb)
Signed-off-by: Reynold Xin <rxin@databricks.com>
This commit is contained in:
Reynold Xin 2015-05-28 18:08:56 -07:00
parent f4b135337c
commit 3b38c06f0d
14 changed files with 43 additions and 39 deletions

View file

@@ -73,7 +73,7 @@ class UISeleniumSuite
}
ignore("thrift server ui test") {
withJdbcStatement(statement =>{
withJdbcStatement { statement =>
val baseURL = s"http://localhost:$uiPort"
val queries = Seq(
@@ -97,6 +97,6 @@ class UISeleniumSuite
findAll(cssSelector("""ul table tbody tr td""")).map(_.text).toList should contain (line)
}
}
})
}
}
}

View file

@@ -546,13 +546,17 @@ private[hive] class HiveMetastoreCatalog(val client: ClientInterface, hive: Hive
* UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
* For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
*/
override def registerTable(tableIdentifier: Seq[String], plan: LogicalPlan): Unit = ???
override def registerTable(tableIdentifier: Seq[String], plan: LogicalPlan): Unit = {
throw new UnsupportedOperationException
}
/**
* UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
* For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
*/
override def unregisterTable(tableIdentifier: Seq[String]): Unit = ???
override def unregisterTable(tableIdentifier: Seq[String]): Unit = {
throw new UnsupportedOperationException
}
override def unregisterAllTables(): Unit = {}
}

View file

@@ -1171,7 +1171,8 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
// worth the number of hacks that will be required to implement it. Namely, we need to add
// some sort of mapped star expansion that would expand all child output row to be similarly
// named output expressions where some aggregate expression has been applied (i.e. First).
??? // Aggregate(groups, Star(None, First(_)) :: Nil, joinedResult)
// Aggregate(groups, Star(None, First(_)) :: Nil, joinedResult)
throw new UnsupportedOperationException
case Token(allJoinTokens(joinToken),
relation1 ::

View file

@@ -195,8 +195,7 @@ case class InsertIntoHiveTable(
// loadPartition call orders directories created on the iteration order of this map
val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
table.hiveQlTable.getPartCols().foreach{
entry=>
table.hiveQlTable.getPartCols().foreach { entry =>
orderedPartitionSpec.put(entry.getName, partitionSpec.get(entry.getName).getOrElse(""))
}
val partVals = MetaStoreUtils.getPvals(table.hiveQlTable.getPartCols, partitionSpec)