[SPARK-36725][SQL][TESTS] Ensure HiveThriftServer2Suites to stop Thrift JDBC server on exit

### What changes were proposed in this pull request?

This PR aims to ensure that HiveThriftServer2Suites (e.g. `thriftserver.UISeleniumSuite`) stop the Thrift JDBC server on exit using a shutdown hook.
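
As a minimal sketch of that pattern (the package, object name, and the body of `stopThriftServer` below are illustrative stand-ins, not the suite's actual code):

```scala
// `ShutdownHookManager` is Spark-internal (`private[spark]`), so this
// compiles only from inside an `org.apache.spark` package, as the
// thriftserver test suites are.
package org.apache.spark.sketch

import org.apache.spark.util.ShutdownHookManager

object ShutdownHookSketch {
  // Stand-in for the suite's real cleanup method, which runs the
  // `spark-daemon.sh` stop script and deletes the test directories.
  private def stopThriftServer(): Unit = {
    println("Stopping Thrift JDBC server ...")
  }

  def main(args: Array[String]): Unit = {
    // `stopThriftServer _` eta-expands the method into a `() => Unit`.
    // The JVM runs registered shutdown hooks on normal exit and on
    // SIGINT/SIGTERM (e.g. Ctrl-C), but not on SIGKILL.
    ShutdownHookManager.addShutdownHook(stopThriftServer _)
    // ... start the Thrift JDBC server and run the tests ...
  }
}
```

With the hook in place, the cleanup runs even when `afterAll` is never reached.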

### Why are the changes needed?

Normally, HiveThriftServer2Suites stop the Thrift JDBC server via the `afterAll` method.
But if a test run is killed by a signal (e.g. Ctrl-C), `afterAll` never runs and the Thrift JDBC server process is left behind:
```
$ jps
2792969 SparkSubmit
```

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Killed `thriftserver.UISeleniumSuite` with Ctrl-C and confirmed via `jps` that no Thrift JDBC server remained.

Closes #33967 from sarutak/stop-thrift-on-exit.

Authored-by: Kousuke Saruta <sarutak@oss.nttdata.com>
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
(cherry picked from commit c36d70836d)
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
The change to `HiveThriftServer2Suites.scala`:

```
@@ -50,7 +50,7 @@ import org.apache.spark.sql.hive.test.HiveTestJars
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.internal.StaticSQLConf.HIVE_THRIFT_SERVER_SINGLESESSION
 import org.apache.spark.sql.test.ProcessTestUtils.ProcessOutputCapturer
-import org.apache.spark.util.{ThreadUtils, Utils}
+import org.apache.spark.util.{ShutdownHookManager, ThreadUtils, Utils}
 
 object TestData {
   def getTestDataFilePath(name: String): URL = {
@@ -1338,33 +1338,36 @@ abstract class HiveThriftServer2TestBase extends SparkFunSuite with BeforeAndAft
       process
     }
 
+    ShutdownHookManager.addShutdownHook(stopThriftServer _)
 
     ThreadUtils.awaitResult(serverStarted.future, SERVER_STARTUP_TIMEOUT)
   }
 
   private def stopThriftServer(): Unit = {
-    // The `spark-daemon.sh' script uses kill, which is not synchronous, have to wait for a while.
-    Utils.executeAndGetOutput(
-      command = Seq(stopScript),
-      extraEnvironment = Map("SPARK_PID_DIR" -> pidDir.getCanonicalPath))
-    Thread.sleep(3.seconds.toMillis)
+    if (pidDir.list.nonEmpty) {
+      // The `spark-daemon.sh' script uses kill, which is not synchronous, have to wait for a while.
+      Utils.executeAndGetOutput(
+        command = Seq(stopScript),
+        extraEnvironment = Map("SPARK_PID_DIR" -> pidDir.getCanonicalPath))
+      Thread.sleep(3.seconds.toMillis)
 
-    warehousePath.delete()
-    warehousePath = null
+      warehousePath.delete()
+      warehousePath = null
 
-    metastorePath.delete()
-    metastorePath = null
+      metastorePath.delete()
+      metastorePath = null
 
-    operationLogPath.delete()
-    operationLogPath = null
+      operationLogPath.delete()
+      operationLogPath = null
 
-    lScratchDir.delete()
-    lScratchDir = null
+      lScratchDir.delete()
+      lScratchDir = null
 
-    Option(logPath).foreach(_.delete())
-    logPath = null
+      Option(logPath).foreach(_.delete())
+      logPath = null
 
-    Option(logTailingProcess).foreach(_.destroy())
-    logTailingProcess = null
+      Option(logTailingProcess).foreach(_.destroy())
+      logTailingProcess = null
+    }
   }
 
   private def dumpLogs(): Unit = {
```
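
One detail worth noting: with the hook registered, `stopThriftServer` may now run twice, once from `afterAll` and once from the shutdown hook. The new `if (pidDir.list.nonEmpty)` guard makes the second invocation a no-op, because the `spark-daemon.sh` stop script removes the PID file once the server is down. A minimal sketch of that idempotent-stop pattern (the directory path and the body of `stop` are illustrative, not taken from the patch):

```scala
import java.io.File

object IdempotentStopSketch {
  // Directory where a daemon script would write one PID file per
  // running server; illustrative path, not the suite's actual pidDir.
  private val pidDir: File = new File(sys.props("java.io.tmpdir"), "pid-sketch")

  def stop(): Unit = {
    // `File.list` returns null when the directory does not exist,
    // so normalize that to an empty array before checking.
    val pids = Option(pidDir.list).getOrElse(Array.empty[String])
    if (pids.nonEmpty) {
      println(s"Stopping ${pids.length} server(s) and cleaning up ...")
      // Simulate the stop script removing the PID files; afterwards
      // any further call is a no-op.
      pidDir.listFiles.foreach(_.delete())
    }
  }
}
```

Calling `stop()` again after a successful stop finds an empty directory and returns immediately, so invoking the cleanup from both `afterAll` and the shutdown hook is safe.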