[SPARK-33656][TESTS] Add option to keep container after tests finish for DockerJDBCIntegrationSuites for debug
### What changes were proposed in this pull request?

This PR adds an option to keep containers running after DockerJDBCIntegrationSuites (e.g. DB2IntegrationSuite, PostgresIntegrationSuite) finish. The option is enabled by setting the system property `spark.test.docker.keepContainer` to `true`.

### Why are the changes needed?

If an error occurs during the tests, it is useful to keep the container around for debugging.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

I confirmed that the container is kept after the test by the following commands.

```
# With sbt
$ build/sbt -Dspark.test.docker.keepContainer=true -Pdocker-integration-tests -Phive -Phive-thriftserver package "testOnly org.apache.spark.sql.jdbc.MariaDBKrbIntegrationSuite"

# With Maven
$ build/mvn -Dspark.test.docker.keepContainer=true -Pdocker-integration-tests -Phive -Phive-thriftserver -Dtest=none -DwildcardSuites=org.apache.spark.sql.jdbc.MariaDBKrbIntegrationSuite test

$ docker container ls
```

I also confirmed that there are no regressions for any of the subclasses of `DockerJDBCIntegrationSuite` with sbt/Maven:

* MariaDBKrbIntegrationSuite
* DB2KrbIntegrationSuite
* PostgresKrbIntegrationSuite
* MySQLIntegrationSuite
* PostgresIntegrationSuite
* DB2IntegrationSuite
* MsSqlServerIntegrationSuite
* OracleIntegrationSuite
* v2.MySQLIntegrationSuite
* v2.PostgresIntegrationSuite
* v2.DB2IntegrationSuite
* v2.MsSqlServerIntegrationSuite
* v2.OracleIntegrationSuite

NOTE: `DB2IntegrationSuite`, `v2.DB2IntegrationSuite` and `DB2KrbIntegrationSuite` can fail due to a connection timeout that is too short. That is a separate issue and will be fixed in #30583.

Closes #30601 from sarutak/keepContainer.

Authored-by: Kousuke Saruta <sarutak@oss.nttdata.com>
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
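For reference, once a container has been kept, its ID can be taken from `docker container ls` as above, or looked up programmatically. Below is a minimal sketch using the same Spotify docker-client library these suites depend on; the `ListKeptContainers` object is illustrative and not part of this patch:

```
import scala.collection.JavaConverters._

import com.spotify.docker.client.DefaultDockerClient

// Illustrative helper: print the containers still running after a test run,
// so the ID of a kept container can be used for further inspection.
object ListKeptContainers {
  def main(args: Array[String]): Unit = {
    // Build a client from DOCKER_HOST and related environment variables.
    val docker = DefaultDockerClient.fromEnv().build()
    try {
      docker.listContainers().asScala.foreach { c =>
        println(s"${c.id()}  ${c.image()}  ${c.status()}")
      }
    } finally {
      docker.close()
    }
  }
}
```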
commit 91baab77f7 (parent 325abf7957)
Changes to `DockerJDBCIntegrationSuite`:

```
@@ -25,6 +25,7 @@ import scala.collection.JavaConverters._
 import scala.util.control.NonFatal
 
 import com.spotify.docker.client._
+import com.spotify.docker.client.DockerClient.ListContainersParam
 import com.spotify.docker.client.exceptions.ImageNotFoundException
 import com.spotify.docker.client.messages.{ContainerConfig, HostConfig, PortBinding}
 import org.scalatest.concurrent.Eventually
```
```
@@ -95,7 +96,9 @@ abstract class DockerJDBCIntegrationSuite extends SharedSparkSession with Eventually
 
   protected val dockerIp = DockerUtils.getDockerIp()
   val db: DatabaseOnDocker
-  val connectionTimeout = timeout(2.minutes)
+  val connectionTimeout = timeout(5.minutes)
+  val keepContainer =
+    sys.props.getOrElse("spark.test.docker.keepContainer", "false").toBoolean
 
   private var docker: DockerClient = _
   // Configure networking (necessary for boot2docker / Docker Machine)
```
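A note on the two added lines: `sys.props` mirrors the JVM system properties, so this picks up the `-Dspark.test.docker.keepContainer=true` flag passed to sbt or Maven. Scala's `String.toBoolean` accepts only `"true"`/`"false"` (case-insensitive) and throws `IllegalArgumentException` otherwise, so a typo such as `=yes` fails loudly instead of silently keeping the default:

```
// Same parsing as in the suite; defaults to false when the property is unset.
val keep = sys.props
  .getOrElse("spark.test.docker.keepContainer", "false")
  .toBoolean // "true"/"false" only; other values throw IllegalArgumentException
```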
```
@@ -176,20 +179,11 @@ abstract class DockerJDBCIntegrationSuite extends SharedSparkSession with Eventually
 
   override def afterAll(): Unit = {
     try {
-      if (docker != null) {
-        try {
-          if (containerId != null) {
-            docker.killContainer(containerId)
-            docker.removeContainer(containerId)
-          }
-        } catch {
-          case NonFatal(e) =>
-            logWarning(s"Could not stop container $containerId", e)
-        } finally {
-          docker.close()
-        }
-      }
+      cleanupContainer()
     } finally {
+      if (docker != null) {
+        docker.close()
+      }
       super.afterAll()
     }
   }
```
```
@@ -205,4 +199,23 @@ abstract class DockerJDBCIntegrationSuite extends SharedSparkSession with Eventually
    * Prepare databases and tables for testing.
    */
   def dataPreparation(connection: Connection): Unit
+
+  private def cleanupContainer(): Unit = {
+    if (docker != null && containerId != null && !keepContainer) {
+      try {
+        docker.killContainer(containerId)
+      } catch {
+        case NonFatal(e) =>
+          val exitContainerIds =
+            docker.listContainers(ListContainersParam.withStatusExited()).asScala.map(_.id())
+          if (exitContainerIds.contains(containerId)) {
+            logWarning(s"Container $containerId already stopped")
+          } else {
+            logWarning(s"Could not stop container $containerId", e)
+          }
+      } finally {
+        docker.removeContainer(containerId)
+      }
+    }
+  }
 }
```
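For context, `killContainer` fails when the container has already exited, so the catch block above distinguishes that benign case from a real failure by asking the Docker daemon for containers in the `exited` state. A standalone sketch of the same query, assuming a client built from the environment:

```
import scala.collection.JavaConverters._

import com.spotify.docker.client.DefaultDockerClient
import com.spotify.docker.client.DockerClient.ListContainersParam

val docker = DefaultDockerClient.fromEnv().build()
try {
  // IDs of containers in the "exited" state -- the same query
  // cleanupContainer() uses to detect an already-stopped container.
  val exitedIds = docker
    .listContainers(ListContainersParam.withStatusExited())
    .asScala
    .map(_.id())
  println(exitedIds.mkString("\n"))
} finally {
  docker.close()
}
```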
Changes to `pom.xml`:
```
@@ -250,6 +250,7 @@
     -->
     <spark.test.home>${session.executionRootDirectory}</spark.test.home>
     <spark.test.webdriver.chrome.driver></spark.test.webdriver.chrome.driver>
+    <spark.test.docker.keepContainer>false</spark.test.docker.keepContainer>
 
     <CodeCacheSize>1g</CodeCacheSize>
     <!-- Needed for consistent times -->
```
```
@@ -2626,6 +2627,7 @@
               <spark.ui.showConsoleProgress>false</spark.ui.showConsoleProgress>
               <spark.unsafe.exceptionOnMemoryLeak>true</spark.unsafe.exceptionOnMemoryLeak>
               <spark.test.webdriver.chrome.driver>${spark.test.webdriver.chrome.driver}</spark.test.webdriver.chrome.driver>
+              <spark.test.docker.keepContainer>${spark.test.docker.keepContainer}</spark.test.docker.keepContainer>
               <!-- Needed by sql/hive tests. -->
               <test.src.tables>__not_used__</test.src.tables>
             </systemProperties>
```
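These two `pom.xml` entries forward the flag into the forked test JVM: Maven runs the tests in a separate JVM, so a `-D` property given on the `mvn` command line reaches the tests only if it is re-exported through the test plugin's `<systemProperties>` block, as done here.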