[SPARK-31118][K8S][DOC] Add version information to the configuration of K8S

### What changes were proposed in this pull request?
Add version information to the configuration of `K8S`.

I sorted out the information shown below.

Item name | Since version | JIRA ID | Commit ID | Note
-- | -- | -- | -- | --
spark.kubernetes.context | 3.0.0 | SPARK-25887 | c542c247bbfe1214c0bf81076451718a9e8931dc#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.master | 3.0.0 | SPARK-30371 | f14061c6a4729ad419902193aa23575d8f17f597#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.namespace | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.container.image | 2.3.0 | SPARK-22994 | b94debd2b01b87ef1d2a34d48877e38ade0969e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.container.image | 2.3.0 | SPARK-22807 | fb3636b482be3d0940345b1528c1d5090bbc25e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.container.image | 2.3.0 | SPARK-22807 | fb3636b482be3d0940345b1528c1d5090bbc25e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.container.image.pullPolicy | 2.3.0 | SPARK-22807 | fb3636b482be3d0940345b1528c1d5090bbc25e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.container.image.pullSecrets | 2.4.0 | SPARK-23668 | cccaaa14ad775fb981e501452ba2cc06ff5c0f0a#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.submission.requestTimeout | 3.0.0 | SPARK-27023 | e9e8bb33ef9ad785473ded168bc85867dad4ee70#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.submission.connectionTimeout | 3.0.0 | SPARK-27023 | e9e8bb33ef9ad785473ded168bc85867dad4ee70#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.requestTimeout | 3.0.0 | SPARK-27023 | e9e8bb33ef9ad785473ded168bc85867dad4ee70#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.connectionTimeout | 3.0.0 | SPARK-27023 | e9e8bb33ef9ad785473ded168bc85867dad4ee70#diff-6e882d5561424e7e6651eb46f10104b8 |  
KUBERNETES_AUTH_DRIVER_CONF_PREFIX.serviceAccountName | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 | The prefix resolves to spark.kubernetes.authenticate.driver
KUBERNETES_AUTH_EXECUTOR_CONF_PREFIX.serviceAccountName | 3.1.0 | SPARK-30122 | f9f06eee9853ad4b6458ac9d31233e729a1ca226#diff-6e882d5561424e7e6651eb46f10104b8 | The prefix resolves to spark.kubernetes.authenticate.executor
spark.kubernetes.driver.limit.cores | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.request.cores | 3.0.0 | SPARK-27754 | 1a8c09334db87b0e938c38cd6b59d326bdcab3c3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.submitInDriver | 2.4.0 | SPARK-22839 | f15906da153f139b698e192ec6f82f078f896f1e#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.limit.cores | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.scheduler.name | 3.0.0 | SPARK-29436 | f800fa383131559c4e841bf062c9775d09190935#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.request.cores | 2.4.0 | SPARK-23285 | fe2b7a4568d65a62da6e6eb00fff05f248b4332c#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.pod.name | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.resourceNamePrefix | 3.0.0 | SPARK-25876 | 6be272b75b4ae3149869e19df193675cc4117763#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.podNamePrefix | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.allocation.batch.size | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.allocation.batch.delay | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.lostCheck.maxAttempts | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.submission.waitAppCompletion | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.report.interval | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.apiPollingInterval | 2.4.0 | SPARK-24248 | 270a9a3cac25f3e799460320d0fc94ccd7ecfaea#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.eventProcessingInterval | 2.4.0 | SPARK-24248 | 270a9a3cac25f3e799460320d0fc94ccd7ecfaea#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.memoryOverheadFactor | 2.4.0 | SPARK-23984 | 1a644afbac35c204f9ad55f86999319a9ab458c6#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.pyspark.pythonVersion | 2.4.0 | SPARK-23984 | a791c29bd824adadfb2d85594bc8dad4424df936#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.kerberos.krb5.path | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.kerberos.krb5.configMapName | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.hadoop.configMapName | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.kerberos.tokenSecret.name | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.kerberos.tokenSecret.itemKey | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.resource.type | 2.4.1 | SPARK-25021 | 9031c784847353051bc0978f63ef4146ae9095ff#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.local.dirs.tmpfs | 3.0.0 | SPARK-25262 | da6fa3828bb824b65f50122a8a0a0d4741551257#diff-6e882d5561424e7e6651eb46f10104b8 | Exists in branch-3.0, but the pom.xml at the time of the commit was 2.4.0-SNAPSHOT
spark.kubernetes.driver.podTemplateFile | 3.0.0 | SPARK-24434 | f6cc354d83c2c9a757f9b507aadd4dbdc5825cca#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.podTemplateFile | 3.0.0 | SPARK-24434 | f6cc354d83c2c9a757f9b507aadd4dbdc5825cca#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.podTemplateContainerName | 3.0.0 | SPARK-24434 | f6cc354d83c2c9a757f9b507aadd4dbdc5825cca#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.podTemplateContainerName | 3.0.0 | SPARK-24434 | f6cc354d83c2c9a757f9b507aadd4dbdc5825cca#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.deleteOnTermination | 3.0.0 | SPARK-25515 | 0c2935b01def8a5f631851999d9c2d57b63763e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.dynamicAllocation.deleteGracePeriod | 3.0.0 | SPARK-28487 | 0343854f54b48b206ca434accec99355011560c2#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.appKillPodDeletionGracePeriod | 3.0.0 | SPARK-24793 | 05168e725d2a17c4164ee5f9aa068801ec2454f4#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.file.upload.path | 3.0.0 | SPARK-23153 | 5e74570c8f5e7dfc1ca1c53c177827c5cea57bf1#diff-6e882d5561424e7e6651eb46f10104b8 |  
The following entries appear in the documentation |   |   |   |  
spark.kubernetes.authenticate.submission.caCertFile | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.submission.clientKeyFile | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.submission.clientCertFile | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.submission.oauthToken | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.submission.oauthTokenFile | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.caCertFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.clientKeyFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.clientCertFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.oauthToken | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.oauthTokenFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.mounted.caCertFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.mounted.clientKeyFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.mounted.clientCertFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.driver.mounted.oauthTokenFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.caCertFile | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.clientKeyFile | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.clientCertFile | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.oauthToken | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.authenticate.oauthTokenFile | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.label.[LabelName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.annotation.[AnnotationName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.label.[LabelName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.annotation.[AnnotationName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.node.selector.[labelKey] | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driverEnv.[EnvironmentVariableName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.secrets.[SecretName] | 2.3.0 | SPARK-22757 | 171f6ddadc6185ffcc6ad82e5f48952fb49095b2#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.secrets.[SecretName] | 2.3.0 | SPARK-22757 | 171f6ddadc6185ffcc6ad82e5f48952fb49095b2#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.secretKeyRef.[EnvName] | 2.4.0 | SPARK-24232 | 21e1fc7d4aed688d7b685be6ce93f76752159c98#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.secretKeyRef.[EnvName] | 2.4.0 | SPARK-24232 | 21e1fc7d4aed688d7b685be6ce93f76752159c98#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.path | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.subPath | 3.0.0 | SPARK-25960 | 3df307aa515b3564686e75d1b71754bbcaaf2dec#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.readOnly | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].options.[OptionName] | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-b5527f236b253e0d9f5db5164bdb43e9 |  
spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.path | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.subPath | 3.0.0 | SPARK-25960 | 3df307aa515b3564686e75d1b71754bbcaaf2dec#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.readOnly | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-6e882d5561424e7e6651eb46f10104b8 |  
spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].options.[OptionName] | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-b5527f236b253e0d9f5db5164bdb43e9 |  
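
Each "Since version" above corresponds to a `.version()` call added to the matching `ConfigBuilder` entry in `Config.scala`. For example, the namespace entry (taken from the diff below) becomes:

```scala
val KUBERNETES_NAMESPACE =
  ConfigBuilder("spark.kubernetes.namespace")
    .doc("The namespace that will be used for running the driver and executor pods.")
    .version("2.3.0") // SPARK-18278
    .stringConf
    .createWithDefault("default")
```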

### Why are the changes needed?
Supplements the configuration with version information.

### Does this PR introduce any user-facing change?
No.

### How was this patch tested?
Existing unit tests.

Closes #27875 from beliefer/add-version-to-k8s-config.

Authored-by: beliefer <beliefer@163.com>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>

docs/running-on-kubernetes.md

@@ -494,7 +494,7 @@ See the [configuration page](configuration.html) for information on Spark config
#### Spark Properties
<table class="table">
<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
<tr>
<td><code>spark.kubernetes.context</code></td>
<td><code>(none)</code></td>
@@ -505,6 +505,7 @@ See the [configuration page](configuration.html) for information on Spark config
auto-configured settings can be overridden by the use of other Spark
configuration properties e.g. <code>spark.kubernetes.namespace</code>.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.master</code></td>
@@ -512,6 +513,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
The internal Kubernetes master (API server) address to be used for driver to request executors.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.namespace</code></td>
@@ -519,6 +521,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
The namespace that will be used for running the driver and executor pods.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.container.image</code></td>
@@ -529,6 +532,7 @@ See the [configuration page](configuration.html) for information on Spark config
This configuration is required and must be provided by the user, unless explicit
images are provided for each different container type.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.container.image</code></td>
@@ -536,6 +540,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Custom container image to use for the driver.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.container.image</code></td>
@@ -543,6 +548,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Custom container image to use for executors.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.container.image.pullPolicy</code></td>
@@ -550,6 +556,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Container image pull policy used when pulling images within Kubernetes.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.container.image.pullSecrets</code></td>
@@ -557,6 +564,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Comma separated list of Kubernetes secrets used to pull images from private image registries.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.allocation.batch.size</code></td>
@@ -564,6 +572,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Number of pods to launch at once in each round of executor pod allocation.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.allocation.batch.delay</code></td>
@@ -572,6 +581,7 @@ See the [configuration page](configuration.html) for information on Spark config
Time to wait between each round of executor pod allocation. Specifying values less than 1 second may lead to
excessive CPU usage on the spark driver.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.submission.caCertFile</code></td>
@@ -581,6 +591,7 @@ See the [configuration page](configuration.html) for information on Spark config
must be located on the submitting machine's disk. Specify this as a path as opposed to a URI (i.e. do not provide
a scheme). In client mode, use <code>spark.kubernetes.authenticate.caCertFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.submission.clientKeyFile</code></td>
@@ -590,6 +601,7 @@ See the [configuration page](configuration.html) for information on Spark config
must be located on the submitting machine's disk. Specify this as a path as opposed to a URI (i.e. do not provide
a scheme). In client mode, use <code>spark.kubernetes.authenticate.clientKeyFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.submission.clientCertFile</code></td>
@@ -599,6 +611,7 @@ See the [configuration page](configuration.html) for information on Spark config
file must be located on the submitting machine's disk. Specify this as a path as opposed to a URI (i.e. do not
provide a scheme). In client mode, use <code>spark.kubernetes.authenticate.clientCertFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.submission.oauthToken</code></td>
@@ -608,6 +621,7 @@ See the [configuration page](configuration.html) for information on Spark config
that unlike the other authentication options, this is expected to be the exact string value of the token to use for
the authentication. In client mode, use <code>spark.kubernetes.authenticate.oauthToken</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.submission.oauthTokenFile</code></td>
@@ -617,6 +631,7 @@ See the [configuration page](configuration.html) for information on Spark config
This file must be located on the submitting machine's disk. Specify this as a path as opposed to a URI (i.e. do not
provide a scheme). In client mode, use <code>spark.kubernetes.authenticate.oauthTokenFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.caCertFile</code></td>
@@ -627,6 +642,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify this as a path as opposed to a URI (i.e. do not provide a scheme). In client mode, use
<code>spark.kubernetes.authenticate.caCertFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.clientKeyFile</code></td>
@@ -637,6 +653,7 @@ See the [configuration page](configuration.html) for information on Spark config
a Kubernetes secret. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
In client mode, use <code>spark.kubernetes.authenticate.clientKeyFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.clientCertFile</code></td>
@@ -647,6 +664,7 @@ See the [configuration page](configuration.html) for information on Spark config
driver pod as a Kubernetes secret. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
In client mode, use <code>spark.kubernetes.authenticate.clientCertFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.oauthToken</code></td>
@@ -657,6 +675,7 @@ See the [configuration page](configuration.html) for information on Spark config
the token to use for the authentication. This token value is uploaded to the driver pod as a Kubernetes secret.
In client mode, use <code>spark.kubernetes.authenticate.oauthToken</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.oauthTokenFile</code></td>
@@ -667,6 +686,7 @@ See the [configuration page](configuration.html) for information on Spark config
the token to use for the authentication. This token value is uploaded to the driver pod as a secret. In client mode, use
<code>spark.kubernetes.authenticate.oauthTokenFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.mounted.caCertFile</code></td>
@@ -677,6 +697,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify this as a path as opposed to a URI (i.e. do not provide a scheme). In client mode, use
<code>spark.kubernetes.authenticate.caCertFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.mounted.clientKeyFile</code></td>
@@ -687,6 +708,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify this as a path as opposed to a URI (i.e. do not provide a scheme). In client mode, use
<code>spark.kubernetes.authenticate.clientKeyFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.mounted.clientCertFile</code></td>
@@ -697,6 +719,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify this as a path as opposed to a URI (i.e. do not provide a scheme). In client mode, use
<code>spark.kubernetes.authenticate.clientCertFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.mounted.oauthTokenFile</code></td>
@@ -707,6 +730,7 @@ See the [configuration page](configuration.html) for information on Spark config
Note that unlike the other authentication options, this file must contain the exact string value of the token to use
for the authentication. In client mode, use <code>spark.kubernetes.authenticate.oauthTokenFile</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.driver.serviceAccountName</code></td>
@@ -716,6 +740,7 @@ See the [configuration page](configuration.html) for information on Spark config
executor pods from the API server. Note that this cannot be specified alongside a CA cert file, client key file,
client cert file, and/or OAuth token. In client mode, use <code>spark.kubernetes.authenticate.serviceAccountName</code> instead.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.caCertFile</code></td>
@@ -724,6 +749,7 @@ See the [configuration page](configuration.html) for information on Spark config
In client mode, path to the CA cert file for connecting to the Kubernetes API server over TLS when
requesting executors. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.clientKeyFile</code></td>
@@ -732,6 +758,7 @@ See the [configuration page](configuration.html) for information on Spark config
In client mode, path to the client key file for authenticating against the Kubernetes API server
when requesting executors. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.clientCertFile</code></td>
@@ -740,6 +767,7 @@ See the [configuration page](configuration.html) for information on Spark config
In client mode, path to the client cert file for authenticating against the Kubernetes API server
when requesting executors. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.oauthToken</code></td>
@@ -749,6 +777,7 @@ See the [configuration page](configuration.html) for information on Spark config
requesting executors. Note that unlike the other authentication options, this must be the exact string value of
the token to use for the authentication.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.authenticate.oauthTokenFile</code></td>
@@ -757,6 +786,7 @@ See the [configuration page](configuration.html) for information on Spark config
In client mode, path to the file containing the OAuth token to use when authenticating against the Kubernetes API
server when requesting executors.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.label.[LabelName]</code></td>
@@ -767,6 +797,7 @@ See the [configuration page](configuration.html) for information on Spark config
Note that Spark also adds its own labels to the driver pod
for bookkeeping purposes.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.annotation.[AnnotationName]</code></td>
@@ -775,6 +806,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add the annotation specified by <code>AnnotationName</code> to the driver pod.
For example, <code>spark.kubernetes.driver.annotation.something=true</code>.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.label.[LabelName]</code></td>
@@ -785,6 +817,7 @@ See the [configuration page](configuration.html) for information on Spark config
Note that Spark also adds its own labels to the executor pod
for bookkeeping purposes.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.annotation.[AnnotationName]</code></td>
@@ -793,6 +826,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add the annotation specified by <code>AnnotationName</code> to the executor pods.
For example, <code>spark.kubernetes.executor.annotation.something=true</code>.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.pod.name</code></td>
@@ -804,6 +838,7 @@ See the [configuration page](configuration.html) for information on Spark config
value in client mode allows the driver to become the owner of its executor pods, which in turn allows the executor
pods to be garbage collected by the cluster.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.lostCheck.maxAttempts</code></td>
@@ -813,6 +848,7 @@ See the [configuration page](configuration.html) for information on Spark config
The loss reason is used to ascertain whether the executor failure is due to a framework or an application error
which in turn decides whether the executor is removed and replaced, or placed into a failed state for debugging.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.submission.waitAppCompletion</code></td>
@@ -821,6 +857,7 @@ See the [configuration page](configuration.html) for information on Spark config
In cluster mode, whether to wait for the application to finish before exiting the launcher process. When changed to
false, the launcher has a "fire-and-forget" behavior when launching the Spark job.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.report.interval</code></td>
@@ -828,6 +865,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Interval between reports of the current Spark job status in cluster mode.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.request.cores</code></td>
@@ -837,6 +875,7 @@ See the [configuration page](configuration.html) for information on Spark config
Example values include 0.1, 500m, 1.5, 5, etc., with the definition of cpu units documented in <a href="https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#cpu-units">CPU units</a>.
This takes precedence over <code>spark.driver.cores</code> for specifying the driver pod cpu request if set.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.limit.cores</code></td>
@@ -844,6 +883,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Specify a hard cpu <a href="https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container">limit</a> for the driver pod.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.request.cores</code></td>
@@ -854,6 +894,7 @@ See the [configuration page](configuration.html) for information on Spark config
This is distinct from <code>spark.executor.cores</code>: it is only used and takes precedence over <code>spark.executor.cores</code> for specifying the executor pod cpu request if set. Task
parallelism, e.g., number of tasks an executor can run concurrently is not affected by this.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.limit.cores</code></td>
@@ -861,6 +902,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Specify a hard cpu <a href="https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container">limit</a> for each executor pod launched for the Spark Application.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.node.selector.[labelKey]</code></td>
@@ -871,6 +913,7 @@ See the [configuration page](configuration.html) for information on Spark config
will result in the driver pod and executors having a node selector with key <code>identifier</code> and value
<code>myIdentifier</code>. Multiple node selector keys can be added by setting multiple configurations with this prefix.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driverEnv.[EnvironmentVariableName]</code></td>
@@ -879,6 +922,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add the environment variable specified by <code>EnvironmentVariableName</code> to
the Driver process. The user can specify multiple of these to set multiple environment variables.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.secrets.[SecretName]</code></td>
@@ -887,6 +931,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add the <a href="https://kubernetes.io/docs/concepts/configuration/secret/">Kubernetes Secret</a> named <code>SecretName</code> to the driver pod on the path specified in the value. For example,
<code>spark.kubernetes.driver.secrets.spark-secret=/etc/secrets</code>.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.secrets.[SecretName]</code></td>
@@ -895,6 +940,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add the <a href="https://kubernetes.io/docs/concepts/configuration/secret/">Kubernetes Secret</a> named <code>SecretName</code> to the executor pod on the path specified in the value. For example,
<code>spark.kubernetes.executor.secrets.spark-secret=/etc/secrets</code>.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.secretKeyRef.[EnvName]</code></td>
@@ -903,6 +949,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add as an environment variable to the driver container with name EnvName (case sensitive), the value referenced by key <code> key </code> in the data of the referenced <a href="https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables">Kubernetes Secret</a>. For example,
<code>spark.kubernetes.driver.secretKeyRef.ENV_VAR=spark-secret:key</code>.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.secretKeyRef.[EnvName]</code></td>
@@ -911,6 +958,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add as an environment variable to the executor container with name EnvName (case sensitive), the value referenced by key <code> key </code> in the data of the referenced <a href="https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables">Kubernetes Secret</a>. For example,
<code>spark.kubernetes.executor.secretKeyRef.ENV_VAR=spark-secret:key</code>.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.path</code></td>
@@ -919,6 +967,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add the <a href="https://kubernetes.io/docs/concepts/storage/volumes/">Kubernetes Volume</a> named <code>VolumeName</code> of the <code>VolumeType</code> type to the driver pod on the path specified in the value. For example,
<code>spark.kubernetes.driver.volumes.persistentVolumeClaim.checkpointpvc.mount.path=/checkpoint</code>.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.subPath</code></td>
@@ -927,6 +976,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specifies a <a href="https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath">subpath</a> to be mounted from the volume into the driver pod.
<code>spark.kubernetes.driver.volumes.persistentVolumeClaim.checkpointpvc.mount.subPath=checkpoint</code>.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.readOnly</code></td>
@@ -935,6 +985,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify if the mounted volume is read only or not. For example,
<code>spark.kubernetes.driver.volumes.persistentVolumeClaim.checkpointpvc.mount.readOnly=false</code>.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].options.[OptionName]</code></td>
@@ -943,6 +994,7 @@ See the [configuration page](configuration.html) for information on Spark config
Configure <a href="https://kubernetes.io/docs/concepts/storage/volumes/">Kubernetes Volume</a> options passed to the Kubernetes with <code>OptionName</code> as key having specified value, must conform with Kubernetes option format. For example,
<code>spark.kubernetes.driver.volumes.persistentVolumeClaim.checkpointpvc.options.claimName=spark-pvc-claim</code>.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.path</code></td>
@@ -951,6 +1003,7 @@ See the [configuration page](configuration.html) for information on Spark config
Add the <a href="https://kubernetes.io/docs/concepts/storage/volumes/">Kubernetes Volume</a> named <code>VolumeName</code> of the <code>VolumeType</code> type to the executor pod on the path specified in the value. For example,
<code>spark.kubernetes.executor.volumes.persistentVolumeClaim.checkpointpvc.mount.path=/checkpoint</code>.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.subPath</code></td>
@@ -959,6 +1012,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specifies a <a href="https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath">subpath</a> to be mounted from the volume into the executor pod.
<code>spark.kubernetes.executor.volumes.persistentVolumeClaim.checkpointpvc.mount.subPath=checkpoint</code>.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.readOnly</code></td>
@@ -967,6 +1021,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify if the mounted volume is read only or not. For example,
<code>spark.kubernetes.executor.volumes.persistentVolumeClaim.checkpointpvc.mount.readOnly=false</code>.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].options.[OptionName]</code></td>
@@ -975,6 +1030,7 @@ See the [configuration page](configuration.html) for information on Spark config
Configure <a href="https://kubernetes.io/docs/concepts/storage/volumes/">Kubernetes Volume</a> options passed to the Kubernetes with <code>OptionName</code> as key having specified value. For example,
<code>spark.kubernetes.executor.volumes.persistentVolumeClaim.checkpointpvc.options.claimName=spark-pvc-claim</code>.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.local.dirs.tmpfs</code></td>
@@ -983,6 +1039,7 @@ See the [configuration page](configuration.html) for information on Spark config
Configure the <code>emptyDir</code> volumes used to back <code>SPARK_LOCAL_DIRS</code> within the Spark driver and executor pods to use <code>tmpfs</code> backing i.e. RAM. See <a href="#local-storage">Local Storage</a> earlier on this page
for more discussion of this.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.memoryOverheadFactor</code></td>
@@ -991,6 +1048,7 @@ See the [configuration page](configuration.html) for information on Spark config
This sets the Memory Overhead Factor that will allocate memory to non-JVM memory, which includes off-heap memory allocations, non-JVM tasks, and various systems processes. For JVM-based jobs this value will default to 0.10 and 0.40 for non-JVM jobs.
This is done as non-JVM tasks need more non-JVM heap space and such tasks commonly fail with "Memory Overhead Exceeded" errors. This preempts this error with a higher default.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.pyspark.pythonVersion</code></td>
@@ -998,6 +1056,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
This sets the major Python version of the docker image used to run the driver and executor containers. Can either be 2 or 3.
</td>
<td>2.4.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.kerberos.krb5.path</code></td>
@@ -1006,6 +1065,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify the local location of the krb5.conf file to be mounted on the driver and executors for Kerberos interaction.
It is important to note that the KDC defined needs to be visible from inside the containers.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.kerberos.krb5.configMapName</code></td>
@@ -1015,6 +1075,7 @@ See the [configuration page](configuration.html) for information on Spark config
for Kerberos interaction. The KDC defined needs to be visible from inside the containers. The ConfigMap must also
be in the same namespace of the driver and executor pods.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.hadoop.configMapName</code></td>
@@ -1023,6 +1084,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify the name of the ConfigMap, containing the HADOOP_CONF_DIR files, to be mounted on the driver
and executors for custom Hadoop configuration.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.kerberos.tokenSecret.name</code></td>
@@ -1031,6 +1093,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify the name of the secret where your existing delegation tokens are stored. This removes the need for the job user
to provide any kerberos credentials for launching a job.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.kerberos.tokenSecret.itemKey</code></td>
@@ -1039,6 +1102,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify the item key of the data where your existing delegation tokens are stored. This removes the need for the job user
to provide any kerberos credentials for launching a job.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.podTemplateFile</code></td>
@@ -1047,6 +1111,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify the local file that contains the driver <a href="#pod-template">pod template</a>. For example
<code>spark.kubernetes.driver.podTemplateFile=/path/to/driver-pod-template.yaml</code>
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.podTemplateContainerName</code></td>
@@ -1055,6 +1120,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify the container name to be used as a basis for the driver in the given <a href="#pod-template">pod template</a>.
For example <code>spark.kubernetes.driver.podTemplateContainerName=spark-driver</code>
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.podTemplateFile</code></td>
@@ -1063,6 +1129,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify the local file that contains the executor <a href="#pod-template">pod template</a>. For example
<code>spark.kubernetes.executor.podTemplateFile=/path/to/executor-pod-template.yaml</code>
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.podTemplateContainerName</code></td>
@@ -1071,6 +1138,7 @@ See the [configuration page](configuration.html) for information on Spark config
Specify the container name to be used as a basis for the executor in the given <a href="#pod-template">pod template</a>.
For example <code>spark.kubernetes.executor.podTemplateContainerName=spark-executor</code>
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.executor.deleteOnTermination</code></td>
@@ -1078,6 +1146,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Specify whether executor pods should be deleted in case of failure or normal termination.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.submission.connectionTimeout</code></td>
@@ -1085,6 +1154,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Connection timeout in milliseconds for the kubernetes client to use for starting the driver.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.submission.requestTimeout</code></td>
@@ -1092,6 +1162,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Request timeout in milliseconds for the kubernetes client to use for starting the driver.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.connectionTimeout</code></td>
@@ -1099,6 +1170,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Connection timeout in milliseconds for the kubernetes client in driver to use when requesting executors.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.driver.requestTimeout</code></td>
@@ -1106,6 +1178,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Request timeout in milliseconds for the kubernetes client in driver to use when requesting executors.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.appKillPodDeletionGracePeriod</code></td>
@@ -1113,6 +1186,7 @@ See the [configuration page](configuration.html) for information on Spark config
<td>
Specify the grace period in seconds when deleting a Spark application using spark-submit.
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.kubernetes.file.upload.path</code></td>
@@ -1122,6 +1196,7 @@ See the [configuration page](configuration.html) for information on Spark config
<code>spark.kubernetes.file.upload.path=s3a://&lt;s3-bucket&gt;/path</code>
File should be specified as <code>file://path/to/file</code> or as an absolute path.
</td>
<td>3.0.0</td>
</tr>
</table>
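
For a sense of how the documented properties (and the new "Since Version" entries) are consumed, here is a hypothetical client-mode snippet; the master URL, namespace, and image name are placeholders for illustration, not values from this commit:

```scala
import org.apache.spark.sql.SparkSession

// Hypothetical values for illustration only.
val spark = SparkSession.builder()
  .appName("k8s-example")
  .master("k8s://https://kubernetes.example.com:6443")
  .config("spark.kubernetes.namespace", "spark-jobs")           // since 2.3.0
  .config("spark.kubernetes.container.image", "example/spark")  // since 2.3.0
  .config("spark.kubernetes.executor.request.cores", "500m")    // since 2.4.0
  .config("spark.kubernetes.driver.requestTimeout", "30000")    // since 3.0.0
  .getOrCreate()
```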

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala

@@ -33,6 +33,7 @@ private[spark] object Config extends Logging {
"to .kube/config under your home directory. If not specified then your current " +
"context is used. You can always override specific aspects of the config file " +
"provided configuration using other Spark on K8S configuration options.")
.version("3.0.0")
.stringConf
.createOptional
@@ -40,12 +41,14 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.driver.master")
.doc("The internal Kubernetes master (API server) address " +
"to be used for driver to request executors.")
.version("3.0.0")
.stringConf
.createWithDefault(KUBERNETES_MASTER_INTERNAL_URL)
val KUBERNETES_NAMESPACE =
ConfigBuilder("spark.kubernetes.namespace")
.doc("The namespace that will be used for running the driver and executor pods.")
.version("2.3.0")
.stringConf
.createWithDefault("default")
@@ -54,22 +57,26 @@ private[spark] object Config extends Logging {
.doc("Container image to use for Spark containers. Individual container types " +
"(e.g. driver or executor) can also be configured to use different images if desired, " +
"by setting the container type-specific image name.")
.version("2.3.0")
.stringConf
.createOptional
val DRIVER_CONTAINER_IMAGE =
ConfigBuilder("spark.kubernetes.driver.container.image")
.doc("Container image to use for the driver.")
.version("2.3.0")
.fallbackConf(CONTAINER_IMAGE)
val EXECUTOR_CONTAINER_IMAGE =
ConfigBuilder("spark.kubernetes.executor.container.image")
.doc("Container image to use for the executors.")
.version("2.3.0")
.fallbackConf(CONTAINER_IMAGE)
val CONTAINER_IMAGE_PULL_POLICY =
ConfigBuilder("spark.kubernetes.container.image.pullPolicy")
.doc("Kubernetes image pull policy. Valid values are Always, Never, and IfNotPresent.")
.version("2.3.0")
.stringConf
.checkValues(Set("Always", "Never", "IfNotPresent"))
.createWithDefault("IfNotPresent")
@@ -78,6 +85,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.container.image.pullSecrets")
.doc("Comma separated list of the Kubernetes secrets used " +
"to access private image registries.")
.version("2.4.0")
.stringConf
.toSequence
.createWithDefault(Nil)
@@ -95,24 +103,28 @@ private[spark] object Config extends Logging {
val SUBMISSION_CLIENT_REQUEST_TIMEOUT =
ConfigBuilder("spark.kubernetes.submission.requestTimeout")
.doc("request timeout to be used in milliseconds for starting the driver")
.version("3.0.0")
.intConf
.createWithDefault(10000)
val SUBMISSION_CLIENT_CONNECTION_TIMEOUT =
ConfigBuilder("spark.kubernetes.submission.connectionTimeout")
.doc("connection timeout to be used in milliseconds for starting the driver")
.version("3.0.0")
.intConf
.createWithDefault(10000)
val DRIVER_CLIENT_REQUEST_TIMEOUT =
ConfigBuilder("spark.kubernetes.driver.requestTimeout")
.doc("request timeout to be used in milliseconds for driver to request executors")
.version("3.0.0")
.intConf
.createWithDefault(10000)
val DRIVER_CLIENT_CONNECTION_TIMEOUT =
ConfigBuilder("spark.kubernetes.driver.connectionTimeout")
.doc("connection timeout to be used in milliseconds for driver to request executors")
.version("3.0.0")
.intConf
.createWithDefault(10000)
@@ -122,6 +134,7 @@ private[spark] object Config extends Logging {
"this service account when requesting executor pods from the API server. If specific " +
"credentials are given for the driver pod to use, the driver will favor " +
"using those credentials instead.")
.version("2.3.0")
.stringConf
.createOptional
@@ -129,48 +142,56 @@ private[spark] object Config extends Logging {
ConfigBuilder(s"$KUBERNETES_AUTH_EXECUTOR_CONF_PREFIX.serviceAccountName")
.doc("Service account that is used when running the executor pod." +
"If this parameter is not setup, the fallback logic will use the driver's service account.")
.version("3.1.0")
.stringConf
.createOptional
val KUBERNETES_DRIVER_LIMIT_CORES =
ConfigBuilder("spark.kubernetes.driver.limit.cores")
.doc("Specify the hard cpu limit for the driver pod")
.version("2.3.0")
.stringConf
.createOptional
val KUBERNETES_DRIVER_REQUEST_CORES =
ConfigBuilder("spark.kubernetes.driver.request.cores")
.doc("Specify the cpu request for the driver pod")
.version("3.0.0")
.stringConf
.createOptional
val KUBERNETES_DRIVER_SUBMIT_CHECK =
ConfigBuilder("spark.kubernetes.submitInDriver")
.internal()
.version("2.4.0")
.booleanConf
.createWithDefault(false)
val KUBERNETES_EXECUTOR_LIMIT_CORES =
ConfigBuilder("spark.kubernetes.executor.limit.cores")
.doc("Specify the hard cpu limit for each executor pod")
.version("2.3.0")
.stringConf
.createOptional
val KUBERNETES_EXECUTOR_SCHEDULER_NAME =
ConfigBuilder("spark.kubernetes.executor.scheduler.name")
.doc("Specify the scheduler name for each executor pod")
.version("3.0.0")
.stringConf
.createOptional
val KUBERNETES_EXECUTOR_REQUEST_CORES =
ConfigBuilder("spark.kubernetes.executor.request.cores")
.doc("Specify the cpu request for each executor pod")
.version("2.4.0")
.stringConf
.createOptional
val KUBERNETES_DRIVER_POD_NAME =
ConfigBuilder("spark.kubernetes.driver.pod.name")
.doc("Name of the driver pod.")
.version("2.3.0")
.stringConf
.createOptional
@@ -178,12 +199,14 @@ private[spark] object Config extends Logging {
val KUBERNETES_DRIVER_POD_NAME_PREFIX =
ConfigBuilder("spark.kubernetes.driver.resourceNamePrefix")
.internal()
.version("3.0.0")
.stringConf
.createOptional
val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
ConfigBuilder("spark.kubernetes.executor.podNamePrefix")
.doc("Prefix to use in front of the executor pod names.")
.version("2.3.0")
.internal()
.stringConf
.createOptional
@@ -191,6 +214,7 @@ private[spark] object Config extends Logging {
val KUBERNETES_ALLOCATION_BATCH_SIZE =
ConfigBuilder("spark.kubernetes.allocation.batch.size")
.doc("Number of pods to launch at once in each round of executor allocation.")
.version("2.3.0")
.intConf
.checkValue(value => value > 0, "Allocation batch size should be a positive integer")
.createWithDefault(5)
@@ -198,6 +222,7 @@ private[spark] object Config extends Logging {
val KUBERNETES_ALLOCATION_BATCH_DELAY =
ConfigBuilder("spark.kubernetes.allocation.batch.delay")
.doc("Time to wait between each round of executor allocation.")
.version("2.3.0")
.timeConf(TimeUnit.MILLISECONDS)
.checkValue(value => value > 0, "Allocation batch delay must be a positive time value.")
.createWithDefaultString("1s")
@@ -206,6 +231,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.executor.lostCheck.maxAttempts")
.doc("Maximum number of attempts allowed for checking the reason of an executor loss " +
"before it is assumed that the executor failed.")
.version("2.3.0")
.intConf
.checkValue(value => value > 0, "Maximum attempts of checks of executor lost reason " +
"must be a positive integer")
@@ -215,12 +241,14 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.submission.waitAppCompletion")
.doc("In cluster mode, whether to wait for the application to finish before exiting the " +
"launcher process.")
.version("2.3.0")
.booleanConf
.createWithDefault(true)
val REPORT_INTERVAL =
ConfigBuilder("spark.kubernetes.report.interval")
.doc("Interval between reports of the current app status in cluster mode.")
.version("2.3.0")
.timeConf(TimeUnit.MILLISECONDS)
.checkValue(interval => interval > 0, s"Logging interval must be a positive time value.")
.createWithDefaultString("1s")
@@ -229,6 +257,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.executor.apiPollingInterval")
.doc("Interval between polls against the Kubernetes API server to inspect the " +
"state of executors.")
.version("2.4.0")
.timeConf(TimeUnit.MILLISECONDS)
.checkValue(interval => interval > 0, s"API server polling interval must be a" +
" positive time value.")
@@ -238,6 +267,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.executor.eventProcessingInterval")
.doc("Interval between successive inspection of executor events sent from the" +
" Kubernetes API.")
.version("2.4.0")
.timeConf(TimeUnit.MILLISECONDS)
.checkValue(interval => interval > 0, s"Event processing interval must be a positive" +
" time value.")
@@ -247,6 +277,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.memoryOverheadFactor")
.doc("This sets the Memory Overhead Factor that will allocate memory to non-JVM jobs " +
"which in the case of JVM tasks will default to 0.10 and 0.40 for non-JVM jobs")
.version("2.4.0")
.doubleConf
.checkValue(mem_overhead => mem_overhead >= 0 && mem_overhead < 1,
"Ensure that memory overhead is a double between 0 --> 1.0")
@@ -255,6 +286,7 @@ private[spark] object Config extends Logging {
val PYSPARK_MAJOR_PYTHON_VERSION =
ConfigBuilder("spark.kubernetes.pyspark.pythonVersion")
.doc("This sets the major Python version. Either 2 or 3. (Python2 or Python3)")
.version("2.4.0")
.stringConf
.checkValue(pv => List("2", "3").contains(pv),
"Ensure that major Python version is either Python2 or Python3")
@@ -265,6 +297,7 @@ private[spark] object Config extends Logging {
.doc("Specify the local location of the krb5.conf file to be mounted on the driver " +
"and executors for Kerberos. Note: The KDC defined needs to be " +
"visible from inside the containers ")
.version("3.0.0")
.stringConf
.createOptional
@@ -273,6 +306,7 @@ private[spark] object Config extends Logging {
.doc("Specify the name of the ConfigMap, containing the krb5.conf file, to be mounted " +
"on the driver and executors for Kerberos. Note: The KDC defined" +
"needs to be visible from inside the containers ")
.version("3.0.0")
.stringConf
.createOptional
@@ -280,6 +314,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.hadoop.configMapName")
.doc("Specify the name of the ConfigMap, containing the HADOOP_CONF_DIR files, " +
"to be mounted on the driver and executors for custom Hadoop configuration.")
.version("3.0.0")
.stringConf
.createOptional
@@ -287,6 +322,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.kerberos.tokenSecret.name")
.doc("Specify the name of the secret where your existing delegation tokens are stored. " +
"This removes the need for the job user to provide any keytab for launching a job")
.version("3.0.0")
.stringConf
.createOptional
@@ -294,13 +330,15 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.kerberos.tokenSecret.itemKey")
.doc("Specify the item key of the data where your existing delegation tokens are stored. " +
"This removes the need for the job user to provide any keytab for launching a job")
.version("3.0.0")
.stringConf
.createOptional
val APP_RESOURCE_TYPE =
ConfigBuilder("spark.kubernetes.resource.type")
.doc("This sets the resource type internally")
.internal()
.doc("This sets the resource type internally")
.version("2.4.1")
.stringConf
.checkValues(Set(APP_RESOURCE_TYPE_JAVA, APP_RESOURCE_TYPE_PYTHON, APP_RESOURCE_TYPE_R))
.createOptional
@@ -311,30 +349,35 @@ private[spark] object Config extends Logging {
"their medium set to Memory so that they will be created as tmpfs (i.e. RAM) backed " +
"volumes. This may improve performance but scratch space usage will count towards " +
"your pods memory limit so you may wish to request more memory.")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
val KUBERNETES_DRIVER_PODTEMPLATE_FILE =
ConfigBuilder("spark.kubernetes.driver.podTemplateFile")
.doc("File containing a template pod spec for the driver")
.version("3.0.0")
.stringConf
.createOptional
val KUBERNETES_EXECUTOR_PODTEMPLATE_FILE =
ConfigBuilder("spark.kubernetes.executor.podTemplateFile")
.doc("File containing a template pod spec for executors")
.version("3.0.0")
.stringConf
.createOptional
val KUBERNETES_DRIVER_PODTEMPLATE_CONTAINER_NAME =
ConfigBuilder("spark.kubernetes.driver.podTemplateContainerName")
.doc("container name to be used as a basis for the driver in the given pod template")
.version("3.0.0")
.stringConf
.createOptional
val KUBERNETES_EXECUTOR_PODTEMPLATE_CONTAINER_NAME =
ConfigBuilder("spark.kubernetes.executor.podTemplateContainerName")
.doc("container name to be used as a basis for executors in the given pod template")
.version("3.0.0")
.stringConf
.createOptional
@@ -347,12 +390,14 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.executor.deleteOnTermination")
.doc("If set to false then executor pods will not be deleted in case " +
"of failure or normal termination.")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
val KUBERNETES_DYN_ALLOC_KILL_GRACE_PERIOD =
ConfigBuilder("spark.kubernetes.dynamicAllocation.deleteGracePeriod")
.doc("How long to wait for executors to shut down gracefully before a forceful kill.")
.version("3.0.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("5s")
@@ -360,6 +405,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.appKillPodDeletionGracePeriod")
.doc("Time to wait for graceful deletion of Spark pods when spark-submit" +
" is used for killing an application.")
.version("3.0.0")
.timeConf(TimeUnit.SECONDS)
.createOptional
@@ -367,6 +413,7 @@ private[spark] object Config extends Logging {
ConfigBuilder("spark.kubernetes.file.upload.path")
.doc("Hadoop compatible file system path where files from the local file system " +
"will be uploaded to in cluster mode.")
.version("3.0.0")
.stringConf
.createOptional
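
For context, `.version()` is the `ConfigBuilder` hook these changes rely on. A simplified, self-contained sketch of the pattern (not the verbatim implementation in `org.apache.spark.internal.config`) looks like this:

```scala
// Simplified sketch of the fluent version() hook; the real ConfigBuilder
// tracks far more state (doc text, units, type converters, callbacks).
class ConfigBuilder(val key: String) {
  private var _version: String = ""

  // Records the Spark release in which the config first appeared.
  def version(v: String): ConfigBuilder = {
    _version = v
    this
  }

  override def toString: String =
    s"$key (since ${if (_version.isEmpty) "unknown" else _version})"
}

// Usage mirroring the entries in this commit:
// new ConfigBuilder("spark.kubernetes.namespace").version("2.3.0")
```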