diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala
index 158a4b7cfa..6011901d1d 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/package.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala
@@ -243,7 +243,8 @@ package object config {
     .createWithDefault(false)
 
   private[spark] val MEMORY_OFFHEAP_SIZE = ConfigBuilder("spark.memory.offHeap.size")
-    .doc("The absolute amount of memory in bytes which can be used for off-heap allocation. " +
+    .doc("The absolute amount of memory which can be used for off-heap allocation, " +
+      " in bytes unless otherwise specified. " +
       "This setting has no impact on heap memory usage, so if your executors' total memory " +
       "consumption must fit within some hard limit then be sure to shrink your JVM heap size " +
       "accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.")
diff --git a/docs/configuration.md b/docs/configuration.md
index 5cf42d5fe8..e0b7386ac9 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -866,7 +866,7 @@ Apart from these, the following properties are also available, and may be useful
   <td><code>spark.shuffle.service.index.cache.size</code></td>
   <td>100m</td>
   <td>
-    Cache entries limited to the specified memory footprint in bytes.
+    Cache entries limited to the specified memory footprint, in bytes unless otherwise specified.
   </td>
 </tr>
 <tr>
@@ -1207,16 +1207,18 @@ Apart from these, the following properties are also available, and may be useful
   <td><code>spark.io.compression.lz4.blockSize</code></td>
   <td>32k</td>
   <td>
-    Block size in bytes used in LZ4 compression, in the case when LZ4 compression codec
+    Block size used in LZ4 compression, in the case when LZ4 compression codec
     is used. Lowering this block size will also lower shuffle memory usage when LZ4 is used.
+    Default unit is bytes, unless otherwise specified.
   </td>
 </tr>
 <tr>
   <td><code>spark.io.compression.snappy.blockSize</code></td>
   <td>32k</td>
   <td>
-    Block size in bytes used in Snappy compression, in the case when Snappy compression codec
-    is used. Lowering this block size will also lower shuffle memory usage when Snappy is used.
+    Block size in Snappy compression, in the case when Snappy compression codec is used.
+    Lowering this block size will also lower shuffle memory usage when Snappy is used.
+    Default unit is bytes, unless otherwise specified.
   </td>
 </tr>
 <tr>
@@ -1384,7 +1386,7 @@ Apart from these, the following properties are also available, and may be useful
   <td><code>spark.memory.offHeap.size</code></td>
   <td>0</td>
   <td>
-    The absolute amount of memory in bytes which can be used for off-heap allocation.
+    The absolute amount of memory which can be used for off-heap allocation, in bytes unless otherwise specified.
     This setting has no impact on heap memory usage, so if your executors' total memory
     consumption must fit within some hard limit then be sure to shrink your JVM heap size
     accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.
@@ -1568,9 +1570,9 @@ Apart from these, the following properties are also available, and may be useful
   <td><code>spark.storage.memoryMapThreshold</code></td>
   <td>2m</td>
   <td>
-    Size in bytes of a block above which Spark memory maps when reading a block from disk.
-    This prevents Spark from memory mapping very small blocks. In general, memory
-    mapping has high overhead for blocks close to or below the page size of the operating system.
+    Size of a block above which Spark memory maps when reading a block from disk. Default unit is bytes,
+    unless specified otherwise. This prevents Spark from memory mapping very small blocks. In general,
+    memory mapping has high overhead for blocks close to or below the page size of the operating system.
   </td>
 </tr>
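The wording change above reflects that these properties are parsed as byte sizes: a bare number is read as bytes, while suffixed values such as "256m" or "2g" carry their own unit. A minimal Scala sketch of that behavior for spark.memory.offHeap.size, set through the public SparkConf API (standalone illustration, not part of the patch; the object name is made up for the example):

import org.apache.spark.SparkConf

// Hypothetical example object, only to illustrate the documented unit handling.
object OffHeapSizeExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .set("spark.memory.offHeap.enabled", "true")
      // Bare number: interpreted as bytes (here 256 MiB expressed in bytes).
      .set("spark.memory.offHeap.size", (256L * 1024 * 1024).toString)
      // Equivalent setting using a size suffix instead of raw bytes:
      // .set("spark.memory.offHeap.size", "256m")

    // Prints the stored string value, e.g. "268435456".
    println(conf.get("spark.memory.offHeap.size"))
  }
}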