[SPARK-26564] Fix wrong assertions and error messages for parameter checking

## What changes were proposed in this pull request?

If users set `spark.network.timeout` and `spark.executor.heartbeatInterval` to the same value, they get the following message:

```
java.lang.IllegalArgumentException: requirement failed: The value of spark.network.timeout=120s must be no less than the value of spark.executor.heartbeatInterval=120s.
```

But this is misleading, since "no less than" can be read as allowing the two values to be equal, while the check actually requires the timeout to be strictly greater. So this PR replaces "no less than" with "greater than". It also fixes similar mismatches between checks and their messages in the MLlib and SQL components.

## How was this patch tested?

Manually ran Spark with the two settings set to the same value and confirmed that the revised message was displayed.
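For reference, a minimal way to reproduce this by hand (the app name and local master are illustrative; the conf is validated when the `SparkContext` is created):

```scala
import org.apache.spark.{SparkConf, SparkContext}

// Setting both durations to 120s now fails fast with the revised
// "must be greater than" message.
val conf = new SparkConf()
  .setMaster("local[*]")
  .setAppName("SPARK-26564-repro")
  .set("spark.network.timeout", "120s")
  .set("spark.executor.heartbeatInterval", "120s")

new SparkContext(conf) // throws java.lang.IllegalArgumentException
```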

Closes #23488 from sekikn/SPARK-26564.

Authored-by: Kengo Seki <sekikn@apache.org>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
parent 5b37092311
commit 3bd77aa9f6
4 changed files with 4 additions and 4 deletions


```diff
@@ -594,7 +594,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Serializable
     // If spark.executor.heartbeatInterval bigger than spark.network.timeout,
     // it will almost always cause ExecutorLostFailure. See SPARK-22754.
     require(executorTimeoutThresholdMs > executorHeartbeatIntervalMs, "The value of " +
-      s"spark.network.timeout=${executorTimeoutThresholdMs}ms must be no less than the value of " +
+      s"spark.network.timeout=${executorTimeoutThresholdMs}ms must be greater than the value of " +
       s"spark.executor.heartbeatInterval=${executorHeartbeatIntervalMs}ms.")
   }
```
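The check itself is unchanged here; only the message text differs. As a standalone sketch of why equal values fail the strict comparison (the 120000 values are illustrative):

```scala
// require uses strict ">", so equal values fail: 120000 > 120000 is false,
// which is why the message must say "greater than", not "no less than".
val executorTimeoutThresholdMs = 120000L   // spark.network.timeout
val executorHeartbeatIntervalMs = 120000L  // spark.executor.heartbeatInterval

require(executorTimeoutThresholdMs > executorHeartbeatIntervalMs,
  s"The value of spark.network.timeout=${executorTimeoutThresholdMs}ms must be greater than " +
  s"the value of spark.executor.heartbeatInterval=${executorHeartbeatIntervalMs}ms.")
// => java.lang.IllegalArgumentException: requirement failed: The value of ...
```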


```diff
@@ -88,7 +88,7 @@ private[ml] class WeightedLeastSquares(
   require(regParam >= 0.0, s"regParam cannot be negative: $regParam")
   require(elasticNetParam >= 0.0 && elasticNetParam <= 1.0,
     s"elasticNetParam must be in [0, 1]: $elasticNetParam")
-  require(maxIter >= 0, s"maxIter must be a positive integer: $maxIter")
+  require(maxIter > 0, s"maxIter must be a positive integer: $maxIter")
   require(tol >= 0.0, s"tol must be >= 0, but was set to $tol")

  /**
```
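Unlike the message-only fixes, this hunk tightens the check itself: `maxIter >= 0` admitted `maxIter = 0`, contradicting the "positive integer" wording. A minimal illustration in plain Scala (the `checkMaxIter` helper is ours, not Spark's):

```scala
// With the old check (maxIter >= 0), zero passed validation even though the
// message promises a positive integer; the strict ">" makes them agree.
def checkMaxIter(maxIter: Int): Unit =
  require(maxIter > 0, s"maxIter must be a positive integer: $maxIter")

checkMaxIter(10) // ok
checkMaxIter(0)  // throws IllegalArgumentException, matching the message
```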


```diff
@@ -79,7 +79,7 @@ case class BroadcastExchangeExec(
         val (numRows, input) = child.executeCollectIterator()
         if (numRows >= 512000000) {
           throw new SparkException(
-            s"Cannot broadcast the table with more than 512 millions rows: $numRows rows")
+            s"Cannot broadcast the table with 512 million or more rows: $numRows rows")
         }
         val beforeBuild = System.nanoTime()
```
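The guard uses `>=`, so a table with exactly 512,000,000 rows is already rejected, which is what the new wording says. A small boundary sketch (the `fitsBroadcastLimit` helper is hypothetical):

```scala
// Mirrors the guard above: exactly 512 million rows is rejected, hence
// "512 million or more" rather than "more than 512 million".
val limit = 512000000L
def fitsBroadcastLimit(numRows: Long): Boolean = numRows < limit

assert(fitsBroadcastLimit(limit - 1)) // 511,999,999 rows: allowed
assert(!fitsBroadcastLimit(limit))    // 512,000,000 rows: rejected
```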


```diff
@@ -413,7 +413,7 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap
   private def init(): Unit = {
     if (mm != null) {
-      require(capacity < 512000000, "Cannot broadcast more than 512 millions rows")
+      require(capacity < 512000000, "Cannot broadcast 512 million or more rows")
       var n = 1
       while (n < capacity) n *= 2
       ensureAcquireMemory(n * 2L * 8 + (1 << 20))
```
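For context on the surrounding lines: `init()` rounds the requested capacity up to the next power of two before acquiring memory. A sketch of that sizing step (the `nextPowerOfTwo` helper is ours):

```scala
// Rounds capacity up to the next power of two, as the loop above does before
// acquiring 2 * n slots of 8 bytes plus a 1 MB (1 << 20) initial page.
def nextPowerOfTwo(capacity: Int): Int = {
  var n = 1
  while (n < capacity) n *= 2
  n
}

assert(nextPowerOfTwo(1) == 1)
assert(nextPowerOfTwo(1000) == 1024)
assert(nextPowerOfTwo(1024) == 1024)
```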