Skip to content

Commit 3bd77aa

Browse files
sekikn authored and srowen committed
[SPARK-26564] Fix wrong assertions and error messages for parameter checking
## What changes were proposed in this pull request? If users set equivalent values to spark.network.timeout and spark.executor.heartbeatInterval, they get the following message: ``` java.lang.IllegalArgumentException: requirement failed: The value of spark.network.timeout=120s must be no less than the value of spark.executor.heartbeatInterval=120s. ``` But it's misleading since it can be read as they could be equal. So this PR replaces "no less than" with "greater than". Also, it fixes similar inconsistencies found in MLlib and SQL components. ## How was this patch tested? Ran Spark with equivalent values for them manually and confirmed that the revised message was displayed. Closes apache#23488 from sekikn/SPARK-26564. Authored-by: Kengo Seki <[email protected]> Signed-off-by: Sean Owen <[email protected]>
1 parent 5b37092 commit 3bd77aa

File tree

4 files changed

+4
-4
lines changed

4 files changed

+4
-4
lines changed

core/src/main/scala/org/apache/spark/SparkConf.scala

+1-1
Original file line numberDiff line numberDiff line change
@@ -594,7 +594,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria
594594
// If spark.executor.heartbeatInterval bigger than spark.network.timeout,
595595
// it will almost always cause ExecutorLostFailure. See SPARK-22754.
596596
require(executorTimeoutThresholdMs > executorHeartbeatIntervalMs, "The value of " +
597-
s"spark.network.timeout=${executorTimeoutThresholdMs}ms must be no less than the value of " +
597+
s"spark.network.timeout=${executorTimeoutThresholdMs}ms must be greater than the value of " +
598598
s"spark.executor.heartbeatInterval=${executorHeartbeatIntervalMs}ms.")
599599
}
600600

mllib/src/main/scala/org/apache/spark/ml/optim/WeightedLeastSquares.scala

+1-1
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ private[ml] class WeightedLeastSquares(
8888
require(regParam >= 0.0, s"regParam cannot be negative: $regParam")
8989
require(elasticNetParam >= 0.0 && elasticNetParam <= 1.0,
9090
s"elasticNetParam must be in [0, 1]: $elasticNetParam")
91-
require(maxIter >= 0, s"maxIter must be a positive integer: $maxIter")
91+
require(maxIter > 0, s"maxIter must be a positive integer: $maxIter")
9292
require(tol >= 0.0, s"tol must be >= 0, but was set to $tol")
9393

9494
/**

sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/BroadcastExchangeExec.scala

+1-1
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ case class BroadcastExchangeExec(
7979
val (numRows, input) = child.executeCollectIterator()
8080
if (numRows >= 512000000) {
8181
throw new SparkException(
82-
s"Cannot broadcast the table with more than 512 millions rows: $numRows rows")
82+
s"Cannot broadcast the table with 512 million or more rows: $numRows rows")
8383
}
8484

8585
val beforeBuild = System.nanoTime()

sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala

+1-1
Original file line numberDiff line numberDiff line change
@@ -413,7 +413,7 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap
413413

414414
private def init(): Unit = {
415415
if (mm != null) {
416-
require(capacity < 512000000, "Cannot broadcast more than 512 millions rows")
416+
require(capacity < 512000000, "Cannot broadcast 512 million or more rows")
417417
var n = 1
418418
while (n < capacity) n *= 2
419419
ensureAcquireMemory(n * 2L * 8 + (1 << 20))

0 commit comments

Comments
 (0)