diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index a47136ea36..f377f13d30 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -2798,7 +2798,7 @@ object SparkContext extends Logging {
         defaultProf.maxTasksPerExecutor(sc.conf) < cpuSlots) {
         throw new IllegalArgumentException("The number of slots on an executor has to be " +
           "limited by the number of cores, otherwise you waste resources and " +
-          "dynamic allocation doesn't work properly. Your configuration has " +
+          "some scheduling doesn't work properly. Your configuration has " +
           s"core/task cpu slots = ${cpuSlots} and " +
           s"${limitingResource} = " +
           s"${defaultProf.maxTasksPerExecutor(sc.conf)}. Please adjust your configuration " +
diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala b/core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala
index 2608ab9fc0..5b2476c18c 100644
--- a/core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala
+++ b/core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala
@@ -168,7 +168,7 @@ class ResourceProfile(
           // limiting resource because the scheduler code uses that for slots
           throw new IllegalArgumentException("The number of slots on an executor has to be " +
             "limited by the number of cores, otherwise you waste resources and " +
-            "dynamic allocation doesn't work properly. Your configuration has " +
+            "some scheduling doesn't work properly. Your configuration has " +
             s"core/task cpu slots = ${taskLimit} and " +
             s"${execReq.resourceName} = ${numTasks}. " +
             "Please adjust your configuration so that all resources require same number " +
@@ -183,12 +183,11 @@ class ResourceProfile(
           "no corresponding task resource request was specified.")
       }
     }
-    if(!shouldCheckExecCores && Utils.isDynamicAllocationEnabled(sparkConf)) {
+    if(!shouldCheckExecCores && execResourceToCheck.nonEmpty) {
       // if we can't rely on the executor cores config throw a warning for user
       logWarning("Please ensure that the number of slots available on your " +
         "executors is limited by the number of cores to task cpus and not another " +
-        "custom resource. If cores is not the limiting resource then dynamic " +
-        "allocation will not work properly!")
+        "custom resource.")
     }
     if (taskResourcesToCheck.nonEmpty) {
       throw new SparkException("No executor resource configs were not specified for the " +