Skip to content

Commit 35314cb

Browse files
committed
review feedback
1 parent e69851b commit 35314cb

File tree

3 files changed

+4
-4
lines changed

3 files changed

+4
-4
lines changed

core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ private[spark] trait ExecutorAllocationClient {
5959
* @param adjustTargetNumExecutors whether the target number of executors will be adjusted down
6060
* after these executors have been killed
6161
* @param countFailures if there are tasks running on the executors when they are killed, whether
62-
* those failures be counted to task failure limits?
62+
* to count those failures toward task failure limits
6363
* @param force whether to force kill busy executors, default false
6464
* @return the ids of the executors acknowledged by the cluster manager to be removed.
6565
*/

core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -337,10 +337,10 @@ private[spark] class ExecutorAllocationManager(
337337
// If the new target has not changed, avoid sending a message to the cluster manager
338338
if (numExecutorsTarget < oldNumExecutorsTarget) {
339339
// We lower the target number of executors but don't actively kill any yet. Killing is
340-
// controlled separately by an idle timeout. Its still helpful to reduce the target number
340+
// controlled separately by an idle timeout. It's still helpful to reduce the target number
341341
// in case an executor just happens to get lost (eg., bad hardware, or the cluster manager
342342
// preempts it) -- in that case, there is no point in trying to immediately get a new
343-
// executor, since we couldn't even use it yet.
343+
// executor, since we wouldn't even use it yet.
344344
client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount)
345345
logDebug(s"Lowering target number of executors to $numExecutorsTarget (previously " +
346346
s"$oldNumExecutorsTarget) because not all requested executors are actually needed")

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1687,7 +1687,7 @@ class SparkContext(config: SparkConf) extends Logging {
16871687
private[spark] def killAndReplaceExecutor(executorId: String): Boolean = {
16881688
schedulerBackend match {
16891689
case b: ExecutorAllocationClient =>
1690-
b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = false,
1690+
b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true,
16911691
force = true).nonEmpty
16921692
case _ =>
16931693
logWarning("Killing executors is not supported by current scheduler.")

0 commit comments

Comments (0)