Actually bound the generic thread pool #17017
Changes from all commits
5608fa7
d032de2
e6a06b2
fd679a7
b89a935
Excerpts from the diff of EsExecutors:

```java
@@ -26,16 +26,12 @@
import java.util.Arrays;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

/**
 *
 */
public class EsExecutors {

    /**

@@ -62,16 +58,11 @@ public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name,
    public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory, ThreadContext contextHolder) {
        ExecutorScalingQueue<Runnable> queue = new ExecutorScalingQueue<>();
        // we force the execution, since we might run into concurrency issues in offer for ScalingBlockingQueue
        EsThreadPoolExecutor executor = new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder);
        queue.executor = executor;
        return executor;
    }

    public static EsThreadPoolExecutor newCached(String name, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory, ThreadContext contextHolder) {
        return new EsThreadPoolExecutor(name, 0, Integer.MAX_VALUE, keepAliveTime, unit, new SynchronousQueue<Runnable>(), threadFactory, new EsAbortPolicy(), contextHolder);
    }

    public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, ThreadFactory threadFactory, ThreadContext contextHolder) {
        BlockingQueue<Runnable> queue;
        if (queueCapacity < 0) {

@@ -114,6 +105,7 @@ public static ThreadFactory daemonThreadFactory(String namePrefix) {
    }

    static class EsThreadFactory implements ThreadFactory {

        final ThreadGroup group;
        final AtomicInteger threadNumber = new AtomicInteger(1);
        final String namePrefix;

@@ -133,6 +125,7 @@ public Thread newThread(Runnable r) {
            t.setDaemon(true);
            return t;
        }

    }

    /**

@@ -141,7 +134,6 @@ public Thread newThread(Runnable r) {
    private EsExecutors() {
    }

    static class ExecutorScalingQueue<E> extends LinkedTransferQueue<E> {

        ThreadPoolExecutor executor;

@@ -151,9 +143,17 @@ public ExecutorScalingQueue() {
        @Override
        public boolean offer(E e) {
            // first try to transfer to a waiting worker thread
            if (!tryTransfer(e)) {
                // check if there might be spare capacity in the thread
                // pool executor
                int left = executor.getMaximumPoolSize() - executor.getCorePoolSize();
                if (left > 0) {
                    // reject queuing the task to force the thread pool
                    // executor to add a worker if it can; combined
                    // with ForceQueuePolicy, this causes the thread
                    // pool to always scale up to max pool size and we
                    // only queue when there is no spare capacity
                    return false;
                } else {
                    return super.offer(e);

@@ -162,6 +162,7 @@ public boolean offer(E e) {
                return true;
            }
        }

    }

    /**

@@ -184,4 +185,5 @@ public long rejected() {
            return 0;
        }
    }

}
```
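To spell out the mechanism behind the diff: `ThreadPoolExecutor` only grows past its core pool size when the work queue rejects an `offer`, so `newScaling` pairs a queue whose `offer` refuses work while the pool is allowed to grow with a rejection handler that queues the task once the pool is at its maximum. Below is a minimal, self-contained sketch of that pattern using only `java.util.concurrent`; `ScalingExecutorSketch`, the pool sizes in `main`, and the body of `ForceQueuePolicy` (which is not shown in this diff) are illustrative re-implementations and assumptions, not the Elasticsearch classes.

```java
import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ScalingExecutorSketch {

    // A queue that refuses offer() whenever the executor is allowed to grow past
    // its core pool size; ThreadPoolExecutor then tries to add a worker instead
    // of queueing. If the pool is already at max, the rejection handler queues.
    static class ScalingQueue<E> extends LinkedTransferQueue<E> {
        ThreadPoolExecutor executor;

        @Override
        public boolean offer(E e) {
            // hand the task straight to an idle worker if one is waiting
            if (tryTransfer(e)) {
                return true;
            }
            // the pool can grow beyond its core size, so reject the offer to
            // make ThreadPoolExecutor attempt to add a worker
            if (executor.getMaximumPoolSize() - executor.getCorePoolSize() > 0) {
                return false;
            }
            return super.offer(e);
        }
    }

    // When the task is rejected (the pool is already at max size), force it onto the queue.
    static class ForceQueuePolicy implements RejectedExecutionHandler {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                executor.getQueue().put(r);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        ScalingQueue<Runnable> queue = new ScalingQueue<>();
        ThreadPoolExecutor executor =
                new ThreadPoolExecutor(1, 4, 30, TimeUnit.SECONDS, queue, new ForceQueuePolicy());
        queue.executor = executor;

        for (int i = 0; i < 16; i++) {
            executor.execute(() -> {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        // the pool scales up to its maximum before any task sits in the queue
        System.out.println("pool size: " + executor.getPoolSize()); // expect 4
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}
```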
The new test base class, org.elasticsearch.threadpool.ESThreadPoolTestCase (@@ -0,0 +1,62 @@):

```java
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.threadpool;

import org.elasticsearch.test.ESTestCase;

import java.util.Map;
import java.util.stream.Collectors;

public abstract class ESThreadPoolTestCase extends ESTestCase {

    protected final ThreadPool.Info info(final ThreadPool threadPool, final String name) {
        for (final ThreadPool.Info info : threadPool.info()) {
            if (info.getName().equals(name)) {
                return info;
            }
        }
        throw new IllegalArgumentException(name);
    }

    protected final ThreadPoolStats.Stats stats(final ThreadPool threadPool, final String name) {
        for (final ThreadPoolStats.Stats stats : threadPool.stats()) {
            if (name.equals(stats.getName())) {
                return stats;
            }
        }
        throw new IllegalArgumentException(name);
    }

    protected final void terminateThreadPoolIfNeeded(final ThreadPool threadPool) throws InterruptedException {
        if (threadPool != null) {
            terminate(threadPool);
        }
    }

    static String randomThreadPool(final ThreadPool.ThreadPoolType type) {
        return randomFrom(
                ThreadPool.THREAD_POOL_TYPES
                        .entrySet().stream()
                        .filter(t -> t.getValue().equals(type))
                        .map(Map.Entry::getKey)
                        .collect(Collectors.toList()));
    }

}
```
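For orientation, here is a hypothetical subclass showing how these helpers might be used together. The test class name, the node name passed to the ThreadPool constructor, and the choice of a fixed-type pool are assumptions made for the sketch, not part of this pull request.

```java
// Assumed to live in the same package as the base class so that the
// package-private randomThreadPool helper is accessible.
package org.elasticsearch.threadpool;

public class ThreadPoolInfoSketchTests extends ESThreadPoolTestCase {

    public void testInfoAndStatsAgreeOnTheName() throws InterruptedException {
        ThreadPool threadPool = null;
        try {
            // hypothetical node name, just to build a thread pool for the test
            threadPool = new ThreadPool("thread-pool-info-sketch");
            // pick any built-in pool of the fixed type
            final String name = randomThreadPool(ThreadPool.ThreadPoolType.FIXED);
            // the info and stats helpers should both resolve the same pool
            final ThreadPool.Info info = info(threadPool, name);
            final ThreadPoolStats.Stats stats = stats(threadPool, name);
            assertEquals(name, info.getName());
            assertEquals(name, stats.getName());
        } finally {
            terminateThreadPoolIfNeeded(threadPool);
        }
    }

}
```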
Reviewer:

Can we add a comment about using the ForceQueuePolicy capturing our research? It will be a shame to have to do it again :)
Reviewer:

How I wish I had written down what we found out at the time. Now that I look at the ExecutorScalingQueue, I see:

The place I marked with *** feels weird, as it effectively returns false all the time. That is fine, because we want to prefer adding threads first and only queue if that fails, which is why we need the rejection policy. My guess is that it is there for the case where people make a "fixed" thread pool out of a scaling one by setting min == max (which feels like an unneeded optimization to me, by the way). Do you agree, and if so, can we add a comment documenting this?
Author:

Yes, your explanation is basically correct, except I don't agree with the part about it being there for the case when people make a fixed thread pool out of a scaling one. It's there for the opposite case, when there could be spare capacity in the thread pool. It's needed because ThreadPoolExecutor prefers queueing over creating new worker threads once the pool has reached its core pool size.

I added comments explaining the behavior.
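The preference described above is easy to observe with a plain `java.util.concurrent` executor: when the work queue is unbounded and always accepts offers, the pool never grows past its core size, no matter how many tasks are submitted. The following small demonstration is illustrative; the class name, pool sizes, and task counts are made up for the example.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CorePoolSizeDemo {
    public static void main(String[] args) throws Exception {
        // core = 1, max = 4, but the queue is unbounded and always accepts offers
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                1, 4, 30, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        for (int i = 0; i < 16; i++) {
            executor.execute(() -> {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        // the pool never grows beyond its core size because offer() never fails;
        // the remaining tasks pile up in the queue instead
        System.out.println("pool size: " + executor.getPoolSize());     // 1
        System.out.println("queued:    " + executor.getQueue().size()); // typically 15
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}
```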