25 | 25 | import org.elasticsearch.common.util.concurrent.EsRejectedExecutionHandler; |
26 | 26 | import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; |
27 | 27 | import org.elasticsearch.common.util.concurrent.ThreadContext; |
| 28 | +import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; |
28 | 29 | import org.elasticsearch.core.Nullable; |
29 | 30 | import org.elasticsearch.core.TimeValue; |
30 | 31 | import org.elasticsearch.node.Node; |
@@ -74,20 +75,28 @@ public class ThreadPool implements ReportingService<ThreadPoolInfo>, Scheduler, |
74 | 75 | */ |
75 | 76 | public static class Names { |
76 | 77 | /** |
77 | | - * A thread pool with a very high (but finite) maximum size for when there really is no other choice. |
| 78 | + * A thread pool with a very high (but finite) maximum size for use when there really is no other choice. |
78 | 79 | * <p> |
79 | 80 | * This pool may be used for one-off CPU-bound activities, but the maximum size is so high that it doesn't really work well to do a |
80 | | - * lot of CPU-bound work in parallel here. Likewise you can do IO on this pool, but using it for lots of concurrent IO is likely |
81 | | - * harmful in clusters with poor concurrent IO performance (especially if using spinning disks). Blocking on a future on this pool |
| 81 | + * lot of CPU-bound work in parallel here: submitting more CPU-bound tasks than we have CPUs to run them will burn a lot of CPU just |
| 82 | + * context-switching in order to try and make fair progress on all the tasks at once. Better to submit fewer tasks and wait for them |
| 83 | + * to complete before submitting more. See {@link ThrottledTaskRunner} and friends for utilities to help with this. |
| 84 | + * <p> |
| 85 | + * Likewise you can do IO on this pool, but using it for lots of concurrent IO is likely |
| 86 | + * harmful in clusters with poor concurrent IO performance (especially if using spinning disks). |
| 87 | + * <p> |
| 88 | + * Blocking on a future on this pool |
82 | 89 | * risks deadlock if there's a chance that the completion of the future depends on work being done on this pool. Unfortunately |
83 | 90 | * that's pretty likely in most cases because of how often this pool is used; deadlock is really rare because of the high limit on the pool |
84 | | - * size, but when it happens it is extremely harmful to the node. |
| 91 | + * size, but when it happens it is extremely harmful to the node. |
85 | 92 | * <p> |
86 | 93 | * This pool is also used for recovery-related work. The recovery subsystem bounds its own concurrency, and therefore the amount of |
87 | 94 | * recovery work done on the {@code #GENERIC} pool, via {@code cluster.routing.allocation.node_concurrent_recoveries} and related |
88 | 95 | * settings. |
89 | 96 | * <p> |
90 | | - * This pool does not reject any task. If you submit a task after the pool starts to shut down, it may simply never run. |
| 97 | + * Other subsystems that submit a lot of work to this pool should similarly bound their own concurrency. |
| 98 | + * <p> |
| 99 | + * This pool does not reject any task. Tasks you submit to this executor after the pool starts to shut down may simply never run. |
91 | 100 | */ |
92 | 101 | public static final String GENERIC = "generic"; |
93 | 102 |
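
For illustration, here is a minimal sketch of the pattern the new javadoc points at: bounding concurrent CPU-bound work on the generic pool with `ThrottledTaskRunner` instead of submitting everything at once. The class name, runner name, and the limit of 2 are made up for the example, and it assumes the `ThrottledTaskRunner(String, int, Executor)` constructor and `enqueueTask(ActionListener<Releasable>)` API; it is a sketch, not code from this change.

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.threadpool.ThreadPool;

// Hypothetical caller: names and the concurrency limit are illustrative only.
class ThrottledGenericWork {

    private final ThrottledTaskRunner taskRunner;

    ThrottledGenericWork(ThreadPool threadPool) {
        // At most 2 tasks run on the GENERIC pool at once; further submissions queue until a slot frees up.
        this.taskRunner = new ThrottledTaskRunner("throttled_generic_work", 2, threadPool.generic());
    }

    void submit(Runnable cpuBoundTask) {
        taskRunner.enqueueTask(ActionListener.wrap(releasable -> {
            // Closing the Releasable returns this slot so the next queued task can start.
            try (releasable) {
                cpuBoundTask.run();
            }
        }, e -> {
            // Reached only if the underlying executor rejects the task, e.g. while shutting down.
        }));
    }
}
```

Queued tasks hold no thread until their turn comes, so a burst of submissions does not flood the pool with CPU-bound work that would mostly context-switch against itself.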
|
|