ForkJoinPool.java (forked from kangjianwei/LearningJDK) · 4415 lines (3816 loc) · 194 KB
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This file is available under and governed by the GNU General Public
* License version 2 only, as published by the Free Software Foundation.
* However, the following notice accompanied the original version of this
* file:
*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package java.util.concurrent;
import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.Permission;
import java.security.Permissions;
import java.security.PrivilegedAction;
import java.security.ProtectionDomain;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ForkJoinTask.AdaptedRunnable;
import java.util.concurrent.locks.LockSupport;
import java.util.function.Predicate;
/**
* An {@link ExecutorService} for running {@link ForkJoinTask}s.
* A {@code ForkJoinPool} provides the entry point for submissions
* from non-{@code ForkJoinTask} clients, as well as management and
* monitoring operations.
*
* <p>A {@code ForkJoinPool} differs from other kinds of {@link
* ExecutorService} mainly by virtue of employing
* <em>work-stealing</em>: all threads in the pool attempt to find and
* execute tasks submitted to the pool and/or created by other active
* tasks (eventually blocking waiting for work if none exist). This
* enables efficient processing when most tasks spawn other subtasks
* (as do most {@code ForkJoinTask}s), as well as when many small
* tasks are submitted to the pool from external clients. Especially
* when setting <em>asyncMode</em> to true in constructors, {@code
* ForkJoinPool}s may also be appropriate for use with event-style
* tasks that are never joined. All worker threads are initialized
* with {@link Thread#isDaemon} set {@code true}.
*
* <p>A static {@link #commonPool()} is available and appropriate for
* most applications. The common pool is used by any ForkJoinTask that
* is not explicitly submitted to a specified pool. Using the common
* pool normally reduces resource usage (its threads are slowly
* reclaimed during periods of non-use, and reinstated upon subsequent
* use).
*
* <p>For applications that require separate or custom pools, a {@code
* ForkJoinPool} may be constructed with a given target parallelism
* level; by default, equal to the number of available processors.
* The pool attempts to maintain enough active (or available) threads
* by dynamically adding, suspending, or resuming internal worker
* threads, even if some tasks are stalled waiting to join others.
* However, no such adjustments are guaranteed in the face of blocked
* I/O or other unmanaged synchronization. The nested {@link
* ManagedBlocker} interface enables extension of the kinds of
* synchronization accommodated. The default policies may be
* overridden using a constructor with parameters corresponding to
* those documented in class {@link ThreadPoolExecutor}.
*
* <p>In addition to execution and lifecycle control methods, this
* class provides status check methods (for example
* {@link #getStealCount}) that are intended to aid in developing,
* tuning, and monitoring fork/join applications. Also, method
* {@link #toString} returns indications of pool state in a
* convenient form for informal monitoring.
*
* <p>As is the case with other ExecutorServices, there are three
* main task execution methods summarized in the following table.
* These are designed to be used primarily by clients not already
* engaged in fork/join computations in the current pool. The main
* forms of these methods accept instances of {@code ForkJoinTask},
* but overloaded forms also allow mixed execution of plain {@code
* Runnable}- or {@code Callable}- based activities as well. However,
* tasks that are already executing in a pool should normally instead
* use the within-computation forms listed in the table unless using
* async event-style tasks that are not usually joined, in which case
* there is little difference among choice of methods.
*
* <table class="plain">
* <caption>Summary of task execution methods</caption>
* <tr>
* <td></td>
* <th scope="col"> Call from non-fork/join clients</th>
* <th scope="col"> Call from within fork/join computations</th>
* </tr>
* <tr>
* <th scope="row" style="text-align:left"> Arrange async execution</th>
* <td> {@link #execute(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#fork}</td>
* </tr>
* <tr>
* <th scope="row" style="text-align:left"> Await and obtain result</th>
* <td> {@link #invoke(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#invoke}</td>
* </tr>
* <tr>
* <th scope="row" style="text-align:left"> Arrange exec and obtain Future</th>
* <td> {@link #submit(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
* </tr>
* </table>
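 *
 * <p>As an informal illustration (a sketch added to this annotated copy, not
 * part of the original documentation; class and threshold names are
 * arbitrary), the typical divide-and-conquer usage on the common pool looks
 * like:
 *
 * <pre> {@code
 * class SumTask extends RecursiveTask<Long> {
 *   final long[] a; final int lo, hi;
 *   SumTask(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
 *   protected Long compute() {
 *     if (hi - lo <= 1_000) {                  // small enough: sum directly
 *       long s = 0;
 *       for (int i = lo; i < hi; ++i) s += a[i];
 *       return s;
 *     }
 *     int mid = (lo + hi) >>> 1;
 *     SumTask left = new SumTask(a, lo, mid);
 *     left.fork();                             // arrange async execution
 *     long right = new SumTask(a, mid, hi).compute();
 *     return right + left.join();              // await and combine results
 *   }
 * }
 * long[] data = new long[1_000_000];
 * long total = ForkJoinPool.commonPool().invoke(new SumTask(data, 0, data.length));}</pre>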
*
* <p>The parameters used to construct the common pool may be controlled by
* setting the following {@linkplain System#getProperty system properties}:
* <ul>
* <li>{@code java.util.concurrent.ForkJoinPool.common.parallelism}
* - the parallelism level, a non-negative integer
* <li>{@code java.util.concurrent.ForkJoinPool.common.threadFactory}
* - the class name of a {@link ForkJoinWorkerThreadFactory}.
* The {@linkplain ClassLoader#getSystemClassLoader() system class loader}
* is used to load this class.
* <li>{@code java.util.concurrent.ForkJoinPool.common.exceptionHandler}
* - the class name of a {@link UncaughtExceptionHandler}.
* The {@linkplain ClassLoader#getSystemClassLoader() system class loader}
* is used to load this class.
* <li>{@code java.util.concurrent.ForkJoinPool.common.maximumSpares}
* - the maximum number of allowed extra threads to maintain target
* parallelism (default 256).
* </ul>
* If no thread factory is supplied via a system property, then the
* common pool uses a factory that uses the system class loader as the
* {@linkplain Thread#getContextClassLoader() thread context class loader}.
* In addition, if a {@link SecurityManager} is present, then
* the common pool uses a factory supplying threads that have no
* {@link Permissions} enabled.
*
* Upon any error in establishing these settings, default parameters
* are used. It is possible to disable or limit the use of threads in
* the common pool by setting the parallelism property to zero, and/or
* using a factory that may return {@code null}. However doing so may
* cause unjoined tasks to never be executed.
*
* <p><b>Implementation notes</b>: This implementation restricts the
* maximum number of running threads to 32767. Attempts to create
* pools with greater than the maximum number result in
* {@code IllegalArgumentException}.
*
* <p>This implementation rejects submitted tasks (that is, by throwing
* {@link RejectedExecutionException}) only when the pool is shut down
* or internal resources have been exhausted.
*
* @since 1.7
* @author Doug Lea
*/
/*
 * A work/task pool responsible for scheduling concurrent tasks so that they complete efficiently.
 *
 * "Private pool": a ForkJoinPool constructed by the user.
 * "Common pool":  the built-in static instance `common`, shared by all users of the ForkJoinPool class.
 *
 * For most purposes, using the common pool directly is recommended.
 */
public class ForkJoinPool extends AbstractExecutorService {
/*
* Implementation Overview
*
* This class and its nested classes provide the main
* functionality and control for a set of worker threads:
* Submissions from non-FJ threads enter into submission queues.
* Workers take these tasks and typically split them into subtasks
* that may be stolen by other workers. Work-stealing based on
* randomized scans generally leads to better throughput than
* "work dealing" in which producers assign tasks to idle threads,
* in part because threads that have finished other tasks before
* the signalled thread wakes up (which can be a long time) can
* take the task instead. Preference rules give first priority to
* processing tasks from their own queues (LIFO or FIFO, depending
* on mode), then to randomized FIFO steals of tasks in other
* queues. This framework began as a vehicle for supporting
* tree-structured parallelism using work-stealing. Over time,
* its scalability advantages led to extensions and changes to
* better support more diverse usage contexts. Because most
* internal methods and nested classes are interrelated, their
* main rationale and descriptions are presented here; individual
* methods and nested classes contain only brief comments about
* details.
*
* WorkQueues
* ==========
*
* Most operations occur within work-stealing queues (in nested
* class WorkQueue). These are special forms of Deques that
* support only three of the four possible end-operations -- push,
* pop, and poll (aka steal), under the further constraints that
* push and pop are called only from the owning thread (or, as
* extended here, under a lock), while poll may be called from
* other threads. (If you are unfamiliar with them, you probably
* want to read Herlihy and Shavit's book "The Art of
* Multiprocessor programming", chapter 16 describing these in
* more detail before proceeding.) The main work-stealing queue
* design is roughly similar to those in the papers "Dynamic
* Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
* (http://research.sun.com/scalable/pubs/index.html) and
* "Idempotent work stealing" by Michael, Saraswat, and Vechev,
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
* The main differences ultimately stem from GC requirements that
* we null out taken slots as soon as we can, to maintain as small
* a footprint as possible even in programs generating huge
* numbers of tasks. To accomplish this, we shift the CAS
* arbitrating pop vs poll (steal) from being on the indices
* ("base" and "top") to the slots themselves.
*
* Adding tasks then takes the form of a classic array push(task)
* in a circular buffer:
* q.array[q.top++ % length] = task;
*
* (The actual code needs to null-check and size-check the array,
* uses masking, not mod, for indexing a power-of-two-sized array,
* adds a release fence for publication, and possibly signals
* waiting workers to start scanning -- see below.) Both a
* successful pop and poll mainly entail a CAS of a slot from
* non-null to null.
*
* The pop operation (always performed by owner) is:
* if ((the task at top slot is not null) and
* (CAS slot to null))
* decrement top and return task;
*
* And the poll operation (usually by a stealer) is
* if ((the task at base slot is not null) and
* (CAS slot to null))
* increment base and return task;
*
* There are several variants of each of these. Most uses occur
* within operations that also interleave contention or emptiness
* tracking or inspection of elements before extracting them, so
* must interleave these with the above code. When performed by
* owner, getAndSet is used instead of CAS (see for example method
* nextLocalTask) which is usually more efficient, and possible
* because the top index cannot independently change during the
* operation.
*
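* As a much-simplified sketch (added for illustration; not the actual
* WorkQueue code, which adds growth, signalling, fencing, and the locked
* shared-queue variants), the push/pop/poll trio can be modelled with a
* java.util.concurrent.atomic.AtomicReferenceArray:
*
*   class MiniDeque {
*     final AtomicReferenceArray<ForkJoinTask<?>> a =
*         new AtomicReferenceArray<>(1 << 13);        // power-of-two capacity
*     volatile int base, top;                         // base <= top
*     void push(ForkJoinTask<?> t) {                  // owner only
*       a.set((top++) & (a.length() - 1), t);
*     }
*     ForkJoinTask<?> pop() {                         // owner only (LIFO)
*       int s = top - 1, i = s & (a.length() - 1);
*       ForkJoinTask<?> t = a.get(i);
*       if (t != null && a.compareAndSet(i, t, null)) { top = s; return t; }
*       return null;
*     }
*     ForkJoinTask<?> poll() {                        // any thread (FIFO steal)
*       int b = base, i = b & (a.length() - 1);
*       ForkJoinTask<?> t = a.get(i);
*       if (t != null && a.compareAndSet(i, t, null)) { base = b + 1; return t; }
*       return null;
*     }
*   }
*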
* Memory ordering. See "Correct and Efficient Work-Stealing for
* Weak Memory Models" by Le, Pop, Cohen, and Nardelli, PPoPP 2013
* (http://www.di.ens.fr/~zappa/readings/ppopp13.pdf) for an
* analysis of memory ordering requirements in work-stealing
* algorithms similar to (but different than) the one used here.
* Extracting tasks in array slots via (fully fenced) CAS provides
* primary synchronization. The base and top indices imprecisely
* guide where to extract from. We do not usually require strict
* orderings of array and index updates. Many index accesses use
* plain mode, with ordering constrained by surrounding context
* (usually with respect to element CASes or the two WorkQueue
* volatile fields source and phase). When not otherwise already
* constrained, reads of "base" by queue owners use acquire-mode,
* and some externally callable methods preface accesses with
* acquire fences. Additionally, to ensure that index update
* writes are not coalesced or postponed in loops etc, "opaque"
* mode is used in a few cases where timely writes are not
* otherwise ensured. The "locked" versions of push- and pop-
* based methods for shared queues differ from owned versions
* because locking already forces some of the ordering.
*
* Because indices and slot contents cannot always be consistent,
* a check that base == top indicates (momentary) emptiness, but
* otherwise may err on the side of possibly making the queue
* appear nonempty when a push, pop, or poll have not fully
* committed, or making it appear empty when an update of top has
* not yet been visibly written. (Method isEmpty() checks the
* case of a partially completed removal of the last element.)
* Because of this, the poll operation, considered individually,
* is not wait-free. One thief cannot successfully continue until
* another in-progress one (or, if previously empty, a push)
* visibly completes. This can stall threads when required to
* consume from a given queue (see method poll()). However, in
* the aggregate, we ensure at least probabilistic
* non-blockingness. If an attempted steal fails, a scanning
* thief chooses a different random victim target to try next. So,
* in order for one thief to progress, it suffices for any
* in-progress poll or new push on any empty queue to complete.
*
* This approach also enables support of a user mode in which
* local task processing is in FIFO, not LIFO order, simply by
* using poll rather than pop. This can be useful in
* message-passing frameworks in which tasks are never joined.
*
* WorkQueues are also used in a similar way for tasks submitted
* to the pool. We cannot mix these tasks in the same queues used
* by workers. Instead, we randomly associate submission queues
* with submitting threads, using a form of hashing. The
* ThreadLocalRandom probe value serves as a hash code for
* choosing existing queues, and may be randomly repositioned upon
* contention with other submitters. In essence, submitters act
* like workers except that they are restricted to executing local
* tasks that they submitted. Insertion of tasks in shared mode
* requires a lock but we use only a simple spinlock (using field
* phase), because submitters encountering a busy queue move to a
* different position to use or create other queues -- they block
* only when creating and registering new queues. Because it is
* used only as a spinlock, unlocking requires only a "releasing"
* store (using setRelease) unless otherwise signalling.
*
* Management
* ==========
*
* The main throughput advantages of work-stealing stem from
* decentralized control -- workers mostly take tasks from
* themselves or each other, at rates that can exceed a billion
* per second. The pool itself creates, activates (enables
* scanning for and running tasks), deactivates, blocks, and
* terminates threads, all with minimal central information.
* There are only a few properties that we can globally track or
* maintain, so we pack them into a small number of variables,
* often maintaining atomicity without blocking or locking.
* Nearly all essentially atomic control state is held in a few
* volatile variables that are by far most often read (not
* written) as status and consistency checks. We pack as much
* information into them as we can.
*
* Field "ctl" contains 64 bits holding information needed to
* atomically decide to add, enqueue (on an event queue), and
* dequeue and release workers. To enable this packing, we
* restrict maximum parallelism to (1<<15)-1 (which is far in
* excess of normal operating range) to allow ids, counts, and
* their negations (used for thresholding) to fit into 16bit
* subfields.
*
* Field "mode" holds configuration parameters as well as lifetime
* status, atomically and monotonically setting SHUTDOWN, STOP,
* and finally TERMINATED bits.
*
* Field "workQueues" holds references to WorkQueues. It is
* updated (only during worker creation and termination) under
* lock (using field workerNamePrefix as lock), but is otherwise
* concurrently readable, and accessed directly. We also ensure
* that uses of the array reference itself never become too stale
* in case of resizing, by arranging that (re-)reads are separated
* by at least one acquiring read access. To simplify index-based
* operations, the array size is always a power of two, and all
* readers must tolerate null slots. Worker queues are at odd
* indices. Shared (submission) queues are at even indices, up to
* a maximum of 64 slots, to limit growth even if the array needs
* to expand to add more workers. Grouping them together in this
* way simplifies and speeds up task scanning.
*
* All worker thread creation is on-demand, triggered by task
* submissions, replacement of terminated workers, and/or
* compensation for blocked workers. However, all other support
* code is set up to work with other policies. To ensure that we
* do not hold on to worker references that would prevent GC, all
* accesses to workQueues are via indices into the workQueues
* array (which is one source of some of the messy code
* constructions here). In essence, the workQueues array serves as
* a weak reference mechanism. Thus for example the stack top
* subfield of ctl stores indices, not references.
*
* Queuing Idle Workers. Unlike HPC work-stealing frameworks, we
* cannot let workers spin indefinitely scanning for tasks when
* none can be found immediately, and we cannot start/resume
* workers unless there appear to be tasks available. On the
* other hand, we must quickly prod them into action when new
* tasks are submitted or generated. In many usages, ramp-up time
* is the main limiting factor in overall performance, which is
* compounded at program start-up by JIT compilation and
* allocation. So we streamline this as much as possible.
*
* The "ctl" field atomically maintains total worker and
* "released" worker counts, plus the head of the available worker
* queue (actually stack, represented by the lower 32bit subfield
* of ctl). Released workers are those known to be scanning for
* and/or running tasks. Unreleased ("available") workers are
* recorded in the ctl stack. These workers are made available for
* signalling by enqueuing in ctl (see method runWorker). The
* "queue" is a form of Treiber stack. This is ideal for
* activating threads in most-recently used order, and improves
* performance and locality, outweighing the disadvantages of
* being prone to contention and inability to release a worker
* unless it is topmost on stack. To avoid missed signal problems
* inherent in any wait/signal design, available workers rescan
* for (and if found run) tasks after enqueuing. Normally their
* release status will be updated while doing so, but the released
* worker ctl count may underestimate the number of active
* threads. (However, it is still possible to determine quiescence
* via a validation traversal -- see isQuiescent). After an
* unsuccessful rescan, available workers are blocked until
* signalled (see signalWork). The top stack state holds the
* value of the "phase" field of the worker: its index and status,
* plus a version counter that, in addition to the count subfields
* (also serving as version stamps) provide protection against
* Treiber stack ABA effects.
*
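* Ignoring the index/version packing described above, the idle-worker
* "queue" follows the classic Treiber stack discipline, sketched here for
* illustration (the real code packs this state into ctl rather than using
* nodes and AtomicReference):
*
*   class Node { final int workerIndex; Node next; Node(int i) { workerIndex = i; } }
*   final AtomicReference<Node> top = new AtomicReference<>();
*   void push(Node n) {                  // a worker becomes available
*     do { n.next = top.get(); } while (!top.compareAndSet(n.next, n));
*   }
*   Node pop() {                         // release the most recently idled worker
*     Node n;
*     do { n = top.get(); } while (n != null && !top.compareAndSet(n, n.next));
*     return n;
*   }
*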
* Creating workers. To create a worker, we pre-increment counts
* (serving as a reservation), and attempt to construct a
* ForkJoinWorkerThread via its factory. Upon construction, the
* new thread invokes registerWorker, where it constructs a
* WorkQueue and is assigned an index in the workQueues array
* (expanding the array if necessary). The thread is then started.
* Upon any exception across these steps, or null return from
* factory, deregisterWorker adjusts counts and records
* accordingly. If a null return, the pool continues running with
* fewer than the target number workers. If exceptional, the
* exception is propagated, generally to some external caller.
* Worker index assignment avoids the bias in scanning that would
* occur if entries were sequentially packed starting at the front
* of the workQueues array. We treat the array as a simple
* power-of-two hash table, expanding as needed. The seedIndex
* increment ensures no collisions until a resize is needed or a
* worker is deregistered and replaced, and thereafter keeps
* probability of collision low. We cannot use
* ThreadLocalRandom.getProbe() for similar purposes here because
* the thread has not started yet, but do so for creating
* submission queues for existing external threads (see
* externalPush).
*
* WorkQueue field "phase" is used by both workers and the pool to
* manage and track whether a worker is UNSIGNALLED (possibly
* blocked waiting for a signal). When a worker is enqueued its
* phase field is set. Note that phase field updates lag queue CAS
* releases so usage requires care -- seeing a negative phase does
* not guarantee that the worker is available. When queued, the
* lower 16 bits of scanState must hold its pool index. So we
* place the index there upon initialization and otherwise keep it
* there or restore it when necessary.
*
* The ctl field also serves as the basis for memory
* synchronization surrounding activation. This uses a more
* efficient version of a Dekker-like rule that task producers and
* consumers sync with each other by both writing/CASing ctl (even
* if to its current value). This would be extremely costly. So
* we relax it in several ways: (1) Producers only signal when
* their queue is possibly empty at some point during a push
* operation (which requires conservatively checking size zero or
* one to cover races). (2) Other workers propagate this signal
* when they find tasks in a queue with size greater than one. (3)
* Workers only enqueue after scanning (see below) and not finding
* any tasks. (4) Rather than CASing ctl to its current value in
* the common case where no action is required, we reduce write
* contention by equivalently prefacing signalWork when called by
* an external task producer using a memory access with
* full-volatile semantics or a "fullFence".
*
* Almost always, too many signals are issued, in part because a
* task producer cannot tell if some existing worker is in the
* midst of finishing one task (or already scanning) and ready to
* take another without being signalled. So the producer might
* instead activate a different worker that does not find any
* work, and then inactivates. This scarcely matters in
* steady-state computations involving all workers, but can create
* contention and bookkeeping bottlenecks during ramp-up,
* ramp-down, and small computations involving only a few workers.
*
* Scanning. Method scan (from runWorker) performs top-level
* scanning for tasks. (Similar scans appear in helpQuiesce and
* pollScan.) Each scan traverses and tries to poll from each
* queue starting at a random index. Scans are not performed in
* ideal random permutation order, to reduce cacheline
* contention. The pseudorandom generator need not have
* high-quality statistical properties in the long term, but just
* within computations; We use Marsaglia XorShifts (often via
* ThreadLocalRandom.nextSecondarySeed), which are cheap and
* suffice. Scanning also includes contention reduction: When
* scanning workers fail to extract an apparently existing task,
* they soon restart at a different pseudorandom index. This form
* of backoff improves throughput when many threads are trying to
* take tasks from few queues, which can be common in some usages.
* Scans do not otherwise explicitly take into account core
* affinities, loads, cache localities, etc. However, they do
* exploit temporal locality (which usually approximates these) by
* preferring to re-poll from the same queue after a successful
* poll before trying others (see method topLevelExec). However
* this preference is bounded (see TOP_BOUND_SHIFT) as a safeguard
* against infinitely unfair looping under unbounded user task
* recursion, and also to reduce long-term contention when many
* threads poll few queues holding many small tasks. The bound is
* high enough to avoid much impact on locality and scheduling
* overhead.
*
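* For illustration only (a sketch of the style of pseudorandom stepping used
* in runWorker/scan, not a quote of that code), a Marsaglia XorShift step
* and a power-of-two index pick look like:
*
*   r ^= r << 13; r ^= r >>> 17; r ^= r << 5;   // advance pseudorandom seed
*   int start = r & (n - 1);                    // n = workQueues.length (a power of two)
*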
* Trimming workers. To release resources after periods of lack of
* use, a worker starting to wait when the pool is quiescent will
* time out and terminate (see method runWorker) if the pool has
* remained quiescent for period given by field keepAlive.
*
* Shutdown and Termination. A call to shutdownNow invokes
* tryTerminate to atomically set a runState bit. The calling
* thread, as well as every other worker thereafter terminating,
* helps terminate others by cancelling their unprocessed tasks,
* and waking them up, doing so repeatedly until stable. Calls to
* non-abrupt shutdown() preface this by checking whether
* termination should commence by sweeping through queues (until
* stable) to ensure lack of in-flight submissions and workers
* about to process them before triggering the "STOP" phase of
* termination.
*
* Joining Tasks
* =============
*
* Any of several actions may be taken when one worker is waiting
* to join a task stolen (or always held) by another. Because we
* are multiplexing many tasks on to a pool of workers, we can't
* always just let them block (as in Thread.join). We also cannot
* just reassign the joiner's run-time stack with another and
* replace it later, which would be a form of "continuation", that
* even if possible is not necessarily a good idea since we may
* need both an unblocked task and its continuation to progress.
* Instead we combine two tactics:
*
* Helping: Arranging for the joiner to execute some task that it
* would be running if the steal had not occurred.
*
* Compensating: Unless there are already enough live threads,
* method tryCompensate() may create or re-activate a spare
* thread to compensate for blocked joiners until they unblock.
*
* A third form (implemented in tryRemoveAndExec) amounts to
* helping a hypothetical compensator: If we can readily tell that
* a possible action of a compensator is to steal and execute the
* task being joined, the joining thread can do so directly,
* without the need for a compensation thread.
*
* The ManagedBlocker extension API can't use helping so relies
* only on compensation in method awaitBlocker.
*
* The algorithm in awaitJoin entails a form of "linear helping".
* Each worker records (in field source) the id of the queue from
* which it last stole a task. The scan in method awaitJoin uses
* these markers to try to find a worker to help (i.e., steal back
* a task from and execute it) that could hasten completion of the
* actively joined task. Thus, the joiner executes a task that
* would be on its own local deque if the to-be-joined task had
* not been stolen. This is a conservative variant of the approach
* described in Wagner & Calder "Leapfrogging: a portable
* technique for implementing efficient futures" SIGPLAN Notices,
* 1993 (http://portal.acm.org/citation.cfm?id=155354). It differs
* mainly in that we only record queue ids, not full dependency
* links. This requires a linear scan of the workQueues array to
* locate stealers, but isolates cost to when it is needed, rather
* than adding to per-task overhead. Searches can fail to locate
* stealers when GC stalls and the like delay recording sources.
* Further, even when accurately identified, stealers might not
* ever produce a task that the joiner can in turn help with. So,
* compensation is tried upon failure to find tasks to run.
*
* Compensation does not by default aim to keep exactly the target
* parallelism number of unblocked threads running at any given
* time. Some previous versions of this class employed immediate
* compensations for any blocked join. However, in practice, the
* vast majority of blockages are transient byproducts of GC and
* other JVM or OS activities that are made worse by replacement
* when they cause longer-term oversubscription. Rather than
* impose arbitrary policies, we allow users to override the
* default of only adding threads upon apparent starvation. The
* compensation mechanism may also be bounded. Bounds for the
* commonPool (see COMMON_MAX_SPARES) better enable JVMs to cope
* with programming errors and abuse before running out of
* resources to do so.
*
* Common Pool
* ===========
*
* The static common pool always exists after static
* initialization. Since it (or any other created pool) need
* never be used, we minimize initial construction overhead and
* footprint to the setup of about a dozen fields.
*
* When external threads submit to the common pool, they can
* perform subtask processing (see externalHelpComplete and
* related methods) upon joins. This caller-helps policy makes it
* sensible to set common pool parallelism level to one (or more)
* less than the total number of available cores, or even zero for
* pure caller-runs. We do not need to record whether external
* submissions are to the common pool -- if not, external help
* methods return quickly. These submitters would otherwise be
* blocked waiting for completion, so the extra effort (with
* liberally sprinkled task status checks) in inapplicable cases
* amounts to an odd form of limited spin-wait before blocking in
* ForkJoinTask.join.
*
* As a more appropriate default in managed environments, unless
* overridden by system properties, we use workers of subclass
* InnocuousForkJoinWorkerThread when there is a SecurityManager
* present. These workers have no permissions set, do not belong
* to any user-defined ThreadGroup, and erase all ThreadLocals
* after executing any top-level task (see
* WorkQueue.afterTopLevelExec). The associated mechanics (mainly
* in ForkJoinWorkerThread) may be JVM-dependent and must access
* particular Thread class fields to achieve this effect.
*
* Memory placement
* ================
*
* Performance can be very sensitive to placement of instances of
* ForkJoinPool and WorkQueues and their queue arrays. To reduce
* false-sharing impact, the @Contended annotation isolates
* adjacent WorkQueue instances, as well as the ForkJoinPool.ctl
* field. WorkQueue arrays are allocated (by their threads) with
* larger initial sizes than most ever need, mostly to reduce
* false sharing with current garbage collectors that use cardmark
* tables.
*
* Style notes
* ===========
*
* Memory ordering relies mainly on VarHandles. This can be
* awkward and ugly, but also reflects the need to control
* outcomes across the unusual cases that arise in very racy code
* with very few invariants. All fields are read into locals
* before use, and null-checked if they are references. Array
* accesses using masked indices include checks (that are always
* true) that the array length is non-zero to avoid compilers
* inserting more expensive traps. This is usually done in a
* "C"-like style of listing declarations at the heads of methods
* or blocks, and using inline assignments on first encounter.
* Nearly all explicit checks lead to bypass/return, not exception
* throws, because they may legitimately arise due to
* cancellation/revocation during shutdown.
*
* There is a lot of representation-level coupling among classes
* ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
* fields of WorkQueue maintain data structures managed by
* ForkJoinPool, so are directly accessed. There is little point
* trying to reduce this, since any associated future changes in
* representations will need to be accompanied by algorithmic
* changes anyway. Several methods intrinsically sprawl because
* they must accumulate sets of consistent reads of fields held in
* local variables. Some others are artificially broken up to
* reduce producer/consumer imbalances due to dynamic compilation.
* There are also other coding oddities (including several
* unnecessary-looking hoisted null checks) that help some methods
* perform reasonably even when interpreted (not compiled).
*
* The order of declarations in this file is (with a few exceptions):
* (1) Static utility functions
* (2) Nested (static) classes
* (3) Static fields
* (4) Fields, along with constants used when unpacking some of them
* (5) Internal control methods
* (6) Callbacks and other support for ForkJoinTask methods
* (7) Exported methods
* (8) Static block initializing statics in minimally dependent order
*/
// Bounds
static final int SWIDTH = 16; // width of short
static final int SMASK = 0xffff; // short bits == max index
static final int MAX_CAP = 0x7fff; // max #workers - 1
static final int SQMASK = 0x007e; // max 64 even-indexed slots (at most 64 shared submission queues)
// Masks and units for WorkQueue.phase and ctl sp subfield
static final int QLOCK = 1; // must be 1
static final int SS_SEQ = 1 << 16; // version count
static final int QUIET = 1 << 30; // not scanning or working
static final int UNSIGNALLED = 1 << 31; // must be negative
static final int DORMANT = QUIET | UNSIGNALLED;
// Mode bits and sentinels, some also used in WorkQueue id and .source fields
static final int OWNED = 1; // queue has owner thread
// Shared (submission) queues of external submitter threads never carry this flag; whether a worker's own queue carries it depends on the constructor's asyncMode parameter
static final int FIFO = 1 << 16; // fifo queue or access mode
static final int SHUTDOWN = 1 << 18;
static final int TERMINATED = 1 << 19;
static final int STOP = 1 << 31; // must be negative
/*
 * Pool mode:
 * the low 16 bits hold the parallelism; the higher bits hold the queue mode (FIFO)
 * and the run-state flags (SHUTDOWN, STOP, TERMINATED).
 */
volatile int mode; // parallelism, runstate, queue mode
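// Illustrative decodings of mode (comment added to this annotated copy; mirrors uses elsewhere in this file):
//   int parallelism  = mode & SMASK;         // low 16 bits
//   boolean fifo     = (mode & FIFO) != 0;   // asyncMode was requested at construction
//   boolean stopping = (mode & STOP) != 0;   // termination has begun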
/**
* Initial capacity of work-stealing queue array.
* Must be a power of two, at least 2.
*/
static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
/**
* Maximum capacity for queue arrays.
* Must be a power of two less than or equal to 1 << (31 - width of array entry) to ensure lack of wraparound of index calculations,
* but defined to a value a bit less than this to help users trap runaway programs before saturating systems.
*/
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
/**
* The maximum number of top-level polls per worker before checking other queues, expressed as a bit shift to, in effect,
* multiply by pool size, and then use as random value mask, so average bound is about poolSize*(1<<TOP_BOUND_SHIFT).
* See above for rationale.
*/
static final int TOP_BOUND_SHIFT = 10;
/**
* Default idle timeout value (in milliseconds) for the thread triggering quiescence to park waiting for new work
*/
// unit: milliseconds
private static final long DEFAULT_KEEPALIVE = 60_000L;
/**
* Undershoot tolerance for idle timeouts
*/
// tolerance, in milliseconds
private static final long TIMEOUT_SLOP = 20L;
/**
* Increment for seed generators. See class ThreadLocal for explanation.
*/
// magic constant for generating well-spread hash values; plays the same role as HASH_INCREMENT in ThreadLocal
private static final int SEED_INCREMENT = 0x9e3779b9;
/*
* Bits and masks for field ctl, packed with 4 16-bit subfields:
* RC: Number of released (unqueued) workers minus target parallelism
* TC: Number of total workers minus target parallelism
* SS: version count and status of top waiting thread
* ID: poolIndex of top of Treiber stack of waiters
*
* When convenient, we can extract the lower 32 stack top bits (including version bits) as sp=(int)ctl.
* The offsets of counts by the target parallelism and the positionings of fields makes it possible to perform the most common checks via sign tests of fields:
* When ac is negative, there are not enough unqueued workers, when tc is negative, there are not enough total workers.
* When sp is non-zero, there are waiting workers.
* To deal with possibly negative fields, we use casts in and out of "short" and/or signed shifts to maintain signedness.
*
* Because it occupies uppermost bits, we can add one release count using getAndAddLong of RC_UNIT, rather than CAS, when returning from a blocked join.
* Other updates entail multiple subfields and masking, requiring CAS.
*
* The limits packed in field "bounds" are also offset by the parallelism level to make them comparable to the ctl rc and tc fields.
*/
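/*
 * Illustrative subfield extraction (comment added to this annotated copy;
 * mirrors expressions used throughout this file):
 *   int sp = (int) ctl;                            // lower 32 bits: top of waiter stack
 *   int rc = (short) (ctl >>> RC_SHIFT);           // released workers minus target parallelism
 *   int tc = (short) (ctl >>> TC_SHIFT);           // total workers minus target parallelism
 *   boolean addWorker = (ctl & ADD_WORKER) != 0L;  // tc negative: below target total workers
 */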
// Lower and upper word masks
private static final long SP_MASK = 0xffffffffL; // 0x 0000 0000 FFFF FFFF
private static final long UC_MASK = ~SP_MASK; // 0x FFFF FFFF 0000 0000
// Release counts (used to track how many worker threads are active)
private static final int RC_SHIFT = 48;
private static final long RC_UNIT = 0x0001L << RC_SHIFT; // 0x 0001 0000 0000 0000
private static final long RC_MASK = 0xffffL << RC_SHIFT; // 0x FFFF 0000 0000 0000
// Total counts (used to track the total number of worker threads created, >= parallelism)
private static final int TC_SHIFT = 32;
private static final long TC_UNIT = 0x0001L << TC_SHIFT; // 0x 0000 0001 0000 0000
private static final long TC_MASK = 0xffffL << TC_SHIFT; // 0x 0000 FFFF 0000 0000
// sign
private static final long ADD_WORKER = 0x0001L << (TC_SHIFT + 15); // 0x 0000 8000 0000 0000
/*
 *      (1)  (2)  (3)  (4)
 * 0x 0000 0000 0000 0000
 *
 * The 1st 16-bit field is initialized to the negation of the parallelism;
 * it tracks the number of active worker threads
 * (incremented when a worker is created or resumes from blocking, decremented when a worker blocks).
 *
 * The 2nd 16-bit field is initialized to the negation of the pool's targeted number of
 * worker threads (>= parallelism); it tracks how many worker threads have been created,
 * independent of how many are currently dormant.
 *
 * The 3rd and 4th 16-bit fields record the phase of the work queue owned by the most
 * recently blocked worker: the 3rd holds the version/status bits (SS) and the 4th holds
 * that worker's pool index (ID), i.e. the top of the Treiber stack of waiters.
 */
@jdk.internal.vm.annotation.Contended("fjpctl") // segregate
volatile long ctl; // main pool control
// Instance fields
// cumulative number of tasks stolen, aggregated across worker threads
volatile long stealCount; // collects worker nsteals
final long keepAlive; // milliseconds before dropping if idle
// magic increment used when generating work-queue indices
int indexSeed; // next worker index
final int bounds; // min, max threads packed as shorts
/*
 * The queue array; its capacity is always a power of two.
 *
 * Each pool (ForkJoinPool) owns one queue array (WorkQueue[]).
 * The queue array holds multiple work queues (WorkQueue).
 * Each work queue contains an array of tasks (ForkJoinTask[]),
 * which stores the tasks (ForkJoinTask) currently queued for execution.
 *
 * In short: every slot of the queue array carries one task queue.
 */
WorkQueue[] workQueues; // the queue array
// name prefix for worker threads
final String workerNamePrefix; // for worker thread string; sync lock
// factory used by this pool to create worker threads
final ForkJoinWorkerThreadFactory factory;
/**
* Creates a new ForkJoinWorkerThread.
* This factory is used unless overridden in ForkJoinPool constructors.
*/
// the default worker-thread factory
public static final ForkJoinWorkerThreadFactory defaultForkJoinWorkerThreadFactory;
// handler for uncaught exceptions thrown by worker threads
final UncaughtExceptionHandler ueh; // per-worker UEH
final Predicate<? super ForkJoinPool> saturate;
/**
* Permission required for callers of methods that may start or kill threads.
*/
// permission checked for callers that may start or kill pool threads (e.g. on shutdown)
static final RuntimePermission modifyThreadPermission;
/**
* Sequence number for creating workerNamePrefix.
*/
// pool sequence number (incremented per pool)
private static int poolNumberSequence;
/*▼ Common pool ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓┓ */
/**
* Common (static) pool.
* Non-null for public use unless a static construction exception,
* but internal usages null-check on use to paranoically avoid potential initialization circularities as well as to simplify generated code.
*/
// the common pool, shared by all users of ForkJoinPool
static final ForkJoinPool common;
/**
* Common pool parallelism.
* To allow simpler use and management when common pool threads are disabled,
* we allow the underlying common.parallelism field to be zero,
* but in that case still report parallelism as 1 to reflect resulting caller-runs mechanics.
*/
// parallelism of the common pool
static final int COMMON_PARALLELISM;
/**
* Limit on spare thread construction in tryCompensate.
*/
// maximum number of spare worker threads allowed in the common pool
private static final int COMMON_MAX_SPARES;
/**
* The default value for COMMON_MAX_SPARES.
* Overridable using the "java.util.concurrent.ForkJoinPool.common.maximumSpares" system property.
* The default value is far in excess of normal requirements,
* but also far short of MAX_CAP and typical OS thread limits,
* so allows JVMs to catch misuse/abuse before running out of resources needed to do so.
*/
// default cap on spare worker threads for the common pool
private static final int DEFAULT_COMMON_MAX_SPARES = 256;
/*▲ Common pool ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓┛ */
// VarHandle mechanics
private static final VarHandle CTL;
private static final VarHandle MODE;
static final VarHandle QA;
static {
try {
MethodHandles.Lookup l = MethodHandles.lookup();
CTL = l.findVarHandle(ForkJoinPool.class, "ctl", long.class);
MODE = l.findVarHandle(ForkJoinPool.class, "mode", int.class);
QA = MethodHandles.arrayElementVarHandle(ForkJoinTask[].class);
} catch(ReflectiveOperationException e) {
throw new ExceptionInInitializerError(e);
}
// install the default fork/join worker-thread factory
defaultForkJoinWorkerThreadFactory = new DefaultForkJoinWorkerThreadFactory();
// set up the modifyThread permission
modifyThreadPermission = new RuntimePermission("modifyThread");
// Reduce the risk of rare disastrous classloading in first call to
// LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
Class<?> ensureLoaded = LockSupport.class;
// determine the cap on spare worker threads allowed in the common pool
int commonMaxSpares = DEFAULT_COMMON_MAX_SPARES;
try {
// may be overridden on the command line: -Djava.util.concurrent.ForkJoinPool.common.maximumSpares=<size>
String p = System.getProperty("java.util.concurrent.ForkJoinPool.common.maximumSpares");
if(p != null) {
commonMaxSpares = Integer.parseInt(p);
}
} catch(Exception ignore) {
}
COMMON_MAX_SPARES = commonMaxSpares;
// construct the common pool
common = AccessController.doPrivileged(new PrivilegedAction<>() {
public ForkJoinPool run() {
return new ForkJoinPool((byte) 0);
}
});
// record the parallelism of the common pool
COMMON_PARALLELISM = Math.max(common.mode & SMASK, 1);
}
/*▼ Constructors ████████████████████████████████████████████████████████████████████████████████┓ */
/**
* Creates a {@code ForkJoinPool} with parallelism equal to {@link
* java.lang.Runtime#availableProcessors}, using defaults for all
* other parameters (see {@link #ForkJoinPool(int,
* ForkJoinWorkerThreadFactory, UncaughtExceptionHandler, boolean,
* int, int, int, Predicate, long, TimeUnit)}).
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool() {
this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
defaultForkJoinWorkerThreadFactory, null, false, 0, MAX_CAP, 1, null, DEFAULT_KEEPALIVE, TimeUnit.MILLISECONDS
);
}
/**
* Creates a {@code ForkJoinPool} with the indicated parallelism
* level, using defaults for all other parameters (see {@link
* #ForkJoinPool(int, ForkJoinWorkerThreadFactory,
* UncaughtExceptionHandler, boolean, int, int, int, Predicate,
* long, TimeUnit)}).
*
* @param parallelism the parallelism level
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism) {
this(parallelism, defaultForkJoinWorkerThreadFactory, null, false, 0, MAX_CAP, 1, null, DEFAULT_KEEPALIVE, TimeUnit.MILLISECONDS);
}
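/*
 * Illustrative usage (comment added to this annotated copy, not part of the
 * original source): constructing a small dedicated pool and submitting a
 * plain Callable to it.
 *
 *   ForkJoinPool pool = new ForkJoinPool(4);
 *   ForkJoinTask<Integer> f = pool.submit(() -> 6 * 7);   // Callable<Integer>
 *   int answer = f.join();                                // 42; join() throws no checked exceptions
 *   pool.shutdown();
 */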
/**
* Creates a {@code ForkJoinPool} with the given parameters (using
* defaults for others -- see {@link #ForkJoinPool(int,
* ForkJoinWorkerThreadFactory, UncaughtExceptionHandler, boolean,
* int, int, int, Predicate, long, TimeUnit)}).
*
* @param parallelism the parallelism level. For default value,
* use {@link java.lang.Runtime#availableProcessors}.
* @param factory the factory for creating new threads. For default value,
* use {@link #defaultForkJoinWorkerThreadFactory}.
* @param handler the handler for internal worker threads that
* terminate due to unrecoverable errors encountered while executing
* tasks. For default value, use {@code null}.
* @param asyncMode if true,
* establishes local first-in-first-out scheduling mode for forked
* tasks that are never joined. This mode may be more appropriate
* than default locally stack-based mode in applications in which
* worker threads only process event-style asynchronous tasks.
* For default value, use {@code false}.
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws NullPointerException if the factory is null
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism, ForkJoinWorkerThreadFactory factory, UncaughtExceptionHandler handler, boolean asyncMode) {