/
threadpool.cs
1935 lines (1722 loc) · 81.3 KB
/
threadpool.cs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// ==++==
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// ==--==
//
// <OWNER>Microsoft</OWNER>
/*=============================================================================
**
** Class: ThreadPool
**
**
** Purpose: Class for creating and managing a threadpool
**
**
=============================================================================*/
#pragma warning disable 0420
/*
* Below you'll notice two sets of APIs that are separated by the
* use of 'Unsafe' in their names. The unsafe versions are called
* that because they do not propagate the calling stack onto the
* worker thread. This allows code to lose the calling stack and
* thereby elevate its security privileges. Note that this operation
* is much akin to the combined ability to control security policy
* and control security evidence. With these privileges, a person
* can gain the right to load assemblies that are fully trusted which
* then assert full trust and can call any code they want regardless
* of the previous stack information.
*/
namespace System.Threading
{
using System.Security;
using System.Runtime.Remoting;
using System.Security.Permissions;
using System;
using Microsoft.Win32;
using System.Runtime.CompilerServices;
using System.Runtime.ConstrainedExecution;
using System.Runtime.InteropServices;
using System.Runtime.Versioning;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Diagnostics.CodeAnalysis;
using System.Diagnostics.Tracing;
// Per-AppDomain global state shared by all thread-pool machinery in this domain.
internal static class ThreadPoolGlobals
{
    //Per-appDomain quantum (in ms) for which the thread keeps processing
    //requests in the current domain.
    public static uint tpQuantum = 30U;

    // Cached processor count; used to cap the number of outstanding thread
    // requests (see ThreadPoolWorkQueue.EnsureThreadRequested).
    public static int processorCount = Environment.ProcessorCount;

    // True when a CLR host supplies the thread pool rather than the VM itself.
    public static bool tpHosted = ThreadPool.IsThreadPoolHosted();

    // Presumably set once the native (VM) side of the thread pool has been
    // initialized; it is only read, never written, in this part of the file.
    public static volatile bool vmTpInitialized;

    // When true, Dispatch reports worker busy/idle transitions via
    // ThreadPool.ReportThreadStatus around each work item.
    public static bool enableWorkerTracking;

    // The single work queue for this AppDomain, shared by every pool thread.
    [SecurityCritical]
    public static ThreadPoolWorkQueue workQueue = new ThreadPoolWorkQueue();

    [System.Security.SecuritySafeCritical] // static constructors should be safe to call
    static ThreadPoolGlobals()
    {
    }
}
internal sealed class ThreadPoolWorkQueue
{
// Simple sparsely populated array to allow lock-free reading.
// Writers (Add/Remove) serialize on a lock; readers just grab the volatile
// array reference via Current and scan it, tolerating null holes.
internal class SparseArray<T> where T : class
{
    private volatile T[] m_array;

    internal SparseArray(int initialSize)
    {
        m_array = new T[initialSize];
    }

    // Lock-free snapshot of the backing array. Callers must tolerate null
    // entries and the array being replaced by a concurrent resize.
    internal T[] Current
    {
        get { return m_array; }
    }

    // Adds e into the first free slot, doubling the array when full.
    // Returns the index the element was stored at.
    internal int Add(T e)
    {
        while (true)
        {
            T[] array = m_array;
            lock (array)
            {
                for (int i = 0; i < array.Length; i++)
                {
                    if (array[i] == null)
                    {
                        // Volatile write so lock-free readers of Current see
                        // the element no later than the index store.
                        Volatile.Write(ref array[i], e);
                        return i;
                    }
                    else if (i == array.Length - 1)
                    {
                        // Must resize. If we raced and lost, we start over again.
                        // (i is the last index, so this 'continue' ends the for
                        // loop, releases the lock, and retries on the new array.)
                        if (array != m_array)
                            continue;
                        T[] newArray = new T[array.Length * 2];
                        Array.Copy(array, newArray, i + 1);
                        newArray[i + 1] = e;
                        m_array = newArray;
                        return i + 1;
                    }
                }
            }
        }
    }

    // Nulls out the first slot containing e (no-op if absent).
    // NOTE(review): the lock is taken on a snapshot of m_array, but the loop
    // scans the current m_array, which a concurrent Add may have replaced.
    // That means Remove can run under a different lock than the Add that is
    // filling the new array — looks intentional-but-fragile; confirm before
    // relying on stronger guarantees here.
    internal void Remove(T e)
    {
        T[] array = m_array;
        lock (array)
        {
            for (int i = 0; i < m_array.Length; i++)
            {
                if (m_array[i] == e)
                {
                    Volatile.Write(ref m_array[i], null);
                    break;
                }
            }
        }
    }
}
// Per-thread double-ended queue of work items. The owning thread pushes and
// pops at the tail, lock-free in the common case; foreign threads steal from
// the head under m_foreignLock. m_headIndex/m_tailIndex grow monotonically
// and are masked by m_mask to index into the circular m_array.
internal class WorkStealingQueue
{
    private const int INITIAL_SIZE = 32;
    internal volatile IThreadPoolWorkItem[] m_array = new IThreadPoolWorkItem[INITIAL_SIZE];
    private volatile int m_mask = INITIAL_SIZE - 1;

#if DEBUG
    // in debug builds, start at the end so we exercise the index reset logic.
    private const int START_INDEX = int.MaxValue;
#else
    private const int START_INDEX = 0;
#endif

    private volatile int m_headIndex = START_INDEX;
    private volatile int m_tailIndex = START_INDEX;

    // Serializes steals against the owner's slow paths (index overflow,
    // resize, and pops when 0 or 1 elements remain).
    private SpinLock m_foreignLock = new SpinLock(false);

    // Pushes a work item onto the tail. Must only be called by the owning thread.
    public void LocalPush(IThreadPoolWorkItem obj)
    {
        int tail = m_tailIndex;

        // We're going to increment the tail; if we'll overflow, then we need to reset our counts
        if (tail == int.MaxValue)
        {
            bool lockTaken = false;
            try
            {
                m_foreignLock.Enter(ref lockTaken);

                if (m_tailIndex == int.MaxValue)
                {
                    //
                    // Rather than resetting to zero, we'll just mask off the bits we don't care about.
                    // This way we don't need to rearrange the items already in the queue; they'll be found
                    // correctly exactly where they are.  One subtlety here is that we need to make sure that
                    // if head is currently < tail, it remains that way.  This happens to just fall out from
                    // the bit-masking, because we only do this if tail == int.MaxValue, meaning that all
                    // bits are set, so all of the bits we're keeping will also be set.  Thus it's impossible
                    // for the head to end up > than the tail, since you can't set any more bits than all of
                    // them.
                    //
                    m_headIndex = m_headIndex & m_mask;
                    m_tailIndex = tail = m_tailIndex & m_mask;
                    Contract.Assert(m_headIndex <= m_tailIndex);
                }
            }
            finally
            {
                if (lockTaken)
                    m_foreignLock.Exit(true);
            }
        }

        // When there are at least 2 elements' worth of space, we can take the fast path.
        if (tail < m_headIndex + m_mask)
        {
            Volatile.Write(ref m_array[tail & m_mask], obj);
            m_tailIndex = tail + 1;
        }
        else
        {
            // We need to contend with foreign pops, so we lock.
            bool lockTaken = false;
            try
            {
                m_foreignLock.Enter(ref lockTaken);

                int head = m_headIndex;
                int count = m_tailIndex - m_headIndex;

                // If the queue is full (at most one slot left), double the array
                // and compact the live elements to the front; otherwise there is
                // still room and we just store the element under the lock.
                if (count >= m_mask)
                {
                    // We're full; expand the queue by doubling its size.
                    IThreadPoolWorkItem[] newArray = new IThreadPoolWorkItem[m_array.Length << 1];
                    for (int i = 0; i < m_array.Length; i++)
                        newArray[i] = m_array[(i + head) & m_mask];

                    // Reset the field values, incl. the mask.
                    m_array = newArray;
                    m_headIndex = 0;
                    m_tailIndex = tail = count;
                    m_mask = (m_mask << 1) | 1;
                }

                Volatile.Write(ref m_array[tail & m_mask], obj);
                m_tailIndex = tail + 1;
            }
            finally
            {
                if (lockTaken)
                    m_foreignLock.Exit(false);
            }
        }
    }

    // Removes a specific work item from the queue if present (used to inline
    // a work item that a waiter is blocked on). Returns true if it was found
    // and removed. Must only be called by the owning thread.
    [SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
    public bool LocalFindAndPop(IThreadPoolWorkItem obj)
    {
        // Fast path: check the tail. If equal, we can skip the lock.
        if (m_array[(m_tailIndex - 1) & m_mask] == obj)
        {
            IThreadPoolWorkItem unused;
            if (LocalPop(out unused))
            {
                Contract.Assert(unused == obj);
                return true;
            }
            return false;
        }

        // Else, do an O(N) search for the work item. The theory of work stealing and our
        // inlining logic is that most waits will happen on recently queued work.  And
        // since recently queued work will be close to the tail end (which is where we
        // begin our search), we will likely find it quickly.  In the worst case, we
        // will traverse the whole local queue; this is typically not going to be a
        // problem (although degenerate cases are clearly an issue) because local work
        // queues tend to be somewhat shallow in length, and because if we fail to find
        // the work item, we are about to block anyway (which is very expensive).
        for (int i = m_tailIndex - 2; i >= m_headIndex; i--)
        {
            if (m_array[i & m_mask] == obj)
            {
                // If we found the element, block out steals to avoid interference.
                // A concurrent steal may still null this slot out before we
                // acquire the lock, which is rechecked below.
                bool lockTaken = false;
                try
                {
                    m_foreignLock.Enter(ref lockTaken);

                    // If we lost the race, bail.
                    if (m_array[i & m_mask] == null)
                        return false;

                    // Otherwise, null out the element.
                    Volatile.Write(ref m_array[i & m_mask], null);

                    // And then check to see if we can fix up the indexes (if we're at
                    // the edge).  If we can't, we just leave nulls in the array and they'll
                    // get filtered out eventually (but may lead to superflous resizing).
                    if (i == m_tailIndex)
                        m_tailIndex -= 1;
                    else if (i == m_headIndex)
                        m_headIndex += 1;

                    return true;
                }
                finally
                {
                    if (lockTaken)
                        m_foreignLock.Exit(false);
                }
            }
        }

        return false;
    }

    // Pops the most recently pushed item from the tail. Lock-free unless the
    // queue is nearly empty, in which case we serialize against steals.
    // Must only be called by the owning thread.
    [SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
    public bool LocalPop(out IThreadPoolWorkItem obj)
    {
        while (true)
        {
            // Decrement the tail using a fence to ensure subsequent read doesn't come before.
            int tail = m_tailIndex;
            if (m_headIndex >= tail)
            {
                obj = null;
                return false;
            }

            tail -= 1;
            // Interlocked.Exchange is used purely for its full-fence semantics:
            // the m_headIndex read below must not be reordered before this store.
            Interlocked.Exchange(ref m_tailIndex, tail);

            // If there is no interaction with a take, we can head down the fast path.
            if (m_headIndex <= tail)
            {
                int idx = tail & m_mask;
                obj = Volatile.Read(ref m_array[idx]);

                // Check for nulls in the array (slots cleared by LocalFindAndPop).
                if (obj == null) continue;

                m_array[idx] = null;
                return true;
            }
            else
            {
                // Interaction with takes: 0 or 1 elements left.
                bool lockTaken = false;
                try
                {
                    m_foreignLock.Enter(ref lockTaken);

                    if (m_headIndex <= tail)
                    {
                        // Element still available. Take it.
                        int idx = tail & m_mask;
                        obj = Volatile.Read(ref m_array[idx]);

                        // Check for nulls in the array.
                        if (obj == null) continue;

                        m_array[idx] = null;
                        return true;
                    }
                    else
                    {
                        // We lost the race, element was stolen, restore the tail.
                        m_tailIndex = tail + 1;
                        obj = null;
                        return false;
                    }
                }
                finally
                {
                    if (lockTaken)
                        m_foreignLock.Exit(false);
                }
            }
        }
    }

    // Attempts to steal the oldest item from the head of another thread's queue.
    // Non-blocking (zero lock timeout); sets missedSteal when contention or a
    // race prevented a steal that might otherwise have succeeded.
    public bool TrySteal(out IThreadPoolWorkItem obj, ref bool missedSteal)
    {
        return TrySteal(out obj, ref missedSteal, 0); // no blocking by default.
    }

    private bool TrySteal(out IThreadPoolWorkItem obj, ref bool missedSteal, int millisecondsTimeout)
    {
        obj = null;

        while (true)
        {
            if (m_headIndex >= m_tailIndex)
                return false;

            bool taken = false;
            try
            {
                m_foreignLock.TryEnter(millisecondsTimeout, ref taken);
                if (taken)
                {
                    // Increment head, and ensure read of tail doesn't move before it (fence).
                    int head = m_headIndex;
                    Interlocked.Exchange(ref m_headIndex, head + 1);

                    if (head < m_tailIndex)
                    {
                        int idx = head & m_mask;
                        obj = Volatile.Read(ref m_array[idx]);

                        // Check for nulls in the array (slots cleared by LocalFindAndPop).
                        if (obj == null) continue;

                        m_array[idx] = null;
                        return true;
                    }
                    else
                    {
                        // Failed, restore head.
                        m_headIndex = head;
                        obj = null;
                        missedSteal = true;
                    }
                }
                else
                {
                    missedSteal = true;
                }
            }
            finally
            {
                if (taken)
                    m_foreignLock.Exit(false);
            }

            return false;
        }
    }
}
// One fixed-size segment of the global FIFO queue. Segments are chained via
// Next; once a segment is used up it is never reused.
internal class QueueSegment
{
    // Holds a segment of the queue.  Enqueues/Dequeues start at element 0, and work their way up.
    internal readonly IThreadPoolWorkItem[] nodes;
    private const int QueueSegmentLength = 256;

    // Holds the indexes of the lowest and highest valid elements of the nodes array.
    // The low index is in the lower 16 bits, high index is in the upper 16 bits.
    // Use GetIndexes and CompareExchangeIndexes to manipulate this.
    private volatile int indexes;

    // The next segment in the queue.
    public volatile QueueSegment Next;

    const int SixteenBits = 0xffff;

    // Unpacks the packed index word into (upper = enqueue cursor, lower = dequeue cursor).
    void GetIndexes(out int upper, out int lower)
    {
        int i = indexes;
        upper = (i >> 16) & SixteenBits;
        lower = i & SixteenBits;

        Contract.Assert(upper >= lower);
        Contract.Assert(upper <= nodes.Length);
        Contract.Assert(lower <= nodes.Length);
        Contract.Assert(upper >= 0);
        Contract.Assert(lower >= 0);
    }

    // Atomically replaces (prevUpper, prevLower) with (newUpper, newLower).
    // On failure, prevUpper/prevLower are updated to the values actually seen,
    // so callers can loop without re-reading. Exactly one of the two indexes
    // may change per call (the XOR assert below enforces this).
    bool CompareExchangeIndexes(ref int prevUpper, int newUpper, ref int prevLower, int newLower)
    {
        Contract.Assert(newUpper >= newLower);
        Contract.Assert(newUpper <= nodes.Length);
        Contract.Assert(newLower <= nodes.Length);
        Contract.Assert(newUpper >= 0);
        Contract.Assert(newLower >= 0);
        Contract.Assert(newUpper >= prevUpper);
        Contract.Assert(newLower >= prevLower);
        Contract.Assert(newUpper == prevUpper ^ newLower == prevLower);

        int oldIndexes = (prevUpper << 16) | (prevLower & SixteenBits);
        int newIndexes = (newUpper << 16) | (newLower & SixteenBits);
        int prevIndexes = Interlocked.CompareExchange(ref indexes, newIndexes, oldIndexes);
        prevUpper = (prevIndexes >> 16) & SixteenBits;
        prevLower = prevIndexes & SixteenBits;
        return prevIndexes == oldIndexes;
    }

    [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
    public QueueSegment()
    {
        Contract.Assert(QueueSegmentLength <= SixteenBits);
        nodes = new IThreadPoolWorkItem[QueueSegmentLength];
    }

    // True once every slot has been both enqueued into and dequeued from;
    // the segment can then be unlinked by advancing queueTail.
    public bool IsUsedUp()
    {
        int upper, lower;
        GetIndexes(out upper, out lower);
        return (upper == nodes.Length) &&
               (lower == nodes.Length);
    }

    // Attempts to append a node; returns false when the segment is full.
    public bool TryEnqueue(IThreadPoolWorkItem node)
    {
        //
        // If there's room in this segment, atomically increment the upper count (to reserve
        // space for this node), then store the node.
        // Note that this leaves a window where it will look like there is data in that
        // array slot, but it hasn't been written yet.  This is taken care of in TryDequeue
        // with a busy-wait loop, waiting for the element to become non-null.  This implies
        // that we can never store null nodes in this data structure.
        //
        Contract.Assert(null != node);

        int upper, lower;
        GetIndexes(out upper, out lower);

        while (true)
        {
            if (upper == nodes.Length)
                return false;

            if (CompareExchangeIndexes(ref upper, upper + 1, ref lower, lower))
            {
                Contract.Assert(Volatile.Read(ref nodes[upper]) == null);
                Volatile.Write(ref nodes[upper], node);
                return true;
            }
        }
    }

    // Attempts to remove the oldest node; returns false when the segment is
    // empty (all reserved slots consumed or not yet enqueued past lower).
    [SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
    public bool TryDequeue(out IThreadPoolWorkItem node)
    {
        //
        // If there are nodes in this segment, increment the lower count, then take the
        // element we find there.
        //
        int upper, lower;
        GetIndexes(out upper, out lower);

        while(true)
        {
            if (lower == upper)
            {
                node = null;
                return false;
            }

            if (CompareExchangeIndexes(ref upper, upper, ref lower, lower + 1))
            {
                // It's possible that a concurrent call to Enqueue hasn't yet
                // written the node reference to the array.  We need to spin until
                // it shows up.
                SpinWait spinner = new SpinWait();
                while ((node = Volatile.Read(ref nodes[lower])) == null)
                    spinner.SpinOnce();

                // Null-out the reference so the object can be GC'd earlier.
                nodes[lower] = null;

                return true;
            }
        }
    }
}
// The head and tail of the queue.  We enqueue to the head, and dequeue from the tail.
internal volatile QueueSegment queueHead;
internal volatile QueueSegment queueTail;

// Snapshot of whether verbose ETW thread-pool logging is enabled; refreshed
// at the start of each Dispatch call.
internal bool loggingEnabled;

// All live per-thread work-stealing queues; read lock-free when stealing.
internal static SparseArray<WorkStealingQueue> allThreadQueues = new SparseArray<WorkStealingQueue>(16);

// Number of worker threads requested from the VM that have not yet started
// dispatching; capped at ThreadPoolGlobals.processorCount.
private volatile int numOutstandingThreadRequests = 0;

public ThreadPoolWorkQueue()
{
    // The queue starts as a single segment that is both head and tail.
    queueTail = queueHead = new QueueSegment();

    loggingEnabled = FrameworkEventSource.Log.IsEnabled(EventLevel.Verbose, FrameworkEventSource.Keywords.ThreadPool|FrameworkEventSource.Keywords.ThreadTransfer);
}
[SecurityCritical]
public ThreadPoolWorkQueueThreadLocals EnsureCurrentThreadHasQueue()
{
    // Lazily create this thread's [ThreadStatic] locals (and its local
    // work-stealing queue) on first use, then hand them back.
    ThreadPoolWorkQueueThreadLocals locals = ThreadPoolWorkQueueThreadLocals.threadLocals;
    if (locals == null)
    {
        locals = new ThreadPoolWorkQueueThreadLocals(this);
        ThreadPoolWorkQueueThreadLocals.threadLocals = locals;
    }
    return locals;
}
[SecurityCritical]
internal void EnsureThreadRequested()
{
    //
    // If we have not yet requested #procs threads from the VM, then request a new thread.
    // Note that there is a separate count in the VM which will also be incremented in this case,
    // which is handled by RequestWorkerThread.
    //
    // Standard CAS loop: whenever the CompareExchange loses to a concurrent
    // update we adopt the value the other thread installed and retry; we stop
    // once #procs requests are already outstanding.
    //
    int count = numOutstandingThreadRequests;
    while (count < ThreadPoolGlobals.processorCount)
    {
        int prev = Interlocked.CompareExchange(ref numOutstandingThreadRequests, count+1, count);
        if (prev == count)
        {
            ThreadPool.RequestWorkerThread();
            break;
        }
        count = prev;
    }
}
[SecurityCritical]
internal void MarkThreadRequestSatisfied()
{
    //
    // The VM has called us, so one of our outstanding thread requests has been satisfied.
    // Decrement the count so that future calls to EnsureThreadRequested will succeed.
    // Note that there is a separate count in the VM which has already been decremented by the VM
    // by the time we reach this point.
    //
    // CAS loop mirroring EnsureThreadRequested; the count never drops below zero.
    //
    int count = numOutstandingThreadRequests;
    while (count > 0)
    {
        int prev = Interlocked.CompareExchange(ref numOutstandingThreadRequests, count - 1, count);
        if (prev == count)
        {
            break;
        }
        count = prev;
    }
}
// Queues a work item, preferring the calling thread's local work-stealing
// queue (better locality, no global contention) unless forceGlobal is set
// or the caller is not a pool thread. Always ensures a worker is requested.
[SecurityCritical]
public void Enqueue(IThreadPoolWorkItem callback, bool forceGlobal)
{
    ThreadPoolWorkQueueThreadLocals tl = null;
    if (!forceGlobal)
        tl = ThreadPoolWorkQueueThreadLocals.threadLocals;

    if (loggingEnabled)
        System.Diagnostics.Tracing.FrameworkEventSource.Log.ThreadPoolEnqueueWorkObject(callback);

    if (null != tl)
    {
        tl.workStealingQueue.LocalPush(callback);
    }
    else
    {
        // Global queue: try the head segment; when it is full, race to link a
        // fresh segment onto Next (only one CompareExchange wins; the rest see
        // non-null Next) and then advance queueHead past any full segments.
        QueueSegment head = queueHead;

        while (!head.TryEnqueue(callback))
        {
            Interlocked.CompareExchange(ref head.Next, new QueueSegment(), null);

            while (head.Next != null)
            {
                Interlocked.CompareExchange(ref queueHead, head.Next, head);
                head = queueHead;
            }
        }
    }

    EnsureThreadRequested();
}
// Tries to remove the given item from the calling thread's local queue
// (used to inline work a waiter is blocked on). Returns false when the
// calling thread has no local queue or the item is not found there.
[SecurityCritical]
internal bool LocalFindAndPop(IThreadPoolWorkItem callback)
{
    ThreadPoolWorkQueueThreadLocals locals = ThreadPoolWorkQueueThreadLocals.threadLocals;
    return locals != null && locals.workStealingQueue.LocalFindAndPop(callback);
}
// Finds the next work item for this thread, trying in order:
//   1) the thread's own local queue (LIFO, lock-free),
//   2) the global FIFO queue (advancing queueTail past used-up segments),
//   3) stealing from other threads' local queues.
// callback is null when nothing was found; missedSteal reports that a steal
// was foiled by contention, so more work may in fact exist.
[SecurityCritical]
public void Dequeue(ThreadPoolWorkQueueThreadLocals tl, out IThreadPoolWorkItem callback, out bool missedSteal)
{
    callback = null;
    missedSteal = false;
    WorkStealingQueue wsq = tl.workStealingQueue;

    if (wsq.LocalPop(out callback))
        Contract.Assert(null != callback);

    if (null == callback)
    {
        QueueSegment tail = queueTail;
        while (true)
        {
            if (tail.TryDequeue(out callback))
            {
                Contract.Assert(null != callback);
                break;
            }

            // Only unlink a segment once it is fully consumed AND a successor
            // exists; otherwise the queue is (currently) empty.
            if (null == tail.Next || !tail.IsUsedUp())
            {
                break;
            }
            else
            {
                Interlocked.CompareExchange(ref queueTail, tail.Next, tail);
                tail = queueTail;
            }
        }
    }

    if (null == callback)
    {
        // Start at a random victim so concurrent thieves spread out instead of
        // all contending on the same queue; visit each slot at most once.
        WorkStealingQueue[] otherQueues = allThreadQueues.Current;
        int i = tl.random.Next(otherQueues.Length);
        int c = otherQueues.Length;
        while (c > 0)
        {
            WorkStealingQueue otherQueue = Volatile.Read(ref otherQueues[i % otherQueues.Length]);
            if (otherQueue != null &&
                otherQueue != wsq &&
                otherQueue.TrySteal(out callback, ref missedSteal))
            {
                Contract.Assert(null != callback);
                break;
            }
            i++;
            c--;
        }
    }
}
// Main worker loop, invoked by the VM each time it dispatches a pool thread
// into this AppDomain. Executes work items for up to tpQuantum ms, then
// returns. Returns true when the thread is leaving normally (queue empty or
// quantum expired), false when Hill Climbing asked for the thread back.
// The empty try / work-in-finally pattern below protects the bookkeeping
// from ThreadAbortException, while work items themselves run outside any
// finally so they remain abortable.
[SecurityCritical]
static internal bool Dispatch()
{
    var workQueue = ThreadPoolGlobals.workQueue;
    //
    // The clock is ticking!  We have ThreadPoolGlobals.tpQuantum milliseconds to get some work done, and then
    // we need to return to the VM.
    //
    int quantumStartTime = Environment.TickCount;

    //
    // Update our records to indicate that an outstanding request for a thread has now been fulfilled.
    // From this point on, we are responsible for requesting another thread if we stop working for any
    // reason, and we believe there might still be work in the queue.
    //
    // Note that if this thread is aborted before we get a chance to request another one, the VM will
    // record a thread request on our behalf.  So we don't need to worry about getting aborted right here.
    //
    workQueue.MarkThreadRequestSatisfied();

    // Has the desire for logging changed since the last time we entered?
    workQueue.loggingEnabled = FrameworkEventSource.Log.IsEnabled(EventLevel.Verbose, FrameworkEventSource.Keywords.ThreadPool|FrameworkEventSource.Keywords.ThreadTransfer);

    //
    // Assume that we're going to need another thread if this one returns to the VM.  We'll set this to
    // false later, but only if we're absolutely certain that the queue is empty.
    //
    bool needAnotherThread = true;
    IThreadPoolWorkItem workItem = null;
    try
    {
        //
        // Set up our thread-local data
        //
        ThreadPoolWorkQueueThreadLocals tl = workQueue.EnsureCurrentThreadHasQueue();

        //
        // Loop until our quantum expires.
        //
        while ((Environment.TickCount - quantumStartTime) < ThreadPoolGlobals.tpQuantum)
        {
            //
            // Dequeue and EnsureThreadRequested must be protected from ThreadAbortException.
            // These are fast, so this will not delay aborts/AD-unloads for very long.
            //
            try { }
            finally
            {
                bool missedSteal = false;
                workQueue.Dequeue(tl, out workItem, out missedSteal);

                if (workItem == null)
                {
                    //
                    // No work.  We're going to return to the VM once we leave this protected region.
                    // If we missed a steal, though, there may be more work in the queue.
                    // Instead of looping around and trying again, we'll just request another thread.  This way
                    // we won't starve other AppDomains while we spin trying to get locks, and hopefully the thread
                    // that owns the contended work-stealing queue will pick up its own workitems in the meantime,
                    // which will be more efficient than this thread doing it anyway.
                    //
                    needAnotherThread = missedSteal;
                }
                else
                {
                    //
                    // If we found work, there may be more work.  Ask for another thread so that the other work can be processed
                    // in parallel.  Note that this will only ask for a max of #procs threads, so it's safe to call it for every dequeue.
                    //
                    workQueue.EnsureThreadRequested();
                }
            }

            if (workItem == null)
            {
                // Tell the VM we're returning normally, not because Hill Climbing asked us to return.
                return true;
            }
            else
            {
                if (workQueue.loggingEnabled)
                    System.Diagnostics.Tracing.FrameworkEventSource.Log.ThreadPoolDequeueWorkObject(workItem);

                //
                // Execute the workitem outside of any finally blocks, so that it can be aborted if needed.
                //
                if (ThreadPoolGlobals.enableWorkerTracking)
                {
                    bool reportedStatus = false;
                    try
                    {
                        // The empty-try/finally ensures an abort cannot land
                        // between reporting "busy" and recording that we did.
                        try { }
                        finally
                        {
                            ThreadPool.ReportThreadStatus(true);
                            reportedStatus = true;
                        }
                        workItem.ExecuteWorkItem();
                        workItem = null;
                    }
                    finally
                    {
                        if (reportedStatus)
                            ThreadPool.ReportThreadStatus(false);
                    }
                }
                else
                {
                    workItem.ExecuteWorkItem();
                    workItem = null;
                }

                //
                // Notify the VM that we executed this workitem.  This is also our opportunity to ask whether Hill Climbing wants
                // us to return the thread to the pool or not.
                //
                if (!ThreadPool.NotifyWorkItemComplete())
                    return false;
            }
        }
        // If we get here, it's because our quantum expired.  Tell the VM we're returning normally.
        return true;
    }
    catch (ThreadAbortException tae)
    {
        //
        // This is here to catch the case where this thread is aborted between the time we exit the finally block in the dispatch
        // loop, and the time we execute the work item.  QueueUserWorkItemCallback uses this to update its accounting of whether
        // it was executed or not (in debug builds only).  Task uses this to communicate the ThreadAbortException to anyone
        // who waits for the task to complete.
        //
        if (workItem != null)
            workItem.MarkAborted(tae);

        //
        // In this case, the VM is going to request another thread on our behalf.  No need to do it twice.
        //
        needAnotherThread = false;
        // throw;  //no need to explicitly rethrow a ThreadAbortException, and doing so causes allocations on amd64.
    }
    finally
    {
        //
        // If we are exiting for any reason other than that the queue is definitely empty, ask for another
        // thread to pick up where we left off.
        //
        if (needAnotherThread)
            workQueue.EnsureThreadRequested();
    }

    // we can never reach this point, but the C# compiler doesn't know that, because it doesn't know the ThreadAbortException will be reraised above.
    Contract.Assert(false);
    return true;
}
}
// Holds a WorkStealingQueue, and removes it from the list when this object is no longer referenced.
// Per-thread state for pool threads: the thread's work-stealing queue, a
// back-reference to the shared work queue, and a per-thread Random used to
// pick steal victims. The finalizer drains any pending local work back into
// the global queue so it isn't lost when the thread dies.
internal sealed class ThreadPoolWorkQueueThreadLocals
{
    [ThreadStatic]
    [SecurityCritical]
    public static ThreadPoolWorkQueueThreadLocals threadLocals;

    public readonly ThreadPoolWorkQueue workQueue;
    public readonly ThreadPoolWorkQueue.WorkStealingQueue workStealingQueue;
    // Seeded with the thread id so each thread probes victims in a different order.
    public readonly Random random = new Random(Thread.CurrentThread.ManagedThreadId);

    public ThreadPoolWorkQueueThreadLocals(ThreadPoolWorkQueue tpq)
    {
        workQueue = tpq;
        workStealingQueue = new ThreadPoolWorkQueue.WorkStealingQueue();
        ThreadPoolWorkQueue.allThreadQueues.Add(workStealingQueue);
    }

    // Moves any items remaining in the local queue back to the global queue,
    // then unregisters the local queue from the steal list.
    [SecurityCritical]
    private void CleanUp()
    {
        if (null != workStealingQueue)
        {
            if (null != workQueue)
            {
                bool done = false;
                while (!done)
                {
                    // Ensure that we won't be aborted between LocalPop and Enqueue.
                    try { }
                    finally
                    {
                        IThreadPoolWorkItem cb = null;
                        if (workStealingQueue.LocalPop(out cb))
                        {
                            Contract.Assert(null != cb);
                            // forceGlobal: this thread is going away, so the
                            // item must land where other threads can see it.
                            workQueue.Enqueue(cb, true);
                        }
                        else
                        {
                            done = true;
                        }
                    }
                }
            }

            ThreadPoolWorkQueue.allThreadQueues.Remove(workStealingQueue);
        }
    }

    [SecuritySafeCritical]
    ~ThreadPoolWorkQueueThreadLocals()
    {
        // Since the purpose of calling CleanUp is to transfer any pending workitems into the global
        // queue so that they will be executed by another thread, there's no point in doing this cleanup
        // if we're in the process of shutting down or unloading the AD.  In those cases, the work won't
        // execute anyway.  And there are subtle races involved there that would lead us to do the wrong
        // thing anyway.  So we'll only clean up if this is a "normal" finalization.
        if (!(Environment.HasShutdownStarted || AppDomain.CurrentDomain.IsFinalizingForUnload()))
            CleanUp();
    }
}
internal sealed class RegisteredWaitHandleSafe : CriticalFinalizerObject
{
private static IntPtr InvalidHandle
{
[System.Security.SecuritySafeCritical] // auto-generated
get
{
return Win32Native.INVALID_HANDLE_VALUE;
}
}
private IntPtr registeredWaitHandle;
private WaitHandle m_internalWaitObject;
private bool bReleaseNeeded = false;
private volatile int m_lock = 0;
#if FEATURE_CORECLR
[System.Security.SecuritySafeCritical] // auto-generated
#endif
internal RegisteredWaitHandleSafe()
{
registeredWaitHandle = InvalidHandle;
}
internal IntPtr GetHandle()
{
return registeredWaitHandle;
}
internal void SetHandle(IntPtr handle)
{
registeredWaitHandle = handle;
}
[System.Security.SecurityCritical] // auto-generated
[ResourceExposure(ResourceScope.None)]
[ResourceConsumption(ResourceScope.Machine, ResourceScope.Machine)]
[ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
internal void SetWaitObject(WaitHandle waitObject)
{
// needed for DangerousAddRef
RuntimeHelpers.PrepareConstrainedRegions();
try
{
}
finally
{
m_internalWaitObject = waitObject;
if (waitObject != null)
{
m_internalWaitObject.SafeWaitHandle.DangerousAddRef(ref bReleaseNeeded);
}
}
}
[System.Security.SecurityCritical] // auto-generated
[ResourceExposure(ResourceScope.None)]
[ResourceConsumption(ResourceScope.Machine, ResourceScope.Machine)]
[ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
internal bool Unregister(
WaitHandle waitObject // object to be notified when all callbacks to delegates have completed
)
{
bool result = false;
// needed for DangerousRelease
RuntimeHelpers.PrepareConstrainedRegions();
try
{
}
finally
{
// lock(this) cannot be used reliably in Cer since thin lock could be