@@ -291,7 +291,6 @@ Thread::Thread() {
   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
   // and ::Release()
   _ParkEvent   = ParkEvent::Allocate(this);
-  _MuxEvent    = ParkEvent::Allocate(this);
 
 #ifdef CHECK_UNHANDLED_OOPS
   if (CheckUnhandledOops) {
@@ -439,7 +438,6 @@ Thread::~Thread() {
   // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
   // We NULL out the fields for good hygiene.
   ParkEvent::Release(_ParkEvent); _ParkEvent = NULL;
-  ParkEvent::Release(_MuxEvent);  _MuxEvent  = NULL;
 
   delete handle_area();
   delete metadata_handles();
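
These two hunks drop the second per-thread ParkEvent. The constructor comment alludes to a free-list "stack" that caches events across thread lifetimes; the following is a minimal standalone sketch of that caching pattern, not HotSpot's actual code (the Event and EventCache names are invented, and the real ParkEvent::Allocate/Release manage a global list rather than a simple mutex-guarded one):

#include <mutex>

struct Event {
  Event* free_next = nullptr;   // link used only while the event sits in the cache
};

class EventCache {
 public:
  Event* allocate() {
    std::lock_guard<std::mutex> g(_lock);
    if (_head != nullptr) {     // reuse a cached event when one is available
      Event* e = _head;
      _head = e->free_next;
      return e;
    }
    return new Event();         // cache miss: fall back to the heap
  }
  void release(Event* e) {
    if (e == nullptr) return;   // tolerate null events, as the destructor comment notes
    std::lock_guard<std::mutex> g(_lock);
    e->free_next = _head;       // push back onto the cache for a future thread
    _head = e;
  }
 private:
  std::mutex _lock;
  Event* _head = nullptr;
};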
@@ -3560,6 +3558,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
 
   // Initialize Java-Level synchronization subsystem
   ObjectMonitor::Initialize();
+  ObjectSynchronizer::initialize();
 
   // Initialize global modules
   jint status = init_globals();
@@ -4582,22 +4581,11 @@ void Threads::print_threads_compiling(outputStream* st, char* buf, int buflen, b
 }
 
 
-// Internal SpinLock and Mutex
-// Based on ParkEvent
-
-// Ad-hoc mutual exclusion primitives: SpinLock and Mux
+// Ad-hoc mutual exclusion primitives: SpinLock
 //
 // We employ SpinLocks _only for low-contention, fixed-length
 // short-duration critical sections where we're concerned
 // about native mutex_t or HotSpot Mutex:: latency.
-// The mux construct provides a spin-then-block mutual exclusion
-// mechanism.
-//
-// Testing has shown that contention on the ListLock guarding gFreeList
-// is common.  If we implement ListLock as a simple SpinLock it's common
-// for the JVM to devolve to yielding with little progress.  This is true
-// despite the fact that the critical sections protected by ListLock are
-// extremely short.
 //
 // TODO-FIXME: ListLock should be of type SpinLock.
 // We should make this a 1st-class type, integrated into the lock
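
The surviving comment block describes where SpinLocks are appropriate. As a reference point, here is a minimal portable sketch of the test-and-test-and-set idiom behind Thread::SpinAcquire/SpinRelease, with std::atomic standing in for HotSpot's Atomic and OrderAccess primitives (an illustration under those assumptions, not the JVM's implementation):

#include <atomic>

inline void spin_acquire(std::atomic<int>& lock_word) {
  for (;;) {
    int expected = 0;
    // Acquire ordering on success makes the critical section's accesses
    // happen-after the lock acquisition.
    if (lock_word.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
      return;
    }
    // Test-and-test-and-set: spin on plain loads to avoid hammering the
    // cache line with failed CAS attempts while the lock is held.
    while (lock_word.load(std::memory_order_relaxed) != 0) { /* spin */ }
  }
}

inline void spin_release(std::atomic<int>& lock_word) {
  // Release ordering publishes the critical section before the unlock,
  // mirroring the plain "*adr = 0" store shown in the next hunk.
  lock_word.store(0, std::memory_order_release);
}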
@@ -4650,150 +4638,6 @@ void Thread::SpinRelease(volatile int * adr) {
   *adr = 0;
 }
 
-// muxAcquire and muxRelease:
-//
-// *  muxAcquire and muxRelease support a single-word lock-word construct.
-//    The LSB of the word is set IFF the lock is held.
-//    The remainder of the word points to the head of a singly-linked list
-//    of threads blocked on the lock.
-//
-// *  The current implementation of muxAcquire-muxRelease uses its own
-//    dedicated Thread._MuxEvent instance.  If we're interested in
-//    minimizing the peak number of extant ParkEvent instances then
-//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
-//    as certain invariants were satisfied.  Specifically, care would need
-//    to be taken with regards to consuming unpark() "permits".
-//    A safe rule of thumb is that a thread would never call muxAcquire()
-//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
-//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
-//    consume an unpark() permit intended for monitorenter, for instance.
-//    One way around this would be to widen the restricted-range semaphore
-//    implemented in park().  Another alternative would be to provide
-//    multiple instances of the PlatformEvent() for each thread.  One
-//    instance would be dedicated to muxAcquire-muxRelease, for instance.
-//
-// *  Usage:
-//    -- Only as leaf locks
-//    -- for short-term locking only as muxAcquire does not perform
-//       thread state transitions.
-//
-// Alternatives:
-// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
-//    but with parking or spin-then-park instead of pure spinning.
-// *  Use Taura-Oyama-Yonezawa locks.
-// *  It's possible to construct a 1-0 lock if we encode the lockword as
-//    (List,LockByte).  Acquire will CAS the full lockword while Release
-//    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
-//    acquiring threads use timers (ParkTimed) to detect and recover from
-//    the stranding window.  Thread/Node structures must be aligned on 256-byte
-//    boundaries by using placement-new.
-// *  Augment MCS with advisory back-link fields maintained with CAS().
-//    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
-//    The validity of the backlinks must be ratified before we trust the value.
-//    If the backlinks are invalid the exiting thread must back-track through
-//    the forward links, which are always trustworthy.
-// *  Add a successor indication.  The LockWord is currently encoded as
-//    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
-//    to provide the usual futile-wakeup optimization.
-//    See RTStt for details.
-//
-
-
-const intptr_t LOCKBIT = 1;
-
-void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
-  intptr_t w = Atomic::cmpxchg(Lock, (intptr_t)0, LOCKBIT);
-  if (w == 0) return;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
-    return;
-  }
-
-  ParkEvent * const Self = Thread::current()->_MuxEvent;
-  assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
-  for (;;) {
-    int its = (os::is_MP() ? 100 : 0) + 1;
-
-    // Optional spin phase: spin-then-park strategy
-    while (--its >= 0) {
-      w = *Lock;
-      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
-        return;
-      }
-    }
-
-    Self->reset();
-    Self->OnList = intptr_t(Lock);
-    // The following fence() isn't _strictly necessary as the subsequent
-    // CAS() both serializes execution and ratifies the fetched *Lock value.
-    OrderAccess::fence();
-    for (;;) {
-      w = *Lock;
-      if ((w & LOCKBIT) == 0) {
-        if (Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
-          Self->OnList = 0;   // hygiene - allows stronger asserts
-          return;
-        }
-        continue;             // Interference -- *Lock changed -- Just retry
-      }
-      assert(w & LOCKBIT, "invariant");
-      Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
-      if (Atomic::cmpxchg(Lock, w, intptr_t(Self)|LOCKBIT) == w) break;
-    }
-
-    while (Self->OnList != 0) {
-      Self->park();
-    }
-  }
-}
-
-// Release() must extract a successor from the list and then wake that thread.
-// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
-// similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
-// Release() would:
-//     (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
-//     (B) Extract a successor from the private list "in-hand"
-//     (C) attempt to CAS() the residual back into *Lock over null.
-//         If any threads arrived in the interim the CAS() would fail.
-//         In that case Release() would detach the RATs, re-merge the list in-hand
-//         with the RATs and repeat as needed.  Alternately, Release() might
-//         detach and extract a successor, but then pass the residual list to the wakee.
-//         The wakee would be responsible for reattaching and remerging before it
-//         competed for the lock.
-//
-// Both "pop" and DMR are immune from ABA corruption -- there can be
-// multiple concurrent pushers, but only one popper or detacher.
-// This implementation pops from the head of the list.  This is unfair,
-// but tends to provide excellent throughput as hot threads remain hot.
-// (We wake recently run threads first).
-//
-// All paths through muxRelease() will execute a CAS.
-// Release consistency -- We depend on the CAS in muxRelease() to provide full
-// bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations
-// executed within the critical section are complete and globally visible before the
-// store (CAS) to the lock-word that releases the lock becomes globally visible.
-void Thread::muxRelease(volatile intptr_t * Lock) {
-  for (;;) {
-    const intptr_t w = Atomic::cmpxchg(Lock, LOCKBIT, (intptr_t)0);
-    assert(w & LOCKBIT, "invariant");
-    if (w == LOCKBIT) return;
-    ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
-    assert(List != NULL, "invariant");
-    assert(List->OnList == intptr_t(Lock), "invariant");
-    ParkEvent * const nxt = List->ListNext;
-    guarantee((intptr_t(nxt) & LOCKBIT) == 0, "invariant");
-
-    // The following CAS() releases the lock and pops the head element.
-    // The CAS() also ratifies the previously fetched lock-word value.
-    if (Atomic::cmpxchg(Lock, w, intptr_t(nxt)) != w) {
-      continue;
-    }
-    List->OnList = 0;
-    OrderAccess::fence();
-    List->unpark();
-    return;
-  }
-}
-
 
 void Threads::verify() {
   ALL_JAVA_THREADS(p) {
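
For readers tracking what the deletion drops, the following compact sketch restates the removed lock-word protocol in portable C++: the LSB of a single word is the lock bit and the remaining bits point at a singly-linked list of waiters. Assumptions: std::atomic replaces Atomic::cmpxchg, a spinning on_list flag stands in for ParkEvent park()/unpark() (the real code blocks rather than spins), and WaitNode plus the function names are invented for illustration; this is a sketch, not the deleted implementation.

#include <atomic>
#include <cstdint>

constexpr intptr_t LOCKBIT = 1;      // LSB set iff the lock is held

struct WaitNode {                    // alignment keeps the low bit of a
  std::atomic<intptr_t> on_list{0};  // WaitNode* zero, so the pointer can
  WaitNode* next = nullptr;          // share one word with LOCKBIT
};

void mux_acquire(std::atomic<intptr_t>& lock, WaitNode* self) {
  for (;;) {
    intptr_t w = lock.load(std::memory_order_relaxed);
    if ((w & LOCKBIT) == 0) {
      // Lock appears free: set the lock bit, preserving any list bits.
      if (lock.compare_exchange_weak(w, w | LOCKBIT, std::memory_order_acquire)) {
        self->on_list.store(0, std::memory_order_relaxed);  // hygiene, as in the original
        return;
      }
      continue;                      // interference: the word changed, retry
    }
    // Lock held: push self onto the head of the waiter list encoded in the
    // upper bits, then wait for a releaser to clear on_list.
    self->next = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);
    self->on_list.store(1, std::memory_order_relaxed);
    if (lock.compare_exchange_weak(w, reinterpret_cast<intptr_t>(self) | LOCKBIT,
                                   std::memory_order_release)) {
      // park() would go here; the sketch spins until the releaser pops us.
      while (self->on_list.load(std::memory_order_acquire) != 0) { /* spin */ }
      // Woken: loop back and compete for the lock again.
    }
  }
}

void mux_release(std::atomic<intptr_t>& lock) {
  for (;;) {
    intptr_t w = lock.load(std::memory_order_relaxed);
    if (w == LOCKBIT) {
      // No waiters: clear the lock word outright.
      if (lock.compare_exchange_weak(w, 0, std::memory_order_release)) return;
      continue;
    }
    // Waiters present: a single CAS pops the head of the list and releases
    // the lock; the popped thread is then woken (the unfair LIFO handoff
    // the deleted comments note keeps hot threads hot).
    WaitNode* head = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);
    if (lock.compare_exchange_weak(w, reinterpret_cast<intptr_t>(head->next),
                                   std::memory_order_release)) {
      head->on_list.store(0, std::memory_order_release);   // stands in for unpark()
      return;
    }
  }
}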