diff --git a/Utilities/Thread.cpp b/Utilities/Thread.cpp
index 82e49668814d..c98bdf2f6139 100644
--- a/Utilities/Thread.cpp
+++ b/Utilities/Thread.cpp
@@ -2235,8 +2235,8 @@ thread_base::native_entry thread_base::finalize(u64 _self) noexcept
 	s_thread_pool[pos] = tls;
 
 	atomic_wait::list<2> list{};
-	list.set<0>(s_pool_ctr, 0, s_stop_bit);
-	list.set<1>(s_thread_pool[pos], tls);
+	list.template set<0>(s_pool_ctr, 0, s_stop_bit);
+	list.template set<1>(s_thread_pool[pos], tls);
 
 	while (s_thread_pool[pos] == tls || atomic_storage<thread_base*>::load(*tls) == fake_self)
 	{
@@ -2374,8 +2374,8 @@ void thread_ctrl::_wait_for(u64 usec, [[maybe_unused]] bool alert /* true */)
 
 	// Wait for signal and thread state abort
 	atomic_wait::list<2> list{};
-	list.set<0>(_this->m_sync, 0, 4 + 1);
-	list.set<1>(_this->m_taskq, nullptr);
+	list.template set<0>(_this->m_sync, 0, 4 + 1);
+	list.template set<1>(_this->m_taskq, nullptr);
 	list.wait(atomic_wait_timeout{usec <= 0xffff'ffff'ffff'ffff / 1000 ? usec * 1000 : 0xffff'ffff'ffff'ffff});
 }
 
diff --git a/Utilities/Thread.h b/Utilities/Thread.h
index bc50dbbc6cfd..b64271525953 100644
--- a/Utilities/Thread.h
+++ b/Utilities/Thread.h
@@ -276,8 +276,8 @@ class thread_ctrl final
 	}
 
 	// Wait for both thread sync var and provided atomic var
-	template <atomic_wait::op Op = atomic_wait::op::eq, typename T, typename U>
-	static inline void wait_on(T& wait, U old, u64 usec = -1)
+	template <u32 Max, typename Func>
+	static inline void wait_on_custom(Func&& setter, u64 usec = -1)
 	{
 		auto _this = g_tls_this_thread;
 
@@ -286,13 +286,19 @@ class thread_ctrl final
 			return;
 		}
 
-		atomic_wait::list<3> list{};
-		list.set<0, Op>(wait, old);
-		list.set<1>(_this->m_sync, 0, 4 + 1);
-		list.set<2>(_this->m_taskq, nullptr);
+		atomic_wait::list<Max + 2> list{};
+		list.template set<Max>(_this->m_sync, 0, 4 + 1);
+		list.template set<Max + 1>(_this->m_taskq, nullptr);
+		setter(list);
 		list.wait(atomic_wait_timeout{usec <= 0xffff'ffff'ffff'ffff / 1000 ? usec * 1000 : 0xffff'ffff'ffff'ffff});
 	}
 
+	template <atomic_wait::op Op = atomic_wait::op::eq, typename T, typename U>
+	static inline void wait_on(T& wait, U old, u64 usec = -1)
+	{
+		wait_on_custom<1>([&](atomic_wait::list<3>& list){ list.template set<0, Op>(wait, old); }, usec);
+	}
+
 	// Exit.
 	[[noreturn]] static void emergency_exit(std::string_view reason);
 
diff --git a/rpcs3/Emu/Cell/SPUThread.cpp b/rpcs3/Emu/Cell/SPUThread.cpp
index f51ff98246e7..fce482d3b776 100644
--- a/rpcs3/Emu/Cell/SPUThread.cpp
+++ b/rpcs3/Emu/Cell/SPUThread.cpp
@@ -3942,40 +3942,46 @@ s64 spu_thread::get_ch_value(u32 ch)
 
 		spu_function_logger logger(*this, "MFC Events read");
 
-		if (mask1 & SPU_EVENT_LR && raddr)
+		state += cpu_flag::wait;
+
+		using resrv_ptr = std::add_pointer_t<const decltype(rdata)>;
+
+		resrv_ptr resrv_mem{};
+		std::shared_ptr<utils::shm> rdata_shm;
+
+		if (raddr && mask1 & SPU_EVENT_LR)
 		{
-			if (mask1 != SPU_EVENT_LR && mask1 != SPU_EVENT_LR + SPU_EVENT_TM)
+			auto area = vm::get(vm::any, raddr);
+
+			if (area && (area->flags & vm::preallocated) && vm::check_addr(raddr))
 			{
-				// Combining LR with other flags needs another solution
-				fmt::throw_exception("Not supported: event mask 0x%x", mask1);
+				// Obtain pointer to pre-allocated storage
+				resrv_mem = vm::get_super_ptr<decltype(rdata)>(raddr);
 			}
-
-			for (; !events.count; events = get_events(mask1, false, true))
+			else if (area)
 			{
-				const auto old = state.add_fetch(cpu_flag::wait);
-
-				if (is_stopped(old))
-				{
-					return -1;
-				}
+				// Ensure possession over reservation memory so it won't be deallocated
+				auto [base_addr, shm_] = area->peek(raddr);
 
-				if (is_paused(old))
+				if (shm_)
 				{
-					// Ensure reservation data won't change while paused for debugging purposes
-					check_state();
-					continue;
+					const u32 data_offs = raddr - base_addr;
+					rdata_shm = std::move(shm_);
+					vm::writer_lock{}, resrv_mem = reinterpret_cast<resrv_ptr>(rdata_shm->map_self() + data_offs);
 				}
-
-				vm::reservation_notifier(raddr).wait(rtime, -128, atomic_wait_timeout{100'000});
 			}
 
-			check_state();
-			return events.events & mask1;
+			if (!resrv_mem)
+			{
+				spu_log.error("A dangling reservation address has been found while reading SPU_RdEventStat channel. (addr=0x%x, events_mask=0x%x)", raddr, mask1);
+				raddr = 0;
+				set_events(SPU_EVENT_LR);
+			}
 		}
 
-		for (; !events.count; events = get_events(mask1, true, true))
+		for (; !events.count; events = get_events(mask1 & ~SPU_EVENT_LR, true, true))
 		{
-			const auto old = state.add_fetch(cpu_flag::wait);
+			const auto old = +state;
 
 			if (is_stopped(old))
 			{
@@ -3984,7 +3990,28 @@ s64 spu_thread::get_ch_value(u32 ch)
 
 			if (is_paused(old))
 			{
+				// Ensure spu_thread::rdata's stagnancy while the thread is paused for debugging purposes
 				check_state();
+				state += cpu_flag::wait;
+				continue;
+			}
+
+			// Optimized check
+			if (raddr && (!vm::check_addr(raddr) || rtime != vm::reservation_acquire(raddr) || !cmp_rdata(rdata, *resrv_mem)))
+			{
+				raddr = 0;
+				set_events(SPU_EVENT_LR);
+				continue;
+			}
+
+			if (raddr)
+			{
+				thread_ctrl::wait_on_custom<2>([&](atomic_wait::list<4>& list)
+				{
+					list.template set<0>(state, old);
+					list.template set<1>(vm::reservation_notifier(raddr), rtime, -128);
+				}, 100);
+				continue;
 			}