Skip to content

Commit

Permalink
SPU: Cache reservation memory direct access handle (optimization)
Browse files Browse the repository at this point in the history
  • Loading branch information
elad335 committed May 4, 2022
1 parent 6366900 commit fd38488
Show file tree
Hide file tree
Showing 3 changed files with 65 additions and 32 deletions.
8 changes: 4 additions & 4 deletions Utilities/Thread.cpp
Expand Up @@ -2235,8 +2235,8 @@ thread_base::native_entry thread_base::finalize(u64 _self) noexcept
s_thread_pool[pos] = tls;

atomic_wait::list<2> list{};
list.set<0>(s_pool_ctr, 0, s_stop_bit);
list.set<1>(s_thread_pool[pos], tls);
list.template set<0>(s_pool_ctr, 0, s_stop_bit);
list.template set<1>(s_thread_pool[pos], tls);

while (s_thread_pool[pos] == tls || atomic_storage<thread_base*>::load(*tls) == fake_self)
{
Expand Down Expand Up @@ -2374,8 +2374,8 @@ void thread_ctrl::_wait_for(u64 usec, [[maybe_unused]] bool alert /* true */)

// Wait for signal and thread state abort
atomic_wait::list<2> list{};
list.set<0>(_this->m_sync, 0, 4 + 1);
list.set<1>(_this->m_taskq, nullptr);
list.template set<0>(_this->m_sync, 0, 4 + 1);
list.template set<1>(_this->m_taskq, nullptr);
list.wait(atomic_wait_timeout{usec <= 0xffff'ffff'ffff'ffff / 1000 ? usec * 1000 : 0xffff'ffff'ffff'ffff});
}

Expand Down
18 changes: 12 additions & 6 deletions Utilities/Thread.h
Expand Up @@ -276,8 +276,8 @@ class thread_ctrl final
}

// Wait for both thread sync var and provided atomic var
template <atomic_wait::op Op = atomic_wait::op::eq, typename T, typename U>
static inline void wait_on(T& wait, U old, u64 usec = -1)
template <uint Max, typename Func>
static inline void wait_on_custom(Func&& setter, u64 usec = -1)
{
auto _this = g_tls_this_thread;

Expand All @@ -286,13 +286,19 @@ class thread_ctrl final
return;
}

atomic_wait::list<3> list{};
list.set<0, Op>(wait, old);
list.set<1>(_this->m_sync, 0, 4 + 1);
list.set<2>(_this->m_taskq, nullptr);
atomic_wait::list<Max + 2> list{};
list.template set<Max>(_this->m_sync, 0, 4 + 1);
list.template set<Max + 1>(_this->m_taskq, nullptr);
setter(list);
list.wait(atomic_wait_timeout{usec <= 0xffff'ffff'ffff'ffff / 1000 ? usec * 1000 : 0xffff'ffff'ffff'ffff});
}

template <atomic_wait::op Op = atomic_wait::op::eq, typename T, typename U>
static inline void wait_on(T& wait, U old, u64 usec = -1)
{
wait_on_custom<1>([&](atomic_wait::list<3>& list){ list.template set<0, Op>(wait, old); }, usec);
}

// Exit.
[[noreturn]] static void emergency_exit(std::string_view reason);

Expand Down
71 changes: 49 additions & 22 deletions rpcs3/Emu/Cell/SPUThread.cpp
Expand Up @@ -3942,40 +3942,46 @@ s64 spu_thread::get_ch_value(u32 ch)

spu_function_logger logger(*this, "MFC Events read");

if (mask1 & SPU_EVENT_LR && raddr)
state += cpu_flag::wait;

using resrv_ptr = std::add_pointer_t<decltype(rdata)>;

resrv_ptr resrv_mem{};
std::shared_ptr<utils::shm> rdata_shm;

if (raddr && mask1 & SPU_EVENT_LR)
{
if (mask1 != SPU_EVENT_LR && mask1 != SPU_EVENT_LR + SPU_EVENT_TM)
auto area = vm::get(vm::any, raddr);

if (area && (area->flags & vm::preallocated) && vm::check_addr(raddr))
{
// Combining LR with other flags needs another solution
fmt::throw_exception("Not supported: event mask 0x%x", mask1);
// Obtain pointer to pre-allocated storage
resrv_mem = vm::get_super_ptr<decltype(rdata)>(raddr);
}

for (; !events.count; events = get_events(mask1, false, true))
else if (area)
{
const auto old = state.add_fetch(cpu_flag::wait);

if (is_stopped(old))
{
return -1;
}
// Ensure possession of the reservation memory so it won't be deallocated
auto [base_addr, shm_] = area->peek(raddr);

if (is_paused(old))
if (shm_)
{
// Ensure reservation data won't change while paused for debugging purposes
check_state();
continue;
const u32 data_offs = raddr - base_addr;
rdata_shm = std::move(shm_);
vm::writer_lock{}, resrv_mem = reinterpret_cast<resrv_ptr>(rdata_shm->map_self() + data_offs);
}

vm::reservation_notifier(raddr).wait(rtime, -128, atomic_wait_timeout{100'000});
}

check_state();
return events.events & mask1;
if (!resrv_mem)
{
spu_log.error("A dangling reservation address has been found while reading SPU_RdEventStat channel. (addr=0x%x, events_mask=0x%x)", raddr, mask1);
raddr = 0;
set_events(SPU_EVENT_LR);
}
}

for (; !events.count; events = get_events(mask1, true, true))
for (; !events.count; events = get_events(mask1 & ~SPU_EVENT_LR, true, true))
{
const auto old = state.add_fetch(cpu_flag::wait);
const auto old = +state;

if (is_stopped(old))
{
Expand All @@ -3984,7 +3990,28 @@ s64 spu_thread::get_ch_value(u32 ch)

if (is_paused(old))
{
// Ensure spu_thread::rdata's stagnancy while the thread is paused for debugging purposes
check_state();
state += cpu_flag::wait;
continue;
}

// Optimized check
if (raddr && (!vm::check_addr(raddr) || rtime != vm::reservation_acquire(raddr) || !cmp_rdata(rdata, *resrv_mem)))
{
raddr = 0;
set_events(SPU_EVENT_LR);
continue;
}

if (raddr)
{
thread_ctrl::wait_on_custom<2>([&](atomic_wait::list<4>& list)
{
list.template set<0>(state, old);
list.template set<1>(vm::reservation_notifier(raddr), rtime, -128);
}, 100);

continue;
}

Expand Down

0 comments on commit fd38488

Please sign in to comment.