Merge branch 'master' into ds3playerled
Megamouse committed Mar 1, 2021
2 parents 1bc70a4 + 8e4451d commit 54e8297
Showing 34 changed files with 857 additions and 176 deletions.
3rdparty/libpng (2 changes: 1 addition & 1 deletion)
Utilities/Thread.cpp (107 changes: 94 additions & 13 deletions)
@@ -2207,6 +2207,16 @@ thread_base::native_entry thread_base::make_trampoline(u64(*entry)(thread_base*
});
}

thread_state thread_ctrl::state()
{
auto _this = g_tls_this_thread;

// Drain execution queue
_this->exec();

return static_cast<thread_state>(_this->m_sync & 3);
}
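
The new thread_ctrl::state() drains the calling thread's pending task queue before returning its state bits, so queued thread_future work gets executed even by a thread that merely polls its own state. A minimal sketch of that polling pattern, assuming a typical worker body (the work item and the 1000 µs interval are placeholders, not part of this commit):

void example_worker_body()
{
	while (thread_ctrl::state() != thread_state::aborting)
	{
		do_one_unit_of_work();       // hypothetical unit of work
		thread_ctrl::wait_for(1000); // sleep up to 1000 us unless notified earlier
	}
}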

void thread_ctrl::_wait_for(u64 usec, bool alert /* true */)
{
auto _this = g_tls_this_thread;
@@ -2256,13 +2266,16 @@ void thread_ctrl::_wait_for(u64 usec, bool alert /* true */)
}
#endif

if (_this->m_sync.bit_test_reset(2))
if (_this->m_sync.bit_test_reset(2) || _this->m_taskq)
{
return;
}

// Wait for signal and thread state abort
_this->m_sync.wait(0, 4 + 1, atomic_wait_timeout{usec <= 0xffff'ffff'ffff'ffff / 1000 ? usec * 1000 : 0xffff'ffff'ffff'ffff});
atomic_wait::list<2> list{};
list.set<0>(_this->m_sync, 0, 4 + 1);
list.set<1>(_this->m_taskq, nullptr);
list.wait(atomic_wait_timeout{usec <= 0xffff'ffff'ffff'ffff / 1000 ? usec * 1000 : 0xffff'ffff'ffff'ffff});
}
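
With this change the sleep is no longer tied to m_sync alone: the early return also triggers when tasks are already pending, and the atomic_wait::list<2> blocks on both m_sync (signal/abort bits) and m_taskq, so a task pushed via thread_base::push() wakes the thread. The timeout argument converts microseconds to nanoseconds while clamping instead of overflowing; a standalone equivalent of that expression, with a hypothetical helper name:

#include <cstdint>

constexpr std::uint64_t usec_to_nsec_saturated(std::uint64_t usec)
{
	constexpr std::uint64_t umax = 0xffff'ffff'ffff'ffffull;
	return usec <= umax / 1000 ? usec * 1000 : umax; // clamp instead of overflowing u64
}

static_assert(usec_to_nsec_saturated(5) == 5000);
static_assert(usec_to_nsec_saturated(0xffff'ffff'ffff'ffffull) == 0xffff'ffff'ffff'ffffull);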

std::string thread_ctrl::get_name_cached()
@@ -2298,6 +2311,9 @@ thread_base::thread_base(native_entry entry, std::string_view name)

thread_base::~thread_base()
{
// Cleanup abandoned tasks: initialize default results and signal
this->exec();

// Only cleanup on errored status
if ((m_sync & 3) == 2)
{
@@ -2322,7 +2338,6 @@ bool thread_base::join(bool dtor) const
// Hacked for too sleepy threads (1ms) TODO: make sure it's unneeded and remove
const auto timeout = dtor && Emu.IsStopped() ? atomic_wait_timeout{1'000'000} : atomic_wait_timeout::inf;

bool warn = false;
auto stamp0 = __rdtsc();

for (u64 i = 0; (m_sync & 3) <= 1; i++)
@@ -2334,20 +2349,12 @@
break;
}

if (i > 20 && Emu.IsStopped())
if (i >= 16 && !(i & (i - 1)) && timeout != atomic_wait_timeout::inf)
{
stamp0 = __rdtsc();
atomic_wait_engine::raw_notify(0, get_native_id());
stamp0 = __rdtsc() - stamp0;
warn = true;
sig_log.error(u8"Thread [%s] is too sleepy. Waiting for it %.3fµs already!", *m_tname.load(), (__rdtsc() - stamp0) / (utils::get_tsc_freq() / 1000000.));
}
}

if (warn)
{
sig_log.error(u8"Thread [%s] is too sleepy. Took %.3fµs to wake it up!", *m_tname.load(), stamp0 / (utils::get_tsc_freq() / 1000000.));
}

return (m_sync & 3) == 3;
}
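
The old heuristic nudged a sleeping thread once the iteration count exceeded 20 while the emulator was stopping, timed the wake-up and reported it once after the loop. The new condition i >= 16 && !(i & (i - 1)) fires only on power-of-two iteration counts (16, 32, 64, ...) and only when a finite timeout is in use, so the raw_notify nudge and the "too sleepy" message back off exponentially instead of repeating every iteration. A small illustration of the bit trick (the helper and asserts are illustrative, not part of the commit):

#include <cstdint>

// (i & (i - 1)) == 0 holds exactly when i has at most one bit set,
// i.e. when i is zero or a power of two.
constexpr bool at_most_one_bit_set(std::uint64_t i)
{
	return (i & (i - 1)) == 0;
}

static_assert(at_most_one_bit_set(16) && at_most_one_bit_set(32) && at_most_one_bit_set(64));
static_assert(!at_most_one_bit_set(20) && !at_most_one_bit_set(48));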

@@ -2406,6 +2413,80 @@ u64 thread_base::get_cycles()
}
}

void thread_base::push(shared_ptr<thread_future> task)
{
const auto next = &task->next;
m_taskq.push_head(*next, std::move(task));
m_taskq.notify_one();
}
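
push() prepends the future to an intrusive list whose head lives in m_taskq (the task's own next field links to the previous head) and then notifies the owning thread, which since this commit also sleeps on m_taskq in _wait_for(). A rough sketch of the same push-and-notify idea in plain C++20 atomics (all names are hypothetical, and the real code uses reference-counted shared_ptr nodes rather than raw pointers):

#include <atomic>

struct task_node
{
	void (*run)(task_node*) = nullptr;
	task_node* next = nullptr;
};

std::atomic<task_node*> queue_head{nullptr};

void push(task_node* n)
{
	n->next = queue_head.load(std::memory_order_relaxed);

	// A failed CAS reloads the current head into n->next, so simply retry.
	while (!queue_head.compare_exchange_weak(n->next, n, std::memory_order_release))
	{
	}

	queue_head.notify_one(); // C++20: wakes a thread blocked in queue_head.wait(...)
}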

void thread_base::exec()
{
if (!m_taskq) [[likely]]
{
return;
}

while (shared_ptr<thread_future> head = m_taskq.exchange(null_ptr))
{
// TODO: check if adapting reverse algorithm is feasible here
shared_ptr<thread_future>* prev{};

for (auto ptr = head.get(); ptr; ptr = ptr->next.get())
{
utils::prefetch_exec(ptr->exec.load());

ptr->prev = prev;

if (ptr->next)
{
prev = &ptr->next;
}
}

if (!prev)
{
prev = &head;
}

for (auto ptr = prev->get(); ptr; ptr = ptr->prev->get())
{
if (auto task = ptr->exec.load()) [[likely]]
{
// Execute or discard (if aborting)
if ((m_sync & 3) == 0) [[likely]]
{
task(this, ptr);
}
else
{
task(nullptr, ptr);
}

// Notify waiters
ptr->exec.release(nullptr);
ptr->exec.notify_all();
}

if (ptr->next)
{
// Partial cleanup
ptr->next.reset();
}

if (!ptr->prev)
{
break;
}
}

if (!m_taskq) [[likely]]
{
return;
}
}
}
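
exec() grabs the entire pending chain with a single exchange, walks it once to prefetch each task's code and record prev links so the second pass can start from the tail of the chain, then either runs each task or, if the thread is aborting, invokes it with nullptr so waiters still receive a default result before being notified. For illustration only, one common way to drain such a LIFO chain in push (FIFO) order is a relink-based reversal; this is not the commit's exact algorithm, and the types and names below are hypothetical:

#include <atomic>
#include <functional>

struct pending_task
{
	std::function<void()> fn;
	pending_task* next = nullptr;
};

void drain_in_push_order(std::atomic<pending_task*>& queue_head)
{
	// Grab the whole chain; it is linked newest-first (LIFO).
	pending_task* lifo = queue_head.exchange(nullptr, std::memory_order_acquire);

	// Reverse it so the oldest task comes first.
	pending_task* fifo = nullptr;
	while (lifo)
	{
		pending_task* next = lifo->next;
		lifo->next = fifo;
		fifo = lifo;
		lifo = next;
	}

	// Execute in push order and free each node.
	while (fifo)
	{
		pending_task* next = fifo->next;
		if (fifo->fn)
		{
			fifo->fn();
		}
		delete fifo;
		fifo = next;
	}
}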

[[noreturn]] void thread_ctrl::emergency_exit(std::string_view reason)
{
if (std::string info = dump_useful_thread_info(); !info.empty())
