
Merge pull request #18706 from hrydgard/misc-cleanup
Remove a bunch of dead code from CoreTiming ("threadsafe" events)
hrydgard committed Jan 16, 2024
2 parents 69a25ba + 3fd5190 commit 7879ebd
Showing 5 changed files with 18 additions and 221 deletions.
9 changes: 9 additions & 0 deletions Common/Serialize/SerializeList.h
@@ -99,3 +99,12 @@ void DoLinkedList(PointerWrap &p, LinkedListItem<T> *&list_start, LinkedListItem
list_cur = list_cur->next;
}
}

inline void DoIgnoreUnusedLinkedList(PointerWrap &p) {
	u8 shouldExist = 0;
	Do(p, shouldExist);
	if (shouldExist) {
		// We don't support this linked list and haven't used it forever.
		p.SetError(p.ERROR_FAILURE);
	}
}
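
For readers looking at just this hunk: DoLinkedList() writes a shouldExist byte (1) before each node and a terminating 0, so an empty list serializes to a single zero byte; the helper above emits and consumes exactly that byte. That is why a caller can drop a list it no longer keeps without bumping its section version, and why old savestates that actually carried entries fail cleanly with ERROR_FAILURE instead of desyncing the stream. A hypothetical caller (the names and fields below are invented for illustration, not part of this commit) might look like:

void WidgetManager::DoState(PointerWrap &p) {
	auto s = p.Section("WidgetManager", 1, 2);
	if (!s)
		return;

	Do(p, activeWidgetCount_);
	// Previously: DoLinkedList<Widget, GetNewWidget, FreeWidget, Widget_DoState>(p, widgetQueue_, nullptr);
	// The queue is gone; read/write the single "empty list" byte in its place.
	DoIgnoreUnusedLinkedList(p);
}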
217 changes: 7 additions & 210 deletions Core/CoreTiming.cpp
@@ -63,15 +63,7 @@ struct BaseEvent {
typedef LinkedListItem<BaseEvent> Event;

Event *first;
Event *tsFirst;
Event *tsLast;

// event pools
Event *eventPool = 0;
Event *eventTsPool = 0;
int allocatedTsEvents = 0;
// Optimization to skip MoveEvents when possible.
std::atomic<u32> hasTsEvents;

// Downcount has been moved to currentMIPS, to save a couple of clocks in every ARM JIT block
// as we can already reach that structure through a register.
@@ -82,8 +74,6 @@ s64 idledCycles;
s64 lastGlobalTimeTicks;
s64 lastGlobalTimeUs;

static std::mutex externalEventLock;

std::vector<MHzChangeCallback> mhzChangeCallbacks;

void FireMhzChange() {
@@ -140,31 +130,12 @@ Event* GetNewEvent()
return ev;
}

Event* GetNewTsEvent()
{
allocatedTsEvents++;

if(!eventTsPool)
return new Event;

Event* ev = eventTsPool;
eventTsPool = ev->next;
return ev;
}

void FreeEvent(Event* ev)
{
ev->next = eventPool;
eventPool = ev;
}

void FreeTsEvent(Event* ev)
{
ev->next = eventTsPool;
eventTsPool = ev;
allocatedTsEvents--;
}

int RegisterEvent(const char *name, TimedCallback callback) {
for (const auto &ty : event_types) {
if (!strcmp(ty.name, name)) {
@@ -222,14 +193,12 @@ void Init()
idledCycles = 0;
lastGlobalTimeTicks = 0;
lastGlobalTimeUs = 0;
hasTsEvents = 0;
mhzChangeCallbacks.clear();
CPU_HZ = initialHz;
}

void Shutdown()
{
MoveEvents();
ClearPendingEvents();
UnregisterAllEvents();

@@ -238,13 +207,6 @@ void Shutdown()
eventPool = ev->next;
delete ev;
}

std::lock_guard<std::mutex> lk(externalEventLock);
while (eventTsPool) {
Event *ev = eventTsPool;
eventTsPool = ev->next;
delete ev;
}
}

u64 GetTicks()
@@ -262,39 +224,6 @@ u64 GetIdleTicks()
return (u64)idledCycles;
}


// This is to be called when outside threads, such as the graphics thread, wants to
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
std::lock_guard<std::mutex> lk(externalEventLock);
Event *ne = GetNewTsEvent();
ne->time = GetTicks() + cyclesIntoFuture;
ne->type = event_type;
ne->next = 0;
ne->userdata = userdata;
if(!tsFirst)
tsFirst = ne;
if(tsLast)
tsLast->next = ne;
tsLast = ne;

hasTsEvents.store(1, std::memory_order::memory_order_release);
}

// Same as ScheduleEvent_Threadsafe(0, ...) EXCEPT if we are already on the CPU thread
// in which case the event will get handled immediately, before returning.
void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata)
{
if(false) //Core::IsCPUThread())
{
std::lock_guard<std::mutex> lk(externalEventLock);
event_types[event_type].callback(userdata, 0);
}
else
ScheduleEvent_Threadsafe(0, event_type, userdata);
}

void ClearPendingEvents()
{
while (first)
@@ -380,57 +309,6 @@ s64 UnscheduleEvent(int event_type, u64 userdata)
return result;
}

s64 UnscheduleThreadsafeEvent(int event_type, u64 userdata)
{
s64 result = 0;
std::lock_guard<std::mutex> lk(externalEventLock);
if (!tsFirst)
return result;
while(tsFirst)
{
if (tsFirst->type == event_type && tsFirst->userdata == userdata)
{
result = tsFirst->time - GetTicks();

Event *next = tsFirst->next;
FreeTsEvent(tsFirst);
tsFirst = next;
}
else
{
break;
}
}
if (!tsFirst)
{
tsLast = NULL;
return result;
}

Event *prev = tsFirst;
Event *ptr = prev->next;
while (ptr)
{
if (ptr->type == event_type && ptr->userdata == userdata)
{
result = ptr->time - GetTicks();

prev->next = ptr->next;
if (ptr == tsLast)
tsLast = prev;
FreeTsEvent(ptr);
ptr = prev->next;
}
else
{
prev = ptr;
ptr = ptr->next;
}
}

return result;
}

void RegisterMHzChangeCallback(MHzChangeCallback callback) {
mhzChangeCallbacks.push_back(callback);
}
@@ -485,57 +363,6 @@ void RemoveEvent(int event_type)
}
}

void RemoveThreadsafeEvent(int event_type)
{
std::lock_guard<std::mutex> lk(externalEventLock);
if (!tsFirst)
{
return;
}
while(tsFirst)
{
if (tsFirst->type == event_type)
{
Event *next = tsFirst->next;
FreeTsEvent(tsFirst);
tsFirst = next;
}
else
{
break;
}
}
if (!tsFirst)
{
tsLast = NULL;
return;
}
Event *prev = tsFirst;
Event *ptr = prev->next;
while (ptr)
{
if (ptr->type == event_type)
{
prev->next = ptr->next;
if (ptr == tsLast)
tsLast = prev;
FreeTsEvent(ptr);
ptr = prev->next;
}
else
{
prev = ptr;
ptr = ptr->next;
}
}
}

void RemoveAllEvents(int event_type)
{
RemoveThreadsafeEvent(event_type);
RemoveEvent(event_type);
}

void ProcessEvents() {
while (first) {
if (first->time <= (s64)GetTicks()) {
@@ -555,31 +382,6 @@
}
}

void MoveEvents()
{
hasTsEvents.store(0, std::memory_order::memory_order_release);

std::lock_guard<std::mutex> lk(externalEventLock);
// Move events from async queue into main queue
while (tsFirst)
{
Event *next = tsFirst->next;
AddEventToQueue(tsFirst);
tsFirst = next;
}
tsLast = NULL;

// Move free events to threadsafe pool
while(allocatedTsEvents > 0 && eventPool)
{
Event *ev = eventPool;
eventPool = ev->next;
ev->next = eventTsPool;
eventTsPool = ev;
allocatedTsEvents--;
}
}

void ForceCheck()
{
int cyclesExecuted = slicelength - currentMIPS->downcount;
@@ -600,8 +402,6 @@ void Advance() {
globalTimer += cyclesExecuted;
currentMIPS->downcount = slicelength;

if (hasTsEvents.load(std::memory_order_acquire))
MoveEvents();
ProcessEvents();

if (!first) {
@@ -677,24 +477,20 @@ std::string GetScheduledEventsSummary() {
return text;
}

void Event_DoState(PointerWrap &p, BaseEvent *ev)
{
void Event_DoState(PointerWrap &p, BaseEvent *ev) {
// There may be padding, so do each one individually.
Do(p, ev->time);
Do(p, ev->userdata);
Do(p, ev->type);
usedEventTypes.insert(ev->type);
}

void Event_DoStateOld(PointerWrap &p, BaseEvent *ev)
{
void Event_DoStateOld(PointerWrap &p, BaseEvent *ev) {
Do(p, *ev);
usedEventTypes.insert(ev->type);
}

void DoState(PointerWrap &p) {
std::lock_guard<std::mutex> lk(externalEventLock);

auto s = p.Section("CoreTiming", 1, 3);
if (!s)
return;
@@ -718,11 +514,12 @@ void DoState(PointerWrap &p) {
restoredEventTypes.clear();

if (s >= 3) {
DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, Event_DoState>(p, first, (Event **) NULL);
DoLinkedList<BaseEvent, GetNewTsEvent, FreeTsEvent, Event_DoState>(p, tsFirst, &tsLast);
DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, Event_DoState>(p, first, (Event **)nullptr);
// This is here because we previously stored a second queue of "threadsafe" events. Gone now. Remove in the next section version upgrade.
DoIgnoreUnusedLinkedList(p);
} else {
DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, Event_DoStateOld>(p, first, (Event **) NULL);
DoLinkedList<BaseEvent, GetNewTsEvent, FreeTsEvent, Event_DoStateOld>(p, tsFirst, &tsLast);
DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, Event_DoStateOld>(p, first, (Event **)nullptr);
DoIgnoreUnusedLinkedList(p);
}

Do(p, CPU_HZ);
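The DoState() change keeps the section version range at (1, 3): the second, now-removed queue is still consumed as a placeholder so that existing savestates load unchanged, per the inline comment above. A possible shape for the follow-up that comment asks for (the version number and branch layout here are assumptions, not part of this commit):

	auto s = p.Section("CoreTiming", 1, 4);
	if (!s)
		return;
	// ...
	if (s >= 4) {
		// New saves: only the real event queue, no placeholder byte.
		DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, Event_DoState>(p, first, (Event **)nullptr);
	} else if (s >= 3) {
		DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, Event_DoState>(p, first, (Event **)nullptr);
		DoIgnoreUnusedLinkedList(p);  // legacy "threadsafe" queue placeholder
	} else {
		DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, Event_DoStateOld>(p, first, (Event **)nullptr);
		DoIgnoreUnusedLinkedList(p);
	}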
7 changes: 0 additions & 7 deletions Core/CoreTiming.h
@@ -92,18 +92,11 @@ namespace CoreTiming
// userdata MAY NOT CONTAIN POINTERS. userdata might get written and reloaded from disk,
// when we implement state saves.
void ScheduleEvent(s64 cyclesIntoFuture, int event_type, u64 userdata=0);
void ScheduleEvent_Threadsafe(s64 cyclesIntoFuture, int event_type, u64 userdata=0);
void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata=0);
s64 UnscheduleEvent(int event_type, u64 userdata);
s64 UnscheduleThreadsafeEvent(int event_type, u64 userdata);

void RemoveEvent(int event_type);
void RemoveThreadsafeEvent(int event_type);
void RemoveAllEvents(int event_type);
bool IsScheduled(int event_type);
void Advance();
void MoveEvents();
void ProcessFifoWaitEvents();
void ForceCheck();

// Pretend that the main CPU has executed enough cycles to reach the next event.
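One constraint worth restating from the header comment above: userdata may be written out with a savestate and reloaded, so it must hold plain data, never pointers. A usage sketch (the event name, callback, and packed fields are invented for illustration; RegisterEvent, ScheduleEvent, and usToCycles are the existing APIs):

	static int evExampleTimeout;  // registered once at module init

	static void OnExampleTimeout(u64 userdata, int cyclesLate) {
		int channel = (int)(userdata >> 32);
		int request = (int)(userdata & 0xFFFFFFFF);
		// ... handle the timeout for (channel, request) ...
	}

	// Init:
	evExampleTimeout = CoreTiming::RegisterEvent("ExampleTimeout", &OnExampleTimeout);

	// Scheduling: pack integers into userdata instead of passing a pointer.
	u64 userdata = ((u64)channel << 32) | (u32)request;
	CoreTiming::ScheduleEvent(usToCycles(500), evExampleTimeout, userdata);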
4 changes: 2 additions & 2 deletions Core/HLE/sceUmd.cpp
@@ -179,7 +179,7 @@ static void __KernelUmdActivate()
__KernelNotifyCallback(driveCBId, notifyArg);

// Don't activate immediately, take time to "spin up."
CoreTiming::RemoveAllEvents(umdStatChangeEvent);
CoreTiming::RemoveEvent(umdStatChangeEvent);
CoreTiming::ScheduleEvent(usToCycles(MICRO_DELAY_ACTIVATE), umdStatChangeEvent, 1);
}

@@ -189,7 +189,7 @@ static void __KernelUmdDeactivate()
if (driveCBId != 0)
__KernelNotifyCallback(driveCBId, notifyArg);

CoreTiming::RemoveAllEvents(umdStatChangeEvent);
CoreTiming::RemoveEvent(umdStatChangeEvent);
__UmdStatChange(0, 0);
}

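With the second queue gone, RemoveAllEvents() had nothing left to do beyond RemoveEvent() (see the CoreTiming.cpp hunk where the wrapper is deleted), so these call sites switch to RemoveEvent() directly; the scheduling visible in this file only goes through CoreTiming::ScheduleEvent, so the UMD spin-up delay behaves as before.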
2 changes: 0 additions & 2 deletions Core/MIPS/JitCommon/JitBlockCache.cpp
@@ -247,8 +247,6 @@ static void ExpandRange(std::pair<u32, u32> &range, u32 newStart, u32 newEnd) {

void JitBlockCache::FinalizeBlock(int block_num, bool block_link) {
JitBlock &b = blocks_[block_num];
_dbg_assert_(b.blockNum == block_num);

_assert_msg_(Memory::IsValidAddress(b.originalAddress), "FinalizeBlock: Bad originalAddress %08x in block %d (b.num: %d) proxy: %s sz: %d", b.originalAddress, block_num, b.blockNum, b.proxyFor ? "y" : "n", b.codeSize);

b.originalFirstOpcode = Memory::Read_Opcode_JIT(b.originalAddress);
