Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Need testing] SPU TSX Fixups #6147

Merged
merged 11 commits into from Jul 14, 2019
62 changes: 31 additions & 31 deletions rpcs3/Emu/Cell/Modules/cellSpurs.cpp
Expand Up @@ -19,7 +19,7 @@

LOG_CHANNEL(cellSpurs);

error_code sys_spu_image_close(vm::ptr<sys_spu_image> img);
error_code sys_spu_image_close(ppu_thread&, vm::ptr<sys_spu_image> img);

// TODO
struct cell_error_t
Expand Down Expand Up @@ -125,7 +125,7 @@ namespace _spurs
//s32 cellSpursDetachLv2EventQueue(vm::ptr<CellSpurs> spurs, u8 port);

// Enable the SPU exception event handler
s32 cellSpursEnableExceptionEventHandler(vm::ptr<CellSpurs> spurs, b8 flag);
s32 cellSpursEnableExceptionEventHandler(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, b8 flag);

//s32 cellSpursSetGlobalExceptionEventHandler(vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursGlobalExceptionEventHandler> eaHandler, vm::ptr<void> arg);
//s32 cellSpursUnsetGlobalExceptionEventHandler(vm::ptr<CellSpurs> spurs);
Expand Down Expand Up @@ -427,7 +427,7 @@ s32 _spurs::attach_lv2_eq(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 queue,
portMask |= 1ull << (i);
}

if (s32 res = sys_spu_thread_group_connect_event_all_threads(spurs->spuTG, queue, portMask, port))
if (s32 res = sys_spu_thread_group_connect_event_all_threads(ppu, spurs->spuTG, queue, portMask, port))
{
if (res == CELL_EISCONN)
{
Expand Down Expand Up @@ -705,7 +705,7 @@ void _spurs::event_helper_entry(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
events[0].data2 = event_data2;
events[0].data3 = event_data3;

if (sys_event_queue_tryreceive(spurs->eventQueue, events + 1, 7, count) != CELL_OK)
if (sys_event_queue_tryreceive(ppu, spurs->eventQueue, events + 1, 7, count) != CELL_OK)
{
continue;
}
Expand Down Expand Up @@ -783,7 +783,7 @@ s32 _spurs::create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 p

if (s32 rc = sys_event_port_connect_local(spurs->eventPort, spurs->eventQueue))
{
sys_event_port_destroy(spurs->eventPort);
sys_event_port_destroy(ppu, spurs->eventPort);

if (s32 rc2 = _spurs::detach_lv2_eq(spurs, spurs->spuPort, true))
{
Expand All @@ -808,8 +808,8 @@ s32 _spurs::create_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 p

//if (!eht)
{
sys_event_port_disconnect(spurs->eventPort);
sys_event_port_destroy(spurs->eventPort);
sys_event_port_disconnect(ppu, spurs->eventPort);
sys_event_port_destroy(ppu, spurs->eventPort);

if (s32 rc = _spurs::detach_lv2_eq(spurs, spurs->spuPort, true))
{
Expand Down Expand Up @@ -849,7 +849,7 @@ s32 _spurs::finalize_spu(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
{
CHECK_SUCCESS(sys_spu_thread_group_join(ppu, spurs->spuTG, vm::null, vm::null));

if (s32 rc = sys_spu_thread_group_destroy(spurs->spuTG))
if (s32 rc = sys_spu_thread_group_destroy(ppu, spurs->spuTG))
{
if (rc == CELL_EBUSY)
{
Expand All @@ -864,13 +864,13 @@ s32 _spurs::finalize_spu(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
}
else
{
if (s32 rc = sys_spu_thread_group_destroy(spurs->spuTG))
if (s32 rc = sys_spu_thread_group_destroy(ppu, spurs->spuTG))
{
return rc;
}
}

CHECK_SUCCESS(sys_spu_image_close(spurs.ptr(&CellSpurs::spuImg)));
CHECK_SUCCESS(sys_spu_image_close(ppu, spurs.ptr(&CellSpurs::spuImg)));

return CELL_OK;
}
Expand All @@ -894,8 +894,8 @@ s32 _spurs::stop_event_helper(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)

spurs->ppu1 = 0xFFFFFFFF;

CHECK_SUCCESS(sys_event_port_disconnect(spurs->eventPort));
CHECK_SUCCESS(sys_event_port_destroy(spurs->eventPort));
CHECK_SUCCESS(sys_event_port_disconnect(ppu, spurs->eventPort));
CHECK_SUCCESS(sys_event_port_destroy(ppu, spurs->eventPort));
CHECK_SUCCESS(_spurs::detach_lv2_eq(spurs, spurs->spuPort, true));
CHECK_SUCCESS(sys_event_queue_destroy(ppu, spurs->eventQueue, SYS_EVENT_QUEUE_DESTROY_FORCE));

Expand Down Expand Up @@ -957,25 +957,25 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
// Initialise SPURS context
const bool isSecond = (flags & SAF_SECOND_VERSION) != 0;

auto rollback = [=]
auto rollback = [&]
{
if (spurs->semPrv)
{
sys_semaphore_destroy((u32)spurs->semPrv);
sys_semaphore_destroy(ppu, ::narrow<u32>(+spurs->semPrv));
}

for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++)
{
if (spurs->wklF1[i].sem)
{
sys_semaphore_destroy((u32)spurs->wklF1[i].sem);
sys_semaphore_destroy(ppu, ::narrow<u32>(+spurs->wklF1[i].sem));
}

if (isSecond)
{
if (spurs->wklF2[i].sem)
{
sys_semaphore_destroy((u32)spurs->wklF2[i].sem);
sys_semaphore_destroy(ppu, ::narrow<u32>(+spurs->wklF2[i].sem));
}
}
}
Expand Down Expand Up @@ -1018,7 +1018,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,

for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++)
{
if (s32 rc = sys_semaphore_create(sem, semAttr, 0, 1))
if (s32 rc = sys_semaphore_create(ppu, sem, semAttr, 0, 1))
{
return rollback(), rc;
}
Expand All @@ -1027,7 +1027,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,

if (isSecond)
{
if (s32 rc = sys_semaphore_create(sem, semAttr, 0, 1))
if (s32 rc = sys_semaphore_create(ppu, sem, semAttr, 0, 1))
{
return rollback(), rc;
}
Expand All @@ -1038,7 +1038,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,

// Create semaphore
semAttr->name_u64 = "_spuPrv\0"_u64;
if (s32 rc = sys_semaphore_create(sem, semAttr, 0, 1))
if (s32 rc = sys_semaphore_create(ppu, sem, semAttr, 0, 1))
{
return rollback(), rc;
}
Expand Down Expand Up @@ -1090,9 +1090,9 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
if (flags & SAF_UNKNOWN_FLAG_9) spuTgAttr->type |= 0x0800;
if (flags & SAF_SYSTEM_WORKLOAD_ENABLED) spuTgAttr->type |= SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM;

if (s32 rc = sys_spu_thread_group_create(spurs.ptr(&CellSpurs::spuTG), nSpus, spuPriority, spuTgAttr))
if (s32 rc = sys_spu_thread_group_create(ppu, spurs.ptr(&CellSpurs::spuTG), nSpus, spuPriority, spuTgAttr))
{
sys_spu_image_close(spurs.ptr(&CellSpurs::spuImg));
sys_spu_image_close(ppu, spurs.ptr(&CellSpurs::spuImg));
return rollback(), rc;
}

Expand All @@ -1112,10 +1112,10 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
spuThArgs->arg1 = (u64)num << 32;
spuThArgs->arg2 = (u64)spurs.addr();

if (s32 rc = sys_spu_thread_initialize(spurs.ptr(&CellSpurs::spus, num), spurs->spuTG, num, spurs.ptr(&CellSpurs::spuImg), spuThAttr, spuThArgs))
if (s32 rc = sys_spu_thread_initialize(ppu, spurs.ptr(&CellSpurs::spus, num), spurs->spuTG, num, spurs.ptr(&CellSpurs::spuImg), spuThAttr, spuThArgs))
{
sys_spu_thread_group_destroy(spurs->spuTG);
sys_spu_image_close(spurs.ptr(&CellSpurs::spuImg));
sys_spu_thread_group_destroy(ppu, spurs->spuTG);
sys_spu_image_close(ppu, spurs.ptr(&CellSpurs::spuImg));
return rollback(), rc;
}

Expand Down Expand Up @@ -1184,7 +1184,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
}

// Enable SPURS exception handler
if (s32 rc = cellSpursEnableExceptionEventHandler(spurs, true /*enable*/))
if (s32 rc = cellSpursEnableExceptionEventHandler(ppu, spurs, true /*enable*/))
{
_spurs::signal_to_handler_thread(ppu, spurs);
_spurs::join_handler_thread(ppu, spurs);
Expand Down Expand Up @@ -1717,7 +1717,7 @@ s32 cellSpursDetachLv2EventQueue(vm::ptr<CellSpurs> spurs, u8 port)
return _spurs::detach_lv2_eq(spurs, port, false);
}

s32 cellSpursEnableExceptionEventHandler(vm::ptr<CellSpurs> spurs, b8 flag)
s32 cellSpursEnableExceptionEventHandler(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, b8 flag)
{
cellSpurs.warning("cellSpursEnableExceptionEventHandler(spurs=*0x%x, flag=%d)", spurs, flag);

Expand All @@ -1737,14 +1737,14 @@ s32 cellSpursEnableExceptionEventHandler(vm::ptr<CellSpurs> spurs, b8 flag)
{
if (oldEnableEH == 0)
{
rc = sys_spu_thread_group_connect_event(spurs->spuTG, spurs->eventQueue, SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION);
rc = sys_spu_thread_group_connect_event(ppu, spurs->spuTG, spurs->eventQueue, SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION);
}
}
else
{
if (oldEnableEH == 1)
{
rc = sys_spu_thread_group_disconnect_event(spurs->eventQueue, SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION);
rc = sys_spu_thread_group_disconnect_event(ppu, spurs->eventQueue, SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION);
}
}

Expand Down Expand Up @@ -3071,7 +3071,7 @@ s32 cellSpursEventFlagAttachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEven
return success(), CELL_OK;
}

sys_event_port_destroy(*eventPortId);
sys_event_port_destroy(ppu, *eventPortId);
}

if (_spurs::detach_lv2_eq(spurs, *port, true) == CELL_OK)
Expand Down Expand Up @@ -3132,8 +3132,8 @@ s32 cellSpursEventFlagDetachLv2EventQueue(ppu_thread& ppu, vm::ptr<CellSpursEven

if (eventFlag->direction == CELL_SPURS_EVENT_FLAG_ANY2ANY)
{
sys_event_port_disconnect(eventFlag->eventPortId);
sys_event_port_destroy(eventFlag->eventPortId);
sys_event_port_disconnect(ppu, eventFlag->eventPortId);
sys_event_port_destroy(ppu, eventFlag->eventPortId);
}

s32 rc = _spurs::detach_lv2_eq(spurs, port, true);
Expand Down
22 changes: 10 additions & 12 deletions rpcs3/Emu/Cell/Modules/sys_mmapper_.cpp
Expand Up @@ -2,43 +2,41 @@
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_mmapper.h"



extern logs::channel sysPrxForUser;

// sysPrxForUser wrapper: allocate mappable memory from the process's default
// resources. Forwards to the lv2 shared-memory syscall with a fixed sentinel
// id (0xffff000000000000ull — presumably "no explicit container"; confirm
// against lv2 sys_mmapper docs). The resulting memory id is written to *mem_id.
s32 sys_mmapper_allocate_memory(ppu_thread& ppu, u32 size, u64 flags, vm::ptr<u32> mem_id)
{
	sysPrxForUser.notice("sys_mmapper_allocate_memory(size=0x%x, flags=0x%llx, mem_id=*0x%x)", size, flags, mem_id);

	return sys_mmapper_allocate_shared_memory(ppu, 0xffff000000000000ull, size, flags, mem_id);
}

// sysPrxForUser wrapper: allocate mappable memory from an explicit memory
// container `cid`. Forwards to the lv2 shared-memory syscall with the same
// fixed sentinel first argument used by sys_mmapper_allocate_memory.
// The resulting memory id is written to *mem_id.
s32 sys_mmapper_allocate_memory_from_container(ppu_thread& ppu, u32 size, u32 cid, u64 flags, vm::ptr<u32> mem_id)
{
	sysPrxForUser.notice("sys_mmapper_allocate_memory_from_container(size=0x%x, cid=0x%x, flags=0x%llx, mem_id=*0x%x)", size, cid, flags, mem_id);

	return sys_mmapper_allocate_shared_memory_from_container(ppu, 0xffff000000000000ull, size, cid, flags, mem_id);
}

// sysPrxForUser wrapper: map a previously allocated memory block `mem_id`
// at virtual address `addr`. Thin forward to the lv2 syscall; the ppu_thread
// reference is threaded through so the syscall can account the calling thread.
s32 sys_mmapper_map_memory(ppu_thread& ppu, u32 addr, u32 mem_id, u64 flags)
{
	sysPrxForUser.notice("sys_mmapper_map_memory(addr=0x%x, mem_id=0x%x, flags=0x%llx)", addr, mem_id, flags);

	return sys_mmapper_map_shared_memory(ppu, addr, mem_id, flags);
}

// sysPrxForUser wrapper: unmap the memory block mapped at `addr`.
// The id of the unmapped block is written back through *mem_id.
// Thin forward to the lv2 syscall.
s32 sys_mmapper_unmap_memory(ppu_thread& ppu, u32 addr, vm::ptr<u32> mem_id)
{
	sysPrxForUser.notice("sys_mmapper_unmap_memory(addr=0x%x, mem_id=*0x%x)", addr, mem_id);

	return sys_mmapper_unmap_shared_memory(ppu, addr, mem_id);
}

// sysPrxForUser wrapper: free the memory block identified by `mem_id`.
// Thin forward to the lv2 syscall.
s32 sys_mmapper_free_memory(ppu_thread& ppu, u32 mem_id)
{
	sysPrxForUser.notice("sys_mmapper_free_memory(mem_id=0x%x)", mem_id);

	return sys_mmapper_free_shared_memory(ppu, mem_id);
}

extern void sysPrxForUser_sys_mmapper_init()
Expand Down
8 changes: 4 additions & 4 deletions rpcs3/Emu/Cell/Modules/sys_spu_.cpp
Expand Up @@ -271,7 +271,7 @@ error_code sys_spu_elf_get_segments(u32 elf_img, vm::ptr<sys_spu_segment> segmen
return CELL_OK;
}

error_code sys_spu_image_import(vm::ptr<sys_spu_image> img, u32 src, u32 type)
error_code sys_spu_image_import(ppu_thread& ppu, vm::ptr<sys_spu_image> img, u32 src, u32 type)
{
sysPrxForUser.warning("sys_spu_image_import(img=*0x%x, src=0x%x, type=%d)", img, src, type);

Expand Down Expand Up @@ -324,7 +324,7 @@ error_code sys_spu_image_import(vm::ptr<sys_spu_image> img, u32 src, u32 type)
img_size = std::max<u32>(img_size, static_cast<u32>(p.p_offset + p.p_filesz));
}

return _sys_spu_image_import(img, src, img_size, 0);
return _sys_spu_image_import(ppu, img, src, img_size, 0);
}
else
{
Expand Down Expand Up @@ -357,7 +357,7 @@ error_code sys_spu_image_import(vm::ptr<sys_spu_image> img, u32 src, u32 type)
}
}

error_code sys_spu_image_close(vm::ptr<sys_spu_image> img)
error_code sys_spu_image_close(ppu_thread& ppu, vm::ptr<sys_spu_image> img)
{
sysPrxForUser.warning("sys_spu_image_close(img=*0x%x)", img);

Expand All @@ -369,7 +369,7 @@ error_code sys_spu_image_close(vm::ptr<sys_spu_image> img)
else if (img->type == SYS_SPU_IMAGE_TYPE_KERNEL)
{
// Call the syscall
return _sys_spu_image_close(img);
return _sys_spu_image_close(ppu, img);
}
else
{
Expand Down
11 changes: 9 additions & 2 deletions rpcs3/Emu/Cell/SPUThread.cpp
Expand Up @@ -117,13 +117,15 @@ namespace spu
std::array<std::atomic<u8>, 65536> atomic_instruction_table = {};
constexpr u32 native_jiffy_duration_us = 1500; //About 1ms resolution with a half offset

void acquire_pc_address(u32 pc, u32 timeout_ms = 3)
void acquire_pc_address(spu_thread& spu, u32 pc, u32 timeout_ms = 3)
{
const u8 max_concurrent_instructions = (u8)g_cfg.core.preferred_spu_threads;
const u32 pc_offset = pc >> 2;

if (atomic_instruction_table[pc_offset].load(std::memory_order_consume) >= max_concurrent_instructions)
{
spu.state += cpu_flag::wait;

if (timeout_ms > 0)
{
const u64 timeout = timeout_ms * 1000u; //convert to microseconds
Expand All @@ -150,6 +152,11 @@ namespace spu
const auto count = atomic_instruction_table[pc_offset].load(std::memory_order_consume) * 100ull;
busy_wait(count);
}

if (spu.test_stopped())
{
spu_runtime::g_escape(&spu);
}
}

atomic_instruction_table[pc_offset]++;
Expand All @@ -172,7 +179,7 @@ namespace spu
{
if (g_cfg.core.preferred_spu_threads > 0)
{
acquire_pc_address(pc, (u32)g_cfg.core.spu_delay_penalty);
acquire_pc_address(spu, pc, (u32)g_cfg.core.spu_delay_penalty);
active = true;
}
}
Expand Down