diff --git a/rpcs3/Emu/Cell/SPURecompiler.cpp b/rpcs3/Emu/Cell/SPURecompiler.cpp
index d4aa77aca18b..3e9d3cb9cd16 100644
--- a/rpcs3/Emu/Cell/SPURecompiler.cpp
+++ b/rpcs3/Emu/Cell/SPURecompiler.cpp
@@ -4521,6 +4521,18 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 		std::array store_context_last_id = fill_array(0); // Protects against illegal forward ordering
 		std::array store_context_first_id = fill_array(usz{umax}); // Protects against illegal past store elimination (backwards ordering is not implemented)
 		std::array store_context_ctr = fill_array(1); // Store barrier cointer
+
+		bool does_gpr_barrier_proceed_last_store(u32 i) const noexcept
+		{
+			const usz counter = store_context_ctr[i];
+			return counter != 1 && counter > store_context_last_id[i];
+		}
+
+		bool does_gpr_barrier_preceed_first_store(u32 i) const noexcept
+		{
+			const usz counter = store_context_ctr[i];
+			return counter != 1 && counter < store_context_first_id[i];
+		}
 	};
 
 	struct function_info
@@ -6019,7 +6031,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 			for (u32 i = 0; i < 128; i++)
 			{
 				// Check if the store is beyond the last barrier
-				if (auto& bs = bqbi->store[i]; bs && bqbi->store_context_last_id[i] == bqbi->store_context_ctr[i])
+				if (auto& bs = bqbi->store[i]; bs && !bqbi->does_gpr_barrier_proceed_last_store(i))
 				{
 					for (auto& [a, b] : m_blocks)
 					{
@@ -6107,7 +6119,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 						auto* cur = work_list[wi];
 						if (std::count(killers.begin(), killers.end(), cur))
 						{
-							work2_list.emplace_back(cur, bb_to_info[cur] && bb_to_info[cur]->store_context_first_id[i] > 1);
+							work2_list.emplace_back(cur, bb_to_info[cur] && bb_to_info[cur]->does_gpr_barrier_preceed_first_store(i));
 							continue;
 						}
 
@@ -6145,17 +6157,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 					{
 						auto [cur, found_user] = work2_list[wi];
 
-						if (cur == bs->getParent())
-						{
-							if (found_user)
-							{
-								// Reset: store is being used and preserved by ensure_gpr_stores()
-								killers.clear();
-								break;
-							}
-
-							continue;
-						}
+						ensure(cur != bs->getParent());
 
 						if (!found_user && wi >= work_list_tail_blocks_max_index)
 						{
@@ -6170,6 +6172,18 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 
 						for (auto* p : llvm::predecessors(cur))
 						{
+							if (p == bs->getParent())
+							{
+								if (found_user)
+								{
+									// Reset: store is being used and preserved by ensure_gpr_stores()
+									killers.clear();
+									break;
+								}
+
+								continue;
+							}
+
 							if (!worked_on[p])
 							{
 								worked_on[p] = true;
@@ -6181,6 +6195,11 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 								work2_list.push_back(std::make_pair(p, true));
 							}
 						}
+
+						if (killers.empty())
+						{
+							break;
+						}
 					}
 
 					// Finally erase the dead store
@@ -6207,7 +6226,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
 				for (u32 i = 0; i < 128; i++)
 				{
 					// If store isn't erased, try to sink it
-					if (auto& bs = block_q[bi]->store[i]; bs && block_q[bi]->bb->targets.size() > 1 && block_q[bi]->store_context_last_id[i] == block_q[bi]->store_context_ctr[i])
+					if (auto& bs = block_q[bi]->store[i]; bs && block_q[bi]->bb->targets.size() > 1 && !block_q[bi]->does_gpr_barrier_proceed_last_store(i))
 					{
 						std::map> sucs;
 
diff --git a/rpcs3/Emu/Cell/SPUThread.cpp b/rpcs3/Emu/Cell/SPUThread.cpp
index 097695ecfdd8..6aa2e79e693f 100644
--- a/rpcs3/Emu/Cell/SPUThread.cpp
+++ b/rpcs3/Emu/Cell/SPUThread.cpp
@@ -2022,6 +2022,20 @@ spu_thread::spu_thread(utils::serial& ar, lv2_spu_group* group)
 
 	serialize_common(ar);
 
+	raddr = ::narrow(ar.pop());
+
+	if (raddr)
+	{
+		// Acquire reservation
+		if (!vm::check_addr(raddr))
+		{
+			fmt::throw_exception("SPU Serialization: Reservation address is not accessible! (addr=0x%x)", raddr);
+		}
+
+		rtime = vm::reservation_acquire(raddr);
+		mov_rdata(rdata, *vm::get_super_ptr(raddr));
+	}
+
 	status_npc.raw().npc = pc | u8{interrupts_enabled};
 
 	if (get_type() == spu_type::threaded)
@@ -2058,8 +2072,8 @@ void spu_thread::save(utils::serial& ar)
 
 	if (raddr)
 	{
-		// Lose reservation at savestate load with an event if one existed at savestate save
-		set_events(SPU_EVENT_LR);
+		// Last check for reservation-lost event
+		get_events(SPU_EVENT_LR);
 	}
 
 	ar(index);
@@ -2073,6 +2087,9 @@ void spu_thread::save(utils::serial& ar)
 
 	serialize_common(ar);
 
+	// Let's save it as u64 for future proofing
+	ar(u64{raddr});
+
 	if (get_type() == spu_type::threaded)
 	{
 		for (const auto& [key, q] : spuq)