diff --git a/CMakeLists.txt b/CMakeLists.txt index a1921b2..2fba934 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,6 +3,7 @@ project(PolyHook_2) set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) +# Turn Features On/Off, Set build options option(FEATURE_DETOURS "Implement detour functionality" ON) option(FEATURE_EXCEPTION "Implement all exception hooking functionality" ON) option(FEATURE_VIRTUALS "Implement all virtual table hooking functionality" ON) @@ -11,6 +12,12 @@ option(FEATURE_PE "Implement all win pe hooking functionality" ON) option(BUILD_DLL "Build dll & lib instead of tests" OFF) option(BUILD_STATIC "If BUILD_DLL is set, create the type that can be statically linked" ON) +# Calculate inclusion of necessary dependencies based on features + +# for now only inlinentd uses asmjit +set(DEP_ASMJIT_NEED ${FEATURE_INLINENTD}) +# todo: make inclusion of capstone stuff depend on feature flags + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd") #IDE's like it when header file are included as source files @@ -129,7 +136,7 @@ if(FEATURE_PE MATCHES ON) endif() endif() -if(FEATURE_INLINENTD MATCHES ON) +if(DEP_ASMJIT_NEED MATCHES ON) # only build tests if making exe if(BUILD_DLL MATCHES OFF) @@ -140,8 +147,15 @@ if(FEATURE_INLINENTD MATCHES ON) include("${ASMJIT_DIR}/CMakeLists.txt") include_directories(${ASMJIT_DIR}/src) - set(UNIT_TEST_SOURCES ${UNIT_TEST_SOURCES} - ${PROJECT_SOURCE_DIR}/UnitTests/TestAsmJit.cpp) + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + # 64 bits + set(UNIT_TEST_SOURCES ${UNIT_TEST_SOURCES} + ${PROJECT_SOURCE_DIR}/UnitTests/TestDetourNoTDx64.cpp) + elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) + # 32 bits + set(UNIT_TEST_SOURCES ${UNIT_TEST_SOURCES} + ${PROJECT_SOURCE_DIR}/UnitTests/TestDetourNoTDx86.cpp) + endif() endif() endif() diff --git a/UnitTests/TestAsmJit.cpp b/UnitTests/TestDetourNoTDx64.cpp similarity index 63% rename from UnitTests/TestAsmJit.cpp rename to 
UnitTests/TestDetourNoTDx64.cpp index ea3bc9e..d567384 100644 --- a/UnitTests/TestAsmJit.cpp +++ b/UnitTests/TestDetourNoTDx64.cpp @@ -33,7 +33,6 @@ TEST_CASE("Minimal Example", "[AsmJit]") { #include "headers/Detour/X64Detour.hpp" #include "headers/CapstoneDisassembler.hpp" - NOINLINE void hookMeInt(int a) { volatile int var = 1; int var2 = var + a; @@ -41,14 +40,24 @@ NOINLINE void hookMeInt(int a) { } NOINLINE void hookMeFloat(float a) { - volatile float ans = 0.0f; + float ans = 1.0f; ans += a; printf("%f %f\n", ans, a); } -uint64_t hookMeTramp = 0; -NOINLINE void myCallback(const PLH::ILCallback::Parameters* p) { - printf("holy balls it works: asInt:%d asFloat:%f\n", *(int*)p->getArgPtr(0), *(float*)p->getArgPtr(0)); +NOINLINE void hookMeIntFloatDouble(int a, float b, double c) { + volatile float ans = 0.0f; + ans += (float)a; + ans += c; + ans += b; + printf("%d %f %f %f\n", a, b, c, ans); +} + +NOINLINE void myCallback(const PLH::ILCallback::Parameters* p, const uint8_t count) { + printf("Argument Count: %d\n", count); + for (int i = 0; i < count; i++) { + printf("Arg: %d asInt:%d asFloat:%f asDouble:%f\n", i, *(int*)p->getArgPtr(i), *(float*)p->getArgPtr(i), *(double*)p->getArgPtr(i)); + } } TEST_CASE("Minimal ILCallback", "[AsmJit][ILCallback]") { @@ -59,11 +68,11 @@ TEST_CASE("Minimal ILCallback", "[AsmJit][ILCallback]") { asmjit::FuncSignature sig; std::vector args = { asmjit::TypeIdOf::kTypeId }; sig.init(asmjit::CallConv::kIdHost, asmjit::TypeIdOf::kTypeId, args.data(), (uint32_t)args.size()); - uint64_t JIT = callback.getJitFunc(sig, &myCallback, &hookMeTramp); + uint64_t JIT = callback.getJitFunc(sig, &myCallback); REQUIRE(JIT != 0); PLH::CapstoneDisassembler dis(PLH::Mode::x64); - PLH::x64Detour detour((char*)&hookMeInt, (char*)JIT, &hookMeTramp, dis); + PLH::x64Detour detour((char*)&hookMeInt, (char*)JIT, callback.getTrampolineHolder(), dis); REQUIRE(detour.hook() == true); hookMeInt(1337); REQUIRE(detour.unHook()); @@ -74,14 +83,30 @@ 
TEST_CASE("Minimal ILCallback", "[AsmJit][ILCallback]") { asmjit::FuncSignature sig; std::vector args = { asmjit::TypeIdOf::kTypeId }; sig.init(asmjit::CallConv::kIdHost, asmjit::TypeIdOf::kTypeId, args.data(), (uint32_t)args.size()); - uint64_t JIT = callback.getJitFunc(sig, &myCallback, &hookMeTramp); + uint64_t JIT = callback.getJitFunc(sig, &myCallback); REQUIRE(JIT != 0); PLH::CapstoneDisassembler dis(PLH::Mode::x64); - PLH::x64Detour detour((char*)&hookMeFloat, (char*)JIT, &hookMeTramp, dis); + PLH::x64Detour detour((char*)&hookMeFloat, (char*)JIT, callback.getTrampolineHolder(), dis); REQUIRE(detour.hook() == true); hookMeFloat(1337.1337f); REQUIRE(detour.unHook()); } + + SECTION("Int, float, double arguments") { + // void func(int, float, double), ABI must match hooked function + asmjit::FuncSignature sig; + std::vector args = { asmjit::TypeIdOf::kTypeId, asmjit::TypeIdOf::kTypeId, asmjit::TypeIdOf::kTypeId }; + sig.init(asmjit::CallConv::kIdHost, asmjit::TypeIdOf::kTypeId, args.data(), (uint32_t)args.size()); + uint64_t JIT = callback.getJitFunc(sig, &myCallback); + REQUIRE(JIT != 0); + + PLH::CapstoneDisassembler dis(PLH::Mode::x64); + PLH::x64Detour detour((char*)&hookMeIntFloatDouble, (char*)JIT, callback.getTrampolineHolder(), dis); + REQUIRE(detour.hook() == true); + + hookMeIntFloatDouble(1337, 1337.1337f, 1337.1337); + REQUIRE(detour.unHook()); + } } \ No newline at end of file diff --git a/UnitTests/TestDetourNoTDx86.cpp b/UnitTests/TestDetourNoTDx86.cpp new file mode 100644 index 0000000..2bf1f2d --- /dev/null +++ b/UnitTests/TestDetourNoTDx86.cpp @@ -0,0 +1,81 @@ +#include + +#include "headers/Detour/ILCallback.hpp" +#pragma warning( disable : 4244) + +typedef int(*Func)(void); +TEST_CASE("Minimal Example", "[AsmJit]") { + asmjit::JitRuntime rt; // Runtime specialized for JIT code execution. + + asmjit::CodeHolder code; // Holds code and relocation information. + code.init(rt.getCodeInfo()); // Initialize to the same arch as JIT runtime. 
+ + asmjit::X86Assembler a(&code); // Create and attach X86Assembler to `code`. + a.mov(asmjit::x86::eax, 1); // Move one to 'eax' register. + a.ret(); // Return from function. + // ----> X86Assembler is no longer needed from here and can be destroyed <---- + + Func fn; + asmjit::Error err = rt.add(&fn, &code); // Add the generated code to the runtime. + if (err) { + REQUIRE(false); + } + + int result = fn(); // Execute the generated code. + REQUIRE(result == 1); + + // All classes use RAII, all resources will be released before `main()` returns, + // the generated function can be, however, released explicitly if you intend to + // reuse or keep the runtime alive, which you should in a production-ready code. + rt.release(fn); +} + +#include "headers/Detour/X86Detour.hpp" +#include "headers/CapstoneDisassembler.hpp" + +NOINLINE void hookMeInt(int a) { + volatile int var = 1; + int var2 = var + a; + printf("%d %d\n", var, var2); +} + +NOINLINE void hookMeFloat(float a) { + volatile float ans = 0.0f; + ans += a; + printf("%f %f\n", ans, a); +} + +NOINLINE void hookMeIntFloatDouble(int a, float b, double c) { + volatile float ans = 0.0f; + ans += (float)a; + ans += c; + ans += b; + printf("%d %f %f %f\n", a, b, c, ans); +} + +NOINLINE void myCallback(const PLH::ILCallback::Parameters* p, const uint8_t count) { + printf("Argument Count: %d\n", count); + for (int i = 0; i < count; i++) { + printf("Arg: %d asInt:%d asFloat:%f asDouble:%f\n", i, *(int*)p->getArgPtr(i), *(float*)p->getArgPtr(i), *(double*)p->getArgPtr(i)); + } +} + +//TEST_CASE("Minimal ILCallback", "[AsmJit][ILCallback]") { +// PLH::ILCallback callback; +// +// SECTION("Integer argument") { +// // void func(int), ABI must match hooked function +// asmjit::FuncSignature sig; +// std::vector args = { asmjit::TypeIdOf::kTypeId }; +// sig.init(asmjit::CallConv::kIdHost, asmjit::TypeIdOf::kTypeId, args.data(), (uint32_t)args.size()); +// uint64_t JIT = callback.getJitFunc(sig, &myCallback); +// REQUIRE(JIT != 
0); +// +// PLH::CapstoneDisassembler dis(PLH::Mode::x86); +// PLH::x64Detour detour((char*)&hookMeInt, (char*)JIT, callback.getTrampolineHolder(), dis); +// REQUIRE(detour.hook() == true); +// hookMeInt(1337); +// REQUIRE(detour.unHook()); +// } +// +//} \ No newline at end of file diff --git a/headers/Detour/ADetour.hpp b/headers/Detour/ADetour.hpp index 503dbbd..4c1aaf1 100644 --- a/headers/Detour/ADetour.hpp +++ b/headers/Detour/ADetour.hpp @@ -102,7 +102,7 @@ class Detour : public PLH::IHook { uint64_t& minProlSz, uint64_t& roundProlSz); - void buildRelocationList(insts_t& prologue, const uint64_t roundProlSz, const int64_t delta, PLH::insts_t &instsNeedingEntry, PLH::insts_t &instsNeedingReloc); + bool buildRelocationList(insts_t& prologue, const uint64_t roundProlSz, const int64_t delta, PLH::insts_t &instsNeedingEntry, PLH::insts_t &instsNeedingReloc); template PLH::insts_t relocateTrampoline(insts_t& prologue, uint64_t jmpTblStart, const int64_t delta, const uint8_t jmpSz, MakeJmpFn makeJmp, const PLH::insts_t& instsNeedingReloc, const PLH::insts_t& instsNeedingEntry); diff --git a/headers/Detour/ILCallback.hpp b/headers/Detour/ILCallback.hpp index e188c74..cf15b1e 100644 --- a/headers/Detour/ILCallback.hpp +++ b/headers/Detour/ILCallback.hpp @@ -20,11 +20,12 @@ namespace PLH { // asm depends on this specific type uint64_t m_arguments[]; }; - typedef void(*tUserCallback)(const Parameters* params); + typedef void(*tUserCallback)(const Parameters* params, const uint8_t count); ILCallback() = default; ~ILCallback(); - uint64_t getJitFunc(const asmjit::FuncSignature sig, const tUserCallback callback, uint64_t* userTrampVar); + uint64_t getJitFunc(const asmjit::FuncSignature sig, const tUserCallback callback); + uint64_t* getTrampolineHolder(); private: // does a given type fit in a general purpose register (i.e. 
is it integer type) bool isGeneralReg(const uint8_t typeId) const; @@ -35,5 +36,8 @@ namespace PLH { asmjit::VMemMgr m_mem; uint64_t m_callbackBuf; asmjit::X86Mem argsStack; + + // ptr to trampoline allocated by hook, we hold this so user doesn't need to. + uint64_t m_trampolinePtr; }; } diff --git a/headers/Detour/x64Detour.hpp b/headers/Detour/x64Detour.hpp index 53ef641..20de0cd 100644 --- a/headers/Detour/x64Detour.hpp +++ b/headers/Detour/x64Detour.hpp @@ -33,7 +33,7 @@ class x64Detour : public Detour { uint8_t getPrefJmpSize() const; private: - std::optional makeTrampoline(insts_t& prologue); + bool makeTrampoline(insts_t& prologue, insts_t& trampolineOut); }; } #endif //POLYHOOK_2_X64DETOUR_HPP diff --git a/headers/Detour/x86Detour.hpp b/headers/Detour/x86Detour.hpp index 7c230bd..abd000f 100644 --- a/headers/Detour/x86Detour.hpp +++ b/headers/Detour/x86Detour.hpp @@ -31,7 +31,7 @@ class x86Detour : public Detour { uint8_t getJmpSize() const; private: - std::optional makeTrampoline(insts_t& prologue); + bool makeTrampoline(insts_t& prologue, insts_t& trampolineOut); }; } #endif //POLYHOOK_2_X86DETOUR_HPP diff --git a/sources/ADetour.cpp b/sources/ADetour.cpp index 6f7866a..0912f8e 100644 --- a/sources/ADetour.cpp +++ b/sources/ADetour.cpp @@ -81,7 +81,7 @@ bool PLH::Detour::expandProlSelfJmps(insts_t& prol, return true; } -void PLH::Detour::buildRelocationList(insts_t& prologue, const uint64_t roundProlSz, const int64_t delta, PLH::insts_t& instsNeedingEntry, PLH::insts_t& instsNeedingReloc) { +bool PLH::Detour::buildRelocationList(insts_t& prologue, const uint64_t roundProlSz, const int64_t delta, PLH::insts_t& instsNeedingEntry, PLH::insts_t& instsNeedingReloc) { assert(instsNeedingEntry.size() == 0); assert(instsNeedingReloc.size() == 0); assert(prologue.size() > 0); @@ -89,6 +89,7 @@ void PLH::Detour::buildRelocationList(insts_t& prologue, const uint64_t roundPro const uint64_t prolStart = prologue.front().getAddress(); for (auto& inst : prologue) { + // 
types that change control flow if (inst.isBranching() && inst.hasDisplacement() && (inst.getDestination() < prolStart || inst.getDestination() > prolStart + roundProlSz)) { @@ -102,7 +103,25 @@ void PLH::Detour::buildRelocationList(insts_t& prologue, const uint64_t roundPro instsNeedingReloc.push_back(inst); } } + + // data operations (duplicated because clearer) + if (!inst.isBranching() && inst.hasDisplacement()) { + const uint8_t dispSzBits = (uint8_t)inst.getDispSize() * 8; + const uint64_t maxInstDisp = (uint64_t)(std::pow(2, dispSzBits) / 2.0 - 1.0); + if ((uint64_t)std::llabs(delta) > maxInstDisp) { + /*EX: 48 8d 0d 96 79 07 00 lea rcx, [rip + 0x77996] + If instruction is moved beyond displacement field width + we can't fix the load. TODO: generate equivalent load + with asmjit and insert it at position + */ + ErrorLog::singleton().push("Cannot fixup IP relative data operation, relocation beyond displacement size", ErrorLevel::SEV); + return false; + }else { + instsNeedingReloc.push_back(inst); + } + } } + return true; } bool PLH::Detour::unHook() { diff --git a/sources/ILCallback.cpp b/sources/ILCallback.cpp index 44030ed..7d6833a 100644 --- a/sources/ILCallback.cpp +++ b/sources/ILCallback.cpp @@ -1,6 +1,6 @@ #include "headers/Detour/ILCallback.hpp" -uint64_t PLH::ILCallback::getJitFunc(const asmjit::FuncSignature sig, const PLH::ILCallback::tUserCallback callback, uint64_t* userTrampVar) { +uint64_t PLH::ILCallback::getJitFunc(const asmjit::FuncSignature sig, const PLH::ILCallback::tUserCallback callback) { asmjit::CodeHolder code; code.init(asmjit::CodeInfo(asmjit::ArchInfo::kTypeHost)); @@ -10,7 +10,7 @@ uint64_t PLH::ILCallback::getJitFunc(const asmjit::FuncSignature sig, const PLH: // to small to really need it cc.getFunc()->getFrameInfo().disablePreservedFP(); - + // map argument slots to registers, following abi. 
std::vector argRegisters; for (uint8_t arg_idx = 0; arg_idx < sig.getArgCount(); arg_idx++) { @@ -68,24 +68,25 @@ uint64_t PLH::ILCallback::getJitFunc(const asmjit::FuncSignature sig, const PLH: asmjit::X86Gp argStruct = cc.newIntPtr("argStruct"); cc.lea(argStruct, argsStack); + // fill reg to pass struct arg count to callback + asmjit::X86Gp argCountParam = cc.newU8(); + cc.mov(argCountParam, (uint8_t)sig.getArgCount()); + // call to user provided function (use ABI of host compiler) - cc.sub(asmjit::x86::rsp, 32); - auto call = cc.call(asmjit::imm_ptr((unsigned char*)callback), asmjit::FuncSignature1(asmjit::CallConv::kIdHost)); + auto call = cc.call(asmjit::imm_ptr((unsigned char*)callback), asmjit::FuncSignature2(asmjit::CallConv::kIdHost)); call->setArg(0, argStruct); - cc.add(asmjit::x86::rsp, 32); + call->setArg(1, argCountParam); // deref the trampoline ptr (must live longer) asmjit::X86Gp orig_ptr = cc.newUInt64(); - cc.mov(orig_ptr, (uint64_t)userTrampVar); + cc.mov(orig_ptr, (uint64_t)getTrampolineHolder()); cc.mov(orig_ptr, asmjit::x86::ptr(orig_ptr)); // call trampoline, map input args same order they were passed to us - cc.sub(asmjit::x86::rsp, 32); auto orig_call = cc.call(orig_ptr, sig); for (uint8_t arg_idx = 0; arg_idx < sig.getArgCount(); arg_idx++) { orig_call->setArg(arg_idx, argRegisters.at(arg_idx)); } - cc.add(asmjit::x86::rsp, 32); // end function cc.endFunc(); @@ -106,6 +107,10 @@ uint64_t PLH::ILCallback::getJitFunc(const asmjit::FuncSignature sig, const PLH: return m_callbackBuf; } +uint64_t* PLH::ILCallback::getTrampolineHolder() { + return &m_trampolinePtr; +} + bool PLH::ILCallback::isGeneralReg(const uint8_t typeId) const { switch (typeId) { case asmjit::TypeId::kI8: @@ -124,7 +129,6 @@ bool PLH::ILCallback::isGeneralReg(const uint8_t typeId) const { } } - bool PLH::ILCallback::isXmmReg(const uint8_t typeId) const { switch (typeId) { case asmjit::TypeId::kF32: diff --git a/sources/x64Detour.cpp b/sources/x64Detour.cpp index 
884277c..0a276e8 100644 --- a/sources/x64Detour.cpp +++ b/sources/x64Detour.cpp @@ -50,6 +50,9 @@ bool PLH::x64Detour::hook() { return false; } + // remove, just for debugging w/o following recursion above + insts = m_disasm.disassemble(insts.at(0).getAddress(), insts.at(0).getAddress(), insts.at(0).getAddress() + 100); + // update given fn address to resolved one m_fnAddress = insts.front().getAddress(); @@ -81,10 +84,13 @@ bool PLH::x64Detour::hook() { ErrorLog::singleton().push("Prologue to overwrite:\n" + instsToStr(prologue) + "\n", ErrorLevel::INFO); { // copy all the prologue stuff to trampoline - auto jmpTblOpt = makeTrampoline(prologue); + insts_t jmpTblOpt; + if (!makeTrampoline(prologue, jmpTblOpt)) + return false; + ErrorLog::singleton().push("Trampoline:\n" + instsToStr(m_disasm.disassemble(m_trampoline, m_trampoline, m_trampoline + m_trampolineSz)) + "\n", ErrorLevel::INFO); - if (jmpTblOpt) - ErrorLog::singleton().push("Trampoline Jmp Tbl:\n" + instsToStr(*jmpTblOpt) + "\n", ErrorLevel::INFO); + if (jmpTblOpt.size() > 0) + ErrorLog::singleton().push("Trampoline Jmp Tbl:\n" + instsToStr(jmpTblOpt) + "\n", ErrorLevel::INFO); } *m_userTrampVar = m_trampoline; @@ -101,7 +107,7 @@ bool PLH::x64Detour::hook() { return true; } -std::optional PLH::x64Detour::makeTrampoline(insts_t& prologue) { +bool PLH::x64Detour::makeTrampoline(insts_t& prologue, insts_t& trampolineOut) { assert(prologue.size() > 0); const uint64_t prolStart = prologue.front().getAddress(); const uint16_t prolSz = calcInstsSz(prologue); @@ -109,11 +115,21 @@ std::optional PLH::x64Detour::makeTrampoline(insts_t& prologue) { /** Make a guess for the number entries we need so we can try to allocate a trampoline. The allocation address will change each attempt, which changes delta, which changes the number of needed entries. 
So - we just try until we hit that lucky number that works + + The relocation could also fail because of data operations too. But that's specific to the function and can't + work again on a retry (same function, duh). Return immediately in that case.**/ uint8_t neededEntryCount = 5; PLH::insts_t instsNeedingEntry; PLH::insts_t instsNeedingReloc; + + uint8_t retries = 0; do { + if (retries++ > 4) { + ErrorLog::singleton().push("Failed to calculate trampoline information", ErrorLevel::SEV); + return false; + } + if (m_trampoline != NULL) { delete[](unsigned char*)m_trampoline; neededEntryCount = (uint8_t)instsNeedingEntry.size(); @@ -126,7 +142,8 @@ std::optional PLH::x64Detour::makeTrampoline(insts_t& prologue) { int64_t delta = m_trampoline - prolStart; - buildRelocationList(prologue, prolSz, delta, instsNeedingEntry, instsNeedingReloc); + if (!buildRelocationList(prologue, prolSz, delta, instsNeedingEntry, instsNeedingReloc)) + return false; } while (instsNeedingEntry.size() > neededEntryCount); const int64_t delta = m_trampoline - prolStart; @@ -152,10 +169,8 @@ std::optional PLH::x64Detour::makeTrampoline(insts_t& prologue) { auto makeJmpFn = std::bind(makex64MinimumJump, _1, _2, std::bind(calcJmpHolder)); uint64_t jmpTblStart = jmpToProlAddr + getMinJmpSize(); - PLH::insts_t jmpTblEntries = relocateTrampoline(prologue, jmpTblStart, delta, getMinJmpSize(), + trampolineOut = relocateTrampoline(prologue, jmpTblStart, delta, getMinJmpSize(), makeJmpFn, instsNeedingReloc, instsNeedingEntry); - if (jmpTblEntries.size() > 0) - return jmpTblEntries; - else - return std::nullopt; + + return true; } \ No newline at end of file diff --git a/sources/x86Detour.cpp b/sources/x86Detour.cpp index c859f5d..6af448c 100644 --- a/sources/x86Detour.cpp +++ b/sources/x86Detour.cpp @@ -78,11 +78,13 @@ bool PLH::x86Detour::hook() { ErrorLog::singleton().push("Prologue to overwrite:\n" + instsToStr(prologue) + "\n", 
ErrorLevel::INFO); { // copy all the prologue stuff to trampoline - auto jmpTblOpt = makeTrampoline(prologue); + insts_t jmpTblOpt; + if (!makeTrampoline(prologue, jmpTblOpt)) + return false; ErrorLog::singleton().push("Trampoline:\n" + instsToStr(m_disasm.disassemble(m_trampoline, m_trampoline, m_trampoline + m_trampolineSz)) + "\n", ErrorLevel::INFO); - if (jmpTblOpt) - ErrorLog::singleton().push("Trampoline Jmp Tbl:\n" + instsToStr(*jmpTblOpt) + "\n", ErrorLevel::INFO); + if (jmpTblOpt.size() > 0) + ErrorLog::singleton().push("Trampoline Jmp Tbl:\n" + instsToStr(jmpTblOpt) + "\n", ErrorLevel::INFO); } *m_userTrampVar = m_trampoline; @@ -99,18 +101,29 @@ bool PLH::x86Detour::hook() { return true; } -std::optional PLH::x86Detour::makeTrampoline(insts_t& prologue) { +bool PLH::x86Detour::makeTrampoline(insts_t& prologue, insts_t& trampolineOut) { assert(prologue.size() > 0); const uint64_t prolStart = prologue.front().getAddress(); const uint16_t prolSz = calcInstsSz(prologue); /** Make a guess for the number entries we need so we can try to allocate a trampoline. The allocation address will change each attempt, which changes delta, which changes the number of needed entries. So - we just try until we hit that lucky number that works. + + The relocation could also fail because of data operations too. But that's specific to the function and can't + work again on a retry (same function, duh). Return immediately in that case. 
+ **/ uint8_t neededEntryCount = 5; PLH::insts_t instsNeedingEntry; PLH::insts_t instsNeedingReloc; + + uint8_t retries = 0; do { + if (retries++ > 4) { + ErrorLog::singleton().push("Failed to calculate trampoline information", ErrorLevel::SEV); + return false; + } + if (m_trampoline != NULL) { delete[](unsigned char*)m_trampoline; neededEntryCount = (uint8_t)instsNeedingEntry.size(); @@ -122,7 +135,8 @@ std::optional PLH::x86Detour::makeTrampoline(insts_t& prologue) { int64_t delta = m_trampoline - prolStart; - buildRelocationList(prologue, prolSz, delta, instsNeedingEntry, instsNeedingReloc); + if (!buildRelocationList(prologue, prolSz, delta, instsNeedingEntry, instsNeedingReloc)) + return false; } while (instsNeedingEntry.size() > neededEntryCount); const int64_t delta = m_trampoline - prolStart; @@ -136,9 +150,6 @@ std::optional PLH::x86Detour::makeTrampoline(insts_t& prologue) { } uint64_t jmpTblStart = jmpToProlAddr + getJmpSize(); - PLH::insts_t jmpTblEntries = relocateTrampoline(prologue, jmpTblStart, delta, getJmpSize(), makex86Jmp, instsNeedingReloc, instsNeedingEntry); - if (jmpTblEntries.size() > 0) - return jmpTblEntries; - else - return std::nullopt; + trampolineOut = relocateTrampoline(prologue, jmpTblStart, delta, getJmpSize(), makex86Jmp, instsNeedingReloc, instsNeedingEntry); + return true; } \ No newline at end of file