diff --git a/dlls/ntdll/signal_x86_64.c b/dlls/ntdll/signal_x86_64.c
index 849d0deda0..54e09f43d9 100644
--- a/dlls/ntdll/signal_x86_64.c
+++ b/dlls/ntdll/signal_x86_64.c
@@ -1887,6 +1887,11 @@ __ASM_GLOBAL_FUNC( set_full_cpu_context,
                    "movq 0xb0(%rdi),%rdi\n\t"       /* context->Rdi */
                    "iretq" );
+/*
+ * MH:W patch declaration
+ */
+
+extern int mhw_is_running(void);
 
 /***********************************************************************
  *           set_cpu_context
@@ -1906,6 +1911,17 @@ void DECLSPEC_HIDDEN set_cpu_context( const CONTEXT *context )
         amd64_thread_data()->dr6 = context->Dr6;
         amd64_thread_data()->dr7 = context->Dr7;
     }
+    /*
+     * Skip restoring the full context as long as we
+     * have only set/reset the debug registers
+     */
+    if(mhw_is_running()) {
+        if(flags & CONTEXT_DEBUG_REGISTERS) {
+            WARN( " [MH:W patch] skipping restoring full context\n" );
+            return;
+        }
+    }
     if (flags & CONTEXT_FULL)
     {
         if (!(flags & CONTEXT_CONTROL))
diff --git a/dlls/ntdll/thread.c b/dlls/ntdll/thread.c
index f73e141911..12be1c68e8 100644
--- a/dlls/ntdll/thread.c
+++ b/dlls/ntdll/thread.c
@@ -149,6 +149,194 @@ static ULONG_PTR get_image_addr(void)
 }
 #endif
 
+/*
+ * Patch for MH:W after the release of 12/3/2020
+ *
+ * The idea of this patch is not to perform I/O
+ * operations with the wineserver when getting and
+ * setting the context of a given thread, but to
+ * keep a local copy cached (max MHW_MAX_CONTEXTS)
+ */
+
+#define MHW_MAX_TH_MAP (256)
+#define MHW_MAX_CONTEXTS MHW_MAX_TH_MAP
+#define MHW_STEAM_ID "582010"
+#define MHW_DEBUG_LOG "MHW_DEBUG_LOG"
+#define MHW_CHECK_TODO (-2)
+#define MHW_CHECK_YES (1)
+#define MHW_CHECK_NO (0)
+#define MHW_UNLIKELY(x) __builtin_expect(!!(x),0)
+#define MHW_DEBUG_REGISTERS (0x00000010)
+
+typedef struct {
+    HANDLE handles[MHW_MAX_TH_MAP];
+    pthread_t th[MHW_MAX_TH_MAP];
+    int cur_el;
+    pthread_mutex_t mtx;
+} MHW_TH_MAP;
+
+typedef struct {
+    pthread_t th[MHW_MAX_CONTEXTS];
+    context_t ctx[MHW_MAX_CONTEXTS];
+    int cur_el;
+    pthread_mutex_t mtx;
+} MHW_CTX_MAP;
+
+static MHW_TH_MAP mhw_th_map = { .cur_el=0, .mtx=PTHREAD_MUTEX_INITIALIZER};
+static MHW_CTX_MAP mhw_ctx_map = { .cur_el=0, .mtx=PTHREAD_MUTEX_INITIALIZER};
+
+static void mhw_th_map_add(HANDLE h, pthread_t th) {
+    pthread_mutex_lock(&mhw_th_map.mtx);
+    if(mhw_th_map.cur_el < MHW_MAX_TH_MAP) {
+        const int ce = mhw_th_map.cur_el;
+        mhw_th_map.handles[ce] = h;
+        mhw_th_map.th[ce] = th;
+        ++mhw_th_map.cur_el;
+    } else {
+        FIXME( " [MH:W patch] Reached the limit for threads\n" );
+    }
+    pthread_mutex_unlock(&mhw_th_map.mtx);
+}
+
+static HANDLE mhw_th_map_remove_by_th(pthread_t th) {
+    HANDLE rv = 0;
+    pthread_mutex_lock(&mhw_th_map.mtx);
+    for(int i = 0; i < mhw_th_map.cur_el; ++i) {
+        if(pthread_equal(mhw_th_map.th[i], th)) {
+            rv = mhw_th_map.handles[i];
+            /* if we have more than 1 element,
+             * move the last one into its place */
+            if(mhw_th_map.cur_el > 1) {
+                const int last_el = mhw_th_map.cur_el - 1;
+                mhw_th_map.th[i] = mhw_th_map.th[last_el];
+                mhw_th_map.handles[i] = mhw_th_map.handles[last_el];
+            }
+            --mhw_th_map.cur_el;
+            break;
+        }
+    }
+    pthread_mutex_unlock(&mhw_th_map.mtx);
+    return rv;
+}
+
+static pthread_t mhw_th_map_find_by_h(HANDLE h) {
+    pthread_t rv = 0;
+    pthread_mutex_lock(&mhw_th_map.mtx);
+    for(int i = 0; i < mhw_th_map.cur_el; ++i) {
+        if(mhw_th_map.handles[i] == h) {
+            rv = mhw_th_map.th[i];
+            break;
+        }
+    }
+    pthread_mutex_unlock(&mhw_th_map.mtx);
+    return rv;
+}
+
+static HANDLE mhw_th_map_find_by_th(pthread_t th) {
+    HANDLE rv = 0;
+    pthread_mutex_lock(&mhw_th_map.mtx);
+    for(int i = 0; i < mhw_th_map.cur_el; ++i) {
+        if(pthread_equal(mhw_th_map.th[i], th)) {
+            rv = mhw_th_map.handles[i];
+            break;
+        }
+    }
+    pthread_mutex_unlock(&mhw_th_map.mtx);
+    return rv;
+}
+
+static int mhw_set_context(pthread_t th, const context_t* ctx) {
+    int idx = 0,
+        rv = 0;
+    pthread_mutex_lock(&mhw_ctx_map.mtx);
+    /* If we can find it, replace the content
+     */
+    for(; idx < mhw_ctx_map.cur_el; ++idx) {
+        if(pthread_equal(mhw_ctx_map.th[idx], th)) {
+            memcpy(&mhw_ctx_map.ctx[idx], ctx, sizeof(context_t));
+            rv = 1;
+            break;
+        }
+    }
+    /* Otherwise add it
+     */
+    if(idx == mhw_ctx_map.cur_el) {
+        if(idx < MHW_MAX_CONTEXTS) {
+            mhw_ctx_map.th[idx] = th;
+            memcpy(&mhw_ctx_map.ctx[idx], ctx, sizeof(context_t));
+            ++mhw_ctx_map.cur_el;
+        } else {
+            FIXME( " [MH:W patch] Reached the limit for contexts\n" );
+        }
+    }
+    pthread_mutex_unlock(&mhw_ctx_map.mtx);
+    return rv;
+}
+
+static void mhw_get_context_flags(const context_t* from, context_t* to, unsigned int flags) {
+    /*
+     * These flags may need specific logic, leave it
+     * like this for now
+    const static unsigned int CPU_FLAGS = SERVER_CTX_DEBUG_REGISTERS;
+    */
+    to->flags |= flags;
+    if (flags & SERVER_CTX_CONTROL) to->ctl = from->ctl;
+    if (flags & SERVER_CTX_INTEGER) to->integer = from->integer;
+    if (flags & SERVER_CTX_SEGMENTS) to->seg = from->seg;
+    if (flags & SERVER_CTX_FLOATING_POINT) to->fp = from->fp;
+    if (flags & SERVER_CTX_DEBUG_REGISTERS) to->debug = from->debug;
+    if (flags & SERVER_CTX_EXTENDED_REGISTERS) to->ext = from->ext;
+}
+
+static int mhw_get_context(pthread_t th, context_t* ctx, unsigned int flags) {
+    int rv = 0;
+    pthread_mutex_lock(&mhw_ctx_map.mtx);
+    for(int i = 0; i < mhw_ctx_map.cur_el; ++i) {
+        if(pthread_equal(mhw_ctx_map.th[i], th)) {
+            mhw_get_context_flags(&mhw_ctx_map.ctx[i], ctx, flags);
+            rv = 1;
+            break;
+        }
+    }
+    pthread_mutex_unlock(&mhw_ctx_map.mtx);
+    return rv;
+}
+
+static int mhw_remove_context(pthread_t th) {
+    int rv = 0;
+    pthread_mutex_lock(&mhw_ctx_map.mtx);
+    for(int i = 0; i < mhw_ctx_map.cur_el; ++i) {
+        if(pthread_equal(mhw_ctx_map.th[i], th)) {
+            /* if we have more than 1 element,
+             * move the last one into its place */
+            if(mhw_ctx_map.cur_el > 1) {
+                const int last_el = mhw_ctx_map.cur_el - 1;
+                mhw_ctx_map.th[i] = mhw_ctx_map.th[last_el];
+                if(last_el != i) memcpy(&mhw_ctx_map.ctx[i], &mhw_ctx_map.ctx[last_el], sizeof(context_t));
+            }
+            --mhw_ctx_map.cur_el;
+            rv = 1;
+            break;
+        }
+    }
+    pthread_mutex_unlock(&mhw_ctx_map.mtx);
+    return rv;
+}
+
+int mhw_is_running(void) {
+    static int mhw_running_flag = MHW_CHECK_TODO;
+    /* we don't care if we execute the check
+     * below spuriously */
+    if(MHW_UNLIKELY(mhw_running_flag == MHW_CHECK_TODO)) {
+        const char* p_gameid = getenv("SteamGameId");
+        const int is_running = (!p_gameid || strcmp(p_gameid, MHW_STEAM_ID)) ? MHW_CHECK_NO : MHW_CHECK_YES;
+        /* atomic CAS */
+        __sync_bool_compare_and_swap(&mhw_running_flag, MHW_CHECK_TODO, is_running);
+    }
+
+    return mhw_running_flag == MHW_CHECK_YES;
+}
+
 /***********************************************************************
  *           thread_init
  *
@@ -299,7 +487,21 @@ static void free_thread_data( TEB *teb )
 void abort_thread( int status )
 {
     pthread_sigmask( SIG_BLOCK, &server_block_set, NULL );
-    if (interlocked_xchg_add( &nb_threads, -1 ) <= 1) _exit( get_unix_exit_code( status ));
+    if (interlocked_xchg_add( &nb_threads, -1 ) <= 1) {
+        /*
+         * If we're running MH:W, perform some housekeeping
+         */
+        if (mhw_is_running()) {
+            const pthread_t th = pthread_self();
+            if(!mhw_remove_context(th)) {
+                WARN( " [MH:W patch] Can't remove context for id %lu\n", th);
+            }
+            if(!mhw_th_map_remove_by_th(th)) {
+                WARN( " [MH:W patch] Can't remove thread for id %lu\n", th);
+            }
+        }
+        _exit( get_unix_exit_code( status ));
+    }
     signal_exit_thread( status );
 }
 
@@ -354,6 +556,18 @@ void WINAPI RtlExitUserThread( ULONG status )
 
     if (thread_data->pthread_id)
     {
+        /*
+         * If we're running MH:W, perform some housekeeping
+         */
+        if (mhw_is_running()) {
+            if(!mhw_remove_context(thread_data->pthread_id)) {
+                WARN( " [MH:W patch] Can't remove context for id %lu\n", thread_data->pthread_id);
+            }
+            if(!mhw_th_map_remove_by_th(thread_data->pthread_id)) {
+                WARN( " [MH:W patch] Can't remove thread for id %lu\n", thread_data->pthread_id);
+            }
+        }
+
         pthread_join( thread_data->pthread_id, NULL );
         free_thread_data( teb );
     }
@@ -552,6 +766,10 @@ NTSTATUS WINAPI RtlCreateUserThread( HANDLE process, SECURITY_DESCRIPTOR *descr,
     if (handle_ptr) *handle_ptr = handle;
     else NtClose( handle );
 
+    if(mhw_is_running()) {
+        mhw_th_map_add(handle, pthread_id);
+    }
+
     return STATUS_SUCCESS;
 
 error:
@@ -658,7 +876,6 @@ NTSTATUS WINAPI NtAlertThread( HANDLE handle )
     return STATUS_NOT_IMPLEMENTED;
 }
 
-
 /******************************************************************************
  *  NtTerminateThread  (NTDLL.@)
  *  ZwTerminateThread  (NTDLL.@)
@@ -744,6 +961,27 @@ NTSTATUS set_thread_context( HANDLE handle, const context_t *context, BOOL *self
     NTSTATUS ret;
     DWORD dummy, i;
 
+    /*
+     * Short circuit in case MH:W is running
+     */
+    if(mhw_is_running()) {
+        pthread_t th = (handle == GetCurrentThread()) ? pthread_self() : mhw_th_map_find_by_h(handle);
+        if(mhw_th_map_find_by_th(th)) {
+            *self = pthread_equal(th, pthread_self());
+            if(mhw_set_context(th, context)) {
+                if (context->flags == MHW_DEBUG_REGISTERS) {
+                    return STATUS_SUCCESS;
+                } else {
+                    TRACE( " [MH:W patch] mhw_set_context(%lu, %p) self %i on handle %p (%p), falling through (%08X)\n", th, (void*)context, *self, (void*)handle, (void*)&ret, context->flags);
+                }
+            } else {
+                TRACE( " [MH:W patch] mhw_set_context(%lu, %p) first time, falling through\n", th, (void*)context);
+            }
+        } else {
+            WARN( " [MH:W patch] Can't resolve HANDLE %p to a thread or thread id %lu, falling through\n", (void*)handle, th);
+        }
+    }
+
     SERVER_START_REQ( set_thread_context )
     {
         req->handle = wine_server_obj_handle( handle );
@@ -790,6 +1028,28 @@ NTSTATUS get_thread_context( HANDLE handle, context_t *context, unsigned int fla
     NTSTATUS ret;
     DWORD dummy, i;
 
+    /*
+     * Short circuit in case MH:W is running
+     */
+    if(mhw_is_running()) {
+        pthread_t th = (handle == GetCurrentThread()) ? pthread_self() : mhw_th_map_find_by_h(handle);
+        if(mhw_th_map_find_by_th(th)) {
+            *self = pthread_equal(th, pthread_self());
+            if(!mhw_get_context(th, context, flags)) {
+                WARN( " [MH:W patch] Can't resolve handle map for %p, known thread %lu, falling through (this may be ok)\n", (void*)handle, th);
+            } else {
+                if (flags == MHW_DEBUG_REGISTERS) {
+                    TRACE( " [MH:W patch] mhw_get_context(%lu, %p) flags %08X self %i (%p)\n", th, (void*)context, flags, *self, (void*)&ret);
+                    return STATUS_SUCCESS;
+                } else {
+                    TRACE( " [MH:W patch] mhw_get_context(%lu, %p) flags %08X self %i (%p), falling through\n", th, (void*)context, flags, *self, (void*)&ret);
+                }
+            }
+        } else {
+            WARN( " [MH:W patch] Can't resolve HANDLE %p to a thread, unknown thread %lu, falling through (this is not ok)\n", (void*)handle, th);
+        }
+    }
+
     SERVER_START_REQ( get_thread_context )
     {
         req->handle = wine_server_obj_handle( handle );
@@ -825,6 +1085,32 @@ NTSTATUS get_thread_context( HANDLE handle, context_t *context, unsigned int fla
         NtResumeThread( handle, &dummy );
         if (ret == STATUS_PENDING) ret = STATUS_ACCESS_DENIED;
     }
+
+    /*
+     * If we're at this stage and MH:W is running, cache the
+     * result of the server operation
+     */
+    if(mhw_is_running()) {
+        pthread_t th = (handle == GetCurrentThread()) ? pthread_self() : mhw_th_map_find_by_h(handle);
+        if(mhw_th_map_find_by_th(th)) {
+            *self = pthread_equal(th, pthread_self());
+            if(mhw_set_context(th, context)) {
+                TRACE( " [MH:W patch] mhw_set_context(%lu, %p) %i, updating entry\n", th, (void*)context, ret);
+            } else {
+                TRACE( " [MH:W patch] mhw_set_context(%lu, %p) %i, cache entry added, self %i (%p)\n", th, (void*)context, ret, *self, (void*)&ret);
+            }
+        } else {
+            if(handle == GetCurrentThread()) {
+                *self = 1;
+                mhw_set_context(pthread_self(), context);
+                WARN( " [MH:W patch] Couldn't resolve HANDLE %p to a thread, known thread %lu, cached this result\n", (void*)handle, th);
+                return STATUS_SUCCESS;
+            } else {
+                WARN( " [MH:W patch] Can't resolve HANDLE %p to a thread, unknown thread %lu, did not cache this result\n", (void*)handle, th);
+            }
+        }
+    }
+
     return ret;
 }
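
For reference, here is a minimal, self-contained sketch of the bookkeeping scheme the patch builds its context cache around: a mutex-protected fixed-size array keyed by pthread id, with replace-or-append on store and swap-with-last on removal. It is an illustration only, not part of the patch or of Wine; the names (sketch_ctx_map, sketch_set_context, fake_context_t, ...) are made up, and fake_context_t stands in for Wine's context_t so the sketch compiles on its own.

/* Build with: cc -std=gnu99 sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_MAX_CONTEXTS 256

typedef struct { unsigned long dr7; } fake_context_t;   /* stand-in for context_t */

typedef struct {
    pthread_t th[SKETCH_MAX_CONTEXTS];
    fake_context_t ctx[SKETCH_MAX_CONTEXTS];
    int cur_el;
    pthread_mutex_t mtx;
} sketch_ctx_map;

static sketch_ctx_map g_map = { .cur_el = 0, .mtx = PTHREAD_MUTEX_INITIALIZER };

/* Replace the cached context if the thread is known, otherwise append it.
 * Returns 1 when an existing entry was replaced, 0 when a new one was added. */
static int sketch_set_context(pthread_t th, const fake_context_t *ctx)
{
    int idx = 0, found = 0;
    pthread_mutex_lock(&g_map.mtx);
    for (; idx < g_map.cur_el; ++idx) {
        if (pthread_equal(g_map.th[idx], th)) { found = 1; break; }
    }
    if (found || g_map.cur_el < SKETCH_MAX_CONTEXTS) {
        g_map.th[idx] = th;
        memcpy(&g_map.ctx[idx], ctx, sizeof(*ctx));
        if (!found) ++g_map.cur_el;
    }
    pthread_mutex_unlock(&g_map.mtx);
    return found;
}

/* Remove an entry by moving the last element into its slot (O(1) removal). */
static int sketch_remove_context(pthread_t th)
{
    int rv = 0;
    pthread_mutex_lock(&g_map.mtx);
    for (int i = 0; i < g_map.cur_el; ++i) {
        if (pthread_equal(g_map.th[i], th)) {
            const int last = g_map.cur_el - 1;
            g_map.th[i] = g_map.th[last];
            if (last != i) memcpy(&g_map.ctx[i], &g_map.ctx[last], sizeof(g_map.ctx[i]));
            --g_map.cur_el;
            rv = 1;
            break;
        }
    }
    pthread_mutex_unlock(&g_map.mtx);
    return rv;
}

int main(void)
{
    fake_context_t c = { .dr7 = 0x1 };
    sketch_set_context(pthread_self(), &c);     /* first call appends */
    c.dr7 = 0x401;
    sketch_set_context(pthread_self(), &c);     /* second call replaces in place */
    sketch_remove_context(pthread_self());
    printf("entries left: %d\n", g_map.cur_el); /* prints 0 */
    return 0;
}

The swap-with-last removal trades ordering for O(1) deletion, which is fine here because lookups are always by thread id, never by position.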
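
The other technique the patch leans on is the one-time environment check in mhw_is_running(): a tri-state flag that starts at "unknown" and is settled at most once with a compare-and-swap, so concurrent callers may race on getenv() but all agree on the final value. Below is the same pattern reduced to a standalone program; the names (game_check, GAME_*) are invented for illustration.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define GAME_TODO (-2)
#define GAME_YES  (1)
#define GAME_NO   (0)

static int game_check(const char *steam_id)
{
    static int flag = GAME_TODO;
    if (__builtin_expect(flag == GAME_TODO, 0)) {
        const char *id = getenv("SteamGameId");
        const int running = (id && !strcmp(id, steam_id)) ? GAME_YES : GAME_NO;
        /* only the first writer wins; later calls see the settled value */
        __sync_bool_compare_and_swap(&flag, GAME_TODO, running);
    }
    return flag == GAME_YES;
}

int main(void)
{
    printf("MH:W running: %d\n", game_check("582010"));
    return 0;
}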