Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

i#111 x64: stack slowpath and redzone handling #2023

Merged
merged 2 commits into from Aug 26, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
23 changes: 20 additions & 3 deletions drmemory/alloc_drmem.c
Expand Up @@ -25,6 +25,7 @@
#include "slowpath.h"
#include "report.h"
#include "shadow.h"
#include "stack.h"
#include "syscall.h"
#include "alloc.h"
#include "heap.h"
Expand Down Expand Up @@ -1316,15 +1317,27 @@ client_handle_continue(void *drcontext, dr_mcontext_t *mc)
/* Marks a newly allocated stack region [start, end) in shadow memory as
 * defined or undefined, per the caller's request.  When the ABI has a
 * beyond-TOS redzone (BEYOND_TOS_REDZONE_SIZE > 0), the redzone window is
 * shifted down along with the allocation and marked undefined so reads
 * there are flagged as uninitialized rather than unaddressable.
 * Fix: removed a stale duplicated pre-change `if (...)` condition line that
 * was left stacked above the braced condition (merged-diff artifact).
 */
void
client_stack_alloc(byte *start, byte *end, bool defined)
{
    if (options.shadowing &&
        (options.check_uninitialized || options.check_stack_bounds)) {
        shadow_set_range(start, end, defined ? SHADOW_DEFINED : SHADOW_UNDEFINED);
        /* Slide the beyond-TOS redzone down by the size of this allocation. */
        if (BEYOND_TOS_REDZONE_SIZE > 0)
            shadow_set_range(start - BEYOND_TOS_REDZONE_SIZE,
                             end - BEYOND_TOS_REDZONE_SIZE, SHADOW_UNDEFINED);
    }
}

/* Marks a deallocated stack region [start, end) in shadow memory.  With a
 * beyond-TOS redzone, the freed region becomes the new redzone (undefined,
 * so stale reads report uninitialized) while the window one redzone below
 * becomes unaddressable; without a redzone the freed region is simply
 * unaddressable.
 * Fix: removed the stale pre-change lines (an unbraced `if` plus an
 * unconditional shadow_set_range(..., SHADOW_UNADDRESSABLE)) that the
 * merged diff left in front of the new braced block — they redundantly
 * re-marked [start, end) before the redzone logic overwrote it.
 */
void
client_stack_dealloc(byte *start, byte *end)
{
    if (options.shadowing &&
        (options.check_uninitialized || options.check_stack_bounds)) {
        if (BEYOND_TOS_REDZONE_SIZE > 0) {
            /* Freed region becomes the redzone; old redzone becomes unaddr. */
            shadow_set_range(start, end, SHADOW_UNDEFINED);
            shadow_set_range(start - BEYOND_TOS_REDZONE_SIZE,
                             end - BEYOND_TOS_REDZONE_SIZE, SHADOW_UNADDRESSABLE);
        } else
            shadow_set_range(start, end, SHADOW_UNADDRESSABLE);
    }
    /* NOTE(review): marks the pointer-return register's shadow as defined —
     * presumably because a return value commonly lives there after a stack
     * dealloc (e.g. a ret); confirm against callers.
     */
    if (options.shadowing && options.check_uninitialized)
        register_shadow_set_dword(DR_REG_PTR_RETURN, SHADOW_DEFINED);
}
Expand Down Expand Up @@ -1701,6 +1714,10 @@ at_signal_handler(void)
shadow_set_byte(&info, sp, SHADOW_DEFINED);
sp++;
}
if (BEYOND_TOS_REDZONE_SIZE > 0) {
shadow_set_range((byte *)mc.xsp - BEYOND_TOS_REDZONE_SIZE, (byte *)mc.xsp,
SHADOW_UNDEFINED);
}
LOG(2, "signal handler: marked new frame defined "PFX"-"PFX"\n", mc.xsp, sp);
}

Expand Down
11 changes: 9 additions & 2 deletions drmemory/drmemory.c
Expand Up @@ -1352,9 +1352,16 @@ set_initial_structures(void *drcontext)
LOG(1, "initial stack is "PFX"-"PFX", sp="PFX"\n",
stack_base, stack_base + stack_size, mc.xsp);
set_known_range(stack_base, (app_pc)mc.xsp);
if (options.check_stack_bounds)
if (options.check_stack_bounds) {
set_initial_range((app_pc)mc.xsp, stack_base + stack_size);
else
if (BEYOND_TOS_REDZONE_SIZE > 0) {
size_t redzone_sz = BEYOND_TOS_REDZONE_SIZE;
if ((app_pc)mc.xsp - BEYOND_TOS_REDZONE_SIZE < stack_base)
redzone_sz = (app_pc)mc.xsp - stack_base;
shadow_set_range((app_pc)mc.xsp - redzone_sz, (app_pc)mc.xsp,
SHADOW_UNDEFINED);
}
} else
set_initial_range(stack_base, stack_base + stack_size);
/* rest is unaddressable by default, and memory walk skips known range */
} else {
Expand Down
69 changes: 47 additions & 22 deletions drmemory/slowpath.c
Expand Up @@ -1556,19 +1556,26 @@ handle_mem_ref_internal(uint flags, app_loc_t *loc, app_pc addr, size_t sz,
uint shadow = shadow_get_byte(&info, addr + i);
ASSERT(shadow <= 3, "internal error");
if (shadow == SHADOW_UNADDRESSABLE) {
if (TEST(MEMREF_PUSHPOP, flags) && !TEST(MEMREF_WRITE, flags)) {
ELOG(0, "ERROR: "PFX" popping unaddressable memory: possible Dr. Memory bug\n",
loc_to_print(loc));
if (TEST(MEMREF_PUSHPOP, flags) &&
(!TEST(MEMREF_WRITE, flags) || BEYOND_TOS_REDZONE_SIZE > 0)) {
ELOG(0, "ERROR: "PFX" popping unaddressable memory: possible Dr. Memory "
"bug\n", loc_to_print(loc));
if (options.pause_at_unaddressable)
wait_for_user("popping unaddressable memory!");
}
/* XXX: stack ranges: right now we assume that a push makes memory
* addressable, but really should check if in stack range
*/
if (TEST(MEMREF_PUSHPOP, flags) && TEST(MEMREF_WRITE, flags)) {
/* Push without stack redzone */
ASSERT(!TEST(MEMREF_MOVS, flags), "internal movs error");
shadow_set_byte(&info, addr + i, TEST(MEMREF_USE_VALUES, flags) ?
comb->dst[memref_idx(flags, i)] : SHADOW_DEFINED);
/* We shouldn't get here for BEYOND_TOS_REDZONE_SIZE > 0 */
if (BEYOND_TOS_REDZONE_SIZE > 0) {
shadow_set_byte(&info, addr + i - BEYOND_TOS_REDZONE_SIZE,
SHADOW_UNDEFINED);
}
} else {
/* We check stack bounds here and cache to avoid
* check_undefined_exceptions having to do it over and over (did
Expand Down Expand Up @@ -1689,11 +1696,19 @@ handle_mem_ref_internal(uint flags, app_loc_t *loc, app_pc addr, size_t sz,
ASSERT(false, "bitlevel NOT YET IMPLEMENTED");
}
if (TEST(MEMREF_PUSHPOP, flags)) {
shadow_set_byte(&info, addr + i, SHADOW_UNADDRESSABLE);
if (BEYOND_TOS_REDZONE_SIZE > 0) {
shadow_set_byte(&info, addr + i, SHADOW_UNDEFINED);
shadow_set_byte(&info, addr + i - BEYOND_TOS_REDZONE_SIZE,
SHADOW_UNADDRESSABLE);
} else
shadow_set_byte(&info, addr + i, SHADOW_UNADDRESSABLE);
}
} else if (!TEST(MEMREF_CHECK_ADDRESSABLE, flags)) {
uint newval;
if (TEST(MEMREF_PUSHPOP, flags)) {
if (TEST(MEMREF_PUSHPOP, flags) &&
(BEYOND_TOS_REDZONE_SIZE == 0 ||
shadow_get_byte(&info, addr + i - BEYOND_TOS_REDZONE_SIZE) !=
SHADOW_UNADDRESSABLE)) {
if (!handled_push_addr) {
/* only call once: don't want to mark push target as unaddr,
* so each byte will trigger here: avoid extra warnings in logs
Expand All @@ -1702,25 +1717,35 @@ handle_mem_ref_internal(uint flags, app_loc_t *loc, app_pc addr, size_t sz,
handle_push_addressable(loc, addr + i, addr, sz, mc);
}
}
if (TEST(MEMREF_MOVS, flags)) {
ASSERT(TEST(MEMREF_USE_VALUES, flags), "internal movs error");
ASSERT(memref_idx(flags, i) == i, "internal movs error");
newval = shadow_get_byte(&info, comb->movs_addr + i);
} else {
newval = TEST(MEMREF_USE_VALUES, flags) ?
comb->dst[memref_idx(flags, i)] : SHADOW_DEFINED;
}
if (shadow == SHADOW_DEFINED_BITLEVEL ||
newval == SHADOW_DEFINED_BITLEVEL) {
ASSERT(false, "bitlevel NOT YET IMPLEMENTED");
if (TEST(MEMREF_PUSHPOP, flags) && TEST(MEMREF_WRITE, flags) &&
BEYOND_TOS_REDZONE_SIZE > 0) {
/* Push with stack redzone */
ASSERT(!TEST(MEMREF_MOVS, flags), "internal movs error");
shadow_set_byte(&info, addr + i, TEST(MEMREF_USE_VALUES, flags) ?
comb->dst[memref_idx(flags, i)] : SHADOW_DEFINED);
shadow_set_byte(&info, addr + i - BEYOND_TOS_REDZONE_SIZE,
SHADOW_UNDEFINED);
} else {
if (shadow == newval) {
LOG(4, "store @"PFX" to "PFX" w/ already-same-val "PIFX"\n",
loc_to_print(loc), addr+i, newval);
if (TEST(MEMREF_MOVS, flags)) {
ASSERT(TEST(MEMREF_USE_VALUES, flags), "internal movs error");
ASSERT(memref_idx(flags, i) == i, "internal movs error");
newval = shadow_get_byte(&info, comb->movs_addr + i);
} else {
LOG(4, "store @"PFX" to "PFX" val="PIFX"\n",
loc_to_print(loc), addr + i, newval);
shadow_set_byte(&info, addr + i, newval);
newval = TEST(MEMREF_USE_VALUES, flags) ?
comb->dst[memref_idx(flags, i)] : SHADOW_DEFINED;
}
if (shadow == SHADOW_DEFINED_BITLEVEL ||
newval == SHADOW_DEFINED_BITLEVEL) {
ASSERT(false, "bitlevel NOT YET IMPLEMENTED");
} else {
if (shadow == newval) {
LOG(4, "store @"PFX" to "PFX" w/ already-same-val "PIFX"\n",
loc_to_print(loc), addr+i, newval);
} else {
LOG(4, "store @"PFX" to "PFX" val="PIFX"\n",
loc_to_print(loc), addr + i, newval);
shadow_set_byte(&info, addr + i, newval);
}
}
}
}
Expand Down
20 changes: 16 additions & 4 deletions drmemory/stack.c
@@ -1,5 +1,5 @@
/* **********************************************************
* Copyright (c) 2011-2015 Google, Inc. All rights reserved.
* Copyright (c) 2011-2017 Google, Inc. All rights reserved.
* Copyright (c) 2008-2010 VMware, Inc. All rights reserved.
* **********************************************************/

Expand Down Expand Up @@ -235,6 +235,12 @@ handle_push_addressable(app_loc_t *loc, app_pc addr, app_pc start_addr,
* pushed as unaddr!
*/
(addr - PAGE_SIZE), start_addr, SHADOW_UNADDRESSABLE);
if (BEYOND_TOS_REDZONE_SIZE > 0) {
size_t redzone_sz = BEYOND_TOS_REDZONE_SIZE;
if (start_addr - BEYOND_TOS_REDZONE_SIZE < stack_start)
redzone_sz = start_addr - stack_start;
shadow_set_range(start_addr - redzone_sz, start_addr, SHADOW_UNDEFINED);
}
check_stack_size_vs_threshold(dr_get_current_drcontext(), stack_size);
} else {
ELOG(0, "ERROR: "PFX" pushing addressable memory: possible Dr. Memory bug\n",
Expand Down Expand Up @@ -303,7 +309,8 @@ handle_esp_adjust(esp_adjust_t type, reg_t val/*either relative delta, or absolu
} else if (type == ESP_ADJUST_AND) {
ptr_int_t newval = mc.xsp & val;
delta = newval - mc.xsp;
LOG(3, "esp adjust and esp="PFX" delta=%d\n", mc.xsp, delta);
LOG(3, "esp adjust and mask="PIFX" esp="PFX" delta="SZFMT"\n",
val, mc.xsp, delta);
if ((delta > options.stack_swap_threshold ||
delta < -options.stack_swap_threshold) &&
check_stack_swap((byte *)mc.xsp, (byte *)newval)) {
Expand All @@ -325,6 +332,10 @@ handle_esp_adjust(esp_adjust_t type, reg_t val/*either relative delta, or absolu
}
if (delta != 0) {
if (sp_action == SP_ADJUST_ACTION_ZERO) {
if (BEYOND_TOS_REDZONE_SIZE > 0) {
/* FIXME i#1205: zeroing conflicts w/ redzone: NYI */
ASSERT_NOT_IMPLEMENTED();
}
if (delta < 0) {
/* zero out newly allocated stack space to avoid stale
* pointers from misleading our leak scan (PR 520916).
Expand All @@ -333,8 +344,9 @@ handle_esp_adjust(esp_adjust_t type, reg_t val/*either relative delta, or absolu
memset((app_pc)(mc.xsp + delta), 0, -delta);
}
} else {
shadow_set_range((app_pc) (delta > 0 ? mc.xsp : (mc.xsp + delta)),
(app_pc) (delta > 0 ? (mc.xsp + delta) : mc.xsp),
app_pc sp = (app_pc)mc.xsp - BEYOND_TOS_REDZONE_SIZE;
shadow_set_range(delta > 0 ? sp : (sp + delta),
delta > 0 ? (sp + delta) : sp,
(delta > 0 ? SHADOW_UNADDRESSABLE :
((sp_action == SP_ADJUST_ACTION_DEFINED) ?
SHADOW_DEFINED : SHADOW_UNDEFINED)));
Expand Down
9 changes: 8 additions & 1 deletion drmemory/stack.h
@@ -1,5 +1,5 @@
/* **********************************************************
* Copyright (c) 2011-2015 Google, Inc. All rights reserved.
* Copyright (c) 2011-2017 Google, Inc. All rights reserved.
* Copyright (c) 2008-2010 VMware, Inc. All rights reserved.
* **********************************************************/

Expand Down Expand Up @@ -46,6 +46,13 @@ extern uint zero_loop_aborts_thresh;
*/
#define TYPICAL_STACK_MIN_SIZE (32*1024)

/* Some ABI's have a stack redzone which we want to mark uninit. */
#if defined(X86_64) && defined(UNIX)
/* NOTE(review): 128 matches the x86-64 System V ABI's 128-byte red zone
 * below the stack pointer, which leaf functions may use without adjusting
 * rsp — confirm this is the intended source of the constant.
 */
# define BEYOND_TOS_REDZONE_SIZE 128
#else
# define BEYOND_TOS_REDZONE_SIZE 0
#endif

/* Indicates what action to take on SP adjustments. Different from esp_adjust_t
* in stack.c, which is about what kind of adjustment is being made by the app.
*/
Expand Down
19 changes: 14 additions & 5 deletions drmemory/stack_x86.c
@@ -1,5 +1,5 @@
/* **********************************************************
* Copyright (c) 2011-2015 Google, Inc. All rights reserved.
* Copyright (c) 2011-2017 Google, Inc. All rights reserved.
* Copyright (c) 2008-2010 VMware, Inc. All rights reserved.
* **********************************************************/

Expand Down Expand Up @@ -309,8 +309,16 @@ instrument_esp_adjust_slowpath(void *drcontext, instrlist_t *bb, instr_t *inst,
PRE(bb, inst, INSTR_CREATE_mov_ld(drcontext, opnd_create_reg(arg_tgt),
arg));
}
} else
PRE(bb, inst, INSTR_CREATE_mov_st(drcontext, opnd_create_reg(arg_tgt), arg));
} else {
if (opnd_is_immed_int(arg)) {
instrlist_insert_mov_immed_ptrsz(drcontext, opnd_get_immed_int(arg),
opnd_create_reg(arg_tgt), bb, inst,
NULL, NULL);
} else {
PRE(bb, inst, INSTR_CREATE_mov_st(drcontext, opnd_create_reg(arg_tgt),
arg));
}
}
if (si1.xchg != REG_NULL) {
/* now put arg into ecx, and saved ecx into dead xchg-w/ reg */
insert_spill_or_restore(drcontext, bb, inst, &si1, true/*save*/, false);
Expand Down Expand Up @@ -687,8 +695,9 @@ generate_shared_esp_fastpath_helper(void *drcontext, instrlist_t *bb,
*/
uint shadow_dword_newmem = (sp_action == SP_ADJUST_ACTION_DEFINED ?
SHADOW_DWORD_DEFINED : SHADOW_DWORD_UNDEFINED);
uint shadow_dqword_newmem = (sp_action == SP_ADJUST_ACTION_DEFINED ?
SHADOW_DQWORD_DEFINED : SHADOW_DQWORD_UNDEFINED);
/* We make this signed so that 0xffffffff will encode for x64 as -1. */
int shadow_dqword_newmem = (sp_action == SP_ADJUST_ACTION_DEFINED ?
SHADOW_DQWORD_DEFINED : SHADOW_DQWORD_UNDEFINED);

push_unaligned = INSTR_CREATE_label(drcontext);
push_aligned = INSTR_CREATE_label(drcontext);
Expand Down
6 changes: 6 additions & 0 deletions drmemory/syscall_linux.c
Expand Up @@ -156,6 +156,12 @@ handle_clone(void *drcontext, dr_mcontext_t *mc)
/* assume that above newsp should stay defined */
shadow_set_range(stack_base, newsp, SHADOW_UNADDRESSABLE);
check_stack_size_vs_threshold(drcontext, stack_size);
if (BEYOND_TOS_REDZONE_SIZE > 0) {
size_t redzone_sz = BEYOND_TOS_REDZONE_SIZE;
if (newsp - BEYOND_TOS_REDZONE_SIZE < stack_base)
redzone_sz = newsp - stack_base;
shadow_set_range(newsp - redzone_sz, newsp, SHADOW_UNDEFINED);
}
}
} else {
LOG(0, "ERROR: cannot find bounds of new thread's stack "PFX"\n",
Expand Down