Skip to content

Commit

Permalink
Merge pull request #40 from WorksButNotTested/arm64-crash
Browse files Browse the repository at this point in the history
Fix
  • Loading branch information
vanhauser-thc committed Oct 17, 2022
2 parents 806769a + 98029fc commit fa07ebf
Show file tree
Hide file tree
Showing 4 changed files with 22 additions and 43 deletions.
3 changes: 0 additions & 3 deletions accel/tcg/cpu-exec.c
Original file line number Diff line number Diff line change
Expand Up @@ -656,9 +656,6 @@ void afl_forkserver(CPUState *cpu) {

}

// Flush translation cache just before fork server starts.
tb_flush_sync();

/* All right, let's await orders... */

while (1) {
Expand Down
38 changes: 0 additions & 38 deletions accel/tcg/translate-all.c
Original file line number Diff line number Diff line change
Expand Up @@ -1537,44 +1537,6 @@ void tb_flush(CPUState *cpu)
}
}

/*
* If we call tb_flush, from inside cpu_exec, then it will queue do_tb_flush to
* run asynchronously. Since we wish to do this when we start the forkserver to
* flush any translated blocks which may have been translated before the
* configuration from environment variables has been parsed, this will cause the
* flush to be deferred and instead performed after the fork server is running
* resulting in the flush occurring repeatedly rather than just the once, with
* the obvious resulting performance overhead.
*
* However, we know that the fork server should be initialized when the target
* application has only a single thread (since the fork syscall will only clone
* the calling thread into the child process). Therefore, we don't need any
* synchronization with respect to any other VCPUs and can therefore perform the
* flush synchronously instead.
*/
/*
 * Synchronously flush the translation block cache.
 *
 * Unlike tb_flush(), which queues do_tb_flush() to run asynchronously on a
 * vCPU, this performs the flush inline. It is only safe when exactly one
 * vCPU exists (e.g. just before the AFL fork server starts, while the
 * target is still single-threaded), so it bails out with a warning
 * otherwise.
 */
void tb_flush_sync(void)
{
    CPUState *cpu = NULL;
    size_t num_cpus = 0;

    /* Nothing to flush if TCG is not the active accelerator. */
    if (!tcg_enabled()) {
        return;
    }

    /* Count the vCPUs; a synchronous flush is only safe with exactly one. */
    CPU_FOREACH(cpu) {
        num_cpus++;
    }

    if (num_cpus != 1) {
        /* Fix: terminate the warning with a newline so it is not glued to
         * whatever is written to stderr next. */
        fprintf(stderr, "Warning: More than one VCPU when attempting to flush "
            "translation block cache. Skipping since we can't do it synchronously.\n");
        return;
    }

    /*
     * Read the flush generation counter with the same ordering tb_flush()
     * uses, then run the flush directly.
     *
     * NOTE(review): after CPU_FOREACH completes, cpu has advanced past the
     * last list element — presumably do_tb_flush() tolerates that argument
     * here; verify against its definition before relying on it.
     */
    unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
    do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
}

/*
* Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
* so in order to prevent bit rot we compile them unconditionally in user-mode,
Expand Down
23 changes: 22 additions & 1 deletion accel/tcg/translator.c
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,28 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
}

if (db->pc_next == afl_entry_point) {
afl_setup();
static bool first = true;
/*
* We guard this section since we flush the translation cache after
* we load the configuration, which in turn means we will need to
* re-translate our block. If we were to perform this flush every
* time (rather than just when our configuration is first loaded),
* we would just end up translating this block repeatedly.
*/
if (first) {
afl_setup();
/*
* We flush the translation cache here since we may already have
* translated some blocks and included instrumentation in them
* before we have processed the configuration from the
* environment variables which configures which ranges to
* include and exclude. Therefore we may have some blocks in our
* cache which are incorrectly instrumented and cause some
* fuzzing stability or performance problems.
*/
tb_flush(cpu);
first = false;
}
gen_helper_afl_entry_routine(cpu_env);
}

Expand Down
1 change: 0 additions & 1 deletion include/exec/exec-all.h
Original file line number Diff line number Diff line change
Expand Up @@ -533,7 +533,6 @@ void tb_invalidate_phys_range(target_ulong start, target_ulong end);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_flush_sync(void);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
Expand Down

0 comments on commit fa07ebf

Please sign in to comment.