Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20191111' into staging

target-arm queue:
 * Remove old unassigned_access CPU hook API
 * Remove old ptimer_init_with_bh() API
 * hw/arm/boot: Set NSACR.{CP11, CP10} in dummy SMC setup routine

# gpg: Signature made Mon 11 Nov 2019 13:56:56 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20191111:
  hw/arm/boot: Set NSACR.{CP11, CP10} in dummy SMC setup routine
  Remove unassigned_access CPU hook
  ptimer: Remove old ptimer_init_with_bh() API

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
pm215 committed Nov 11, 2019
2 parents 654efcb + 45c078f commit b626eb0
Showing 7 changed files with 44 additions and 135 deletions.
2 changes: 0 additions & 2 deletions accel/tcg/cputlb.c
@@ -931,8 +931,6 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         cpu_io_recompile(cpu, retaddr);
     }
 
-    cpu->mem_io_access_type = access_type;
-
     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
         locked = true;
3 changes: 3 additions & 0 deletions hw/arm/boot.c
@@ -240,6 +240,9 @@ void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu,
     };
     uint32_t board_setup_blob[] = {
         /* board setup addr */
+        0xee110f51, /* mrc p15, 0, r0, c1, c1, 2  ;read NSACR */
+        0xe3800b03, /* orr r0, #0xc00              ;set CP11, CP10 */
+        0xee010f51, /* mcr p15, 0, r0, c1, c1, 2  ;write NSACR */
         0xe3a00e00 + (mvbar_addr >> 4), /* mov r0, #mvbar_addr */
         0xee0c0f30, /* mcr p15, 0, r0, c12, c0, 1 ;set MVBAR */
         0xee110f11, /* mrc p15, 0, r0, c1 , c1, 0 ;read SCR */
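Aside (not part of the commit): the three new words are A32 encodings; 0xe3800b03 is "orr r0, r0, #0xc00", because a data-processing immediate is imm8 rotated right by twice the 4-bit rotate field -- here 0x03 ror 22 = 0xc00, i.e. NSACR bits 10 and 11 (CP10/CP11), the coprocessor-access bits a kernel entered in non-secure state needs. A tiny stand-alone sanity check of that decoding (not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    /* Decode an A32 data-processing "modified immediate": imm8 rotated
     * right by twice the 4-bit rotate field (bits [11:8] of the insn). */
    static uint32_t a32_dp_imm(uint32_t insn)
    {
        uint32_t imm8 = insn & 0xff;
        uint32_t rot = ((insn >> 8) & 0xf) * 2;
        return rot ? (imm8 >> rot) | (imm8 << (32 - rot)) : imm8;
    }

    int main(void)
    {
        /* 0xe3800b03: orr r0, r0, #imm -- immediate should be NSACR.{CP11,CP10} */
        assert(a32_dp_imm(0xe3800b03) == 0xc00);
        return 0;
    }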
91 changes: 15 additions & 76 deletions hw/core/ptimer.c
@@ -29,7 +29,6 @@ struct ptimer_state
     int64_t last_event;
     int64_t next_event;
     uint8_t policy_mask;
-    QEMUBH *bh;
     QEMUTimer *timer;
     ptimer_cb callback;
     void *callback_opaque;
@@ -46,12 +45,7 @@ struct ptimer_state
 /* Use a bottom-half routine to avoid reentrancy issues. */
 static void ptimer_trigger(ptimer_state *s)
 {
-    if (s->bh) {
-        replay_bh_schedule_event(s->bh);
-    }
-    if (s->callback) {
-        s->callback(s->callback_opaque);
-    }
+    s->callback(s->callback_opaque);
 }
 
 static void ptimer_reload(ptimer_state *s, int delta_adjust)
@@ -296,23 +290,18 @@ uint64_t ptimer_get_count(ptimer_state *s)
 
 void ptimer_set_count(ptimer_state *s, uint64_t count)
 {
-    assert(s->in_transaction || !s->callback);
+    assert(s->in_transaction);
     s->delta = count;
     if (s->enabled) {
-        if (!s->callback) {
-            s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-            ptimer_reload(s, 0);
-        } else {
-            s->need_reload = true;
-        }
+        s->need_reload = true;
     }
 }
 
 void ptimer_run(ptimer_state *s, int oneshot)
 {
     bool was_disabled = !s->enabled;
 
-    assert(s->in_transaction || !s->callback);
+    assert(s->in_transaction);
 
     if (was_disabled && s->period == 0) {
         if (!qtest_enabled()) {
@@ -322,81 +311,59 @@ void ptimer_run(ptimer_state *s, int oneshot)
     }
     s->enabled = oneshot ? 2 : 1;
     if (was_disabled) {
-        if (!s->callback) {
-            s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-            ptimer_reload(s, 0);
-        } else {
-            s->need_reload = true;
-        }
+        s->need_reload = true;
     }
 }
 
 /* Pause a timer. Note that this may cause it to "lose" time, even if it
    is immediately restarted. */
 void ptimer_stop(ptimer_state *s)
 {
-    assert(s->in_transaction || !s->callback);
+    assert(s->in_transaction);
 
     if (!s->enabled)
         return;
 
     s->delta = ptimer_get_count(s);
     timer_del(s->timer);
     s->enabled = 0;
-    if (s->callback) {
-        s->need_reload = false;
-    }
+    s->need_reload = false;
 }
 
 /* Set counter increment interval in nanoseconds. */
 void ptimer_set_period(ptimer_state *s, int64_t period)
 {
-    assert(s->in_transaction || !s->callback);
+    assert(s->in_transaction);
     s->delta = ptimer_get_count(s);
     s->period = period;
     s->period_frac = 0;
     if (s->enabled) {
-        if (!s->callback) {
-            s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-            ptimer_reload(s, 0);
-        } else {
-            s->need_reload = true;
-        }
+        s->need_reload = true;
     }
 }
 
 /* Set counter frequency in Hz. */
 void ptimer_set_freq(ptimer_state *s, uint32_t freq)
 {
-    assert(s->in_transaction || !s->callback);
+    assert(s->in_transaction);
     s->delta = ptimer_get_count(s);
     s->period = 1000000000ll / freq;
     s->period_frac = (1000000000ll << 32) / freq;
     if (s->enabled) {
-        if (!s->callback) {
-            s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-            ptimer_reload(s, 0);
-        } else {
-            s->need_reload = true;
-        }
+        s->need_reload = true;
     }
 }
 
 /* Set the initial countdown value. If reload is nonzero then also set
    count = limit. */
 void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload)
 {
-    assert(s->in_transaction || !s->callback);
+    assert(s->in_transaction);
     s->limit = limit;
     if (reload)
         s->delta = limit;
     if (s->enabled && reload) {
-        if (!s->callback) {
-            s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-            ptimer_reload(s, 0);
-        } else {
-            s->need_reload = true;
-        }
+        s->need_reload = true;
    }
 }
 
@@ -407,7 +374,7 @@ uint64_t ptimer_get_limit(ptimer_state *s)
 
 void ptimer_transaction_begin(ptimer_state *s)
 {
-    assert(!s->in_transaction || !s->callback);
+    assert(!s->in_transaction);
     s->in_transaction = true;
     s->need_reload = false;
 }
@@ -448,37 +415,12 @@ const VMStateDescription vmstate_ptimer = {
     }
 };
 
-ptimer_state *ptimer_init_with_bh(QEMUBH *bh, uint8_t policy_mask)
-{
-    ptimer_state *s;
-
-    s = (ptimer_state *)g_malloc0(sizeof(ptimer_state));
-    s->bh = bh;
-    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ptimer_tick, s);
-    s->policy_mask = policy_mask;
-
-    /*
-     * These two policies are incompatible -- trigger-on-decrement implies
-     * a timer trigger when the count becomes 0, but no-immediate-trigger
-     * implies a trigger when the count stops being 0.
-     */
-    assert(!((policy_mask & PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT) &&
-             (policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)));
-    return s;
-}
-
 ptimer_state *ptimer_init(ptimer_cb callback, void *callback_opaque,
                           uint8_t policy_mask)
 {
     ptimer_state *s;
 
-    /*
-     * The callback function is mandatory; so we use it to distinguish
-     * old-style QEMUBH ptimers from new transaction API ptimers.
-     * (ptimer_init_with_bh() allows a NULL bh pointer and at least
-     * one device (digic-timer) passes NULL, so it's not the case
-     * that either s->bh != NULL or s->callback != NULL.)
-     */
+    /* The callback function is mandatory. */
     assert(callback);
 
     s = g_new0(ptimer_state, 1);
@@ -499,9 +441,6 @@ ptimer_state *ptimer_init(ptimer_cb callback, void *callback_opaque,
 
 void ptimer_free(ptimer_state *s)
 {
-    if (s->bh) {
-        qemu_bh_delete(s->bh);
-    }
     timer_free(s->timer);
     g_free(s);
 }
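Aside (not part of the commit): with ptimer_init_with_bh() gone, every ptimer user goes through the callback-plus-transaction API that this file keeps. A rough sketch of a device built against that API follows; the DemoTimerState names are invented for illustration, and the exact policy constants should be checked against include/hw/ptimer.h.

    #include "qemu/osdep.h"
    #include "hw/ptimer.h"

    /* Hypothetical device state, for illustration only */
    typedef struct DemoTimerState {
        ptimer_state *ptimer;
    } DemoTimerState;

    /* ptimer_cb: called when the countdown reaches zero */
    static void demo_timer_hit(void *opaque)
    {
        DemoTimerState *s = opaque;
        /* raise an interrupt, update status bits, ... */
        (void)s;
    }

    static void demo_timer_realize(DemoTimerState *s)
    {
        s->ptimer = ptimer_init(demo_timer_hit, s, PTIMER_POLICY_DEFAULT);

        /* All reconfiguration happens inside a transaction; the setters
         * only mark need_reload, and the actual reload is deferred to
         * ptimer_transaction_commit(). */
        ptimer_transaction_begin(s->ptimer);
        ptimer_set_freq(s->ptimer, 1000000);  /* 1 MHz */
        ptimer_set_limit(s->ptimer, 1000, 1); /* reload value, count = limit */
        ptimer_run(s->ptimer, 0);             /* periodic, not one-shot */
        ptimer_transaction_commit(s->ptimer);
    }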
7 changes: 5 additions & 2 deletions include/hw/arm/boot.h
@@ -107,9 +107,12 @@ struct arm_boot_info {
     void (*write_board_setup)(ARMCPU *cpu,
                               const struct arm_boot_info *info);
 
-    /* If set, the board specific loader/setup blob will be run from secure
+    /*
+     * If set, the board specific loader/setup blob will be run from secure
      * mode, regardless of secure_boot. The blob becomes responsible for
-     * changing to non-secure state if implementing a non-secure boot
+     * changing to non-secure state if implementing a non-secure boot,
+     * including setting up EL3/Secure registers such as the NSACR as
+     * required by the Linux booting ABI before the switch to non-secure.
      */
     bool secure_board_setup;
 
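Aside (not part of the commit): a board that wants this dummy SMC setup typically points write_board_setup at arm_write_secure_board_setup_dummy_smc() and sets secure_board_setup, roughly as sketched below; the board name, addresses and the commented-out kernel-load call are placeholders, not a real machine.

    #define DEMO_BOARD_SETUP_ADDR  0x8000fff0 /* invented address for the blob */
    #define DEMO_MVBAR_ADDR        0x8000ff00 /* invented monitor vector base */

    static void demo_write_board_setup(ARMCPU *cpu,
                                       const struct arm_boot_info *info)
    {
        arm_write_secure_board_setup_dummy_smc(cpu, info, DEMO_MVBAR_ADDR);
    }

    static struct arm_boot_info demo_binfo;

    static void demo_board_init(MachineState *machine)
    {
        /* ... create CPUs, memory, devices ... */
        demo_binfo.board_setup_addr = DEMO_BOARD_SETUP_ADDR;
        demo_binfo.write_board_setup = demo_write_board_setup;
        demo_binfo.secure_board_setup = true;
        /* then hand demo_binfo to arm_load_kernel() as usual */
    }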
24 changes: 0 additions & 24 deletions include/hw/core/cpu.h
@@ -72,10 +72,6 @@ typedef enum MMUAccessType {
 
 typedef struct CPUWatchpoint CPUWatchpoint;
 
-typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
-                                    bool is_write, bool is_exec, int opaque,
-                                    unsigned size);
-
 struct TranslationBlock;
 
 /**
@@ -87,8 +83,6 @@ struct TranslationBlock;
  * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
  * @has_work: Callback for checking if there is work to do.
  * @do_interrupt: Callback for interrupt handling.
- * @do_unassigned_access: Callback for unassigned access handling.
- * (this is deprecated: new targets should use do_transaction_failed instead)
  * @do_unaligned_access: Callback for unaligned access handling, if
  *                       the target defines #TARGET_ALIGNED_ONLY.
  * @do_transaction_failed: Callback for handling failed memory transactions
@@ -175,7 +169,6 @@ typedef struct CPUClass {
     int reset_dump_flags;
     bool (*has_work)(CPUState *cpu);
     void (*do_interrupt)(CPUState *cpu);
-    CPUUnassignedAccess do_unassigned_access;
     void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);
@@ -415,12 +408,6 @@ struct CPUState {
      * we store some rarely used information in the CPU context.
      */
     uintptr_t mem_io_pc;
-    /*
-     * This is only needed for the legacy cpu_unassigned_access() hook;
-     * when all targets using it have been converted to use
-     * cpu_transaction_failed() instead it can be removed.
-     */
-    MMUAccessType mem_io_access_type;
 
     int kvm_fd;
     struct KVMState *kvm_state;
@@ -896,17 +883,6 @@ void cpu_interrupt(CPUState *cpu, int mask);
 #ifdef NEED_CPU_H
 
 #ifdef CONFIG_SOFTMMU
-static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
-                                         bool is_write, bool is_exec,
-                                         int opaque, unsigned size)
-{
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
-    if (cc->do_unassigned_access) {
-        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
-    }
-}
-
 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                         MMUAccessType access_type,
                                         int mmu_idx, uintptr_t retaddr)
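Aside (not part of the commit): targets that used to implement do_unassigned_access are expected to report bad accesses through CPUClass::do_transaction_failed instead. A rough sketch of such a hook, assuming the do_transaction_failed signature the header had at this point in history (check include/hw/core/cpu.h for the authoritative prototype); the demo_* names are hypothetical:

    /* Hypothetical target hook: turn a failed bus access into a guest fault. */
    static void demo_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                               vaddr addr, unsigned size,
                                               MMUAccessType access_type,
                                               int mmu_idx, MemTxAttrs attrs,
                                               MemTxResult response,
                                               uintptr_t retaddr)
    {
        /* Resynchronise CPU state to the faulting instruction, then record
         * the fault address/status and raise the architectural abort. */
        cpu_restore_state(cs, retaddr, true);
        /* ... set target-specific fault registers here ... */
        cpu_loop_exit(cs);
    }

    static void demo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);
        cc->do_transaction_failed = demo_cpu_do_transaction_failed;
    }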
