target/riscv: Fix format for comments
Fix the format of multi-line comments.
Add spaces around single-line comments (after "/*" and before "*/").

Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
Acked-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-Id: <20230405085813.40643-4-liweiwei@iscas.ac.cn>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Weiwei Li authored and alistair23 committed May 5, 2023
1 parent c45eff3 commit 3b57254
Showing 11 changed files with 151 additions and 104 deletions.
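
For readers skimming the diff, here is a small stand-alone fragment (written for this page, not taken from the QEMU tree) contrasting the comment shapes before and after the change:

/* Old multi-line style being removed: the opening line carries text
   and continuation lines have no leading asterisk */

/*
 * New multi-line style: the opening line is bare and every continuation
 * line starts with " * ".
 */

/* old single-line comment, no space before the closer*/
/* new single-line comment, spaces after the opener and before the closer */
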
3 changes: 2 additions & 1 deletion target/riscv/arch_dump.c
@@ -1,4 +1,5 @@
/* Support for writing ELF notes for RISC-V architectures
/*
* Support for writing ELF notes for RISC-V architectures
*
* Copyright (C) 2021 Huawei Technologies Co., Ltd
*
2 changes: 1 addition & 1 deletion target/riscv/cpu.c
@@ -56,7 +56,7 @@ struct isa_ext_data {
#define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \
{#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/**
/*
* Here are the ordering rules of extension naming defined by RISC-V
* specification :
* 1. All extensions should be separated from other multi-letter extensions
26 changes: 14 additions & 12 deletions target/riscv/cpu.h
@@ -124,7 +124,7 @@ FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
typedef struct PMUCTRState {
/* Current value of a counter */
target_ulong mhpmcounter_val;
/* Current value of a counter in RV32*/
/* Current value of a counter in RV32 */
target_ulong mhpmcounterh_val;
/* Snapshot values of counter */
target_ulong mhpmcounter_prev;
@@ -280,8 +280,10 @@ struct CPUArchState {
target_ulong satp_hs;
uint64_t mstatus_hs;

/* Signals whether the current exception occurred with two-stage address
translation active. */
/*
* Signals whether the current exception occurred with two-stage address
* translation active.
*/
bool two_stage_lookup;
/*
* Signals whether the current exception occurred while doing two-stage
@@ -297,10 +299,10 @@ struct CPUArchState {
/* PMU counter state */
PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

/* PMU event selector configured values. First three are unused*/
/* PMU event selector configured values. First three are unused */
target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

/* PMU event selector configured values for RV32*/
/* PMU event selector configured values for RV32 */
target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

target_ulong sscratch;
@@ -389,17 +391,17 @@ struct CPUArchState {

OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU)

/**
/*
* RISCVCPUClass:
* @parent_realize: The parent class' realize handler.
* @parent_phases: The parent class' reset phase handlers.
*
* A RISCV CPU model.
*/
struct RISCVCPUClass {
/*< private >*/
/* < private > */
CPUClass parent_class;
/*< public >*/
/* < public > */
DeviceRealize parent_realize;
ResettablePhases parent_phases;
};
@@ -530,16 +532,16 @@ struct RISCVCPUConfig {

typedef struct RISCVCPUConfig RISCVCPUConfig;

/**
/*
* RISCVCPU:
* @env: #CPURISCVState
*
* A RISCV CPU.
*/
struct ArchCPU {
/*< private >*/
/* < private > */
CPUState parent_obj;
/*< public >*/
/* < public > */
CPUNegativeOffsetState neg;
CPURISCVState env;

@@ -813,7 +815,7 @@ enum {
CSR_TABLE_SIZE = 0x1000
};

/**
/*
* The event id are encoded based on the encoding specified in the
* SBI specification v0.3
*/
2 changes: 1 addition & 1 deletion target/riscv/cpu_bits.h
@@ -731,7 +731,7 @@ typedef enum RISCVException {
#define MIE_SSIE (1 << IRQ_S_SOFT)
#define MIE_USIE (1 << IRQ_U_SOFT)

/* General PointerMasking CSR bits*/
/* General PointerMasking CSR bits */
#define PM_ENABLE 0x00000001ULL
#define PM_CURRENT 0x00000002ULL
#define PM_INSN 0x00000004ULL
57 changes: 38 additions & 19 deletions target/riscv/cpu_helper.c
@@ -717,7 +717,8 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot,
return TRANSLATE_SUCCESS;
}

/* get_physical_address - get the physical address for this virtual address
/*
* get_physical_address - get the physical address for this virtual address
*
* Do a page table walk to obtain the physical address corresponding to a
* virtual address. Returns 0 if the translation was successful
@@ -745,9 +746,11 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
bool first_stage, bool two_stage,
bool is_debug)
{
/* NOTE: the env->pc value visible here will not be
/*
* NOTE: the env->pc value visible here will not be
* correct, but the value visible to the exception handler
* (riscv_cpu_do_interrupt) is correct */
* (riscv_cpu_do_interrupt) is correct
*/
MemTxResult res;
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
@@ -767,8 +770,10 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
use_background = true;
}

/* MPRV does not affect the virtual-machine load/store
instructions, HLV, HLVX, and HSV. */
/*
* MPRV does not affect the virtual-machine load/store
* instructions, HLV, HLVX, and HSV.
*/
if (riscv_cpu_two_stage_lookup(mmu_idx)) {
mode = get_field(env->hstatus, HSTATUS_SPVP);
} else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
@@ -778,8 +783,10 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
}

if (first_stage == false) {
/* We are in stage 2 translation, this is similar to stage 1. */
/* Stage 2 is always taken as U-mode */
/*
* We are in stage 2 translation, this is similar to stage 1.
* Stage 2 is always taken as U-mode
*/
mode = PRV_U;
}

@@ -1007,8 +1014,10 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
target_ulong *pte_pa =
qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
/* MTTCG is not enabled on oversized TCG guests so
* page table updates do not need to be atomic */
/*
* MTTCG is not enabled on oversized TCG guests so
* page table updates do not need to be atomic
*/
*pte_pa = pte = updated_pte;
#else
target_ulong old_pte =
@@ -1020,14 +1029,18 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
}
#endif
} else {
/* misconfigured PTE in ROM (AD bits are not preset) or
* PTE is in IO space and can't be updated atomically */
/*
* misconfigured PTE in ROM (AD bits are not preset) or
* PTE is in IO space and can't be updated atomically
*/
return TRANSLATE_FAIL;
}
}

/* for superpage mappings, make a fake leaf PTE for the TLB's
benefit. */
/*
* for superpage mappings, make a fake leaf PTE for the TLB's
* benefit.
*/
target_ulong vpn = addr >> PGSHIFT;

if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
@@ -1049,8 +1062,10 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
if (pte & PTE_X) {
*prot |= PAGE_EXEC;
}
/* add write permission on stores or if the page is already dirty,
so that we TLB miss on later writes to update the dirty bit */
/*
* add write permission on stores or if the page is already dirty,
* so that we TLB miss on later writes to update the dirty bit
*/
if ((pte & PTE_W) &&
(access_type == MMU_DATA_STORE || (pte & PTE_D))) {
*prot |= PAGE_WRITE;
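
As a side note for this page (not part of the commit), the write-permission rule described in the comment above can be condensed into a stand-alone sketch; the PTE_* and PAGE_WRITE constants below are local stand-ins chosen to mirror the privileged-spec encodings rather than QEMU's headers:

#include <stdbool.h>
#include <stdio.h>

#define PTE_W      0x004   /* page is writable */
#define PTE_D      0x080   /* page is dirty */
#define PAGE_WRITE 0x2     /* stand-in for the TLB write-permission flag */

/*
 * Grant TLB write permission only for stores or already-dirty pages, so the
 * first write to a clean page still takes a TLB miss and can set the dirty bit.
 */
static int write_prot(unsigned pte, bool is_store)
{
    return ((pte & PTE_W) && (is_store || (pte & PTE_D))) ? PAGE_WRITE : 0;
}

int main(void)
{
    printf("%d\n", write_prot(PTE_W, false));         /* 0: clean page, load  */
    printf("%d\n", write_prot(PTE_W, true));          /* 2: clean page, store */
    printf("%d\n", write_prot(PTE_W | PTE_D, false)); /* 2: dirty page, load  */
    return 0;
}
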
@@ -1235,8 +1250,10 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, address, access_type, mmu_idx);

/* MPRV does not affect the virtual-machine load/store
instructions, HLV, HLVX, and HSV. */
/*
* MPRV does not affect the virtual-machine load/store
* instructions, HLV, HLVX, and HSV.
*/
if (riscv_cpu_two_stage_lookup(mmu_idx)) {
mode = get_field(env->hstatus, HSTATUS_SPVP);
} else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
@@ -1577,7 +1594,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool write_gva = false;
uint64_t s;

/* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
/*
* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
* so we mask off the MSB and separate into trap type and cause.
*/
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
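
A minimal sketch (illustrative only, not QEMU code) of the masking described above: the MSB of cs->exception_index marks an asynchronous trap and the remaining bits are the cause number; INT_FLAG below is a local stand-in for QEMU's RISCV_EXCP_INT_FLAG:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INT_FLAG 0x80000000u  /* assumed interrupt flag in the MSB */

int main(void)
{
    uint32_t exception_index = INT_FLAG | 7;          /* e.g. a timer interrupt */
    bool async = (exception_index & INT_FLAG) != 0;   /* trap type */
    uint32_t cause = exception_index & ~INT_FLAG;     /* value reported in mcause */
    printf("async=%d cause=%u\n", async, cause);      /* prints: async=1 cause=7 */
    return 0;
}
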
@@ -1754,7 +1772,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
riscv_cpu_set_mode(env, PRV_M);
}

/* NOTE: it is not necessary to yield load reservations here. It is only
/*
* NOTE: it is not necessary to yield load reservations here. It is only
* necessary for an SC from "another hart" to cause a load reservation
* to be yielded. Refer to the memory consistency model section of the
* RISC-V ISA Specification.
6 changes: 3 additions & 3 deletions target/riscv/csr.c
@@ -189,7 +189,7 @@ static RISCVException mctr(CPURISCVState *env, int csrno)
}
ctr_index = csrno - base_csrno;
if (!pmu_num || ctr_index >= pmu_num) {
/* The PMU is not enabled or counter is out of range*/
/* The PMU is not enabled or counter is out of range */
return RISCV_EXCP_ILLEGAL_INST;
}

@@ -877,7 +877,7 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
counter.mhpmcounter_val;

if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
/**
/*
* Counter should not increment if inhibit bit is set. We can't really
* stop the icount counting. Just return the counter value written by
* the supervisor to indicate that counter was not incremented.
@@ -891,7 +891,7 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
}
}

/**
/*
* The kernel computes the perf delta by subtracting the current value from
* the value it initialized previously (ctr_val).
*/
8 changes: 5 additions & 3 deletions target/riscv/insn_trans/trans_rvv.c.inc
@@ -3136,9 +3136,11 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
return false;
}

/* vmsbf.m set-before-first mask bit */
/* vmsif.m set-includ-first mask bit */
/* vmsof.m set-only-first mask bit */
/*
* vmsbf.m set-before-first mask bit
* vmsif.m set-including-first mask bit
* vmsof.m set-only-first mask bit
*/
#define GEN_M_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
{ \
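
To make the three operations concrete, here is a throwaway sketch (written for this page, not QEMU code) that computes them on one example mask with plain integer tricks, assuming the mask is non-zero:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t v     = 0x58;                /* source mask      01011000 */
    uint8_t vmsof = v & (uint8_t)-v;     /* only-first:      00001000 */
    uint8_t vmsif = v ^ (v - 1);         /* including-first: 00001111 */
    uint8_t vmsbf = vmsif >> 1;          /* before-first:    00000111 */
    printf("vmsbf=0x%02x vmsif=0x%02x vmsof=0x%02x\n", vmsbf, vmsif, vmsof);
    return 0;
}
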
41 changes: 22 additions & 19 deletions target/riscv/pmp.c
@@ -132,15 +132,15 @@ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
{
/*
aaaa...aaa0 8-byte NAPOT range
aaaa...aa01 16-byte NAPOT range
aaaa...a011 32-byte NAPOT range
...
aa01...1111 2^XLEN-byte NAPOT range
a011...1111 2^(XLEN+1)-byte NAPOT range
0111...1111 2^(XLEN+2)-byte NAPOT range
1111...1111 Reserved
*/
* aaaa...aaa0 8-byte NAPOT range
* aaaa...aa01 16-byte NAPOT range
* aaaa...a011 32-byte NAPOT range
* ...
* aa01...1111 2^XLEN-byte NAPOT range
* a011...1111 2^(XLEN+1)-byte NAPOT range
* 0111...1111 2^(XLEN+2)-byte NAPOT range
* 1111...1111 Reserved
*/
a = (a << 2) | 0x3;
*sa = a & (a + 1);
*ea = a | (a + 1);
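
The decode above can be exercised in isolation; the following stand-alone sketch (illustrative only, using uint64_t in place of target_ulong) reproduces the bit tricks and prints the range for a pmpaddr value that encodes a 16-byte region at 0x80000000:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void decode_napot(uint64_t a, uint64_t *sa, uint64_t *ea)
{
    a = (a << 2) | 0x3;   /* pmpaddr holds address bits [..:2]; refill the low bits */
    *sa = a & (a + 1);    /* clear the trailing ones: start address */
    *ea = a | (a + 1);    /* set the trailing ones: end address */
}

int main(void)
{
    uint64_t sa, ea;
    /* aaaa...aa01 pattern: one low 1 bit selects a 16-byte NAPOT range */
    decode_napot((0x80000000ull >> 2) | 0x1, &sa, &ea);
    printf("sa=0x%" PRIx64 " ea=0x%" PRIx64 "\n", sa, ea);  /* 0x80000000..0x8000000f */
    return 0;
}
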
@@ -205,7 +205,8 @@ void pmp_update_rule_nums(CPURISCVState *env)
}
}

/* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea'
/*
* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea'
* end address values.
* This function is called relatively infrequently whereas the check that
* an address is within a pmp rule is called often, so optimise that one
@@ -329,8 +330,10 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
pmp_size = size;
}

/* 1.10 draft priv spec states there is an implicit order
from low to high */
/*
* 1.10 draft priv spec states there is an implicit order
* from low to high
*/
for (i = 0; i < MAX_RISCV_PMPS; i++) {
s = pmp_is_in_range(env, i, addr);
e = pmp_is_in_range(env, i, addr + pmp_size - 1);
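
A simplified stand-alone sketch of that priority rule (hypothetical data and helper, not QEMU code): entries are scanned from index 0 upward and the lowest-numbered entry whose range covers the whole access decides the result, even if a later entry also matches:

#include <stdbool.h>
#include <stdio.h>

#define NUM_PMPS 4

typedef struct {
    unsigned long sa, ea;  /* inclusive start/end of the decoded region */
    bool allow;            /* outcome if this entry matches */
} PmpRule;

static int first_matching_rule(const PmpRule *r, unsigned long addr,
                               unsigned long size)
{
    for (int i = 0; i < NUM_PMPS; i++) {
        if (addr >= r[i].sa && addr + size - 1 <= r[i].ea) {
            return i;  /* lowest index wins */
        }
    }
    return -1;  /* no match: fall back to the default policy */
}

int main(void)
{
    PmpRule rules[NUM_PMPS] = {
        { 0x80000000ul, 0x80000fff, false },  /* entry 0 covers the page */
        { 0x80000000ul, 0x8fffffff, true  },  /* entry 1 also covers it  */
        { 0, 0, false },
        { 0, 0, false },
    };
    /* entry 0 is reported even though entry 1 would allow the access */
    printf("matching entry = %d\n", first_matching_rule(rules, 0x80000100ul, 4));
    return 0;
}
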
@@ -609,13 +612,13 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
return TARGET_PAGE_SIZE;
} else {
/*
* At this point we have a tlb_size that is the smallest possible size
* That fits within a TARGET_PAGE_SIZE and the PMP region.
*
* If the size is less then TARGET_PAGE_SIZE we drop the size to 1.
* This means the result isn't cached in the TLB and is only used for
* a single translation.
*/
* At this point we have a tlb_size that is the smallest possible size
* That fits within a TARGET_PAGE_SIZE and the PMP region.
*
* If the size is less then TARGET_PAGE_SIZE we drop the size to 1.
* This means the result isn't cached in the TLB and is only used for
* a single translation.
*/
return 1;
}
}
8 changes: 4 additions & 4 deletions target/riscv/sbi_ecall_interface.h
@@ -28,7 +28,7 @@
#define SBI_EXT_RFENCE 0x52464E43
#define SBI_EXT_HSM 0x48534D

/* SBI function IDs for BASE extension*/
/* SBI function IDs for BASE extension */
#define SBI_EXT_BASE_GET_SPEC_VERSION 0x0
#define SBI_EXT_BASE_GET_IMP_ID 0x1
#define SBI_EXT_BASE_GET_IMP_VERSION 0x2
@@ -37,13 +37,13 @@
#define SBI_EXT_BASE_GET_MARCHID 0x5
#define SBI_EXT_BASE_GET_MIMPID 0x6

/* SBI function IDs for TIME extension*/
/* SBI function IDs for TIME extension */
#define SBI_EXT_TIME_SET_TIMER 0x0

/* SBI function IDs for IPI extension*/
/* SBI function IDs for IPI extension */
#define SBI_EXT_IPI_SEND_IPI 0x0

/* SBI function IDs for RFENCE extension*/
/* SBI function IDs for RFENCE extension */
#define SBI_EXT_RFENCE_REMOTE_FENCE_I 0x0
#define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA 0x1
#define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID 0x2