diff --git a/CMakeLists.txt b/CMakeLists.txt
index 55024683..6cc00c17 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,5 +1,5 @@
 # RevCPU Top-Level CMake
-# Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+# Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 # All Rights Reserved
 # contact@tactcomplabs.com
 # See LICENSE in the top level directory for licensing details
@@ -83,7 +83,7 @@ else()
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wmissing-format-attribute -Wsuggest-final-methods -Wsuggest-final-types -Wvolatile")
 endif()

-set(CMAKE_CXX_FLAGS "-std=c++17 ${FP_MODE_FLAG} -O2 -Wall -Wextra -Wsuggest-override -Wmissing-noreturn -Wvla -Wuninitialized -Wdouble-promotion -Wsign-conversion -Wno-unused-parameter -Wno-deprecated-declarations -Wno-macro-redefined -Werror ${CMAKE_CXX_FLAGS} -I./ ${LDFLAGS} ${REVCPU_COMPILER_MACROS}")
+set(CMAKE_CXX_FLAGS "-std=c++17 ${FP_MODE_FLAG} -O2 -Wall -Wextra -Wsuggest-override -Wmissing-noreturn -Wvla -Wuninitialized -Wdouble-promotion -Wsign-conversion -Wconversion -Wno-unused-parameter -Wno-deprecated-declarations -Wno-macro-redefined -Werror ${CMAKE_CXX_FLAGS} -I./ ${LDFLAGS} ${REVCPU_COMPILER_MACROS}")
 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -Wall ${REVCPU_COMPILER_MACROS}")
 set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -Wall ${REVCPU_COMPILER_MACROS}")
diff --git a/common/include/RevCommon.h b/common/include/RevCommon.h
index 64021692..7f98bda4 100644
--- a/common/include/RevCommon.h
+++ b/common/include/RevCommon.h
@@ -1,7 +1,7 @@
 //
 // _Rev_Common_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -62,7 +62,7 @@ constexpr T&& make_dependent( T&& x ) {
 template
 constexpr auto ZeroExt( T val, int bits ) {
   using UT = std::make_unsigned_t;
-  return UT( val & ~( UT( ~UT{} ) << bits ) );
+  return UT( UT( val ) & UT( ~( UT( ~UT{} ) << bits ) ) );
 }

 /// Sign-extend value of bits size
diff --git a/include/RevCPU.h b/include/RevCPU.h
index 6d9ec0f2..9dba4b10 100644
--- a/include/RevCPU.h
+++ b/include/RevCPU.h
@@ -1,7 +1,7 @@
 //
 // _RevCPU_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -266,7 +266,7 @@ class RevCPU : public SST::Component {
   uint8_t PrivTag{}; ///< RevCPU: private tag locator
   // uint32_t LToken{}; ///< RevCPU: token identifier for PAN Test
-  int address{ -1 }; ///< RevCPU: local network address
+  int64_t address{ -1 }; ///< RevCPU: local network address
   uint32_t fault_width{}; ///< RevCPU: the width (in bits) for target faults
   // int64_t fault_range{}; ///< RevCPU: the range of cycles to inject the fault
diff --git a/include/RevCSR.h b/include/RevCSR.h
index ecae9228..ae772271 100644
--- a/include/RevCSR.h
+++ b/include/RevCSR.h
@@ -1,7 +1,7 @@
 //
 // _RevCSR_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -464,29 +464,29 @@ struct RevCSR : RevZicntr {
   };

   ///< RevCSR: Register a custom getter for a particular CSR register
-  void SetCSRGetter( uint16_t csr, std::function handler ) {
+  void SetCSRGetter( uint32_t csr, std::function handler ) {
     handler ? (void) Getter.insert_or_assign( csr, std::move( handler ) ) : (void) Getter.erase( csr );
   }

   ///< RevCSR: Register a custom setter for a particular CSR register
-  void SetCSRSetter( uint16_t csr, std::function handler ) {
+  void SetCSRSetter( uint32_t csr, std::function handler ) {
     handler ? (void) Setter.insert_or_assign( csr, std::move( handler ) ) : (void) Setter.erase( csr );
   }

   ///< RevCSR: Get the custom getter for a particular CSR register
   // If no custom getter exists for this RevCSR, look for one in the owning RevCore
-  template
-  auto GetCSRGetter( CSR csr ) const {
+  template
+  auto GetCSRGetter( uint32_t csr ) const {
     auto it = Getter.find( csr );
-    return it != Getter.end() && it->second ? it->second : make_dependent( GetCore() )->GetCSRGetter( csr );
+    return it != Getter.end() && it->second ? it->second : make_dependent( GetCore() )->GetCSRGetter( csr );
   }

   ///< RevCSR: Get the custom setter for a particular CSR register
   // If no custom setter exists for this RevCSR, look for one in the owning RevCore
-  template
-  auto GetCSRSetter( CSR csr ) {
+  template
+  auto GetCSRSetter( uint32_t csr ) {
     auto it = Setter.find( csr );
-    return it != Setter.end() && it->second ? it->second : make_dependent( GetCore() )->GetCSRSetter( csr );
+    return it != Setter.end() && it->second ? it->second : make_dependent( GetCore() )->GetCSRSetter( csr );
   }

   /// Get the Floating-Point Rounding Mode
@@ -497,7 +497,10 @@ struct RevCSR : RevZicntr {

   /// Get a CSR register
   template
-  XLEN GetCSR( uint16_t csr ) const {
+  XLEN GetCSR( uint32_t csr ) const {
+    // Check for valid CSR register
+    if( csr >= 0x1000 )
+      fatal( "Invalid CSR register at PC = 0x%" PRIx64 "\n" );

     // If a custom Getter exists, use it
     auto getter = GetCSRGetter( make_dependent( csr ) );
@@ -507,9 +510,9 @@ struct RevCSR : RevZicntr {
     // clang-format off
     switch( csr ) {
       // Floating Point flags
-      case fflags: return BitExtract<0, 5, XLEN>( CSR[fcsr] );
-      case frm: return BitExtract<5, 3, XLEN>( CSR[fcsr] );
-      case fcsr: return BitExtract<0, 8, XLEN>( CSR[fcsr] );
+      case fflags: return BitExtract<0, 5>( XLEN( CSR[fcsr] ) );
+      case frm: return BitExtract<5, 3>( XLEN( CSR[fcsr] ) );
+      case fcsr: return BitExtract<0, 8>( XLEN( CSR[fcsr] ) );

       // Performance Counters
       case cycle: return GetPerfCounter();
@@ -527,7 +530,10 @@ struct RevCSR : RevZicntr {

   /// Set a CSR register
   template
-  bool SetCSR( uint16_t csr, XLEN val ) {
+  bool SetCSR( uint32_t csr, XLEN val ) {
+    // Check for valid CSR register
+    if( csr >= 0x1000 )
+      fatal( "Invalid CSR register at PC = 0x%" PRIx64 "\n" );

     // Read-only CSRs cannot be written to
     if( csr >= 0xc00 && csr < 0xe00 )
@@ -554,8 +560,8 @@ private:
   std::array CSR{}; ///< RegCSR: CSR registers

-  std::unordered_map> Getter{}; ///< RevCSR: CSR Getters
-  std::unordered_map> Setter{}; ///< RevCSR: CSR Setters
+  std::unordered_map> Getter{}; ///< RevCSR: CSR Getters
+  std::unordered_map> Setter{}; ///< RevCSR: CSR Setters
 }; // class RevCSR
diff --git a/include/RevCore.h b/include/RevCore.h
index db7b4233..5f394a07 100644
--- a/include/RevCore.h
+++ b/include/RevCore.h
@@ -1,7 +1,7 @@
 //
 // _RevCore_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -278,30 +278,30 @@ class RevCore {
   uint64_t GetCycles() const { return cycles; }

   ///< RevCore: Register a custom getter for a particular CSR register
-  void SetCSRGetter( uint16_t csr, std::function handler ) {
+  void SetCSRGetter( uint32_t csr, std::function handler ) {
     handler ? (void) Getter.insert_or_assign( csr, std::move( handler ) ) : (void) Getter.erase( csr );
   }

   ///< RevCore: Register a custom setter for a particular CSR register
-  void SetCSRSetter( uint16_t csr, std::function handler ) {
+  void SetCSRSetter( uint32_t csr, std::function handler ) {
     handler ? (void) Setter.insert_or_assign( csr, std::move( handler ) ) : (void) Setter.erase( csr );
   }

   ///< RevCore: Get the custom getter for a particular CSR register
-  auto GetCSRGetter( uint16_t csr ) const {
+  auto GetCSRGetter( uint32_t csr ) const {
     auto it = Getter.find( csr );
-    return it != Getter.end() ? it->second : std::function{};
+    return it != Getter.end() ? it->second : std::function{};
   }

   ///< RevCore: Get the custom setter for a particular CSR register
-  auto GetCSRSetter( uint16_t csr ) const {
+  auto GetCSRSetter( uint32_t csr ) const {
     auto it = Setter.find( csr );
-    return it != Setter.end() ? it->second : std::function{};
+    return it != Setter.end() ? it->second : std::function{};
   }

 private:
-  std::unordered_map> Getter{};
-  std::unordered_map> Setter{};
+  std::unordered_map> Getter{};
+  std::unordered_map> Setter{};

   bool Halted = false; ///< RevCore: determines if the core is halted
   bool Stalled = false; ///< RevCore: determines if the core is stalled on instruction fetch
@@ -841,7 +841,7 @@ class RevCore {
   }

   /// RevCore: Check LS queue for outstanding load - ignore x0
-  static bool LSQCheck( uint32_t HartID, const RevRegFile* regFile, uint16_t reg, RevRegClass regClass ) {
+  static bool LSQCheck( uint32_t HartID, const RevRegFile* regFile, uint32_t reg, RevRegClass regClass ) {
     if( reg == 0 && regClass == RevRegClass::RegGPR ) {
       return false; // GPR x0 is not considered
     } else {
@@ -850,7 +850,7 @@ class RevCore {
   }

   /// RevCore: Check scoreboard for a source register dependency
-  static bool ScoreboardCheck( const RevRegFile* regFile, uint16_t reg, RevRegClass regClass ) {
+  static bool ScoreboardCheck( const RevRegFile* regFile, uint32_t reg, RevRegClass regClass ) {
     switch( regClass ) {
       case RevRegClass::RegGPR: return reg != 0 && regFile->RV_Scoreboard[reg];
       case RevRegClass::RegFLOAT: return regFile->FP_Scoreboard[reg];
diff --git a/include/RevExt.h b/include/RevExt.h
index 11f1c5cd..33e3aaeb 100644
--- a/include/RevExt.h
+++ b/include/RevExt.h
@@ -1,7 +1,7 @@
 //
 // _RevExt_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -55,7 +55,7 @@ struct RevExt {
   std::string_view GetName() const { return name; }

   /// RevExt: baseline execution function
-  bool Execute( uint32_t Inst, const RevInst& Payload, uint16_t HartID, RevRegFile* regFile ) const;
+  bool Execute( uint32_t Inst, const RevInst& Payload, uint32_t HartID, RevRegFile* regFile ) const;

   /// RevExt: retrieves the extension's instruction table
   const std::vector& GetTable() const { return table; }
diff --git a/include/RevFeature.h b/include/RevFeature.h
index 6f2ee7e5..3077e7c7 100644
--- a/include/RevFeature.h
+++ b/include/RevFeature.h
@@ -1,7 +1,7 @@
 //
 // _RevFeature_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -95,7 +95,7 @@ class RevFeature {
   auto GetProcID() const { return ProcID; }

   /// GetHartToExecID: Retrieve the current executing Hart
-  uint16_t GetHartToExecID() const { return HartToExecID; }
+  auto GetHartToExecID() const { return HartToExecID; }

   /// SetHartToExecID: Set the current executing Hart
   void SetHartToExecID( uint32_t hart ) { HartToExecID = hart; }
diff --git a/include/RevHart.h b/include/RevHart.h
index cf580e21..f04f9239 100644
--- a/include/RevHart.h
+++ b/include/RevHart.h
@@ -1,7 +1,7 @@
 //
 // _RevHart_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -56,7 +56,7 @@ class RevHart {
   const EcallState& GetEcallState() const { return Ecall; }

   ///< RevHart: Get Hart's ID
-  uint16_t GetID() const { return ID; }
+  uint32_t GetID() const { return ID; }

   ///< RevHart: Returns the ID of the assigned thread
   uint32_t GetAssignedThreadID() const { return Thread ? Thread->GetID() : _INVALID_TID_; }
diff --git a/include/RevInstHelpers.h b/include/RevInstHelpers.h
index 56f32edf..0fe7584a 100644
--- a/include/RevInstHelpers.h
+++ b/include/RevInstHelpers.h
@@ -1,7 +1,7 @@
 //
 // _RevInstHelpers_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -425,7 +425,7 @@ inline auto negate( T x ) {
 // RISC-V requires INVALID exception when x * y is INVALID even when z = qNaN
 template
 inline auto revFMA( T x, T y, T z ) {
-  if( ( !y && std::isinf( x ) ) || ( !x && std::isinf( y ) ) ) {
+  if( ( y == 0 && std::isinf( x ) ) || ( x == 0 && std::isinf( y ) ) ) {
     feraiseexcept( FE_INVALID );
   }
   return std::fma( x, y, z );
diff --git a/include/RevInstTable.h b/include/RevInstTable.h
index 873a38a1..d1454dd9 100644
--- a/include/RevInstTable.h
+++ b/include/RevInstTable.h
@@ -1,7 +1,7 @@
 //
 // _RevInstTable_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -89,11 +89,11 @@ class RevInst {
   uint8_t funct4 = 0; ///< RevInst: compressed funct4 value
   uint8_t funct6 = 0; ///< RevInst: compressed funct6 value
   uint8_t funct2or7 = 0; ///< RevInst: uncompressed funct2 or funct7 value
-  uint64_t rd = ~uint64_t{}; ///< RevInst: rd value
-  uint64_t rs1 = ~uint64_t{}; ///< RevInst: rs1 value
-  uint64_t rs2 = ~uint64_t{}; ///< RevInst: rs2 value
-  uint64_t rs3 = ~uint64_t{}; ///< RevInst: rs3 value
-  uint64_t imm = 0; ///< RevInst: immediate value
+  uint32_t rd = ~uint32_t{}; ///< RevInst: rd value
+  uint32_t rs1 = ~uint32_t{}; ///< RevInst: rs1 value
+  uint32_t rs2 = ~uint32_t{}; ///< RevInst: rs2 value
+  uint32_t rs3 = ~uint32_t{}; ///< RevInst: rs3 value
+  uint32_t imm = 0; ///< RevInst: immediate value
   bool raisefpe = 0; ///< RevInst: raises FP exceptions
   FRMode rm{ FRMode::None }; ///< RevInst: floating point rounding mode
   bool aq = false; ///< RevInst: aqr field for atomic instructions
@@ -110,11 +110,12 @@ class RevInst {
   explicit RevInst() = default; // prevent aggregate initialization

   ///< RevInst: Sign-extended immediate value
-  constexpr int64_t ImmSignExt( int bits ) const { return SignExt( imm, bits ); }
+  constexpr auto ImmSignExt( int bits ) const { return SignExt( imm, bits ); }
 }; // RevInst

 /// CRegIdx: Maps the compressed index to normal index
-constexpr auto CRegIdx( uint32_t x ) {
+template
+constexpr auto CRegIdx( T x ) {
   return x + 8;
 }
diff --git a/include/RevLoader.h b/include/RevLoader.h
index 572b5a35..c59ddcc4 100644
--- a/include/RevLoader.h
+++ b/include/RevLoader.h
@@ -1,7 +1,7 @@
 //
 // _RevLoader_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -349,7 +349,7 @@ class RevLoader {
   bool LoadElf64( char* MemBuf, size_t Size );

   ///< Breaks bulk writes into cache lines
-  bool WriteCacheLine( uint64_t Addr, size_t Len, const void* Data );
+  bool WriteCacheLine( uint64_t Addr, uint32_t Len, const void* Data );

   ///< RevLoader: Replaces first MemSegment (initialized to entire memory space) with the static memory
   void InitStaticMem();
diff --git a/include/RevMem.h b/include/RevMem.h
index ba913847..c8f54381 100644
--- a/include/RevMem.h
+++ b/include/RevMem.h
@@ -1,7 +1,7 @@
 //
 // _RevMem_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -167,10 +167,10 @@ class RevMem {
   // ---- Base Memory Interfaces
   // ----------------------------------------------------
   /// RevMem: write to the target memory location with the target flags
-  bool WriteMem( uint32_t Hart, uint64_t Addr, size_t Len, const void* Data, RevFlag flags = RevFlag::F_NONE );
+  bool WriteMem( uint32_t Hart, uint64_t Addr, uint32_t Len, const void* Data, RevFlag flags = RevFlag::F_NONE );

   /// RevMem: read data from the target memory location
-  bool ReadMem( uint32_t Hart, uint64_t Addr, size_t Len, void* Target, const MemReq& req, RevFlag flags = RevFlag::F_NONE );
+  bool ReadMem( uint32_t Hart, uint64_t Addr, uint32_t Len, void* Target, const MemReq& req, RevFlag flags = RevFlag::F_NONE );

   /// RevMem: flush a cache line
   bool FlushLine( uint32_t Hart, uint64_t Addr );
@@ -194,12 +194,12 @@ class RevMem {
   void LR( uint32_t hart, uint64_t addr, size_t len, void* target, const MemReq& req, RevFlag flags );

   /// RevMem: STORE CONDITIONAL memory interface
-  bool SC( uint32_t Hart, uint64_t addr, size_t len, void* data, RevFlag flags );
+  bool SC( uint32_t Hart, uint64_t addr, uint32_t len, void* data, RevFlag flags );

   /// RevMem: template AMO memory interface
   template
   bool AMOVal( uint32_t Hart, uint64_t Addr, T* Data, T* Target, const MemReq& req, RevFlag flags ) {
-    return AMOMem( Hart, Addr, sizeof( T ), Data, Target, req, flags );
+    return AMOMem( Hart, Addr, uint32_t{ sizeof( T ) }, Data, Target, req, flags );
   }

   // ----------------------------------------------------
@@ -228,7 +228,7 @@ class RevMem {
   // ---- Atomic/Future/LRSC Interfaces
   // ----------------------------------------------------
   /// RevMem: Initiated an AMO request
-  bool AMOMem( uint32_t Hart, uint64_t Addr, size_t Len, void* Data, void* Target, const MemReq& req, RevFlag flags );
+  bool AMOMem( uint32_t Hart, uint64_t Addr, uint32_t Len, void* Data, void* Target, const MemReq& req, RevFlag flags );

   /// RevMem: Invalidate Matching LR reservations
   bool InvalidateLRReservations( uint32_t hart, uint64_t addr, size_t len );
diff --git a/include/RevMemCtrl.h b/include/RevMemCtrl.h
index 25ee1e8a..f46ce374 100644
--- a/include/RevMemCtrl.h
+++ b/include/RevMemCtrl.h
@@ -654,24 +654,24 @@ class RevBasicMemCtrl final : public RevMemCtrl {
   StandardMem* memIface{}; ///< StandardMem memory interface
   RevStdMemHandlers* stdMemHandlers{}; ///< StandardMem interface response handlers
   bool hasCache{}; ///< detects whether cache layers are present
-  uint64_t lineSize{}; ///< cache line size
-  uint64_t max_loads{}; ///< maximum number of outstanding loads
-  uint64_t max_stores{}; ///< maximum number of outstanding stores
-  uint64_t max_flush{}; ///< maximum number of oustanding flush events
-  uint64_t max_llsc{}; ///< maximum number of outstanding llsc events
-  uint64_t max_readlock{}; ///< maximum number of oustanding readlock events
-  uint64_t max_writeunlock{}; ///< maximum number of oustanding writelock events
-  uint64_t max_custom{}; ///< maximum number of oustanding custom events
-  uint64_t max_ops{}; ///< maximum number of ops to issue per cycle
-
-  uint64_t num_read{}; ///< number of outstanding read requests
-  uint64_t num_write{}; ///< number of outstanding write requests
-  uint64_t num_flush{}; ///< number of outstanding flush requests
-  uint64_t num_llsc{}; ///< number of outstanding LL/SC requests
-  uint64_t num_readlock{}; ///< number of oustanding readlock requests
-  uint64_t num_writeunlock{}; ///< number of oustanding writelock requests
-  uint64_t num_custom{}; ///< number of outstanding custom requests
-  uint64_t num_fence{}; ///< number of oustanding fence requests
+  uint32_t lineSize{}; ///< cache line size
+  uint32_t max_loads{}; ///< maximum number of outstanding loads
+  uint32_t max_stores{}; ///< maximum number of outstanding stores
+  uint32_t max_flush{}; ///< maximum number of oustanding flush events
+  uint32_t max_llsc{}; ///< maximum number of outstanding llsc events
+  uint32_t max_readlock{}; ///< maximum number of oustanding readlock events
+  uint32_t max_writeunlock{}; ///< maximum number of oustanding writelock events
+  uint32_t max_custom{}; ///< maximum number of oustanding custom events
+  uint32_t max_ops{}; ///< maximum number of ops to issue per cycle
+
+  uint32_t num_read{}; ///< number of outstanding read requests
+  uint32_t num_write{}; ///< number of outstanding write requests
+  uint32_t num_flush{}; ///< number of outstanding flush requests
+  uint32_t num_llsc{}; ///< number of outstanding LL/SC requests
+  uint32_t num_readlock{}; ///< number of oustanding readlock requests
+  uint32_t num_writeunlock{}; ///< number of oustanding writelock requests
+  uint32_t num_custom{}; ///< number of outstanding custom requests
+  uint32_t num_fence{}; ///< number of oustanding fence requests

   std::vector requests{}; ///< outstanding StandardMem requests
   std::vector rqstQ{}; ///< queued memory requests
diff --git a/include/RevTracer.h b/include/RevTracer.h
index 591e50ee..2218eddd 100644
--- a/include/RevTracer.h
+++ b/include/RevTracer.h
@@ -1,7 +1,7 @@
 //
 // _RevTracer_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -210,9 +210,9 @@ class RevTracer {
   /// RevTracer: capture instruction to be traced
   void SetFetchedInsn( uint64_t _pc, uint32_t _insn );
   /// RevTracer: capture register read
-  void regRead( size_t r, uint64_t v );
+  void regRead( uint64_t r, uint64_t v );
   /// RevTracer: capture register write.
-  void regWrite( size_t r, uint64_t v );
+  void regWrite( uint64_t r, uint64_t v );
   /// RevTracer: capture memory write.
   void memWrite( uint64_t adr, size_t len, const void* data );
   /// RevTracer: capture memory read
@@ -275,7 +275,7 @@ class RevTracer {
   /// RevTracer: determine if this buffer should be rendered
   bool OutputOK();
   /// RevTracer: format register address for rendering
-  std::string fmt_reg( uint8_t r );
+  std::string fmt_reg( uint64_t r );
   /// RevTracer: Format data associated with memory access
   std::string fmt_data( size_t len, uint64_t data );
   /// RevTracer: Generate string from captured state
diff --git a/include/RevZicntr.h b/include/RevZicntr.h
index a1d87373..3903076e 100644
--- a/include/RevZicntr.h
+++ b/include/RevZicntr.h
@@ -1,7 +1,7 @@
 //
 // _RevZicntr_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -77,13 +77,13 @@ class RevZicntr {
         fatal( "Illegal instruction at PC = 0x%" PRIx64 ": High half of Zicntr register not available on RV64\n" );
         return 0;
       } else {
-        return COUNTER( this );
+        return XLEN( COUNTER( this ) );
       }
     } else {
       if constexpr( HALF == Half::Hi ) {
-        return COUNTER( this ) >> 32;
+        return XLEN( COUNTER( this ) >> 32 );
       } else {
-        return COUNTER( this ) & 0xffffffff;
+        return XLEN( COUNTER( this ) & 0xffffffff );
       }
     }
   }
diff --git a/include/SST.h b/include/SST.h
index affb0c57..908d6947 100644
--- a/include/SST.h
+++ b/include/SST.h
@@ -1,7 +1,7 @@
 //
 // _SST_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -20,6 +20,7 @@
 #pragma GCC diagnostic ignored "-Wsuggest-override"
 #pragma GCC diagnostic ignored "-Wdouble-promotion"
 #pragma GCC diagnostic ignored "-Wsign-conversion"
+#pragma GCC diagnostic ignored "-Wconversion"

 #if defined( __GNUC__ ) && !defined( __clang__ )
 #pragma GCC diagnostic ignored "-Wsuggest-final-methods"
diff --git a/include/insns/Zicsr.h b/include/insns/Zicsr.h
index 6bca6039..28cf1f96 100644
--- a/include/insns/Zicsr.h
+++ b/include/insns/Zicsr.h
@@ -1,7 +1,7 @@
 //
 // _Zicsr_h_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
diff --git a/src/RevCPU.cc b/src/RevCPU.cc
index aa33b316..ad9020b8 100644
--- a/src/RevCPU.cc
+++ b/src/RevCPU.cc
@@ -1,7 +1,7 @@
 //
 // _RevCPU_cc_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -111,7 +111,7 @@ RevCPU::RevCPU( SST::ComponentId_t id, const SST::Params& params ) : SST::Compon
     std::string width = params.find( "fault_width", "1" );
     DecodeFaultWidth( width );

-    fault_width = params.find( "fault_range", "65536" );
+    fault_width = params.find( "fault_range", "65536" );
     FaultCntr = fault_width;
   }
@@ -131,7 +131,7 @@ RevCPU::RevCPU( SST::ComponentId_t id, const SST::Params& params ) : SST::Compon
   }

   // Set TLB Size
-  const uint64_t tlbSize = params.find( "tlbSize", 512 );
+  auto tlbSize = params.find( "tlbSize", 512 );
   Mem->SetTLBSize( tlbSize );

   // Set max heap size
@@ -228,14 +228,14 @@ RevCPU::RevCPU( SST::ComponentId_t id, const SST::Params& params ) : SST::Compon
   }

   // Initial thread setup
-  uint32_t MainThreadID = id + 1; // Prevents having MainThreadID == 0 which is reserved for INVALID
+  uint32_t MainThreadID = uint32_t( id ) + 1; // Prevents having MainThreadID == 0 which is reserved for INVALID

   uint64_t StartAddr = 0;
   std::string StartSymbol;

-  bool IsStartSymbolProvided = Opts->GetStartSymbol( id, StartSymbol );
-  bool IsStartAddrProvided = Opts->GetStartAddr( id, StartAddr ) && StartAddr != 0;
-  uint64_t ResolvedStartSymbolAddr = ( IsStartSymbolProvided ) ? Loader->GetSymbolAddr( StartSymbol ) : 0;
+  bool IsStartSymbolProvided = Opts->GetStartSymbol( uint32_t( id ), StartSymbol );
+  bool IsStartAddrProvided = Opts->GetStartAddr( uint32_t( id ), StartAddr ) && StartAddr != 0;
+  uint64_t ResolvedStartSymbolAddr = IsStartSymbolProvided ? Loader->GetSymbolAddr( StartSymbol ) : 0;

   // If no start address has been provided ...
   if( !IsStartAddrProvided ) {
@@ -338,7 +338,7 @@ void RevCPU::DecodeFaultWidth( const std::string& width ) {
   } else if( width == "word" ) {
     fault_width = 8;
   } else {
-    fault_width = std::stoul( width );
+    fault_width = uint32_t( std::stoul( width ) );
   }

   if( fault_width > 64 ) {
@@ -558,7 +558,7 @@ bool RevCPU::clockTick( SST::Cycle_t currentCycle ) {
   output.verbose( CALL_INFO, 8, 0, "Cycle: %" PRIu64 "\n", currentCycle );

   // Execute each enabled core
-  for( size_t i = 0; i < Procs.size(); i++ ) {
+  for( uint32_t i = 0; i < Procs.size(); i++ ) {
     // Check if we have more work to assign and places to put it
     UpdateThreadAssignments( i );
     if( Enabled[i] ) {
@@ -568,10 +568,10 @@ bool RevCPU::clockTick( SST::Cycle_t currentCycle ) {
       }
       UpdateCoreStatistics( i );
       Enabled[i] = false;
-      output.verbose( CALL_INFO, 5, 0, "Closing Processor %zu at Cycle: %" PRIu64 "\n", i, currentCycle );
+      output.verbose( CALL_INFO, 5, 0, "Closing Processor %" PRIu32 " at Cycle: %" PRIu64 "\n", i, currentCycle );
     }
     if( EnableCoProc && !CoProcs[i]->ClockTick( currentCycle ) && !DisableCoprocClock ) {
-      output.verbose( CALL_INFO, 5, 0, "Closing Co-Processor %zu at Cycle: %" PRIu64 "\n", i, currentCycle );
+      output.verbose( CALL_INFO, 5, 0, "Closing Co-Processor %" PRIu32 " at Cycle: %" PRIu64 "\n", i, currentCycle );
     }
   }
diff --git a/src/RevCore.cc b/src/RevCore.cc
index 94e33535..ae02c9d4 100644
--- a/src/RevCore.cc
+++ b/src/RevCore.cc
@@ -46,8 +46,8 @@ RevCore::RevCore(
   LSQueue->clear();

   // Create the Hart Objects
-  for( size_t i = 0; i < numHarts; i++ ) {
-    Harts.emplace_back( std::make_unique( i, LSQueue, [=]( const MemReq& req ) { this->MarkLoadComplete( req ); } ) );
+  for( uint32_t i = 0; i < numHarts; i++ ) {
+    Harts.emplace_back( new RevHart( i, LSQueue, [=]( const MemReq& req ) { this->MarkLoadComplete( req ); } ) );
     ValidHarts.set( i, true );
   }
@@ -453,13 +453,13 @@ RevInst RevCore::DecodeCIInst( uint32_t Inst, uint32_t Entry ) const {
     CompInst.imm |= ( Inst & 0b1000000000000 ) >> 3; // bit 9
     CompInst.rs1 = 2; // Force rs1 to be x2 (stack pointer)
     // sign extend
-    CompInst.imm = uint64_t( CompInst.ImmSignExt( 10 ) );
+    CompInst.imm = uint32_t( CompInst.ImmSignExt( 10 ) );
   } else if( ( CompInst.opcode == 0b01 ) && ( CompInst.funct3 == 0b011 ) && ( CompInst.rd != 0 ) && ( CompInst.rd != 2 ) ) {
     // c.lui
     CompInst.imm = ( Inst & 0b1111100 ) << 10; // [16:12]
     CompInst.imm |= ( Inst & 0b1000000000000 ) << 5; // [17]
     // sign extend
-    CompInst.imm = uint64_t( CompInst.ImmSignExt( 18 ) );
+    CompInst.imm = uint32_t( CompInst.ImmSignExt( 18 ) );
     CompInst.imm >>= 12; //immd value will be re-aligned on execution
   } else if( CompInst.opcode == 0b01 && CompInst.funct3 == 0b010 && CompInst.rd != 0 ) {
     // c.li
@@ -467,10 +467,10 @@ RevInst RevCore::DecodeCIInst( uint32_t Inst, uint32_t Entry ) const {
     CompInst.imm = ( Inst & 0b1111100 ) >> 2;
     CompInst.imm |= ( Inst & 0b1000000000000 ) >> 7; // [5]
     CompInst.rs1 = 0; // Force rs1 to be x0, expands to add rd, x0, imm
     // sign extend
-    CompInst.imm = uint64_t( CompInst.ImmSignExt( 6 ) );
+    CompInst.imm = uint32_t( CompInst.ImmSignExt( 6 ) );
   } else {
     // sign extend
-    CompInst.imm = uint64_t( CompInst.ImmSignExt( 6 ) );
+    CompInst.imm = uint32_t( CompInst.ImmSignExt( 6 ) );
   }

   //if c.addi, expands to addi %rd, %rd, $imm so set rs1 to rd -or-
@@ -561,7 +561,7 @@ RevInst RevCore::DecodeCIWInst( uint32_t Inst, uint32_t Entry ) const {
   tmp[6] = imm[4];
   tmp[7] = imm[5];

-  CompInst.imm = tmp.to_ulong();
+  CompInst.imm = uint32_t( tmp.to_ulong() );

   // Set rs1 to x2 and scale offset by 4 if this is an addi4spn
   if( 0x00 == CompInst.opcode && 0x00 == CompInst.funct3 ) {
@@ -725,7 +725,7 @@ RevInst RevCore::DecodeCBInst( uint32_t Inst, uint32_t Entry ) const {

   // registers
   CompInst.rd = CompInst.rs1 = BitExtract<7, 3>( Inst );
-  CompInst.offset = BitExtract<2, 5>( Inst );
+  CompInst.offset = uint16_t( BitExtract<2, 5>( Inst ) );
   CompInst.offset |= ( Inst & 0b1110000000000 ) >> 5;

   //Apply compressed offset
@@ -751,17 +751,17 @@ RevInst RevCore::DecodeCBInst( uint32_t Inst, uint32_t Entry ) const {
     tmp[7] = o[7];
   }

-  CompInst.offset = (uint16_t) tmp.to_ulong() << 1; // scale to corrrect position to be consistent with other compressed ops
+  CompInst.offset = uint16_t( tmp.to_ulong() << 1 ); // scale to corrrect position to be consistent with other compressed ops

   if( 0b01 == CompInst.opcode && CompInst.funct3 >= 0b110 ) {
     //Set rs2 to x0 if c.beqz or c.bnez
     CompInst.rs2 = 0;
     CompInst.imm = CompInst.offset;
-    CompInst.imm = uint64_t( CompInst.ImmSignExt( 9 ) );
+    CompInst.imm = uint32_t( CompInst.ImmSignExt( 9 ) );
   } else {
     CompInst.imm = ( Inst & 0b01111100 ) >> 2;
     CompInst.imm |= ( Inst & 0b01000000000000 ) >> 7;
-    CompInst.imm = uint64_t( CompInst.ImmSignExt( 6 ) );
+    CompInst.imm = uint32_t( CompInst.ImmSignExt( 6 ) );
   }

   CompInst.instSize = 2;
@@ -781,7 +781,7 @@ RevInst RevCore::DecodeCJInst( uint32_t Inst, uint32_t Entry ) const {
   CompInst.funct3 = InstTable[Entry].funct3;

   // registers
-  uint16_t offset = BitExtract<2, 11>( Inst );
+  uint16_t offset = uint16_t( BitExtract<2, 11>( Inst ) );

   //swizzle bits offset[11|4|9:8|10|6|7|3:1|5]
   std::bitset<16> offsetBits( offset ), target;
@@ -796,13 +796,13 @@ RevInst RevCore::DecodeCJInst( uint32_t Inst, uint32_t Entry ) const {
   target[8] = offsetBits[8];
   target[9] = offsetBits[6];
   target[10] = offsetBits[10];
-  CompInst.jumpTarget = (u_int16_t) target.to_ulong() << 1;
+  CompInst.jumpTarget = uint16_t( target.to_ulong() << 1 );

   if( 0b01 == CompInst.opcode && ( 0b001 == CompInst.funct3 || 0b101 == CompInst.funct3 ) ) {
     //Set rd to x1 if this is a c.jal, x0 if this is a c.j
     CompInst.rd = 0b001 == CompInst.funct3;
     CompInst.imm = CompInst.jumpTarget;
-    CompInst.imm = uint64_t( CompInst.ImmSignExt( 12 ) );
+    CompInst.imm = uint32_t( CompInst.ImmSignExt( 12 ) );
   }

   CompInst.instSize = 2;
@@ -1202,7 +1202,7 @@ RevInst RevCore::DecodeR4Inst( uint32_t Inst, uint32_t Entry ) const {
   // encodings
   DInst.opcode = InstTable[Entry].opcode;
   DInst.funct3 = 0x0;
-  DInst.funct2or7 = DECODE_FUNCT2( Inst );
+  DInst.funct2or7 = uint8_t( DECODE_FUNCT2( Inst ) );
   DInst.rm = DECODE_RM( Inst );

   // Whether the instruction raises floating-point exceptions
@@ -1293,7 +1293,7 @@ RevInst RevCore::FetchAndDecodeInst() {

   // Stage 1a: handle the crack fault injection
   if( CrackFault ) {
-    uint64_t rval = RevRand( 0, ( uint32_t{ 1 } << fault_width ) - 1 );
+    auto rval = RevRand( 0, ( uint32_t{ 1 } << fault_width ) - 1 );
     Inst |= rval;

     // clear the fault
@@ -1501,12 +1501,12 @@ void RevCore::HandleRegFault( uint32_t width ) {
     if( feature->HasD() ) {
       uint64_t tmp;
       memcpy( &tmp, &regFile->DPF[RegIdx], sizeof( tmp ) );
-      tmp |= RevRand( 0, ~( ~uint32_t{ 0 } << width ) );
+      tmp |= RevRand( 0, ~( ~uint64_t{ 0 } << width ) );
      memcpy( &regFile->DPF[RegIdx], &tmp, sizeof( tmp ) );
     } else {
       uint32_t tmp;
       memcpy( &tmp, &regFile->SPF[RegIdx], sizeof( tmp ) );
-      tmp |= RevRand( 0, ~( ~uint64_t{ 0 } << width ) );
+      tmp |= RevRand( 0, ~( ~uint32_t{ 0 } << width ) );
       memcpy( &regFile->SPF[RegIdx], &tmp, sizeof( tmp ) );
     }
     RegPrefix = "f";
@@ -1536,7 +1536,7 @@ bool RevCore::DependencyCheck( uint32_t HartID, const RevInst* I ) const {
   // For ECALL, check for any outstanding dependencies on a0-a7
   if( I->opcode == 0b1110011 && I->imm == 0 && I->funct3 == 0 && I->rd == 0 && I->rs1 == 0 ) {
     for( RevReg reg : { RevReg::a7, RevReg::a0, RevReg::a1, RevReg::a2, RevReg::a3, RevReg::a4, RevReg::a5, RevReg::a6 } ) {
-      if( LSQCheck( HartToDecodeID, RegFile, uint16_t( reg ), RevRegClass::RegGPR ) || ScoreboardCheck( RegFile, uint16_t( reg ), RevRegClass::RegGPR ) ) {
+      if( LSQCheck( HartToDecodeID, RegFile, safe_static_cast( reg ), RevRegClass::RegGPR ) || ScoreboardCheck( RegFile, uint16_t( reg ), RevRegClass::RegGPR ) ) {
         return true;
       }
     }
@@ -1717,7 +1717,7 @@ bool RevCore::ClockTick( SST::Cycle_t currentCycle ) {
       RevExt* Ext = Extensions[EToE.first].get();

       // -- BEGIN new pipelining implementation
-      Pipeline.emplace_back( std::make_pair( HartToExecID, Inst ) );
+      Pipeline.emplace_back( HartToExecID, Inst );

       if( Ext->GetName() == "RV32F" || Ext->GetName() == "RV32D" || Ext->GetName() == "RV64F" || Ext->GetName() == "RV64D" ) {
         Stats.floatsExec++;
@@ -1864,7 +1864,7 @@ std::unique_ptr RevCore::PopThreadFromHart( uint32_t HartID ) {
 void RevCore::PrintStatSummary() {
   auto memStatsTotal = mem->GetMemStatsTotal();

-  double eff = StatsTotal.totalCycles ? double( StatsTotal.cyclesBusy ) / StatsTotal.totalCycles : 0;
+  double eff = StatsTotal.totalCycles ? double( StatsTotal.cyclesBusy ) / double( StatsTotal.totalCycles ) : 0;

   output->verbose(
     CALL_INFO,
     2,
@@ -2015,7 +2015,7 @@ void RevCore::AssignThread( std::unique_ptr Thread ) {
 uint32_t RevCore::FindIdleHartID() const {
   uint32_t IdleHartID = _REV_INVALID_HART_ID_;
   // Iterate over IdleHarts to find the first idle hart
-  for( size_t i = 0; i < Harts.size(); i++ ) {
+  for( uint32_t i = 0; i < Harts.size(); i++ ) {
     if( IdleHarts[i] ) {
       IdleHartID = i;
       break;
diff --git a/src/RevExt.cc b/src/RevExt.cc
index b080071a..96589679 100644
--- a/src/RevExt.cc
+++ b/src/RevExt.cc
@@ -1,7 +1,7 @@
 //
 // _RevExt_cc_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -13,7 +13,7 @@ namespace SST::RevCPU {

 /// Execute an instruction
-bool RevExt::Execute( uint32_t Inst, const RevInst& payload, uint16_t HartID, RevRegFile* regFile ) const {
+bool RevExt::Execute( uint32_t Inst, const RevInst& payload, uint32_t HartID, RevRegFile* regFile ) const {
   bool ( *func )( const RevFeature*, RevRegFile*, RevMem*, const RevInst& );

   if( payload.compressed ) {
diff --git a/src/RevFeature.cc b/src/RevFeature.cc
index 56b67ca6..694fc05b 100644
--- a/src/RevFeature.cc
+++ b/src/RevFeature.cc
@@ -1,7 +1,7 @@
 //
 // _RevFeature_cc_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -55,7 +55,7 @@ bool RevFeature::ParseMachineModel() {
   ///< Values of -1, 0 for the fourth and fifth values indicates no Rev support yet.
   ///<
   ///< ExtensionName DefaultMajor DefaultMinor MinSupportedVersion MaxSupportedVersion Flags
-  static constexpr std::tuple table[] = {
+  static constexpr std::tuple table[] = {
     { "I", 2, 1, 2, 2, RV_I },
     { "E", 2, 0, -1, 0, RV_E }, // Unsupported
     { "M", 2, 0, 2, 2, RV_M | RV_ZMMUL },
@@ -113,7 +113,7 @@ bool RevFeature::ParseMachineModel() {
       snprintf(
         unsupported_version,
         sizeof( unsupported_version ),
-        "Error: Version %" PRIu32 ".%" PRIu32 " of %s extension is not supported\n",
+        "Error: Version %" PRIu64 ".%" PRIu64 " of %s extension is not supported\n",
         majorVersion,
         minorVersion,
         ext.data()
diff --git a/src/RevLoader.cc b/src/RevLoader.cc
index 881279c0..f45c4d1c 100644
--- a/src/RevLoader.cc
+++ b/src/RevLoader.cc
@@ -1,7 +1,7 @@
 //
 // _RevLoader_cc_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -47,7 +47,7 @@ bool RevLoader::IsRVBig( const Elf64_Ehdr eh64 ) {
 }

 // breaks the write into cache line chunks
-bool RevLoader::WriteCacheLine( uint64_t Addr, size_t Len, const void* Data ) {
+bool RevLoader::WriteCacheLine( uint64_t Addr, uint32_t Len, const void* Data ) {
   if( Len == 0 ) {
     // nothing to do here, move along
     return true;
@@ -82,7 +82,7 @@ bool RevLoader::WriteCacheLine( uint64_t Addr, size_t Len, const void* Data ) {
   }

   // write the first cache line
-  size_t TmpSize = BaseCacheAddr + lineSize - Addr;
+  uint32_t TmpSize = uint32_t( BaseCacheAddr + lineSize - Addr );
   uint64_t TmpData = uint64_t( Data );
   uint64_t TmpAddr = Addr;
   if( !mem->WriteMem( 0, TmpAddr, TmpSize, reinterpret_cast( TmpData ) ) ) {
@@ -100,7 +100,7 @@ bool RevLoader::WriteCacheLine( uint64_t Addr, size_t Len, const void* Data ) {
       TmpSize = lineSize;
     } else {
       // this is probably the final write operation
-      TmpSize = ( Len - Total );
+      TmpSize = uint32_t( Len - Total );
     }

     if( !mem->WriteMem( 0, TmpAddr, TmpSize, reinterpret_cast( TmpData ) ) ) {
@@ -210,8 +210,8 @@ bool RevLoader::LoadElf32( char* membuf, size_t sz ) {
   elfinfo.phdr_size = eh->e_phnum * sizeof( Elf32_Phdr );

   // set the first stack pointer
-  uint32_t sp = mem->GetStackTop() - (uint32_t) ( elfinfo.phdr_size );
-  WriteCacheLine( sp, elfinfo.phdr_size, ph );
+  uint64_t sp = mem->GetStackTop() - elfinfo.phdr_size;
+  WriteCacheLine( sp, uint32_t( elfinfo.phdr_size ), ph );
   mem->SetStackTop( sp );

   // iterate over the program headers
@@ -239,8 +239,8 @@ bool RevLoader::LoadElf32( char* membuf, size_t sz ) {
   if( sz < sh[eh->e_shstrndx].sh_offset + sh[eh->e_shstrndx].sh_size )
     output->fatal( CALL_INFO, -1, "Error: RV32 Elf is unrecognizable\n" );

-  uint32_t strtabidx = 0;
-  uint32_t symtabidx = 0;
+  uint64_t strtabidx = 0;
+  uint64_t symtabidx = 0;

   // Iterate over every section header
   for( size_t i = 0; i < eh->e_shnum; i++ ) {
@@ -365,7 +365,7 @@ bool RevLoader::LoadElf64( char* membuf, size_t sz ) {

   // set the first stack pointer
   uint64_t sp = mem->GetStackTop() - elfinfo.phdr_size;
-  WriteCacheLine( sp, elfinfo.phdr_size, ph );
+  WriteCacheLine( sp, uint32_t( elfinfo.phdr_size ), ph );
   mem->SetStackTop( sp );

   // iterate over the program headers
@@ -376,10 +376,10 @@ bool RevLoader::LoadElf64( char* membuf, size_t sz ) {
       if( sz < ph[i].p_offset + ph[i].p_filesz ) {
         output->fatal( CALL_INFO, -1, "Error: RV64 Elf is unrecognizable\n" );
       }
-      WriteCacheLine( ph[i].p_paddr, ph[i].p_filesz, (uint8_t*) ( membuf + ph[i].p_offset ) );
+      WriteCacheLine( ph[i].p_paddr, uint32_t( ph[i].p_filesz ), (uint8_t*) ( membuf + ph[i].p_offset ) );
     }
     std::vector zeros( ph[i].p_memsz - ph[i].p_filesz );
-    WriteCacheLine( ph[i].p_paddr + ph[i].p_filesz, ph[i].p_memsz - ph[i].p_filesz, &zeros[0] );
+    WriteCacheLine( ph[i].p_paddr + ph[i].p_filesz, uint32_t( ph[i].p_memsz - ph[i].p_filesz ), &zeros[0] );
     }
   }
@@ -393,8 +393,8 @@ bool RevLoader::LoadElf64( char* membuf, size_t sz ) {
   if( sz < sh[eh->e_shstrndx].sh_offset + sh[eh->e_shstrndx].sh_size )
     output->fatal( CALL_INFO, -1, "Error: RV64 Elf is unrecognizable\n" );

-  uint32_t strtabidx = 0;
-  uint32_t symtabidx = 0;
+  uint64_t strtabidx = 0;
+  uint64_t symtabidx = 0;

   // Iterate over every section header
   for( size_t i = 0; i < eh->e_shnum; i++ ) {
@@ -421,7 +421,7 @@ bool RevLoader::LoadElf64( char* membuf, size_t sz ) {
   // Iterate over every symbol in the symbol table
   for( size_t i = 0; i < sh[symtabidx].sh_size / sizeof( Elf64_Sym ); i++ ) {
     // Calculate the maximum length of the symbol
-    uint32_t maxlen = sh[strtabidx].sh_size - sym[i].st_name;
+    uint64_t maxlen = sh[strtabidx].sh_size - sym[i].st_name;
     if( sym[i].st_name >= sh[strtabidx].sh_size )
       output->fatal( CALL_INFO, -1, "Error: RV64 Elf is unrecognizable\n" );
     if( strnlen( strtab + sym[i].st_name, maxlen ) >= maxlen )
@@ -484,13 +484,13 @@ bool RevLoader::LoadProgramArgs( const std::string& exe, const std::vector
diff --git a/src/RevMem.cc b/src/RevMem.cc
--- a/src/RevMem.cc
+++ b/src/RevMem.cc
-    ctrl->sendREADLOCKRequest( hart, addr, reinterpret_cast( BaseMem ), len, target, req, flags );
+    ctrl->sendREADLOCKRequest( hart, addr, uint64_t( BaseMem ), uint32_t( len ), target, req, flags );
   } else {
     memcpy( target, BaseMem, len );
     RevHandleFlagResp( target, len, flags );
   }
@@ -124,7 +124,7 @@ bool RevMem::InvalidateLRReservations( uint32_t hart, uint64_t addr, size_t len
   return ret;
 }

-bool RevMem::SC( uint32_t hart, uint64_t addr, size_t len, void* data, RevFlag flags ) {
+bool RevMem::SC( uint32_t hart, uint64_t addr, uint32_t len, void* data, RevFlag flags ) {
   // Find the reservation for this hart (there can only be one active reservation per hart)
   auto it = LRSC.find( hart );
   if( it != LRSC.end() ) {
@@ -267,7 +267,7 @@ bool RevMem::isValidVirtAddr( const uint64_t vAddr ) {
 }

 uint64_t RevMem::AddMemSegAt( const uint64_t& BaseAddr, const uint64_t& SegSize ) {
-  MemSegs.emplace_back( std::make_shared( BaseAddr, SegSize ) );
+  MemSegs.emplace_back( new MemSegment( BaseAddr, SegSize ) );
   return BaseAddr;
 }
@@ -333,7 +333,7 @@ uint64_t RevMem::AddRoundedMemSeg( uint64_t BaseAddr, const uint64_t& SegSize, s
   if( !Added ) {
     // BaseAddr & RoundedTopAddr not a part of a segment
     // Add rounded segment
-    MemSegs.emplace_back( std::make_shared( BaseAddr, RoundedSegSize ) );
+    MemSegs.emplace_back( new MemSegment( BaseAddr, RoundedSegSize ) );
   }

   return BaseAddr;
@@ -342,7 +342,7 @@ uint64_t RevMem::AddRoundedMemSeg( uint64_t BaseAddr, const uint64_t& SegSize, s
 std::shared_ptr RevMem::AddThreadMem() {
   // Calculate the BaseAddr of the segment
   uint64_t BaseAddr = NextThreadMemAddr - ThreadMemSize;
-  ThreadMemSegs.emplace_back( std::make_shared( BaseAddr, ThreadMemSize ) );
+  ThreadMemSegs.emplace_back( new MemSegment( BaseAddr, ThreadMemSize ) );
   // Page boundary between
   NextThreadMemAddr = BaseAddr - pageSize - 1;
   return ThreadMemSegs.back();
@@ -371,7 +371,7 @@ uint64_t RevMem::AllocMem( const uint64_t& SegSize ) {
     if( oldFreeSegSize > SegSize ) {
       // New data will start where the free segment started
       NewSegBaseAddr = FreeSeg->getBaseAddr();
-      MemSegs.emplace_back( std::make_shared( NewSegBaseAddr, SegSize ) );
+      MemSegs.emplace_back( new MemSegment( NewSegBaseAddr, SegSize ) );
       FreeSeg->setBaseAddr( FreeSeg->getBaseAddr() + SegSize );
       FreeSeg->setSize( oldFreeSegSize - SegSize );
       return NewSegBaseAddr;
@@ -381,7 +381,7 @@ uint64_t RevMem::AllocMem( const uint64_t& SegSize ) {
     else if( oldFreeSegSize == SegSize ) {
       // New data will start where the free segment started
       NewSegBaseAddr = FreeSeg->getBaseAddr();
-      MemSegs.emplace_back( std::make_shared( NewSegBaseAddr, SegSize ) );
+      MemSegs.emplace_back( new MemSegment( NewSegBaseAddr, SegSize ) );
       FreeMemSegs.erase( FreeMemSegs.begin() + ptrdiff_t( i ) );
       return NewSegBaseAddr;
     }
@@ -395,7 +395,7 @@ uint64_t RevMem::AllocMem( const uint64_t& SegSize ) {
   if( !NewSegBaseAddr ) {
     NewSegBaseAddr = heapend;
   }
-  MemSegs.emplace_back( std::make_shared( NewSegBaseAddr, SegSize ) );
+  MemSegs.emplace_back( new MemSegment( NewSegBaseAddr, SegSize ) );

   ExpandHeap( SegSize );
@@ -429,7 +429,7 @@ uint64_t RevMem::AllocMemAt( const uint64_t& BaseAddr, const uint64_t& SegSize )
         // Create New FreeSeg that fills the upper part of the old FreeSeg
         uint64_t NewFreeSegBaseAddr = BaseAddr + SegSize;
         size_t NewFreeSegSize = OldFreeSegTop - NewFreeSegBaseAddr;
-        FreeMemSegs.emplace_back( std::make_shared( NewFreeSegBaseAddr, NewFreeSegSize ) );
+        FreeMemSegs.emplace_back( new MemSegment( NewFreeSegBaseAddr, NewFreeSegSize ) );
       }

       // If were allocating at the beginning of a FreeSeg (That doesn't take up the whole segment)
@@ -477,7 +477,7 @@ uint64_t RevMem::AllocMemAt( const uint64_t& BaseAddr, const uint64_t& SegSize )
         continue;
       }
     }
-    MemSegs.emplace_back( std::make_shared( BaseAddr, SegSize ) );
+    MemSegs.emplace_back( new MemSegment( BaseAddr, SegSize ) );
   }

   return ret;
@@ -490,7 +490,7 @@ bool RevMem::FenceMem( uint32_t Hart ) {
   return true; // base RevMem support does nothing here
 }

-bool RevMem::AMOMem( uint32_t Hart, uint64_t Addr, size_t Len, void* Data, void* Target, const MemReq& req, RevFlag flags ) {
+bool RevMem::AMOMem( uint32_t Hart, uint64_t Addr, uint32_t Len, void* Data, void* Target, const MemReq& req, RevFlag flags ) {
 #ifdef _REV_DEBUG_
   std::cout << "AMO of " << Len << " Bytes Starting at 0x" << std::hex << Addr << std::dec << std::endl;
 #endif
@@ -501,9 +501,7 @@ bool RevMem::AMOMem( uint32_t Hart, uint64_t Addr, size_t Len, void* Data, void*
     uint64_t physAddr = CalcPhysAddr( pageNum, Addr );
     unsigned char* BaseMem = &physMem[physAddr];

-    ctrl->sendAMORequest(
-      Hart, Addr, reinterpret_cast( BaseMem ), Len, static_cast( Data ), Target, req, flags
-    );
+    ctrl->sendAMORequest( Hart, Addr, uint64_t( BaseMem ), Len, static_cast( Data ), Target, req, flags );
   } else {
     // process the request locally
     union {
@@ -542,7 +540,7 @@ bool RevMem::AMOMem( uint32_t Hart, uint64_t Addr, size_t Len, void* Data, void*
   return true;
 }

-bool RevMem::WriteMem( uint32_t Hart, uint64_t Addr, size_t Len, const void* Data, RevFlag flags ) {
+bool RevMem::WriteMem( uint32_t Hart, uint64_t Addr, uint32_t Len, const void* Data, RevFlag flags ) {
 #ifdef _REV_DEBUG_
   std::cout << "Writing " << Len << " Bytes Starting at 0x" << std::hex << Addr << std::dec << std::endl;
 #endif
@@ -595,7 +593,7 @@ std::tuple RevMem::AdjPageAddr( uint64_t Addr, uin
   return { remainder, physAddr, adjPhysAddr };
 }

-bool RevMem::ReadMem( uint32_t Hart, uint64_t Addr, size_t Len, void* Target, const MemReq& req, RevFlag flags ) {
+bool RevMem::ReadMem( uint32_t Hart, uint64_t Addr, uint32_t Len, void* Target, const MemReq& req, RevFlag flags ) {
 #ifdef _REV_DEBUG_
   std::cout << "NEW READMEM: Reading " << Len << " Bytes Starting at 0x" << std::hex << Addr << std::dec << std::endl;
 #endif
@@ -761,7 +759,7 @@ uint64_t RevMem::DeallocMem( uint64_t BaseAddr, uint64_t Size ) {
       // allocated data and is `Size` bytes long
       // - Before: |--------------------|--- AllocedSeg ---|
       // - After:  |---- NewFreeSeg ----|--- AllocedSeg ---|
-      FreeMemSegs.emplace_back( std::make_shared( BaseAddr, Size ) );
+      FreeMemSegs.emplace_back( new MemSegment( BaseAddr, Size ) );
     }
   }
@@ -779,7 +777,7 @@ void RevMem::InitHeap( const uint64_t& EndOfStaticData ) {
     );
   } else {
     // Mark heap as free
-    FreeMemSegs.emplace_back( std::make_shared( EndOfStaticData + 1, maxHeapSize ) );
+    FreeMemSegs.emplace_back( new MemSegment( EndOfStaticData + 1, maxHeapSize ) );

     heapend = EndOfStaticData + 1;
     heapstart = EndOfStaticData + 1;
diff --git a/src/RevMemCtrl.cc b/src/RevMemCtrl.cc
index 19b34e0b..6b9d2d88 100644
--- a/src/RevMemCtrl.cc
+++ b/src/RevMemCtrl.cc
@@ -343,9 +343,9 @@ void RevBasicMemCtrl::init( uint32_t phase ) {

   // query the caching infrastructure
   if( phase == 1 ) {
-    lineSize = memIface->getLineSize();
+    lineSize = uint32_t( memIface->getLineSize() );
     if( lineSize > 0 ) {
-      output->verbose( CALL_INFO, 5, 0, "Detected cache layers; default line size=%" PRIu64 "\n", lineSize );
+      output->verbose( CALL_INFO, 5, 0, "Detected cache layers; default line size=%" PRIu32 "\n", lineSize );
       hasCache = true;
     } else {
       output->verbose( CALL_INFO, 5, 0, "No cache detected; disabling caching\n" );
@@ -442,7 +442,7 @@ uint32_t RevBasicMemCtrl::getBaseCacheLineSize( uint64_t Addr, uint32_t Size ) {
   bool done = false;
   uint64_t BaseCacheAddr = Addr;
   while( !done ) {
-    if( ( BaseCacheAddr % (uint64_t) ( lineSize ) ) == 0 ) {
+    if( BaseCacheAddr % lineSize == 0 ) {
       done = true;
     } else {
       BaseCacheAddr -= 1;
@@ -462,11 +462,11 @@ uint32_t RevBasicMemCtrl::getBaseCacheLineSize( uint64_t Addr, uint32_t Size ) {
     } else {
       return lineSize;
     }
-  } else if( ( Addr + (uint64_t) ( Size ) ) <= ( BaseCacheAddr + (uint64_t) ( lineSize ) ) ) {
+  } else if( Addr + Size <= BaseCacheAddr + lineSize ) {
     // we stay within a single cache line
     return Size;
   } else {
-    return ( ( BaseCacheAddr + lineSize ) - Addr );
+    return uint32_t( BaseCacheAddr + lineSize - Addr );
   }
 }
@@ -583,7 +583,7 @@ bool RevBasicMemCtrl::buildCacheMemRqst( RevMemOp* op, bool& Success ) {
 #ifdef _REV_DEBUG_
   std::cout << "base cache line request size = " << BaseCacheLineSize << std::endl;
 #endif
-  uint32_t curByte = 0;
+  uint64_t curByte = 0;

   switch( op->getOp() ) {
   case MemOp::MemOpREAD:
@@ -618,11 +618,7 @@ bool RevBasicMemCtrl::buildCacheMemRqst( RevMemOp* op, bool& Success ) {
     break;
   case MemOp::MemOpFLUSH:
     rqst = new Interfaces::StandardMem::FlushAddr(
-      op->getAddr(),
-      (uint64_t) ( BaseCacheLineSize ),
-      op->getInv(),
-      (uint64_t) ( BaseCacheLineSize ),
-      (StandardMem::Request::flags_t) TmpFlags
+      op->getAddr(), BaseCacheLineSize, op->getInv(), BaseCacheLineSize, (StandardMem::Request::flags_t) TmpFlags
     );
     requests.push_back( rqst->getID() );
     outstanding[rqst->getID()] = op;
@@ -697,14 +693,14 @@ bool RevBasicMemCtrl::buildCacheMemRqst( RevMemOp* op, bool& Success ) {
   newBuf.clear();
   uint64_t newBase = op->getAddr() + BaseCacheLineSize;
   uint64_t bytesLeft = (uint64_t) ( op->getSize() ) - BaseCacheLineSize;
-  uint64_t newSize = 0;
+  uint32_t newSize = 0;

   for( uint32_t i = 1; i < NumLines; i++ ) {
     // setup the adjusted size of the request
     if( bytesLeft < lineSize ) {
-      newSize = bytesLeft;
+      newSize = uint32_t( bytesLeft );
     } else {
-      newSize = lineSize;
+      newSize = uint32_t( lineSize );
     }

     // clear the adjusted buffer
@@ -720,7 +716,7 @@ bool RevBasicMemCtrl::buildCacheMemRqst( RevMemOp* op, bool& Success ) {
       num_read++;
       break;
     case MemOp::MemOpWRITE:
-      for( uint32_t j = curByte; j < ( curByte + newSize ); j++ ) {
+      for( auto j = curByte; j < curByte + newSize; j++ ) {
         newBuf.push_back( tmpBuf[j] );
       }
       curByte += newSize;
@@ -749,7 +745,7 @@ bool RevBasicMemCtrl::buildCacheMemRqst( RevMemOp* op, bool& Success ) {
       num_readlock++;
       break;
    case MemOp::MemOpWRITEUNLOCK:
-      for( uint32_t j = curByte; j < ( curByte + newSize ); j++ ) {
+      for( auto j = curByte; j < curByte + newSize; j++ ) {
        newBuf.push_back( tmpBuf[j] );
       }
       curByte += newSize;
@@ -769,7 +765,7 @@ bool RevBasicMemCtrl::buildCacheMemRqst( RevMemOp* op, bool& Success ) {
       num_llsc++;
       break;
     case MemOp::MemOpSTORECOND:
-      for( uint32_t j = curByte; j < ( curByte + newSize ); j++ ) {
+      for( auto j = curByte; j < curByte + newSize; j++ ) {
        newBuf.push_back( tmpBuf[j] );
       }
       curByte += newSize;
@@ -830,11 +826,7 @@ bool RevBasicMemCtrl::buildRawMemRqst( RevMemOp* op, RevFlag TmpFlags ) {
     break;
   case MemOp::MemOpFLUSH:
     rqst = new Interfaces::StandardMem::FlushAddr(
-      op->getAddr(),
-      (uint64_t) ( op->getSize() ),
-      op->getInv(),
-      (uint64_t) ( op->getSize() ),
-      (StandardMem::Request::flags_t) TmpFlags
+      op->getAddr(), op->getSize(), op->getInv(), op->getSize(), (StandardMem::Request::flags_t) TmpFlags
     );
     requests.push_back( rqst->getID() );
     outstanding[rqst->getID()] = op;
diff --git a/src/RevOpts.cc b/src/RevOpts.cc
index 7179a9c3..840a61a0 100644
--- a/src/RevOpts.cc
+++ b/src/RevOpts.cc
@@ -1,7 +1,7 @@
 //
 // _RevOpts_cc_
 //
-// Copyright (C) 2017-2024 Tactical Computing Laboratories, LLC
+// Copyright (C) 2017-2025 Tactical Computing Laboratories, LLC
 // All Rights Reserved
 // contact@tactcomplabs.com
 //
@@ -35,7 +35,7 @@ bool RevOpts::InitPropertyMap( const std::vector& Opts, MAP& map ) {
     if( vstr.size() != 2 )
       return false;

-    auto Core = std::stoull( vstr[0], nullptr, 0 );
+    auto Core = uint32_t( std::stoull( vstr[0], nullptr, 0 ) );
     if( Core >= numCores )
       return false;
diff --git a/src/RevSysCalls.cc b/src/RevSysCalls.cc
index 9b172785..b8d7f39c 100644
--- a/src/RevSysCalls.cc
+++ b/src/RevSysCalls.cc
@@ -248,7 +248,7 @@ EcallStatus RevCore::ECALL_getcwd() {
   auto BufAddr = RegFile->GetX( RevReg::a0 );
   auto size = RegFile->GetX( RevReg::a1 );
   auto CWD = std::filesystem::current_path();
-  mem->WriteMem( HartToExecID, BufAddr, size, CWD.c_str() );
+  mem->WriteMem( HartToExecID, BufAddr, uint32_t( size ), CWD.c_str() );

   // Returns null-terminated string in buf
   // (no need to set x10 since it's already got BufAddr)
@@ -725,10 +725,10 @@ EcallStatus RevCore::ECALL_read() {
   std::vector TmpBuf( BufSize );

   // Do the read on the host
-  int rc = read( fd, &TmpBuf[0], BufSize );
+  auto rc = read( fd, &TmpBuf[0], BufSize );

   // Write that data to the buffer inside of Rev
-  mem->WriteMem( HartToExecID, BufAddr, BufSize, &TmpBuf[0] );
+  mem->WriteMem( HartToExecID, BufAddr, uint32_t( BufSize ), &TmpBuf[0] );

   RegFile->SetX( RevReg::a0, rc );
   return EcallStatus::SUCCESS;
@@ -754,7 +754,7 @@ EcallStatus RevCore::ECALL_write() {
   auto nleft = nbytes - EcallState.string.size();

   if( nleft == 0 && LSQueue->count( lsq_hash ) == 0 ) {
-    int rc = write( fd, EcallState.string.data(), EcallState.string.size() );
+    auto rc = write( fd, EcallState.string.data(), EcallState.string.size() );
     RegFile->SetX( RevReg::a0, rc );
     DependencyClear( HartToExecID, RevReg::a0, RevRegClass::RegGPR );
     return EcallStatus::SUCCESS;
@@ -1048,7 +1048,7 @@ EcallStatus RevCore::ECALL_exit() {
     HartToExecID,
     status
   );
-  exit( status );
+  exit( int( status ) );
   // return EcallStatus::SUCCESS;
 }
@@ -3190,11 +3190,11 @@ EcallStatus RevCore::ECALL_pthread_create() {
   output->verbose(
     CALL_INFO, 2, 0, "ECALL: pthread_create called by thread %" PRIu32 " on hart %" PRIu32 "\n", ActiveThreadID, HartToExecID
   );
-  uint64_t tidAddr = RegFile->GetX( RevReg::a0 );
   //uint64_t AttrPtr = RegFile->GetX(RevReg::a1);
-  uint64_t NewThreadPC = RegFile->GetX( RevReg::a2 );
-  uint64_t ArgPtr = RegFile->GetX( RevReg::a3 );
-  unsigned long int NewTID = GetNewThreadID();
+  uint64_t tidAddr = RegFile->GetX( RevReg::a0 );
+  uint64_t NewThreadPC = RegFile->GetX( RevReg::a2 );
+  uint64_t ArgPtr = RegFile->GetX( RevReg::a3 );
+  uint32_t NewTID = GetNewThreadID();
   CreateThread( NewTID, NewThreadPC, reinterpret_cast( ArgPtr ) );

   mem->WriteMem( HartToExecID, tidAddr, sizeof( NewTID ), &NewTID, RevFlag::F_NONE );
@@ -3214,7 +3214,7 @@ EcallStatus RevCore::ECALL_pthread_join() {
   // Set current thread to blocked
   std::unique_ptr BlockedThread = PopThreadFromHart( HartToExecID );
   BlockedThread->SetState( ThreadState::BLOCKED );
-  BlockedThread->SetWaitingToJoinTID( RegFile->GetX( RevReg::a0 ) );
+  BlockedThread->SetWaitingToJoinTID( RegFile->GetX( RevReg::a0 ) );

   // Signal to RevCPU this thread is has changed state
   AddThreadsThatChangedState( std::move( BlockedThread ) );
diff --git a/src/RevTracer.cc b/src/RevTracer.cc
index 9884529d..84f61ced 100644
--- a/src/RevTracer.cc
+++ b/src/RevTracer.cc
@@ -149,29 +149,29 @@ bool RevTracer::OutputOK() {
   return outputEnabled || events.f.trc_ctl;
 }

-void RevTracer::regRead( size_t r, uint64_t v ) {
-  traceRecs.emplace_back( TraceRec_t( RegRead, r, v ) );
+void RevTracer::regRead( uint64_t r, uint64_t v ) {
+  traceRecs.emplace_back( RegRead, r, v );
 }

-void RevTracer::regWrite( size_t r, uint64_t v ) {
-  traceRecs.emplace_back( TraceRec_t( RegWrite, r, v ) );
+void RevTracer::regWrite( uint64_t r, uint64_t v ) {
+  traceRecs.emplace_back( RegWrite, r, v );
 }

 void RevTracer::memWrite( uint64_t adr, size_t len, const void* data ) {
   // Only tracing the first 8 bytes. Retaining pointer in case we change that.
   uint64_t d = 0;
   memcpy( &d, data, len > sizeof( d ) ? sizeof( d ) : len );
-  traceRecs.emplace_back( TraceRec_t( MemStore, adr, len, d ) );
+  traceRecs.emplace_back( MemStore, adr, len, d );
 }

 void RevTracer::memRead( uint64_t adr, size_t len, void* data ) {
   uint64_t d = 0;
   memcpy( &d, data, len > sizeof( d ) ? sizeof( d ) : len );
-  traceRecs.emplace_back( TraceRec_t( MemLoad, adr, len, d ) );
+  traceRecs.emplace_back( MemLoad, adr, len, d );
 }

 void SST::RevCPU::RevTracer::memhSendRead( uint64_t adr, size_t len, uint16_t reg ) {
-  traceRecs.emplace_back( TraceRec_t( MemhSendLoad, adr, len, reg ) );
+  traceRecs.emplace_back( MemhSendLoad, adr, len, reg );
 }

 void RevTracer::memReadResponse( size_t len, void* data, const MemReq* req ) {
@@ -182,11 +182,11 @@ void RevTracer::memReadResponse( size_t len, void* data, const MemReq* req ) {
 }

 void RevTracer::pcWrite( uint32_t newpc ) {
-  traceRecs.emplace_back( TraceRec_t( PcWrite, newpc, 0, 0 ) );
+  traceRecs.emplace_back( PcWrite, newpc, 0, 0 );
 }

 void RevTracer::pcWrite( uint64_t newpc ) {
-  traceRecs.emplace_back( TraceRec_t( PcWrite, newpc, 0, 0 ) );
+  traceRecs.emplace_back( PcWrite, newpc, 0, 0 );
 }

 void RevTracer::Exec( size_t cycle, uint32_t id, uint32_t hart, uint32_t tid, const std::string& fallbackMnemonic ) {
@@ -354,16 +354,16 @@ void RevTracer::InstTraceReset() {
   instHeader.clear();
 }

-std::string RevTracer::fmt_reg( uint8_t r ) {
+std::string RevTracer::fmt_reg( uint64_t r ) {
   std::stringstream s;
 #ifdef REV_USE_SPIKE
   if( r < 32 ) {
     s << xpr_name[r]; // defined in disasm.h
     return s.str();
   }
-  s << "?" << (uint32_t) r;
+  s << "?" << r;
 #else
-  s << "x" << std::dec << (uint16_t) r; // Use SST::RevCPU::RevReg?
+  s << "x" << std::dec << r; // Use SST::RevCPU::RevReg?
 #endif
   return s.str();
 }
@@ -374,9 +374,9 @@ std::string RevTracer::fmt_data( size_t len, uint64_t d ) {
     return "";
   s << "0x" << std::hex << std::setfill( '0' );
   if( len > sizeof( d ) )
-    s << std::setw( sizeof( d ) * 2 ) << d << "..+" << std::dec << len - sizeof( d );
+    s << std::setw( int( sizeof( d ) * 2 ) ) << d << "..+" << std::dec << len - sizeof( d );
   else {
-    s << std::setw( len * 2 ) << ( d & ~( ~uint64_t{} << len * 8 ) );
+    s << std::setw( int( len * 2 ) ) << ( d & ~( ~uint64_t{} << len * 8 ) );
   }
   return s.str();
 }
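
Note on the ZeroExt hunk in common/include/RevCommon.h: with -Wconversion now added to CMAKE_CXX_FLAGS (and -Werror already in effect), the usual integer promotions turn narrow unsigned intermediates into int, so the implicit conversion back to the narrow unsigned type is diagnosed unless each intermediate is cast explicitly. The sketch below is illustrative only; the template header and the argument to std::make_unsigned_t are assumed, since the angle-bracket text does not survive in the quoted diff.

    #include <cstdint>
    #include <type_traits>

    // Illustrative sketch of the zero-extension helper under -Wconversion.
    // Each intermediate is cast back to UT because ~, << and & promote narrow
    // unsigned operands to int, and the implicit int -> UT conversion would
    // otherwise be rejected under -Wconversion -Werror.
    template <typename T>
    constexpr auto ZeroExt( T val, int bits ) {
      using UT = std::make_unsigned_t<T>;
      return UT( UT( val ) & UT( ~( UT( ~UT{} ) << bits ) ) );
    }

    static_assert( ZeroExt( int8_t{ -1 }, 4 ) == 0x0f, "keep only the low 4 bits" );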