Skip to content

Commit

Permalink
Memblock: VsegmentUnit's tlb request delay 1 cycle & latch vaddr
Browse files Browse the repository at this point in the history
This commit latches vaddr for merging DCache's data, and delays the TLB request by 1 cycle for better timing.
  • Loading branch information
weidingliu authored and good-circle committed Jun 12, 2024
1 parent d2c3e6d commit 633fae6
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 7 deletions.
7 changes: 4 additions & 3 deletions src/main/scala/xiangshan/backend/MemBlock.scala
Original file line number Diff line number Diff line change
Expand Up @@ -739,11 +739,12 @@ class MemBlockImp(outer: MemBlock) extends LazyModuleImp(outer)
// dtlb
loadUnits(i).io.tlb <> dtlb_reqs.take(LduCnt)(i)
if(i == 0 ){ // port 0 assign to vsegmentUnit
dtlb_reqs.take(LduCnt)(i).req.valid := loadUnits(i).io.tlb.req.valid || vSegmentUnit.io.dtlb.req.valid
val vsegmentDtlbReqValid = vSegmentUnit.io.dtlb.req.valid // segment tlb resquest need to delay 1 cycle
dtlb_reqs.take(LduCnt)(i).req.valid := loadUnits(i).io.tlb.req.valid || RegNext(vsegmentDtlbReqValid)
vSegmentUnit.io.dtlb.req.ready := dtlb_reqs.take(LduCnt)(i).req.ready
dtlb_reqs.take(LduCnt)(i).req.bits := Mux1H(Seq(
vSegmentUnit.io.dtlb.req.valid -> vSegmentUnit.io.dtlb.req.bits,
loadUnits(i).io.tlb.req.valid -> loadUnits(i).io.tlb.req.bits
RegNext(vsegmentDtlbReqValid) -> RegEnable(vSegmentUnit.io.dtlb.req.bits, vsegmentDtlbReqValid),
loadUnits(i).io.tlb.req.valid -> loadUnits(i).io.tlb.req.bits
))
}
// pmp
Expand Down
14 changes: 10 additions & 4 deletions src/main/scala/xiangshan/mem/vector/VSegmentUnit.scala
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@ class VSegmentUnit (implicit p: Parameters) extends VLSUModule

val maxSegIdx = instMicroOp.vl - 1.U
val maxNfields = instMicroOp.uop.vpu.nf
val latchVaddr = RegInit(0.U(VAddrBits.W))

XSError((segmentIdx > maxSegIdx) && instMicroOpValid, s"segmentIdx > vl, something error!\n")
XSError((fieldIdx > maxNfields) && instMicroOpValid, s"fieldIdx > nfields, something error!\n")
Expand Down Expand Up @@ -276,6 +277,11 @@ class VSegmentUnit (implicit p: Parameters) extends VLSUModule
indexStride,
segmentOffset)
val vaddr = baseVaddr + (fieldIdx << alignedType).asUInt + realSegmentOffset

//latch vaddr
when(state === s_tlb_req){
latchVaddr := vaddr
}
/**
* tlb req and tlb resq
*/
Expand Down Expand Up @@ -349,7 +355,7 @@ class VSegmentUnit (implicit p: Parameters) extends VLSUModule
/**
* merge data for load
*/
val cacheData = LookupTree(vaddr(3,0), List(
val cacheData = LookupTree(latchVaddr(3,0), List(
"b0000".U -> io.rdcache.resp.bits.data_delayed(63, 0),
"b0001".U -> io.rdcache.resp.bits.data_delayed(63, 8),
"b0010".U -> io.rdcache.resp.bits.data_delayed(63, 16),
Expand Down Expand Up @@ -387,15 +393,15 @@ class VSegmentUnit (implicit p: Parameters) extends VLSUModule
alignedType = alignedType
)
val flowData = genVWdata(splitData, alignedType) // TODO: connect vstd, pass vector data
val wmask = genVWmask(vaddr, alignedType(1, 0)) & Fill(VLENB, segmentActive)
val wmask = genVWmask(latchVaddr, alignedType(1, 0)) & Fill(VLENB, segmentActive)

/**
* rdcache req, write request don't need to query dcache, because we write element to sbuffer
*/
io.rdcache.req := DontCare
io.rdcache.req.valid := state === s_cache_req && FuType.isVLoad(fuType)
io.rdcache.req.bits.cmd := MemoryOpConstants.M_XRD
io.rdcache.req.bits.vaddr := vaddr
io.rdcache.req.bits.vaddr := latchVaddr
io.rdcache.req.bits.mask := mask
io.rdcache.req.bits.data := flowData
io.rdcache.pf_source := LOAD_SOURCE.U
Expand Down Expand Up @@ -427,7 +433,7 @@ class VSegmentUnit (implicit p: Parameters) extends VLSUModule
io.sbuffer.bits.vecValid := state === s_send_data && segmentActive
io.sbuffer.bits.mask := wmask
io.sbuffer.bits.data := flowData
io.sbuffer.bits.vaddr := vaddr
io.sbuffer.bits.vaddr := latchVaddr
io.sbuffer.bits.cmd := MemoryOpConstants.M_XWR
io.sbuffer.bits.id := DontCare
io.sbuffer.bits.addr := instMicroOp.paddr
Expand Down

0 comments on commit 633fae6

Please sign in to comment.