From e0a47c715b94fedb9e082a16684d67ea45ee2e8f Mon Sep 17 00:00:00 2001 From: klin02 Date: Sat, 15 Nov 2025 16:32:29 +0800 Subject: [PATCH 01/14] submodule(difftest): refactor DiffTop, PhyReg, Refill, PGO_BOLT Latest difftest introduces following notable change: 1. CPU as submodule of Difftest Difftest now allows instantiating the CPU as a submodule and exposing additional IOs. LazyModules must explicitly provide clock and reset signals. (PRs: difftest#713, difftest#757, difftest#758) 2. Replace ArchReg + WriteBack with PhyReg + RenameTable The old embedded-into-CPU Difftest logic is removed. The new PhyReg + RenameTable interface eliminates additional phy-to-arch logic and multi- read-port overhead inside the CPU and removes interface of ArchReg/WriteBack. (PRs: difftest#714, difftest#754) 3. RefillEvent interface modification Refill checking now support masked refill-check to support low-power read. (PRs: difftest#719, difftest#723) 4. PGO_BOLT option for emu compilation A new PGO_BOLT mode is provided to accelerate EMU compilation under profile-guided optimization. (PR: difftest#756, difftest#761) --- difftest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/difftest b/difftest index 3cb2104b27d..35447e257af 160000 --- a/difftest +++ b/difftest @@ -1 +1 @@ -Subproject commit 3cb2104b27d12b8da5888075544db88613eda714 +Subproject commit 35447e257af29578002d14b4926efb5f3bd3efb7 From 3567d5da13cb4654e4c03ef1c8e80eafa64e7a2c Mon Sep 17 00:00:00 2001 From: klin02 Date: Fri, 7 Nov 2025 16:51:05 +0800 Subject: [PATCH 02/14] feat(top): adapt difftest interfaces for FPGA difftest This change moves XSDiffTop instantiation into the Difftest for fpgaDiff. DiffTop will generated with a SimTop wrapper, where DiffTest outputs, together with XSTop's IO ports, AXI4 buses of memory/peripheral/DMA will be exposed for external connection. Temporarily, we still expose XSNoCDiffTop with appended Difftest IOs, and generate XSTop and XSNoCTop with internal Difftest DPI-C, so as to avoid extra TopIOs and mixed generated files. We will try to refactor them like DiffTop later.(i.e. generate with a seperated Difftest) --- src/main/scala/top/Top.scala | 35 ++++++++++++++++++++++++--------- src/test/scala/top/SimTop.scala | 3 ++- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/src/main/scala/top/Top.scala b/src/main/scala/top/Top.scala index 898f6075613..68f2d0fce81 100644 --- a/src/main/scala/top/Top.scala +++ b/src/main/scala/top/Top.scala @@ -20,7 +20,7 @@ package top import chisel3._ import chisel3.util._ import chisel3.experimental.dataview._ -import difftest.DifftestModule +import difftest.{DifftestModule, DifftestTopIO, HasDiffTestInterfaces} import xiangshan._ import utils._ import utility._ @@ -43,10 +43,9 @@ import freechips.rocketchip.interrupts._ import freechips.rocketchip.amba.axi4._ import freechips.rocketchip.jtag.JTAGIO import chisel3.experimental.annotate -import scala.collection.mutable.{Map} -import difftest.common.DifftestWiring -import difftest.util.Profile +import scala.collection.mutable.Map +import difftest.gateway.Gateway abstract class BaseXSSoc()(implicit p: Parameters) extends LazyModule with HasSoCParameter @@ -473,11 +472,24 @@ class XSTop()(implicit p: Parameters) extends BaseXSSoc() class XSTileDiffTop(implicit p: Parameters) extends XSTop { //TODO: need to keep the same module name as XSNoCDiffTop override lazy val desiredName: String = "XSTop" - class XSTileDiffTopImp(wrapper: XSTop) extends XSTopImp(wrapper) { - DifftestWiring.createAndConnectExtraIOs() - Profile.generateJson("XiangShan") - } + class XSTileDiffTopImp(wrapper: XSTop) extends XSTopImp(wrapper) with HasDiffTestInterfaces { + override def cpuName: Option[String] = Some("XiangShan") + override protected def implicitClock: Clock = io.clock + override protected def implicitReset: Reset = io.reset + override def connectTopIOsWithName(difftest: DifftestTopIO): Seq[(Data, String)] = { + val otherIOs = Seq(wrapper.nmi.getWrappedValue) ++ + dma.toSeq ++ + Seq( + memory, + peripheral, + ) + otherIOs.map(d => (d, d.instanceName)) ++ + io.elements.toSeq.collect { case (name, elem) + if !Seq("clock", "reset").contains(name) => (elem, "io_" + name) + } + } + } override lazy val module = new XSTileDiffTopImp(this) } @@ -497,8 +509,13 @@ object TopMain extends App { Generator.execute(firrtlOpts, soc.module, firtoolOpts) } else if (config(SoCParamsKey).UseXSTileDiffTop) { val soc = DisableMonitors(p => LazyModule(new XSTileDiffTop()(p)))(config) - Generator.execute(firrtlOpts, soc.module, firtoolOpts) + Generator.execute(firrtlOpts, DifftestModule.top(soc.module), firtoolOpts) } else { + if (enableDifftest) { + // TODO: Temporarily force XSTop to use internal DPI-C; will later split Top and Difftest like DiffTop + Gateway.setConfig("U") + } + val soc = if (config(SoCParamsKey).UseXSNoCTop) DisableMonitors(p => LazyModule(new XSNoCTop()(p)))(config) else diff --git a/src/test/scala/top/SimTop.scala b/src/test/scala/top/SimTop.scala index 5a94fe948a0..def801ebf33 100644 --- a/src/test/scala/top/SimTop.scala +++ b/src/test/scala/top/SimTop.scala @@ -93,7 +93,7 @@ class XiangShanSim(implicit p: Parameters) extends Module with HasDiffTestInterf val uart = IO(new UARTIO) simMMIO.io.uart <> uart - override def connectTopIOs(difftest: DifftestTopIO): Unit = { + override def connectTopIOs(difftest: DifftestTopIO): Seq[Data] = { difftest.uart <> uart val hasPerf = !debugOpts.FPGAPlatform && debugOpts.EnablePerfDebug @@ -105,6 +105,7 @@ class XiangShanSim(implicit p: Parameters) extends Module with HasDiffTestInterf val dump = if (hasPerf) WireDefault(difftest.perfCtrl.dump) else WireDefault(false.B) XSLog.collect(timer, logEnable, clean, dump) + Seq.empty } } From 389d72145b737221d8fb85530fccbe4f58768346 Mon Sep 17 00:00:00 2001 From: klin02 Date: Sat, 15 Nov 2025 16:24:55 +0800 Subject: [PATCH 03/14] feat(difftest): replace ArchReg and Writeback with PhyRegState This change refactors Difftest interfaces, replacing the previous ArchReg and WriteBack with PhyRegState and ArchRenameTable. By default, Difftest still extracts PhyReg and RenameTable into ArchReg on the hardware side, so that the extra multi-read area is accounted for within Difftest. When acceleration is enabled, this extraction is deferred to the software side, eliminating the extra hardware area overhead. Since 128-bit ArchVecReg are now treated as two 64-bit registers in Difftest, we add corresponding register splitting in XiangShan. The address indexs (InstrCommit.otherwpdest and RenameTable) are also converted from `index` to `2*index` and `2*index + 1` for reg splitting. Note XiangShan separates V0 and Vf register files. When mering the two regfiles and renameTable for Difftest, the indexs of Vf must add offset of V0RegSize to ensure correct indexing. --- .../scala/xiangshan/backend/Backend.scala | 4 - .../scala/xiangshan/backend/CtrlBlock.scala | 9 +- src/main/scala/xiangshan/backend/Region.scala | 8 -- .../xiangshan/backend/datapath/DataPath.scala | 117 +++++++----------- .../backend/datapath/WbArbiter.scala | 44 ------- .../xiangshan/backend/regfile/Regfile.scala | 69 ++++++++--- .../backend/rename/RenameTable.scala | 33 +++-- .../scala/xiangshan/backend/rob/Rob.scala | 15 ++- 8 files changed, 134 insertions(+), 165 deletions(-) diff --git a/src/main/scala/xiangshan/backend/Backend.scala b/src/main/scala/xiangshan/backend/Backend.scala index c26a20f7d29..b8849eb5e24 100644 --- a/src/main/scala/xiangshan/backend/Backend.scala +++ b/src/main/scala/xiangshan/backend/Backend.scala @@ -367,10 +367,6 @@ class BackendInlinedImp(override val wrapper: BackendInlined)(implicit p: Parame // for fpIQ write int regfile arbiter intRegion.io.fromFpIQ.get <> fpRegion.io.fpIQOut.get - intRegion.io.diffIntRat.foreach(_ := ctrlBlock.io.diff_int_rat.get) - fpRegion.io.diffFpRat.foreach(_ := ctrlBlock.io.diff_fp_rat.get) - vecRegion.io.diffVecRat.foreach(_ := ctrlBlock.io.diff_vec_rat.get) - vecRegion.io.diffV0Rat.foreach(_ := ctrlBlock.io.diff_v0_rat.get) vecRegion.io.diffVlRat.foreach(_ := ctrlBlock.io.diff_vl_rat.get) vecRegion.io.fromVecExcpMod.get.r := vecExcpMod.o.toVPRF.r vecRegion.io.fromVecExcpMod.get.w := vecExcpMod.o.toVPRF.w diff --git a/src/main/scala/xiangshan/backend/CtrlBlock.scala b/src/main/scala/xiangshan/backend/CtrlBlock.scala index a77818c8bc8..6a445d55910 100644 --- a/src/main/scala/xiangshan/backend/CtrlBlock.scala +++ b/src/main/scala/xiangshan/backend/CtrlBlock.scala @@ -659,6 +659,7 @@ class CtrlBlockImp( memCtrl.io.mdpFlodPcVec := mdpFlodPcVec memCtrl.io.dispatchLFSTio <> dispatch.io.lfst + rat.io.hartId := io.fromTop.hartId rat.io.redirect := s1_s3_redirect.valid rat.io.rabCommits := rob.io.rabCommits rat.io.diffCommits.foreach(_ := rob.io.diffCommits.get) @@ -819,10 +820,6 @@ class CtrlBlockImp( // rob to mem block io.robio.lsq <> rob.io.lsq - io.diff_int_rat.foreach(_ := rat.io.diff_int_rat.get) - io.diff_fp_rat .foreach(_ := rat.io.diff_fp_rat.get) - io.diff_vec_rat.foreach(_ := rat.io.diff_vec_rat.get) - io.diff_v0_rat .foreach(_ := rat.io.diff_v0_rat.get) io.diff_vl_rat .foreach(_ := rat.io.diff_vl_rat.get) rob.io.debug_ls := io.robio.debug_ls @@ -1016,10 +1013,6 @@ class CtrlBlockIO()(implicit p: Parameters, params: BackendParams) extends XSBun val lsdqFull = Bool() } }) - val diff_int_rat = if (params.basicDebugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None - val diff_fp_rat = if (params.basicDebugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None - val diff_vec_rat = if (params.basicDebugEn) Some(Vec(31, Output(UInt(PhyRegIdxWidth.W)))) else None - val diff_v0_rat = if (params.basicDebugEn) Some(Vec(1, Output(UInt(PhyRegIdxWidth.W)))) else None val diff_vl_rat = if (params.basicDebugEn) Some(Vec(1, Output(UInt(PhyRegIdxWidth.W)))) else None val sqCanAccept = Input(Bool()) diff --git a/src/main/scala/xiangshan/backend/Region.scala b/src/main/scala/xiangshan/backend/Region.scala index 9104e9df000..5b3162727a2 100644 --- a/src/main/scala/xiangshan/backend/Region.scala +++ b/src/main/scala/xiangshan/backend/Region.scala @@ -434,7 +434,6 @@ class Region(val params: SchdBlockParams)(implicit p: Parameters) extends XSModu dataPath.io.fromIntWb.get := wbDataPath.io.toIntPreg dataPath.io.fromPcTargetMem <> io.fromPcTargetMem.get dataPath.io.fromBypassNetwork := bypassNetwork.io.toDataPath - dataPath.io.diffIntRat.foreach(_ := io.diffIntRat.get) bypassNetwork.io.fromDataPath.int <> dataPath.io.toIntExu bypassNetwork.io.fromDataPath.immInfo := dataPath.io.og1ImmInfo @@ -550,7 +549,6 @@ class Region(val params: SchdBlockParams)(implicit p: Parameters) extends XSModu io.fpToIntIQResp.get := dataPath.io.toIntIQ dataPath.io.fromFpWb.get := wbDataPath.io.toFpPreg dataPath.io.fromBypassNetwork <> bypassNetwork.io.toDataPath - dataPath.io.diffFpRat.foreach(_ := io.diffFpRat.get) io.toFpPreg := wbDataPath.io.toFpPreg bypassNetwork.io.fromDataPath.fp <> dataPath.io.toFpExu @@ -632,8 +630,6 @@ class Region(val params: SchdBlockParams)(implicit p: Parameters) extends XSModu dataPath.io.fromV0Wb.get := wbDataPath.io.toV0Preg dataPath.io.fromVlWb.get := wbDataPath.io.toVlPreg dataPath.io.fromBypassNetwork <> bypassNetwork.io.toDataPath - dataPath.io.diffVecRat.foreach(_ := io.diffVecRat.get) - dataPath.io.diffV0Rat.foreach(_ := io.diffV0Rat.get) dataPath.io.diffVlRat.foreach(_ := io.diffVlRat.get) dataPath.io.fromVecExcpMod.foreach(_ := io.fromVecExcpMod.get) @@ -786,10 +782,6 @@ class RegionIO(val params: SchdBlockParams)(implicit p: Parameters) extends XSBu val flush = Flipped(ValidIO(new Redirect)) val ldCancel = Vec(backendParams.LduCnt, Flipped(new LoadCancelIO)) val fromPcTargetMem = Option.when(params.isIntSchd)(Flipped(new PcToDataPathIO(backendParams))) - val diffIntRat = Option.when(params.isIntSchd)(Input(Vec(32, UInt(params.pregIdxWidth.W)))) - val diffFpRat = Option.when(params.isFpSchd)(Input(Vec(32, UInt(params.pregIdxWidth.W)))) - val diffVecRat = Option.when(params.isVecSchd)(Input(Vec(31, UInt(params.pregIdxWidth.W)))) - val diffV0Rat = Option.when(params.isVecSchd)(Input(Vec(1, UInt(log2Up(V0PhyRegs).W)))) val diffVlRat = Option.when(params.isVecSchd)(Input(Vec(1, UInt(log2Up(VlPhyRegs).W)))) val diffVl = Option.when(params.isVecSchd)(Output(UInt(VlData().dataWidth.W))) val vlWriteBackInfoIn = new Bundle { diff --git a/src/main/scala/xiangshan/backend/datapath/DataPath.scala b/src/main/scala/xiangshan/backend/datapath/DataPath.scala index 117f588c4dd..9a9d0613ec5 100644 --- a/src/main/scala/xiangshan/backend/datapath/DataPath.scala +++ b/src/main/scala/xiangshan/backend/datapath/DataPath.scala @@ -3,7 +3,7 @@ package xiangshan.backend.datapath import org.chipsalliance.cde.config.Parameters import chisel3._ import chisel3.util._ -import difftest.{DiffArchFpRegState, DiffArchIntRegState, DiffArchVecRegState, DifftestModule} +import difftest._ import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp} import utility._ import utils.SeqUtils._ @@ -225,38 +225,29 @@ class DataPath(implicit p: Parameters, params: BackendParams, param: SchdBlockPa io.fromPcTargetMem.fromDataPathFtqPtr := pcReadFtqPtr io.fromPcTargetMem.fromDataPathFtqOffset := pcReadFtqOffset - private val intDiffRead: Option[(Vec[UInt], Vec[UInt])] = - OptionWrapper(backendParams.basicDebugEn && param.isIntSchd, (Wire(Vec(32, UInt(intSchdParams.pregIdxWidth.W))), Wire(Vec(32, UInt(XLEN.W))))) - private val fpDiffRead: Option[(Vec[UInt], Vec[UInt])] = - OptionWrapper(backendParams.basicDebugEn && param.isFpSchd, (Wire(Vec(32, UInt(fpSchdParams.pregIdxWidth.W))), Wire(Vec(32, UInt(XLEN.W))))) - private val vfDiffRead: Option[(Vec[UInt], Vec[UInt])] = - OptionWrapper(backendParams.basicDebugEn && param.isVecSchd, (Wire(Vec(31, UInt(vecSchdParams.pregIdxWidth.W))), Wire(Vec(31, UInt(VLEN.W))))) - private val v0DiffRead: Option[(Vec[UInt], Vec[UInt])] = - OptionWrapper(backendParams.basicDebugEn && param.isVecSchd, (Wire(Vec(1, UInt(log2Up(V0PhyRegs).W))), Wire(Vec(1, UInt(V0Data().dataWidth.W))))) + private val intDiffReadData: Option[Vec[UInt]] = + OptionWrapper(backendParams.basicDebugEn && param.isIntSchd, Wire(Vec(intSchdParams.numPregs, UInt(XLEN.W)))) + private val fpDiffReadData: Option[Vec[UInt]] = + OptionWrapper(backendParams.basicDebugEn && param.isFpSchd, Wire(Vec(fpSchdParams.numPregs, UInt(XLEN.W)))) + private val vfDiffReadData: Option[Vec[UInt]] = + OptionWrapper(backendParams.basicDebugEn && param.isVecSchd, Wire(Vec(vecSchdParams.numPregs, UInt(VLEN.W)))) + private val v0DiffReadData: Option[Vec[UInt]] = + OptionWrapper(backendParams.basicDebugEn && param.isVecSchd, Wire(Vec(V0PhyRegs, UInt(V0Data().dataWidth.W)))) private val vlDiffRead: Option[(Vec[UInt], Vec[UInt])] = OptionWrapper(backendParams.basicDebugEn && param.isVecSchd, (Wire(Vec(1, UInt(log2Up(VlPhyRegs).W))), Wire(Vec(1, UInt(VlData().dataWidth.W))))) - private val fpDiffReadData: Option[Vec[UInt]] = - OptionWrapper(backendParams.basicDebugEn && param.isFpSchd, Wire(Vec(32, UInt(XLEN.W)))) + private val vecDiffNumPregs = 2 * (V0PhyRegs + vecSchdParams.numPregs) private val vecDiffReadData: Option[Vec[UInt]] = - OptionWrapper(backendParams.basicDebugEn && param.isVecSchd, Wire(Vec(64, UInt(64.W)))) // v0 = Cat(Vec(1), Vec(0)) + OptionWrapper(backendParams.basicDebugEn && param.isVecSchd, Wire(Vec(vecDiffNumPregs, UInt(64.W)))) // v0 = Cat(Vec(1), Vec(0)) private val vlDiffReadData: Option[UInt] = OptionWrapper(backendParams.basicDebugEn && param.isVecSchd, Wire(UInt(VlData().dataWidth.W))) - - fpDiffReadData.foreach(_ := fpDiffRead - .get._2 - .slice(0, 32) - .map(_(63, 0)) - ) // fp only used [63, 0] - vecDiffReadData.foreach(_ := - v0DiffRead - .get._2 - .slice(0, 1) - .map(x => Seq(x(63, 0), x(127, 64))).flatten ++ - vfDiffRead - .get._2 - .slice(0, 31) + vecDiffReadData.foreach(_ := + v0DiffReadData + .get + .map(x => Seq(x(63, 0), x(127, 64))).flatten ++ + vfDiffReadData + .get .map(x => Seq(x(63, 0), x(127, 64))).flatten ) vlDiffReadData.foreach(_ := vlDiffRead @@ -275,8 +266,7 @@ class DataPath(implicit p: Parameters, params: BackendParams, param: SchdBlockPa val intRfWdata = Wire(Vec(io.fromIntWb.get.length, UInt(intSchdParams.rfDataWidth.W))) IntRegFileSplit("IntRegFile", intSchdParams.numPregs, splitNum, intRfRaddr, intRfRdata.get, intRfWen, intRfWaddr, intRfWdata, bankNum = 1, - debugReadAddr = intDiffRead.map(_._1), - debugReadData = intDiffRead.map(_._2) + debugAllRData = intDiffReadData ) intRfWaddr := io.fromIntWb.get.map(x => RegEnable(x.pdest, x.wen)).toSeq intRfWdata := io.fromIntWb.get.map(x => RegEnable(x.data, x.wen)).toSeq @@ -346,6 +336,12 @@ class DataPath(implicit p: Parameters, params: BackendParams, param: SchdBlockPa XSPerfAccumulate("IntRegCache24Hit", PopCount(int_regcache_24_hit_vec)) XSPerfAccumulate("IntRegCache32Hit", PopCount(int_regcache_32_hit_vec)) XSPerfHistogram("IntRegCache48Hit_hist", PopCount(int_regcache_48_hit_vec), true.B, 0, 16, 2) + if (env.AlwaysBasicDiff || env.EnableDifftest) { + // Delay of PhyRegFile should be same as RenameTable + val difftest = DifftestModule(new DiffPhyIntRegState(intSchdParams.numPregs), delay = 2) + difftest.coreid := io.hartId + difftest.value := intDiffReadData.get + } } else if (param.isFpSchd) { val fpRfRaddr = Wire(Vec(params.numPregRd(FpData()), UInt(fpSchdParams.pregIdxWidth.W))) @@ -354,8 +350,7 @@ class DataPath(implicit p: Parameters, params: BackendParams, param: SchdBlockPa val fpRfWdata = Wire(Vec(io.fromFpWb.get.length, UInt(fpSchdParams.rfDataWidth.W))) FpRegFileSplit("FpRegFile", fpSchdParams.numPregs, splitNum, fpRfRaddr, fpRfRdata.get, fpRfWen, fpRfWaddr, fpRfWdata, bankNum = 1, - debugReadAddr = fpDiffRead.map(_._1), - debugReadData = fpDiffRead.map(_._2) + debugAllRData = fpDiffReadData ) fpRfWaddr := io.fromFpWb.get.map(x => RegEnable(x.pdest, x.wen)).toSeq fpRfWdata := io.fromFpWb.get.map(x => RegEnable(x.data, x.wen)).toSeq @@ -366,6 +361,11 @@ class DataPath(implicit p: Parameters, params: BackendParams, param: SchdBlockPa else fpRfRaddr(portIdx) := 0.U } + if (env.AlwaysBasicDiff || env.EnableDifftest) { + val difftest = DifftestModule(new DiffPhyFpRegState(fpSchdParams.numPregs), delay = 2) + difftest.coreid := io.hartId + difftest.value := fpDiffReadData.get + } } else { val vfRfRaddr = Wire(Vec(params.numPregRd(VecData()), UInt(vecSchdParams.pregIdxWidth.W))) @@ -382,12 +382,10 @@ class DataPath(implicit p: Parameters, params: BackendParams, param: SchdBlockPa val vlRfWaddr = Wire(Vec(io.fromVlWb.get.length, UInt(log2Up(VlPhyRegs).W))) val vlRfWdata = Wire(Vec(io.fromVlWb.get.length, UInt(VlData().dataWidth.W))) VfRegFile("VfRegFile", vecSchdParams.numPregs, splitNum, vfRfRaddr, vfRfRdata.get, vfRfWen, vfRfWaddr, vfRfWdata, - debugReadAddr = vfDiffRead.map(_._1), - debugReadData = vfDiffRead.map(_._2) + debugAllRData = vfDiffReadData ) VfRegFile("V0RegFile", V0PhyRegs, v0RfSplitNum, v0RfRaddr, v0RfRdata.get, v0RfWen, v0RfWaddr, v0RfWdata, - debugReadAddr = v0DiffRead.map(_._1), - debugReadData = v0DiffRead.map(_._2) + debugAllRData = v0DiffReadData ) FpRegFile("VlRegFile", VlPhyRegs, vlRfRaddr, vlRfRdata.get, vlRfWen, vlRfWaddr, vlRfWdata, bankNum = 1, @@ -466,31 +464,23 @@ class DataPath(implicit p: Parameters, params: BackendParams, param: SchdBlockPa if (i % maxMergeNumPerCycle == 0) v0RfRdata.get(v0RdPortsIter.next()) else 0.U, ) } - } - - intDiffRead.foreach { case (addr, _) => - addr := io.diffIntRat.get - } - fpDiffRead.foreach { case (addr, _) => - addr := io.diffFpRat.get + if (env.AlwaysBasicDiff || env.EnableDifftest) { + val difftest = DifftestModule(new DiffPhyVecRegState(vecDiffNumPregs), delay = 2) + difftest.coreid := io.hartId + difftest.value := vecDiffReadData.get + } } - vfDiffRead.foreach { case (addr, _) => - addr := io.diffVecRat.get - } - v0DiffRead.foreach { case (addr, _) => - addr := io.diffV0Rat.get - } vlDiffRead.foreach { case (addr, _) => addr := io.diffVlRat.get } - println(s"[${param.getName}DataPath] " + - s"has intDiffRead: ${intDiffRead.nonEmpty}, " + - s"has fpDiffRead: ${fpDiffRead.nonEmpty}, " + - s"has vecDiffRead: ${vfDiffRead.nonEmpty}, " + - s"has v0DiffRead: ${v0DiffRead.nonEmpty}, " + + println(s"${param.getName}[DataPath] " + + s"has intDiffRead: ${intDiffReadData.nonEmpty}, " + + s"has fpDiffRead: ${fpDiffReadData.nonEmpty}, " + + s"has vecDiffRead: ${vfDiffReadData.nonEmpty}, " + + s"has v0DiffRead: ${v0DiffReadData.nonEmpty}, " + s"has vlDiffRead: ${vlDiffRead.nonEmpty}") val s1_addrOHs = Reg(MixedVec( @@ -733,25 +723,6 @@ class DataPath(implicit p: Parameters, params: BackendParams, param: SchdBlockPa } } - if (env.AlwaysBasicDiff || env.EnableDifftest) { - val delayedCnt = 2 - if (param.isIntSchd) { - val difftestArchIntRegState = DifftestModule(new DiffArchIntRegState, delay = delayedCnt) - difftestArchIntRegState.coreid := io.hartId - difftestArchIntRegState.value := intDiffRead.get._2 - } - if (param.isFpSchd) { - val difftestArchFpRegState = DifftestModule(new DiffArchFpRegState, delay = delayedCnt) - difftestArchFpRegState.coreid := io.hartId - difftestArchFpRegState.value := fpDiffReadData.get - } - if (param.isVecSchd) { - val difftestArchVecRegState = DifftestModule(new DiffArchVecRegState, delay = delayedCnt) - difftestArchVecRegState.coreid := io.hartId - difftestArchVecRegState.value := vecDiffReadData.get - } - } - XSPerfHistogram(s"IntRegFileRead_hist", PopCount(intRFReadArbiter.io.in.flatten.flatten.map(_.valid)), true.B, 0, 20, 1) XSPerfHistogram(s"FpRegFileRead_hist", PopCount(fpRFReadArbiter.io.in.flatten.flatten.map(_.valid)), true.B, 0, 20, 1) XSPerfHistogram(s"VfRegFileRead_hist", PopCount(vfRFReadArbiter.io.in.flatten.flatten.map(_.valid)), true.B, 0, 20, 1) @@ -892,10 +863,6 @@ class DataPathIO()(implicit p: Parameters, params: BackendParams, param: SchdBlo Output(UInt(RegCacheIdxWidth.W)) ) - val diffIntRat = if (params.basicDebugEn && param.isIntSchd) Some(Input(Vec(32, UInt(intSchdParams.pregIdxWidth.W)))) else None - val diffFpRat = if (params.basicDebugEn && param.isFpSchd) Some(Input(Vec(32, UInt(fpSchdParams.pregIdxWidth.W)))) else None - val diffVecRat = if (params.basicDebugEn && param.isVecSchd) Some(Input(Vec(31, UInt(vecSchdParams.pregIdxWidth.W)))) else None - val diffV0Rat = if (params.basicDebugEn && param.isVecSchd) Some(Input(Vec(1, UInt(log2Up(V0PhyRegs).W)))) else None val diffVlRat = if (params.basicDebugEn && param.isVecSchd) Some(Input(Vec(1, UInt(log2Up(VlPhyRegs).W)))) else None val diffVl = if (params.basicDebugEn && param.isVecSchd) Some(Output(UInt(VlData().dataWidth.W))) else None diff --git a/src/main/scala/xiangshan/backend/datapath/WbArbiter.scala b/src/main/scala/xiangshan/backend/datapath/WbArbiter.scala index 687e05d441b..3cdb793fb69 100644 --- a/src/main/scala/xiangshan/backend/datapath/WbArbiter.scala +++ b/src/main/scala/xiangshan/backend/datapath/WbArbiter.scala @@ -3,7 +3,6 @@ package xiangshan.backend.datapath import org.chipsalliance.cde.config.Parameters import chisel3._ import chisel3.util._ -import difftest.{DiffFpWriteback, DiffIntWriteback, DiffVecV0Writeback, DiffVecWriteback, DifftestModule} import utility.XSError import xiangshan.backend.BackendParams import xiangshan.backend.Bundles.{ExuOutput, WriteBackBundle} @@ -386,49 +385,6 @@ class WbDataPath(params: BackendParams, schdParams: SchdBlockParams)(implicit p: sink.bits := source.bits source.ready := true.B } - - // difftest - if (env.EnableDifftest || env.AlwaysBasicDiff) { - intWbArbiterOut.foreach(out => { - val difftest = DifftestModule(new DiffIntWriteback(IntPhyRegs)) - difftest.coreid := io.fromTop.hartId - difftest.valid := out.fire && out.bits.rfWen - difftest.address := out.bits.pdest - difftest.data := out.bits.data - }) - } - - if (env.EnableDifftest || env.AlwaysBasicDiff) { - fpWbArbiterOut.foreach(out => { - val difftest = DifftestModule(new DiffFpWriteback(FpPhyRegs)) - difftest.coreid := io.fromTop.hartId - difftest.valid := out.fire // all fp instr will write fp rf - difftest.address := out.bits.pdest - difftest.data := out.bits.data - }) - } - - if (env.EnableDifftest || env.AlwaysBasicDiff) { - vfWbArbiterOut.foreach(out => { - val difftest = DifftestModule(new DiffVecWriteback(VfPhyRegs)) - difftest.coreid := io.fromTop.hartId - difftest.valid := out.fire - difftest.address := out.bits.pdest - difftest.data(0) := out.bits.data(63, 0) - difftest.data(1) := out.bits.data(127, 64) - }) - } - - if (env.EnableDifftest || env.AlwaysBasicDiff) { - v0WbArbiterOut.foreach(out => { - val difftest = DifftestModule(new DiffVecV0Writeback(V0PhyRegs)) - difftest.coreid := io.fromTop.hartId - difftest.valid := out.fire - difftest.address := out.bits.pdest - difftest.data(0) := out.bits.data(63, 0) - difftest.data(1) := out.bits.data(127, 64) - }) - } } diff --git a/src/main/scala/xiangshan/backend/regfile/Regfile.scala b/src/main/scala/xiangshan/backend/regfile/Regfile.scala index 8a307270836..9cd055f0387 100644 --- a/src/main/scala/xiangshan/backend/regfile/Regfile.scala +++ b/src/main/scala/xiangshan/backend/regfile/Regfile.scala @@ -74,6 +74,7 @@ class Regfile val readPorts = Vec(numReadPorts, new RfReadPort(len, width)) val writePorts = Vec(numWritePorts, new RfWritePort(len, width)) val debug_rports = Vec(65, new RfReadPort(len, width)) + val debug_all_rdata = Vec(numPregs, UInt(len.W)) }) override def desiredName = name println(name + ": size:" + numPregs + " read: " + numReadPorts + " write: " + numWritePorts) @@ -129,6 +130,12 @@ class Regfile for (rport <- io.debug_rports) { rport.data := memForRead(rport.addr) } + io.debug_all_rdata.zipWithIndex.foreach { case (rdata, idx) => + if (idx == 0) + rdata := mem_0 + else + rdata := mem(idx) + } } object Regfile { @@ -146,6 +153,7 @@ object Regfile { bankNum : Int = 1, debugReadAddr: Option[Seq[UInt]], debugReadData: Option[Vec[UInt]], + debugAllRData: Option[Vec[UInt]], isVlRegfile : Boolean = false, )(implicit p: Parameters): Unit = { val numReadPorts = raddr.length @@ -197,6 +205,9 @@ object Regfile { rport.data }) } + if (debugAllRData.nonEmpty) { + debugAllRData.get := regfile.io.debug_all_rdata + } } } @@ -210,14 +221,15 @@ object IntRegFile { wen : Seq[Bool], waddr : Seq[UInt], wdata : Seq[UInt], - debugReadAddr: Option[Seq[UInt]], - debugReadData: Option[Vec[UInt]], + debugReadAddr: Option[Seq[UInt]] = None, + debugReadData: Option[Vec[UInt]] = None, + debugAllRData: Option[Vec[UInt]] = None, withReset : Boolean = false, bankNum : Int, )(implicit p: Parameters): Unit = { Regfile( name, numEntries, raddr, rdata, wen, waddr, wdata, - hasZero = true, withReset, bankNum, debugReadAddr, debugReadData) + hasZero = true, withReset, bankNum, debugReadAddr, debugReadData, debugAllRData) } } @@ -232,8 +244,9 @@ object IntRegFileSplit { wen : Seq[Bool], waddr : Seq[UInt], wdata : Seq[UInt], - debugReadAddr: Option[Seq[UInt]], - debugReadData: Option[Vec[UInt]], + debugReadAddr: Option[Seq[UInt]] = None, + debugReadData: Option[Vec[UInt]] = None, + debugAllRData: Option[Vec[UInt]] = None, withReset : Boolean = false, bankNum : Int, )(implicit p: Parameters): Unit = { @@ -248,6 +261,12 @@ object IntRegFileSplit { r := Cat((0 until splitNum).map(x => debugReadDataVec.get(x)(i)).reverse) } } + val debugAllRDataVec = OptionWrapper(debugAllRData.nonEmpty, Wire(Vec(splitNum, Vec(debugAllRData.get.length, UInt((debugAllRData.get.head.getWidth / splitNum).W))))) + if (debugAllRData.nonEmpty) { + debugAllRData.get.zipWithIndex.map { case (r, i) => + r := Cat((0 until splitNum).map(x => debugAllRDataVec.get(x)(i)).reverse) + } + } for (i <- 0 until splitNum){ val wdataThisPart = wdata.map { case x => val widthThisPart = x.getWidth / splitNum @@ -256,7 +275,9 @@ object IntRegFileSplit { val nameSuffix = if (splitNum > 1) s"Part${i}" else "" Regfile( name + nameSuffix, numEntries, raddr, rdataVec(i), wen, waddr, wdataThisPart, - hasZero = true, withReset, bankNum, debugReadAddr, OptionWrapper(debugReadData.nonEmpty, debugReadDataVec.get(i))) + hasZero = true, withReset, bankNum, debugReadAddr, OptionWrapper(debugReadData.nonEmpty, debugReadDataVec.get(i)), + OptionWrapper(debugAllRData.nonEmpty, debugAllRDataVec.get(i)) + ) } } } @@ -271,15 +292,16 @@ object FpRegFile { wen : Seq[Bool], waddr : Seq[UInt], wdata : Seq[UInt], - debugReadAddr: Option[Seq[UInt]], - debugReadData: Option[Vec[UInt]], + debugReadAddr: Option[Seq[UInt]] = None, + debugReadData: Option[Vec[UInt]] = None, + debugAllRData: Option[Vec[UInt]] = None, withReset : Boolean = false, bankNum : Int, isVlRegfile : Boolean = false, )(implicit p: Parameters): Unit = { Regfile( name, numEntries, raddr, rdata, wen, waddr, wdata, - hasZero = false, withReset, bankNum, debugReadAddr, debugReadData, isVlRegfile) + hasZero = false, withReset, bankNum, debugReadAddr, debugReadData, debugAllRData, isVlRegfile) } } @@ -294,8 +316,9 @@ object FpRegFileSplit { wen : Seq[Bool], waddr : Seq[UInt], wdata : Seq[UInt], - debugReadAddr: Option[Seq[UInt]], - debugReadData: Option[Vec[UInt]], + debugReadAddr: Option[Seq[UInt]] = None, + debugReadData: Option[Vec[UInt]] = None, + debugAllRData: Option[Vec[UInt]] = None, withReset : Boolean = false, bankNum : Int, isVlRegfile : Boolean = false, @@ -311,6 +334,12 @@ object FpRegFileSplit { r := Cat((0 until splitNum).map(x => debugReadDataVec.get(x)(i)).reverse) } } + val debugAllRDataVec = OptionWrapper(debugAllRData.nonEmpty, Wire(Vec(splitNum, Vec(debugAllRData.get.length, UInt((debugAllRData.get.head.getWidth / splitNum).W))))) + if (debugAllRData.nonEmpty) { + debugAllRData.get.zipWithIndex.map { case (r, i) => + r := Cat((0 until splitNum).map(x => debugAllRDataVec.get(x)(i)).reverse) + } + } for (i <- 0 until splitNum){ val wdataThisPart = wdata.map { case x => val widthThisPart = x.getWidth / splitNum @@ -319,7 +348,8 @@ object FpRegFileSplit { val nameSuffix = if (splitNum > 1) s"Part${i}" else "" Regfile( name + nameSuffix, numEntries, raddr, rdataVec(i), wen, waddr, wdataThisPart, - hasZero = false, withReset, bankNum, debugReadAddr, OptionWrapper(debugReadData.nonEmpty, debugReadDataVec.get(i)), isVlRegfile) + hasZero = false, withReset, bankNum, debugReadAddr, OptionWrapper(debugReadData.nonEmpty, debugReadDataVec.get(i)), + OptionWrapper(debugAllRData.nonEmpty, debugAllRDataVec.get(i)), isVlRegfile) } } } @@ -335,8 +365,9 @@ object VfRegFile { wen : Seq[Seq[Bool]], waddr : Seq[UInt], wdata : Seq[UInt], - debugReadAddr: Option[Seq[UInt]], - debugReadData: Option[Vec[UInt]], + debugReadAddr: Option[Seq[UInt]] = None, + debugReadData: Option[Vec[UInt]] = None, + debugAllRData: Option[Vec[UInt]] = None, withReset : Boolean = false, )(implicit p: Parameters): Unit = { require(splitNum >= 1, "splitNum should be no less than 1") @@ -344,7 +375,7 @@ object VfRegFile { if (splitNum == 1) { Regfile( name, numEntries, raddr, rdata, wen.head, waddr, wdata, - hasZero = false, withReset, bankNum = 1, debugReadAddr, debugReadData) + hasZero = false, withReset, bankNum = 1, debugReadAddr, debugReadData, debugAllRData) } else { val dataWidth = wdata.head.getWidth / splitNum val numReadPorts = raddr.length @@ -352,11 +383,12 @@ object VfRegFile { val wdataVec = Wire(Vec(splitNum, Vec(wdata.length, UInt(dataWidth.W)))) val rdataVec = Wire(Vec(splitNum, Vec(raddr.length, UInt(dataWidth.W)))) val debugRDataVec: Option[Vec[Vec[UInt]]] = debugReadData.map(x => Wire(Vec(splitNum, Vec(x.length, UInt(dataWidth.W))))) + val debugAllRDataVec: Option[Vec[Vec[UInt]]] = debugAllRData.map(x => Wire(Vec(splitNum, Vec(x.length, UInt(dataWidth.W))))) for (i <- 0 until splitNum) { wdataVec(i) := wdata.map(_ ((i + 1) * dataWidth - 1, i * dataWidth)) Regfile( name + s"Part${i}", numEntries, raddr, rdataVec(i), wen(i), waddr, wdataVec(i), - hasZero = false, withReset, bankNum = 1, debugReadAddr, debugRDataVec.map(_(i)) + hasZero = false, withReset, bankNum = 1, debugReadAddr, debugRDataVec.map(_(i)), debugAllRDataVec.map(_(i)) ) } for (i <- 0 until rdata.length) { @@ -367,6 +399,11 @@ object VfRegFile { debugReadData.get(i) := Cat(debugRDataVec.get.map(_ (i)).reverse) } } + if (debugAllRData.nonEmpty) { + for (i <- 0 until debugAllRData.get.length) { + debugAllRData.get(i) := Cat(debugAllRDataVec.get.map(_(i)).reverse) + } + } } } } \ No newline at end of file diff --git a/src/main/scala/xiangshan/backend/rename/RenameTable.scala b/src/main/scala/xiangshan/backend/rename/RenameTable.scala index 89d730035f6..aee8409235c 100644 --- a/src/main/scala/xiangshan/backend/rename/RenameTable.scala +++ b/src/main/scala/xiangshan/backend/rename/RenameTable.scala @@ -19,6 +19,7 @@ package xiangshan.backend.rename import org.chipsalliance.cde.config.Parameters import chisel3._ import chisel3.util._ +import difftest._ import utility.HasCircularQueuePtrHelper import utility.ParallelPriorityMux import utility.GatedValidRegNext @@ -214,6 +215,7 @@ class RenameTableWrapper(implicit p: Parameters) extends XSModule { private val numVecRatPorts = numVecRegSrc val io = IO(new Bundle() { + val hartId = Input(UInt(8.W)) val redirect = Input(Bool()) val rabCommits = Input(new RabCommitIO) val diffCommits = if (backendParams.basicDebugEn) Some(Input(new DiffCommitIO)) else None @@ -244,10 +246,6 @@ class RenameTableWrapper(implicit p: Parameters) extends XSModule { val debug_vl_rat = if (backendParams.debugEn) Some(Vec(1,Output(UInt(PhyRegIdxWidth.W)))) else None // for difftest - val diff_int_rat = if (backendParams.basicDebugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None - val diff_fp_rat = if (backendParams.basicDebugEn) Some(Vec(32, Output(UInt(PhyRegIdxWidth.W)))) else None - val diff_vec_rat = if (backendParams.basicDebugEn) Some(Vec(31, Output(UInt(PhyRegIdxWidth.W)))) else None - val diff_v0_rat = if (backendParams.basicDebugEn) Some(Vec(1,Output(UInt(PhyRegIdxWidth.W)))) else None val diff_vl_rat = if (backendParams.basicDebugEn) Some(Vec(1,Output(UInt(PhyRegIdxWidth.W)))) else None }) @@ -258,7 +256,12 @@ class RenameTableWrapper(implicit p: Parameters) extends XSModule { val vlRat = Module(new RenameTable(Reg_Vl)) io.debug_int_rat .foreach(_ := intRat.io.debug_rdata.get) - io.diff_int_rat .foreach(_ := intRat.io.diff_rdata.get) + if (env.AlwaysBasicDiff || env.EnableDifftest) { + // Delay of RenameTable should be same as PhyRegFile + val difftest = DifftestModule(new DiffArchIntRenameTable(IntPhyRegs), delay = 2) + difftest.coreid := io.hartId + difftest.value := intRat.io.diff_rdata.get + } intRat.io.readPorts <> io.intReadPorts.flatten intRat.io.redirect := io.redirect intRat.io.snpt := io.snpt @@ -294,7 +297,11 @@ class RenameTableWrapper(implicit p: Parameters) extends XSModule { // debug read ports for difftest io.debug_fp_rat.foreach(_ := fpRat.io.debug_rdata.get) - io.diff_fp_rat .foreach(_ := fpRat.io.diff_rdata.get) + if (env.AlwaysBasicDiff || env.EnableDifftest) { + val difftest = DifftestModule(new DiffArchFpRenameTable(FpPhyRegs), delay = 2) + difftest.coreid := io.hartId + difftest.value := fpRat.io.diff_rdata.get + } fpRat.io.readPorts <> io.fpReadPorts.flatten fpRat.io.redirect := io.redirect fpRat.io.snpt := io.snpt @@ -325,9 +332,20 @@ class RenameTableWrapper(implicit p: Parameters) extends XSModule { } } + if (env.AlwaysBasicDiff || env.EnableDifftest) { + // Split each 128-bit vector reg into two 64-bit regs (lo, hi), so convert index to (2*index, 2*index+1) + val splitVecPregs = 2 * (V0PhyRegs + VfPhyRegs) + val difftest = DifftestModule(new DiffArchVecRenameTable(splitVecPregs), delay = 2) + difftest.coreid := io.hartId + // When merge v0Rat and vecRat, the index of vecRats should starts from V0PhyRegs + val vecRats = v0Rat.io.diff_rdata.get ++ vecRat.io.diff_rdata.get.map(_ + V0PhyRegs.U) + difftest.value := VecInit(vecRats.flatMap { r => + val splitDest = (r << 1).asUInt + Seq(splitDest, splitDest + 1.U) + }) + } // debug read ports for difftest io.debug_vec_rat .foreach(_ := vecRat.io.debug_rdata.get) - io.diff_vec_rat .foreach(_ := vecRat.io.diff_rdata.get) vecRat.io.readPorts <> io.vecReadPorts.flatten vecRat.io.redirect := io.redirect vecRat.io.snpt := io.snpt @@ -364,7 +382,6 @@ class RenameTableWrapper(implicit p: Parameters) extends XSModule { // debug read ports for difftest io.debug_v0_rat.foreach(_ := v0Rat.io.debug_rdata.get) - io.diff_v0_rat.foreach(_ := v0Rat.io.diff_rdata.get) v0Rat.io.readPorts <> io.v0ReadPorts v0Rat.io.redirect := io.redirect v0Rat.io.snpt := io.snpt diff --git a/src/main/scala/xiangshan/backend/rob/Rob.scala b/src/main/scala/xiangshan/backend/rob/Rob.scala index 85677b1a0b5..b5051be1360 100644 --- a/src/main/scala/xiangshan/backend/rob/Rob.scala +++ b/src/main/scala/xiangshan/backend/rob/Rob.scala @@ -1550,7 +1550,8 @@ class RobImp(override val wrapper: Rob)(implicit p: Parameters, params: BackendP val instr = uop.instr.asTypeOf(new XSInstBitFields) val isVLoad = instr.isVecLoad - val difftest = DifftestModule(new DiffInstrCommit(MaxPhyRegs), delay = 3, dontCare = true) + val diffMaxPhyRegs = Seq(MaxPhyRegs, 2 * (V0PhyRegs + VfPhyRegs)).max // For width of wpdest and otherwpdest + val difftest = DifftestModule(new DiffInstrCommit(diffMaxPhyRegs), delay = 3, dontCare = true) val dt_skip = Mux(eliminatedMove, false.B, exuOut.isSkipDiff) difftest.coreid := io.hartId difftest.index := i.U @@ -1563,7 +1564,17 @@ class RobImp(override val wrapper: Rob)(implicit p: Parameters, params: BackendP difftest.v0wen := io.commits.commitValid(i) && (uop.v0Wen || isVLoad && instr.VD === 0.U) difftest.wpdest := commitInfo.debug_pdest.get difftest.wdest := Mux(isVLoad, instr.VD, commitInfo.debug_ldest.get) - difftest.otherwpdest := debug_VecOtherPdest(ptr) + // When merge v0Rat and vecRat, the index of vecRats should starts from V0PhyRegs + // Split each 128-bit vector reg into two 64-bit regs (lo, hi), so convert index to (2*index, 2*index+1) + difftest.otherwpdest := debug_VecOtherPdest(ptr).zipWithIndex.flatMap { case (pdest, idx) => + val vecDest = if (idx == 0) { + Mux(difftest.v0wen, pdest, pdest + V0PhyRegs.U) + } else { + pdest + V0PhyRegs.U + } + val splitDest = (vecDest << 1).asUInt + Seq(splitDest, splitDest + 1.U) + } difftest.nFused := instrSize - 1.U when(difftest.valid) { assert(instrSize >= 1.U) From afd2b3e4ca3f96ab855eaafd5e3df69b83100e08 Mon Sep 17 00:00:00 2001 From: klin02 Date: Thu, 13 Nov 2025 15:41:24 +0800 Subject: [PATCH 04/14] fix(difftest): add masked RefillEvent, disable for now After #3051, we have supported low-power read (read only 5 out of 8 8B-banks) in ICacheDataArray, and in V3, it was further modified to dynamically compute the mask based on the taken branch location. Though, previous DiffRefillEvent does not support masked comparison, instead it checks entire 512bit, so it will fail if we enable checks on cacheid >=3 (OpenXiangShan/difftest#712). As we're refactoring ICache recently, we need this check to ensure the refactored code is working properly. This commit removes unused idtfr and introduces masked refill-check for Difftest, where each mask bit controls comparison of 8B data. However, as the newly added masked Refill fails CI, we disable it for now, and wait for later fixup. Co-authored-by: ngc7331 --- .../cache/dcache/mainpipe/MissQueue.scala | 2 +- .../scala/xiangshan/cache/mmu/L2TLB.scala | 2 +- .../frontend/icache/ICacheMainPipe.scala | 42 ++++++++++--------- .../frontend/icache/ICacheMissUnit.scala | 2 +- 4 files changed, 25 insertions(+), 23 deletions(-) diff --git a/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala b/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala index 4ce566ecdb6..5a32115bbae 100644 --- a/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala +++ b/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala @@ -1324,7 +1324,7 @@ class MissQueue(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DC difftest.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done difftest.addr := io.refill_to_ldq.bits.addr difftest.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.data) - difftest.idtfr := DontCare + difftest.mask := VecInit.fill(difftest.mask.getWidth)(true.B).asUInt } // Perf count diff --git a/src/main/scala/xiangshan/cache/mmu/L2TLB.scala b/src/main/scala/xiangshan/cache/mmu/L2TLB.scala index 2e285e8efd3..e3aee937810 100644 --- a/src/main/scala/xiangshan/cache/mmu/L2TLB.scala +++ b/src/main/scala/xiangshan/cache/mmu/L2TLB.scala @@ -557,7 +557,7 @@ class L2TLBImp(outer: L2TLB)(implicit p: Parameters) extends PtwModule(outer) wi difftest.valid := cache.io.refill.valid difftest.addr := difftest_ptw_addr(RegEnable(mem.d.bits.source, mem.d.valid)) difftest.data := refill_data.asTypeOf(difftest.data) - difftest.idtfr := DontCare + difftest.mask := VecInit.fill(difftest.mask.getWidth)(true.B).asUInt } if (env.EnableDifftest) { diff --git a/src/main/scala/xiangshan/frontend/icache/ICacheMainPipe.scala b/src/main/scala/xiangshan/frontend/icache/ICacheMainPipe.scala index d3bdac239b2..b2903434476 100644 --- a/src/main/scala/xiangshan/frontend/icache/ICacheMainPipe.scala +++ b/src/main/scala/xiangshan/frontend/icache/ICacheMainPipe.scala @@ -466,26 +466,28 @@ class ICacheMainPipe(implicit p: Parameters) extends ICacheModule /* *** difftest refill check *** */ if (env.EnableDifftest) { - val discard = toIfu.bits.exception.hasException || toIfu.bits.pmpMmio || Pbmt.isUncache(toIfu.bits.itlbPbmt) - val blkPaddrAll = - VecInit(s1_vAddr.map(va => (getPAddrFromPTag(va, s1_pTag)(PAddrBits - 1, blockOffBits) << blockOffBits).asUInt)) - (0 until DataBanks).foreach { i => - val diffMainPipeOut = DifftestModule(new DiffRefillEvent, dontCare = true) - diffMainPipeOut.coreid := io.hartId - diffMainPipeOut.index := (3 + i).U - - val bankSel = getBankSel(s1_offset, s1_blkEndOffset, s1_doubleline).map(_.asUInt).reduce(_ | _) - val lineSel = getLineSel(s1_offset) - - diffMainPipeOut.valid := s1_fire && bankSel(i).asBool && !discard - diffMainPipeOut.addr := Mux( - lineSel(i), - blkPaddrAll(1) + (i.U << log2Ceil(blockBytes / DataBanks)).asUInt, - blkPaddrAll(0) + (i.U << log2Ceil(blockBytes / DataBanks)).asUInt - ) - - diffMainPipeOut.data := s1_datas(i).asTypeOf(diffMainPipeOut.data) - diffMainPipeOut.idtfr := DontCare + val bankSel = getBankSel(s1_offset, s1_blkEndOffset, s1_doubleline) + + // do difftest for each fetched cache line + s1_vAddr.zipWithIndex.foreach { case (va, i) => + val difftest = DifftestModule(new DiffRefillEvent, dontCare = true) + difftest.coreid := io.hartId + difftest.index := (3 + i).U // magic number 3/4: ICache MainPipe refill test + + difftest.valid := false.B +// difftest.valid := s1_fire && !( +// toIfu.bits.exception.hasException || +// toIfu.bits.pmpMmio || +// Pbmt.isUncache(toIfu.bits.itlbPbmt) +// ) + difftest.addr := Cat(getBlkAddrFromPTag(va, s1_pTag), 0.U(blockOffBits.W)) + difftest.data := s1_datas.asTypeOf(difftest.data) + // NOTE: each mask bit controls (512bit / difftest.mask.getWidth) (currently 64bit) comparison + // this only works for DataBanks <= difftest.mask.getWidth (and isPow2) + difftest.mask := VecInit((0 until difftest.mask.getWidth).map { j => + // the i-th mask locates in (i / (difftest.mask.getWidth / DataBanks)) bank + bankSel(i)(j / (difftest.mask.getWidth / DataBanks)) + }).asUInt } } } diff --git a/src/main/scala/xiangshan/frontend/icache/ICacheMissUnit.scala b/src/main/scala/xiangshan/frontend/icache/ICacheMissUnit.scala index 43f79c76920..0dddece6ac9 100644 --- a/src/main/scala/xiangshan/frontend/icache/ICacheMissUnit.scala +++ b/src/main/scala/xiangshan/frontend/icache/ICacheMissUnit.scala @@ -330,6 +330,6 @@ class ICacheMissUnit(edge: TLEdgeOut)(implicit p: Parameters) extends ICacheModu difftest.valid := writeSramValid difftest.addr := Cat(mshrResp.blkPAddr, 0.U(blockOffBits.W)) difftest.data := respDataReg.asTypeOf(difftest.data) - difftest.idtfr := DontCare + difftest.mask := VecInit.fill(difftest.mask.getWidth)(true.B).asUInt } } From a01f518606d09207bf616273d0b8a0c4360dd47a Mon Sep 17 00:00:00 2001 From: klin02 Date: Mon, 1 Dec 2025 13:54:19 +0800 Subject: [PATCH 05/14] fix(Makefile): unify PLDM and FPGA args under DEBUG_ARGS This change unifies the args handling for PLDM and FPGA_DIFF into a shared DEBUG_ARGS options, with PLDM=1 or FPGA=1 used to distinguish between the two environments. For PLDM, we continue to enable the full Difftest by default as it used to be. For FPGA, we set it default to BASIC_DIFF and remove additional debug printing to reduce overhead. Any extra options can be supplied via DEBUG_ARGS as needed. Example usage: PLDM=1 DEBUG_ARGS="--difftest-config ZESN --perf-level CRITICAL" FPGA=1 DEBUG_ARGS="--difftest-config ESBIFDU --difftest-exclude Vec" --- Makefile | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 0552b93eb98..8ecdfa5c7d8 100644 --- a/Makefile +++ b/Makefile @@ -148,8 +148,7 @@ endif # public args sumup RELEASE_ARGS += $(MFC_ARGS) $(COMMON_EXTRA_ARGS) -DEBUG_ARGS += $(MFC_ARGS) $(COMMON_EXTRA_ARGS) -override PLDM_ARGS += $(MFC_ARGS) $(COMMON_EXTRA_ARGS) +override DEBUG_ARGS += $(MFC_ARGS) $(COMMON_EXTRA_ARGS) # co-simulation with DRAMsim3 ifeq ($(WITH_DRAMSIM3),1) @@ -197,12 +196,14 @@ endif # emu for the release version RELEASE_ARGS += --fpga-platform --reset-gen --firtool-opt --ignore-read-enable-mem --firtool-opt "--default-layer-specialization=disable" -DEBUG_ARGS += --enable-difftest --firtool-opt "--default-layer-specialization=enable" -override PLDM_ARGS += --enable-difftest --firtool-opt "--default-layer-specialization=enable" +override DEBUG_ARGS += --firtool-opt "--default-layer-specialization=enable" +ifeq ($(FPGA), 1) +override DEBUG_ARGS += --fpga-platform --disable-all --remove-assert +else +override DEBUG_ARGS += --enable-difftest +endif ifeq ($(RELEASE),1) override SIM_ARGS += $(RELEASE_ARGS) -else ifeq ($(PLDM),1) -override SIM_ARGS += $(PLDM_ARGS) else override SIM_ARGS += $(DEBUG_ARGS) endif @@ -215,8 +216,8 @@ override SIM_ARGS += $(foreach c,$(call splitcomma,$(FIRRTL_COVER)),--extract-$( endif # use RELEASE_ARGS for TopMain by default -ifeq ($(PLDM), 1) -TOPMAIN_ARGS += $(PLDM_ARGS) +ifeq ($(or $(PLDM),$(FPGA)), 1) +TOPMAIN_ARGS += $(DEBUG_ARGS) else TOPMAIN_ARGS += $(RELEASE_ARGS) endif From 5288496aee85209ba02a01db6c616dbc548b9bee Mon Sep 17 00:00:00 2001 From: Song Fangyuan <130735683+SFangYy@users.noreply.github.com> Date: Wed, 26 Nov 2025 14:27:44 +0800 Subject: [PATCH 06/14] feat(pdb): add difftest debugging commands for xspdb (#5175) ## Description This PR adds comprehensive difftest capabilities to xspdb, enabling developers to perform co-simulation debugging with reference models. ## Features ### Core APIs - `api_load_ref_so()`: Load difftest reference shared object - `api_init_ref()`: Initialize difftest reference - `api_set_difftest_diff()`: Turn on/off difftest checking - `api_difftest_reset()`: Reset difftest state ### Debugging Commands - `xload_difftest_ref_so `: Load reference .so file - `xdifftest_turn_on_with_ref `: Turn on difftest with reference - `xdifftest_reset`: Reset difftest state - `xdifftest_display [instance]`: Display difftest status - `xistep [count]`: Step through instructions and stop at commit - `xistep_break `: Control instruction breakpoint - `xpc`: Display current commit PCs and instructions ### PC Watching - `xwatch_commit_pc
`: Watch a specific commit PC - `xunwatch_commit_pc
`: Remove PC watch ### Configuration - `xdifftest_pmem_base_first_instr_address`: Configure memory base and entry point - `xexpdiffstate `: Export difftest state to variable ## Usage Examples ```bash $ make pdb-run (XiangShan) xload_difftest_ref_so /path/to/riscv64-nemu-interpreter-so (XiangShan) xdifftest_turn_on_with_ref /path/to/riscv64-nemu-interpreter-so (XiangShan) xistep 10 (XiangShan) xwatch_commit_pc 0x80000000 (XiangShan) xpc Co-authored-by: Zhicheng Yao --- .github/CODEOWNERS | 3 +- scripts/xspdb/xscmd/cmd_difftest.py | 157 +++++++++++++++++++++++++++- 2 files changed, 156 insertions(+), 4 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index f4035719c41..0ec122fddf5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -44,5 +44,6 @@ huancun/ @linjuanZ src/main/scala/top/ @Tang-Haojin +scripts/Makefile.pdb @yaozhicheng @forever043 @SFangYy @Tang-Haojin scripts/pdb-run.py @yaozhicheng @forever043 @SFangYy @Tang-Haojin -scripts/xspdb/ @yaozhicheng @forever043 @SFangYy @Tang-Haojin +scripts/xspdb/ @yaozhicheng @SFangYy diff --git a/scripts/xspdb/xscmd/cmd_difftest.py b/scripts/xspdb/xscmd/cmd_difftest.py index 9b158a910a5..f41a6e25ab1 100644 --- a/scripts/xspdb/xscmd/cmd_difftest.py +++ b/scripts/xspdb/xscmd/cmd_difftest.py @@ -185,14 +185,14 @@ def do_xpc(self, a): def api_istep_update_commit_pc(self): old_p = self.condition_instrunct_istep.get("pc_old_list") - new_P = self.condition_instrunct_istep.get("pc_lst_list") - if not (old_p and new_P): + new_p = self.condition_instrunct_istep.get("pc_lst_list") + if not (old_p and new_p): self.istep_last_commit_pc = [] else: self.istep_last_commit_pc = [] for i in range(8): old_pc = int.from_bytes(old_p[i].AsBytes(), byteorder='little', signed=False) - new_pc = int.from_bytes(new_P[i].AsBytes(), byteorder='little', signed=False) + new_pc = int.from_bytes(new_p[i].AsBytes(), byteorder='little', signed=False) if old_pc != new_pc: self.istep_last_commit_pc.append(new_pc) @@ -326,3 +326,154 @@ def api_difftest_get_instance(self, instance=0): """ return self.df.GetDifftest(instance) + def complete_xdifftest_turn_on(self, text, line, begidx, endidx): + return [x for x in ["on", "off"] if x.startswith(text)] if text else ["on", "off"] + + def do_xdifftest_turn_on_with_ref(self, arg): + """Turn on the difftest diff with reference so + Args: + arg (string): ref so path + """ + if self.difftest_ref_is_inited: + error("difftest reference already inited") + return + if not arg.strip(): + error("difftest ref so path not found\n usage: xdifftest_turn_on_with_ref ") + return + if not self.api_load_ref_so(arg): + error(f"load difftest ref so {arg} failed") + return + self.api_set_difftest_diff(True) + + def complete_xdifftest_turn_on_with_ref(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) + + def do_xexpdiffstate(self, var): + """Set a variable to difftest_stat + + Args: + var (string): Variable name + """ + self.curframe.f_locals[var] = self.difftest_stat + + def do_xwatch_commit_pc(self, arg): + """Watch commit PC + + Args: + arg (address): PC address + """ + if arg.strip() == "update": + checker = self.condition_watch_commit_pc.get("checker") + if checker: + checker.Reset() + return + try: + address = int(arg, 0) + except Exception as e: + error(f"convert {arg} to number fail: {str(e)}") + return + + if not self.condition_watch_commit_pc.get("checker"): + checker = self.xsp.ComUseCondCheck(self.dut.xclock) + cmtpccmp = self.xsp.ComUseRangeCheck(6, 8); + self.condition_watch_commit_pc["checker"] = checker + self.condition_watch_commit_pc["cmtpcmp"] = cmtpccmp + + checker = self.condition_watch_commit_pc["checker"] + if "watch_pc_0x%x_0"%address not in checker.ListCondition(): + cmtpccmp = self.condition_watch_commit_pc["cmtpcmp"] + target_pc = self.xsp.ComUseDataArray(8) + target_pc.FromBytes(address.to_bytes(8, byteorder='little', signed=False)) + pc_lst_list = [self.xsp.ComUseDataArray(self.difftest_stat.get_commit(i).get_pc_address(), 8) for i in range(8)] + for i, lpc in enumerate(pc_lst_list): + checker.SetCondition("watch_pc_0x%x_%d" % (address, i), lpc.BaseAddr(), target_pc.BaseAddr(), self.xsp.ComUseCondCmp_GE, 8, + 0, 0, 1, cmtpccmp.GetArrayCmp(), cmtpccmp.CSelf()) + checker.SetMaxCbs(1) + self.condition_watch_commit_pc["0x%x"%address] = {"pc_lst_list": pc_lst_list, "target_pc": target_pc} + else: + error(f"watch_commit_pc 0x{address:x} already exists") + return + cb_key = "watch_commit_pc" + self.dut.xclock.RemoveStepRisCbByDesc(cb_key) + self.dut.xclock.StepRis(checker.GetCb(), checker.CSelf(), cb_key) + message(f"watch commit pc: 0x{address:x}") + + def do_xunwatch_commit_pc(self, arg): + """Unwatch commit PC + + Args: + arg (address): PC address + """ + try: + address = int(arg, 0) + except Exception as e: + error(f"convert {arg} to number fail: {str(e)}") + return + checker = self.condition_watch_commit_pc.get("checker") + if not checker: + error("watch_commit_pc.checker not found") + return + if "watch_pc_0x%x_0"%address not in checker.ListCondition(): + error(f"watch_commit_pc 0x{address:x} not found") + return + key = "0x%x"%address + if key in self.condition_watch_commit_pc: + # remove cached entry for this watch target if present + self.condition_watch_commit_pc.pop(key, None) + for i in range(8): + checker.RemoveCondition("watch_pc_0x%x_%d" % (address, i)) + if len(checker.ListCondition()) < 1: + self.dut.xclock.RemoveStepRisCbByDesc("watch_commit_pc") + assert "watch_commit_pc" not in self.dut.xclock.ListSteRisCbDesc() + self.condition_watch_commit_pc.clear() + message("No commit pc to watch, remove checker") + + def do_xdifftest_display(self, arg): + """Display the difftest status + + Args: + arg (number): difftest instance to display, default is 0 + """ + instance = 0 + if arg.strip(): + try: + instance = int(arg) + except Exception as e: + error(f"convert {arg} to number fail: {str(e)}\n usage: xdifftest_display [instance]") + return + if not self.difftest_ref_is_inited: + error("difftest reference not inited") + return + x = self.api_difftest_get_instance(instance) + if x: + x.display() + else: + error(f"difftest instance {instance} not found") + + def do_xdifftest_pmem_base_first_instr_address(self, arg): + """Display or set PMEM_BASE, FIRST_INST_ADDRESS + + Args: + PMEM_BASE (int): PMEM_BASE value default None + FIRST_INST_ADDRESS (int): FIRST_INST_ADDRESS value default None + """ + a, b = None, None + str_usage = "usage xdifftest_pmem_base_first_instr_address [PMEM_BASE FIRST_INST_ADDRESS]" + if arg.strip(): + args = arg.split() + if len(args) != 2: + error(str_usage) + return + try: + a, b = int(args[0], 0), int(args[1], 0) + except Exception as e: + error("Error: %s\n%s"%(e, str_usage)) + return + x, y = self.api_update_pmem_base_and_first_inst_addr(a, b) + if (a is not None) and (b is not None): + message("PMEM_BASE = %s, FIRST_INST_ADDRESS = %s" % (hex(x), hex(y))) + elif (a and a != x) or (b and b != y): + error("Set PMEM_BASE(%s != %s), FIRST_INST_ADDRESS(%s != %s) fail!" % (a, x, b, y)) + else: + message("PMEM_BASE = %s, FIRST_INST_ADDRESS = %s" % (hex(x), hex(y))) + error(str_usage) From a398897bdd621592bbe1c3a26d86acb2d22e4a33 Mon Sep 17 00:00:00 2001 From: Song Fangyuan <130735683+SFangYy@users.noreply.github.com> Date: Wed, 26 Nov 2025 14:28:03 +0800 Subject: [PATCH 07/14] feat(pdb): add files processing commands for xspdb (#5176) ## Description This PR adds comprehensive file import/export capabilities to xspdb, enabling developers to dump and load Flash/RAM contents during debugging sessions. ## Features ### Core APIs - `api_export_flash(bin_file, force_size=-1)`: Programmatic Flash export with smart mret detection - `api_export_ram(end_address, bin_file)`: Programmatic RAM export - `api_export_unified_bin(ram_start, ram_end, bin_file)`: Export unified binary with Flash + RAM - `api_convert_reg_file(file_name)`: Parse register file in format ### Debugging Commands - `xexport_flash [force_size]`: Export Flash data to binary file - `xexport_ram
`: export RAM data from memory base to specified address - `xexport_bin [start_address]`: Combines Flash + RAM into single binary file when `start_address` is specified ## Usage Examples ```bash $ make pdb-run (XiangShan) xexport_flash /tmp/flash.bin (XiangShan) xexport_flash /tmp/flash.bin 512 (XiangShan) xexport_ram 0x80100000 /tmp/ram.bin (XiangShan) xexport_bin 0x80100000 /tmp/dump 0x80040000 (XiangShan) xexport_bin 0x80100000 /tmp/dump (XiangShan) xbytes_to_bin b'\x13\x00\x00\x00' test.bin Co-authored-by: Zhicheng Yao --- scripts/xspdb/xscmd/cmd_files.py | 221 +++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) diff --git a/scripts/xspdb/xscmd/cmd_files.py b/scripts/xspdb/xscmd/cmd_files.py index c7ee00058c6..c8a36d0cf7b 100644 --- a/scripts/xspdb/xscmd/cmd_files.py +++ b/scripts/xspdb/xscmd/cmd_files.py @@ -34,6 +34,227 @@ def api_dut_bin_load(self, bin_file): self.api_init_mem() self.info_cache_asm.clear() + def api_export_flash(self, bin_file, force_size=-1): + """Export Flash data + + Args: + bin_file (string): Path to the export file + """ + if force_size < 0: + if not self.api_check_if_xspdb_init_bin_loaded(): + return + # search mret + mret = 0x30200073 + last_data = 0 + bin_data = bytearray() + bin_size = force_size if force_size > 0 else 1024*10 + for i in range(bin_size): + data = self.df.FlashRead(i*8) + bin_data += data.to_bytes(8, byteorder='little', signed=False) + if (last_data >> 32 == mret or last_data & 0xffffffff == mret) and force_size < 0: + break + last_data = data + with open(bin_file, "wb") as f: + f.write(bin_data) + info(f"export {len(bin_data)} bytes to flash file: {bin_file}") + + def api_export_ram(self, end_address, bin_file): + """Export memory data + + Args: + end_address (int): End address of memory + bin_file (string): Path to the export file + """ + if not self.mem_inited: + error("mem not loaded") + return + end_index = 8 + end_address - end_address % 8 + with open(bin_file, "wb") as f: + for index in range(self.mem_base, end_index, 8): + f.write(self.df.pmem_read(index).to_bytes(8, byteorder='little', signed=False)) + info(f"export {end_index - self.mem_base} bytes to ram file: {bin_file}") + + def api_export_unified_bin(self, ram_start, ram_end, bin_file): + """Export a unified bin file + + Args: + ram_start (int): Start address of memory + ram_end (int): End address of memory + bin_file (string): Path to the export file + """ + if not self.mem_inited: + error("mem not loaded") + return False + if not self.api_check_if_xspdb_init_bin_loaded(): + return False + # read flash data + mret = 0x30200073 + last_data = 0 + last_indx = 0 + bin_data = bytearray() + for i in range(1024*10): + data = self.df.FlashRead(i*8) + bin_data += data.to_bytes(8, byteorder='little', signed=False) + last_indx = i + 1 + if last_data >> 32 == mret or last_data & 0xffffffff == mret: + break + last_data = data + # check conflict + # mem base + mem_base = self.mem_base + ram_start = ram_start - ram_start % 8 + sta_index = (ram_start - mem_base)//8 + if sta_index < last_indx: + error(f"conflict with flash data, ram_start: 0x{ram_start:x}, flash_data_end: 0x{last_indx*8+ mem_base:x}, please check") + return None + ram_end = ram_end - ram_end % 8 + end_index = (ram_end - mem_base)//8 + 1 + # read ram data + with open(bin_file, "wb") as f: + f.write(bin_data) + for index in range(last_indx, end_index): + f.write(self.df.pmem_read(index*8 + mem_base).to_bytes(8, byteorder='little', signed=False)) + info(f"export {8*(end_index - last_indx) + len(bin_data)} bytes to unified bin file: {bin_file}") + return True + + def api_convert_reg_file(self, file_name): + """Parse a register file + + Args: + file_name (file): Register file + """ + assert os.path.exists(file_name), "file %s not found" % file_name + ret_iregs = {} + ret_fregs = {} + raw_iregs = {"x%d"%i : self.iregs[i] for i in range(32)} + raw_fregs = {"f%d"%i : self.fregs[i] for i in range(32)} + with open(file_name, "r") as f: + for i, l in enumerate(f.readlines()): + try: + l = l.strip() + if not l: + continue + key, value = l.split(":") + key = key.strip().lower() + value = int(value.strip(), 0) + if key in raw_iregs: + key = raw_iregs[key] + if key in self.iregs: + assert key not in ret_iregs, f"{key} already exists" + ret_iregs[key] = value + if key in raw_fregs: + key = raw_fregs[key] + if key in self.fregs: + assert key not in ret_fregs, f"{key} already exists" + ret_fregs[key] = value + except Exception as e: + assert False, f"line {i+1} parse fail: {str(e)}" + return ret_iregs, ret_fregs + + def do_xbytes_to_bin(self, arg): + """Convert bytes data to a binary file + + Args: + arg (string): Bytes data + """ + if not arg: + message("usage xbytes_to_bin ") + return + args = arg.strip().split() + if len(args) < 2: + message("usage xbytes_to_bin ") + return + try: + data = eval(args[0]) + if not isinstance(data, bytes): + error("data must be bytes, eg b'\\x00\\x01...'") + return + with open(args[1], "wb") as f: + f.write(data) + except Exception as e: + error(f"convert {args[0]} to bytes fail: {str(e)}") + + def complete_xbytes_to_bin(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) + + def do_xexport_bin(self, arg): + """Export Flash + memory data to a file + + Args: + end_address (int): End address of memory + file_path (string): Path to the export file + start_address (int): Start address of memory + """ + mem_base = self.mem_base + start_address = mem_base + params = arg.strip().split() + if len(params) < 2: + message("usage: xexport_bin [start_address]") + return + file_path = params[1] + if os.path.isdir(file_path): + file_path = os.path.join(file_path, "XSPdb") + file_dir = os.path.dirname(file_path) + if not os.path.exists(file_dir): + os.makedirs(file_dir) + try: + if len(params) > 2: + start_address = int(params[2], 0) + end_address = int(params[0], 0) + if start_address != mem_base: + if self.api_export_unified_bin(start_address, end_address, file_path+"_all.bin") is not None: + return + warn(f"export unified bin to {file_path}_all.bin fail, try to export flash and ram individually") + self.api_export_flash(file_path + "_flash.bin") + self.api_export_ram(end_address, file_path + "_ram.bin") + except Exception as e: + error(f"convert {arg} to number fail: {str(e)}") + + def complete_xexport_bin(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) + + def do_xexport_flash(self, arg): + """Export Flash data to a file + + Args: + path (string): Path to the export file + force_size (int): N*8 bytes to force export + """ + if not arg: + message("usage: xexport_flash [force_size]") + return + args = arg.split() + path = args[0] + fsiz = -1 + try: + if len(args) > 0: + fsiz = int(args[1], 0) + self.api_export_flash(path, fsiz) + except Exception as e: + error(f"{e}\n usage: xexport_flash [force_size]") + + def complete_xexport_flash(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) + + def do_xexport_ram(self, arg): + """Export memory data to a file + + Args: + addr (int): Export address + arg (string): Path to the export file + """ + args = arg.strip().split() + if len(args) < 2: + message("usage: xexport_mem
") + return + try: + self.api_export_ram(int(args[0], 0), args[1]) + except Exception as e: + error(f"convert {args[0]} to number fail: {str(e)}") + + def complete_xexport_ram(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) + def complete_xflash(self, text, line, begidx, endidx): return self.api_complite_localfile(text) From cb54b52944bba0349fd958eda738c2ba5ca74f75 Mon Sep 17 00:00:00 2001 From: Song Fangyuan <130735683+SFangYy@users.noreply.github.com> Date: Wed, 26 Nov 2025 15:52:14 +0800 Subject: [PATCH 08/14] feat(pdb): Add memory read/write and backtrace commands (#5179) ## Description This PR adds comprehensive memory read/write operations and call stack backtrace capabilities to xspdb, enabling developers to inspect and modify memory contents during debugging sessions, as well as analyze call stacks. ## Features ### Core APIs - `api_write_bytes_with_rw`: write implementation with custom read/write functions - `api_read_bytes_with_func`: read implementation with custom read function - `api_get_call_stack`: get call stack from specified SP and PC ## Usage Examples ```bash $ make pdb-run (XiangShan) xmem_read 0x80000000 16 data bytes(16): b'\x13\x00\x00\x00...' (XiagShan) xmem_read_range 0x80000000 0x80000010 data bytes(16): b'\x13\x00\x00\x00...' --- scripts/xspdb/xscmd/cmd_mrw.py | 354 +++++++++++++++++++++++++++++++++ 1 file changed, 354 insertions(+) create mode 100644 scripts/xspdb/xscmd/cmd_mrw.py diff --git a/scripts/xspdb/xscmd/cmd_mrw.py b/scripts/xspdb/xscmd/cmd_mrw.py new file mode 100644 index 00000000000..114cdb65d79 --- /dev/null +++ b/scripts/xspdb/xscmd/cmd_mrw.py @@ -0,0 +1,354 @@ +#*************************************************************************************** +# Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) +# Copyright (c) 2025 Institute of Computing Technology, Chinese Academy of Sciences +# +# XiangShan is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# +# See the Mulan PSL v2 for more details. +#*************************************************************************************** + + +from . import error, info, message, warn +import struct + +class CmdMRW: + """Command class for MRW (Memory Read/Write) operations.""" + + + def api_write_bytes_with_rw(self, address, bytes, dword_read, dword_write): + """Write memory data + + Args: + address (int): Target memory address + bytes (bytes): Data to write + dword_read (function): Function to read uint64 + dword_write (function): Function to write uint64 + """ + if len(bytes) < 1: + error("write data length < 1") + return False + if not self.mem_inited: + error("mem not inited, please load a bin file") + return False + start_offset = address % 8 + head = dword_read(address - start_offset).to_bytes(8, + byteorder='little', signed=False)[:start_offset] + end_address = address + len(bytes) + end_offset = end_address % 8 + tail = dword_read(end_address - end_offset).to_bytes(8, + byteorder='little', signed=False)[end_offset:] + data_to_write = head + bytes + tail + assert len(data_to_write)%8 == 0 + base_address = address - start_offset + for i in range(len(data_to_write)//8): + dword_write(base_address + i*8, int.from_bytes(data_to_write[i*8:i*8+8], + byteorder='little', signed=False)) + info(f"write {len(data_to_write)} bytes to address: 0x{base_address:x} ({len(bytes)} bytes)") + return True + + def api_write_bytes(self, address, bytes): + """Write memory data + + Args: + address (int): Target memory address + bytes (bytes): Data to write + """ + if self.api_is_flash_address(address): + real_address = address - self.flash_base + if real_address < 0: + warn(f"write address {hex(address)} is not in Flash range, less than {hex(self.flash_base)} ignored") + return False + if real_address > 0x7FFFFFFF: + warn(f"write address {hex(address)} is not in Flash range, bigger than {hex(self.flash_base+ 0x7FFFFFFF)} (max uint32 0x7FFFFFFF) ignored") + return False + ret = self.api_write_bytes_with_rw(real_address, + bytes, self.df.FlashRead, self.df.FlashWrite) + else: + ret = self.api_write_bytes_with_rw(address, + bytes, self.df.pmem_read, self.df.pmem_write) + if ret: + # Delete asm data in cache + pos_str = address - address % self.info_cache_bsz + pos_end = address + len(bytes) + pos_end = (pos_end - pos_end % self.info_cache_bsz) + self.info_cache_bsz + for cache_index in range(pos_str, pos_end, self.info_cache_bsz): + if cache_index in self.info_cache_asm: + del self.info_cache_asm[cache_index] + return ret + + def do_xmem_write(self, arg): + """Write memory data + + Args: + arg (bytes/number): Memory address and data + """ + if not arg: + message("usage: xmem_write
") + return + args = arg.strip().split() + if len(args) < 2: + message("usage: xmem_write
") + return + try: + address = int(args[0], 0) + if arg[1].startswith("b"): + data = eval(args[1]) + else: + byte_count = max(1, len(args[1].replace("0x",""))//2) + data = int(args[1], 0).to_bytes(byte_count, byteorder='little', signed=False) + if not isinstance(data, bytes): + error("data must be bytes, eg b'\\x00\\x01...' or hex number") + return + self.api_write_bytes(address, data) + except Exception as e: + error(f"convert {args[0]} or {args[1]} to number/bytes fail: {str(e)}") + + def api_read_bytes_with_func(self, address, size, read_func): + """Read memory data + + Args: + address (int): Memory address + size (int): Size of data to read + read_func (callable): raw read funciton + Return: + bytes + """ + read_data = bytearray() + read_count = size//8 + 1 + start_address = address - address % 8 + start_offset = address - start_address + for index in range(read_count): + padd = start_address + 8*index + read_data += read_func(padd).to_bytes(8, byteorder='little', signed=False) + return read_data[start_offset: start_offset + size] + + def api_read_bytes_from(self, address, size): + """Read memory data + + Args: + address (int): Memory address + size (int): Size of data to read + Return: + bytes + """ + if not self.mem_inited: + error(f"memory is not inited") + return None + end_address = address + size + if ((self.api_is_flash_address(address) and not self.api_is_flash_address(end_address))) or \ + (not self.api_is_flash_address(address) and self.api_is_flash_address(end_address)): + error(f"read address {hex(address)} and {hex(end_address)} not in same range (overlaped with flash and mem)") + return None + if self.api_is_flash_address(address): + def _flash_read(addr): + return self.df.FlashRead(max(0, addr - self.flash_base)) + return self.api_read_bytes_with_func(address, size, _flash_read) + else: + return self.api_read_bytes_with_func(address, size, self.df.pmem_read) + + def do_xmem_copy(self, arg): + """copy memory data from one address to another + + Args: + source (int): Source address + target (int): Target address + size (int): Size of data to copy + """ + if not arg: + message("usage: xmem_copy ") + return + args = arg.strip().split() + if len(args) < 3: + message("usage: xmem_copy ") + return + try: + source = int(args[0], 0) + target = int(args[1], 0) + size = int(args[2], 0) + if size <= 0: + error("size must be > 0") + return + data = self.api_read_bytes_from(source, size) + if data is None: + error(f"read {size} bytes from address {hex(source)} fail") + return + self.api_write_bytes(target, data) + except Exception as e: + error(f"convert {args[0]} or {args[1]} or {args[2]} to number fail: {str(e)}") + + def do_xmem_copy_range_to(self, arg): + """copy memory data from one address to another + Args: + source_start (int): Source address start + source_emd (int): Source address end + target (int): Target address + """ + if not arg: + message("usage: xmem_copy_range_to ") + return + args = arg.strip().split() + if len(args) < 3: + message("usage: xmem_copy_range_to ") + return + try: + source_start = int(args[0], 0) + source_end = int(args[1], 0) + target = int(args[2], 0) + size = source_end - source_start + if size <= 0: + error("size must be > 0") + return + data = self.api_read_bytes_from(source_start, size) + if data is None: + error(f"read {size} bytes from address {hex(source_start)} fail") + return + self.api_write_bytes(target, data) + except Exception as e: + error(f"convert {args[0]} or {args[1]} or {args[2]} to number fail: {str(e)}") + + def do_xmem_read(self, arg): + """copy memory data from one address to another + + Args: + source (int): Source address + target (int): Target address + size (int): Size of data to copy + """ + if not arg: + error("usage: xmem_read ") + return + args = arg.strip().split() + if len(args) < 2: + error("usage: xmem_read ") + return + try: + addr = int(args[0], 0) + size = int(args[1], 0) + if size <= 0: + error("read size need > 0") + return + data = self.api_read_bytes_from(addr, size) + if data is None: + error(f"read None from {hex(addr), hex(addr + size)}") + return + message("data bytes(%d): %s"%(len(data), data)) + except Exception as e: + error(f"convert {args[0]} or {args[1]} to number fail: {str(e)}") + + def do_xmem_read_range(self, arg): + """Read memory data from one address to another + + Args: + source_start (int): Source address start + source_end (int): Source address end + """ + if not arg: + message("usage: xmem_read_range ") + return + args = arg.strip().split() + if len(args) < 2: + message("usage: xmem_read_range ") + return + try: + source_start = int(args[0], 0) + source_end = int(args[1], 0) + size = source_end - source_start + if size <= 0: + error("size must be > 0") + return + data = self.api_read_bytes_from(source_start, size) + if data is None: + error(f"read {size} bytes from address {hex(source_start)} fail") + return + message("data bytes(%d): %s"%(len(data), data)) + except Exception as e: + error(f"convert {args[0]} or {args[1]} to number fail: {str(e)}") + + def api_get_call_stack(self, sp, pc, max_depth=10): + """[FIXME: this implementation is incorrect] Get call stack from address + + Args: + sp (int): Stack pointer address + pc (int): Program counter address + max_depth (int): Maximum depth of call stack + Returns: + list: List of call stack addresses + """ + callstack = [(-1, pc, sp, self.api_address_to_symbol(pc))] + if not self.mem_inited: + return None + for depth in range(max_depth): + try: + sp_bytes = self.api_read_bytes_from(sp, 8) + ra_bytes = self.api_read_bytes_from(sp + 8, 8) + if (sp_bytes is None or ra_bytes is None) or (len(sp_bytes) != 8 or len(ra_bytes) != 8): + error(" [!] Failed to read memory at 0x{:x} or 0x{:x}".format(sp, sp + 8)) + break + next_sp = struct.unpack(" 0: + pc = args[0] + if len(args) > 1: + sp = args[1] + try: + if sp is None: + sp = self.xsp.GetFromU64Array(self.difftest_stat.regs_int.value, self.iregs_mapk["sp"]) + else: + sp = int(sp, 0) + if pc is None: + pc = self.api_info_get_last_commit_pc() + else: + pc = int(pc, 0) + callstack = self.api_get_call_stack(sp, pc) + if not callstack: + error("get call stack fail") + return + _, pc, sp, name = callstack[0] + message(f"Backtrace from (pc: {hex(pc)}, sp: {hex(sp)}) location: {name}") + message("Call Stack:") + for depth, ra, sp, name in callstack[1:]: + message(f"> depth {depth}: ra: {hex(ra)}, sp: {hex(sp)}, at: {name}") + except Exception as e: + error(f"convert args{arg} to pc or sp number fail: {str(e)}") From 51caa6d017e16a0cc31ed2e015bf1e539405e349 Mon Sep 17 00:00:00 2001 From: Song Fangyuan <130735683+SFangYy@users.noreply.github.com> Date: Wed, 26 Nov 2025 15:52:22 +0800 Subject: [PATCH 09/14] feat(pdb): add instruction and utility commands for xspdb (#5236) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description This PR add instruction debugging commands for XSPdb. Users can decode/encode RISC‑V instructions, parse and load instruction files, and conveniently convert between bytes and integers. ## Features ### Core APIs - api_dasm_from_bytes(bytes, pc): Disassemble bytes to assembly text (used internally for display). - api_write_bytes(addr, data: bytes): Write a bytes buffer into DUT memory. - api_complite_localfile(text): Provide file path completion for CLI. - api_convert_uint64_bytes(file_name): Parse a file containing space-separated hex u64 strings into a bytes buffer. ### Debugging Commands - xdecode_instr : Decode a single instruction (int or bytes). Automatically detects compressed vs. 32-bit based on data. - xencode_instr {"instr"}: Encode instruction fields into machine code; prints asm/hex/bytes. - xparse_instr_file : Parse a file of uint64 strings to a printable hex-bytes sequence (little-endian). - xload_instr_file
: Parse a uint64-string file and write resulting bytes to DUT memory at
. Clears cached disassembly. - xnop_insert : Fill [start, end) range with RVC NOPs (0x0001). Performs range and alignment checks. ## Usage Examples ```bash $ make pdb-run (XiangShan) xload_difftest_ref_so /path/to/riscv64-nemu-interpreter-so (XiangShan) xdifftest_turn_on_with_ref /path/to/riscv64-nemu-interpreter-so (XiangShan) xistep 10 (XiangShan) xwatch_commit_pc 0x80000000 (XiangShan) xdecode_instr 0x00000013 (XiangShan) xdecode_instr b'\x13\x00\x00\x00' (XiangShan) xload_instr_file 0x80000000 instrs.txt Co-authored-by: Zhicheng Yao --- scripts/xspdb/xscmd/cmd_instr.py | 410 +++++++++++++++++++++++++++++++ scripts/xspdb/xscmd/cmd_tools.py | 76 ++++++ 2 files changed, 486 insertions(+) create mode 100644 scripts/xspdb/xscmd/cmd_instr.py create mode 100644 scripts/xspdb/xscmd/cmd_tools.py diff --git a/scripts/xspdb/xscmd/cmd_instr.py b/scripts/xspdb/xscmd/cmd_instr.py new file mode 100644 index 00000000000..f2fefb209df --- /dev/null +++ b/scripts/xspdb/xscmd/cmd_instr.py @@ -0,0 +1,410 @@ +#*************************************************************************************** +# Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) +# Copyright (c) 2025 Institute of Computing Technology, Chinese Academy of Sciences +# +# XiangShan is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# +# See the Mulan PSL v2 for more details. +#*************************************************************************************** + +from . import error, message +import ast +import os + +class CmdInstr: + """Instruction command class + """ + + def api_decode_instr16(self, instr): + """Decode a RISC-V compressed (16-bit) instruction. + + Args: + instr: 16-bit integer/bytes representing the compressed instruction + + Returns: + Dictionary containing decoded fields: + { + 'type': str, # Instruction type (CR/CI/CIW/CL/CS/CA/CB/CJ) + 'opcode': int, # Primary opcode (2 bits) + 'funct3': int, # 3-bit function code + 'rd': int, # Destination register (normal or compressed) + 'rs1': int, # Source register 1 (normal or compressed) + 'rs2': int, # Source register 2 (normal or compressed) + 'imm': int, # Immediate value (signed) + 'is_compressed': True + 'asm': str, # Assembly string + } + """ + if isinstance(instr, bytes): + instr = int.from_bytes(instr, byteorder='little', signed=False) + # Convert to 16-bit unsigned + instr = instr & 0xffff + fields = { + 'is_compressed': True, + 'type': 'Unknown', + 'opcode': (instr >> 13) & 0x3, # Primary opcode (bits 15-13) + 'funct3': (instr >> 13) & 0x7, # For some instruction types + 'rd': 0, + 'rs1': 0, + 'rs2': 0, + 'imm': 0 + } + # Helper to expand compressed register numbers + def expand_reg(compressed_reg): + return 8 + (compressed_reg & 0x7) + # Main decoding logic + op = fields['opcode'] + if op == 0x0: # CIW format (Quadrant 0) + fields['type'] = 'CIW' + fields['rd'] = expand_reg((instr >> 2) & 0x7) # rd' + fields['imm'] = ((instr >> 5) & 0x3) << 3 | (instr >> 10) << 5 + fields['imm'] = (fields['imm'] & 0x3f) << 2 # Zero-extended + elif op == 0x1: # CI/CL format + funct3 = (instr >> 13) & 0x7 + if funct3 in [0, 2, 6]: # CI format + fields['type'] = 'CI' + fields['rd'] = expand_reg((instr >> 7) & 0x7) + imm = ((instr >> 12) & 0x1) << 5 | (instr >> 2) & 0x1f + if funct3 == 0: # C.ADDI + fields['imm'] = (imm << 26) >> 26 # Sign extend 6-bit + else: # C.LI etc. + fields['imm'] = imm + elif funct3 in [1, 3, 5, 7]: # CL format + fields['type'] = 'CL' + fields['rd'] = expand_reg((instr >> 7) & 0x7) + fields['rs1'] = expand_reg((instr >> 10) & 0x7) + imm = ((instr >> 5) & 0x3) << 6 | (instr >> 10) & 0x7 + imm = (imm << 25) >> 25 # Sign extend + elif op == 0x2: # CR/CS/CB format + funct4 = (instr >> 12) & 0xf + if funct4 == 0x8: # CR format + fields['type'] = 'CR' + fields['rd'] = (instr >> 7) & 0x1f + fields['rs1'] = (instr >> 7) & 0x1f + fields['rs2'] = (instr >> 2) & 0x1f + elif funct4 in [0x9, 0xa, 0xb]: # CS format + fields['type'] = 'CS' + fields['rs1'] = expand_reg((instr >> 10) & 0x7) + fields['rs2'] = expand_reg((instr >> 7) & 0x7) + imm = (instr >> 2) & 0x1f + fields['imm'] = imm + else: # CB format + fields['type'] = 'CB' + fields['rs1'] = expand_reg((instr >> 10) & 0x7) + imm = ((instr >> 12) & 0x1) << 8 | (instr >> 2) & 0x7 | (instr >> 7) & 0x18 + imm = (imm << 23) >> 23 # Sign extend 9-bit + fields['imm'] = imm + elif op == 0x3: # CJ format + fields['type'] = 'CJ' + imm = ((instr >> 12) & 0x1) << 11 | (instr >> 1) & 0x7ff + imm = (imm << 19) >> 19 # Sign extend 12-bit + fields['imm'] = imm + try: + fields['asm'] = self.api_dasm_from_bytes(instr.to_bytes(2, byteorder="little", signed=False), 0)[0][2] + except Exception as e: + fields['asm'] = f"unknown" + return fields + + def api_encode_instr16(self, fields): + """Encode compressed instruction fields back to 16-bit machine code. + + Args: + fields: Dictionary containing decoded fields + + Returns: + 16-bit integer representing the compressed instruction + """ + instr = 0 + typ = fields['type'] + # Common field handling + def compress_reg(reg): + return (reg - 8) & 0x7 if reg >= 8 else reg + if typ == 'CIW': + instr |= (0x0 << 13) + instr |= (compress_reg(fields['rd']) & 0x7) << 2 + imm = (fields['imm'] >> 2) & 0x3f + instr |= (imm & 0x3) << 5 | (imm >> 3) << 10 + elif typ == 'CI': + instr |= (0x1 << 13) + instr |= (fields['funct3'] & 0x7) << 13 + instr |= (compress_reg(fields['rd']) & 0x7) << 7 + imm = fields['imm'] & 0x3f + instr |= (imm & 0x1f) << 2 | (imm >> 5) << 12 + elif typ == 'CL': + instr |= (0x1 << 13) + instr |= (fields['funct3'] & 0x7) << 13 + instr |= (compress_reg(fields['rs1']) & 0x7) << 10 + instr |= (compress_reg(fields['rd']) & 0x7) << 7 + imm = fields['imm'] & 0x7f + instr |= (imm & 0x3) << 5 | (imm >> 2) << 10 + elif typ == 'CR': + instr |= (0x2 << 13) + instr |= 0x8 << 12 + instr |= (fields['rd'] & 0x1f) << 7 + instr |= (fields['rs2'] & 0x1f) << 2 + elif typ == 'CB': + instr |= (0x2 << 13) + imm = fields['imm'] & 0x1ff + instr |= (imm & 0x7) << 2 | (imm >> 3) << 7 | (imm >> 8) << 12 + instr |= (compress_reg(fields['rs1']) & 0x7) << 10 + elif typ == 'CJ': + instr |= (0x3 << 13) + imm = fields['imm'] & 0xfff + instr |= (imm & 0x7ff) << 1 | (imm >> 11) << 12 + return instr & 0xffff + + def api_decode_instr32(self, instr): + """Decode a RISC-V instruction into its components. + + Args: + instr: 32-bit integer/bytes representing the instruction + + Returns: + Dictionary containing decoded instruction fields: + { + 'type': str, # Instruction type (R/I/S/B/U/J) + 'opcode': int, # 7-bit opcode + 'rd': int, # Destination register (5 bits) + 'rs1': int, # Source register 1 (5 bits) + 'rs2': int, # Source register 2 (5 bits) + 'funct3': int, # 3-bit function code + 'funct7': int, # 7-bit function code + 'imm': int # Immediate value (signed) + 'asm': str # Assembly representation of the instruction + } + """ + if isinstance(instr, bytes): + instr = int.from_bytes(instr, byteorder='little', signed=False) + assert isinstance(instr, int), "instr must be a 32-bit integer or bytes" + # Extract common fields + opcode = instr & 0x7f + rd = (instr >> 7) & 0x1f + funct3 = (instr >> 12) & 0x7 + rs1 = (instr >> 15) & 0x1f + rs2 = (instr >> 20) & 0x1f + funct7 = (instr >> 25) & 0x7f + + # Determine instruction type + instr_type = None + imm = 0 + + # Immediate handling for different formats + if opcode in [0x37, 0x17]: # U-type (LUI/AUIPC) + instr_type = 'U' + imm = (instr & 0xfffff000) + elif opcode == 0x6f: # J-type (JAL) + instr_type = 'J' + imm = ((instr >> 31) & 0x1) << 20 + imm |= ((instr >> 21) & 0x3ff) << 1 + imm |= ((instr >> 20) & 0x1) << 11 + imm |= ((instr >> 12) & 0xff) << 12 + imm = (imm << 11) >> 11 # Sign extend + elif opcode in [0x67, 0x03, 0x13, 0x1b]: # I-type + instr_type = 'I' + imm = (instr >> 20) & 0xfff + if imm & 0x800: # Sign extend + imm |= 0xfffff000 + elif opcode == 0x63: # B-type + instr_type = 'B' + imm = ((instr >> 31) & 0x1) << 12 + imm |= ((instr >> 25) & 0x3f) << 5 + imm |= ((instr >> 8) & 0xf) << 1 + imm |= ((instr >> 7) & 0x1) << 11 + imm = (imm << 19) >> 19 # Sign extend + elif opcode == 0x23: # S-type + instr_type = 'S' + imm = ((instr >> 25) & 0x7f) << 5 + imm |= (instr >> 7) & 0x1f + if imm & 0x800: # Sign extend + imm |= 0xfffff000 + elif opcode == 0x33: # R-type + instr_type = 'R' + else: + instr_type = 'unknown' + try: + instr_asm = self.api_dasm_from_bytes(instr.to_bytes(4, byteorder="little", signed=False), 0)[0][2] + except Exception as e: + instr_asm = f"unknown" + return { + 'type': instr_type, + 'opcode': opcode, + 'rd': rd, + 'rs1': rs1, + 'rs2': rs2, + 'funct3': funct3, + 'funct7': funct7, + 'imm': imm, + "asm": instr_asm + } + + def api_encode_instr32(self, fields): + """Encode instruction fields back into machine code. + + Args: + fields: Dictionary containing instruction fields + + Returns: + 32-bit integer representations (int, bytes, asm) of the instruction + """ + instr = 0 + opcode = fields['opcode'] + instr_type = fields['type'] + + # Common fields + instr |= (opcode & 0x7f) + instr |= (fields['rd'] & 0x1f) << 7 + instr |= (fields['funct3'] & 0x7) << 12 + instr |= (fields['rs1'] & 0x1f) << 15 + instr |= (fields['rs2'] & 0x1f) << 20 + instr |= (fields['funct7'] & 0x7f) << 25 + + # Immediate handling + imm = fields.get('imm', 0) + if instr_type == 'U': + instr |= (imm & 0xfffff000) + elif instr_type == 'J': + imm_enc = (imm & 0x100000) >> 20 + imm_enc |= (imm & 0x3ff) << 21 + imm_enc |= (imm & 0x800) >> 1 + imm_enc |= (imm & 0x7ff000) >> 12 + instr |= imm_enc << 12 + elif instr_type == 'I': + instr |= (imm & 0xfff) << 20 + elif instr_type == 'B': + imm_enc = (imm & 0x1000) << 19 + imm_enc |= (imm & 0x7e0) << 20 + imm_enc |= (imm & 0x1e) << 7 + imm_enc |= (imm & 0x800) >> 4 + instr |= imm_enc + elif instr_type == 'S': + instr |= ((imm & 0xfe0) << 20) | ((imm & 0x1f) << 7) + + return instr & 0xffffffff # Ensure 32-bit + + def do_xdecode_instr(self, arg): + """Decode a binary instruction + + Args: + arg (int or bytes): Instruction data + """ + arg = arg.strip() + if not arg: + error("xdecode_instr ") + return + try: + is_compressed = False + if not arg.startswith("b'"): + arg = int(arg, 0) + if (arg & 0x3) != 0x3: + is_compressed = True + else: + arg = ast.literal_eval(arg) + if len(arg) == 2: + is_compressed = True + value = self.api_decode_instr16(arg) if is_compressed else self.api_decode_instr32(arg) + message(str(value)) + except Exception as e: + error(f"decode {arg} fail: {str(e)}") + + def do_xencode_instr(self, arg): + """Encode a binary instruction + + Args: + arg (dict): Instruction item data + """ + arg = arg.strip() + if not arg: + error('xencode_instr {"instr": "", "imm": , "rs1": , "rs2": , "rd": }') + return + try: + arg = ast.literal_eval(arg) + assert isinstance(arg, dict), "arg must be a dict" + if arg.get("is_compressed"): + instr = self.api_encode_instr16(arg) + instr_bytes = instr.to_bytes(2, byteorder="little", signed=False) + else: + instr = self.api_encode_instr32(arg) + instr_bytes = instr.to_bytes(4, byteorder="little", signed=False) + try: + instr_asm = self.api_dasm_from_bytes(instr_bytes, 0)[0][2] + except Exception as e: + instr_asm = "unknown" + instr_btext = "\\x".join([f"{i:02x}" for i in instr_bytes]) + message(f'asm: {instr_asm} hex: 0x{instr:04x} bytes: b\'{instr_btext}\'') + except Exception as e: + error(f"encode {arg} fail: {str(e)}") + + def do_xparse_instr_file(self, arg): + """Parse uint64 strings + + Args: + arg (file): File to parse + """ + if not arg: + message("usage: xparse_instr_file ") + return + if not os.path.exists(arg): + error("file %s not found" % arg) + return + hex_str = ''.join([f'\\x{byte:02x}' for byte in self.api_convert_uint64_bytes(arg)]) + message(hex_str) + + def complete_xparse_instr_file(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) + + def do_xload_instr_file(self, arg): + """Load uint64 strings into memory + + Args: + arg (file): File to load + """ + params = arg.strip().split() + if not len(params) == 2: + error("xload_instr_file
") + return + if not os.path.exists(params[1]): + error("file %s not found" % params[1]) + return + try: + address = int(params[0], 0) + self.api_write_bytes(address, self.api_convert_uint64_bytes(params[1])) + self.info_cache_asm.clear() + except Exception as e: + error(f"convert {params[0]} to number fail: {str(e)}") + + def complete_xload_instr_file(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) + + def do_xnop_insert(self, arg): + """Insert NOP instructions in a specified address range + + Args: + start (int): Start address + end (int): End address + """ + if not arg: + message("usage: xnop_insert ") + return + args = arg.strip().split() + if len(args) < 2: + message("usage: xnop_insert ") + return + try: + start = int(args[0], 0) + end = int(args[1], 0) + assert start < end, "start address must less than end address" + assert start % 2 == 0, "start address must be aligned to 2" + assert end % 2 == 0, "end address must be aligned to 2" + noop_data = bytearray() + for i in range((end - start) // 2): + noop_data += b'\x01\x00' # nop + self.api_write_bytes(start, noop_data) + except Exception as e: + error(f"convert {args[0]} or {args[1]} to number fail: {str(e)}") diff --git a/scripts/xspdb/xscmd/cmd_tools.py b/scripts/xspdb/xscmd/cmd_tools.py new file mode 100644 index 00000000000..e23e9088a75 --- /dev/null +++ b/scripts/xspdb/xscmd/cmd_tools.py @@ -0,0 +1,76 @@ +#*************************************************************************************** +# Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) +# Copyright (c) 2025 Institute of Computing Technology, Chinese Academy of Sciences +# +# XiangShan is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# +# See the Mulan PSL v2 for more details. +#*************************************************************************************** + + +from . import message, error +import ast + +class CmdTools: + + def api_convert_uint64_bytes(self, file_name): + """Parse uint64 strings + + Args: + file_name (file): File to parse + """ + ret = bytearray() + with open(file_name, "r") as f: + for l in f: + l = l.strip() + if not l: + continue + for v in l.split(): + if not v.startswith("0x"): + v = "0x" + v + ccount = len(v) - 2 + assert ccount % 2 == 0, f"invalid hex string: {v}" + ret += int(v, 0).to_bytes(ccount//2, byteorder='little', signed=False) + return ret + + def do_xbytes2number(self, arg): + """Convert bytes to an integer + + Args: + arg (string): Bytes data + """ + if not arg: + error("bytes2number ") + return + try: + data_bytes = arg.strip() + if not data_bytes.startswith("b'"): + new_data_bytes = "b'" + for i in range(0, len(data_bytes), 2): + new_data_bytes += "\\x%s" % data_bytes[i:i+2] + data_bytes = new_data_bytes + "'" + message(f'{int.from_bytes(ast.literal_eval(data_bytes), byteorder="little", signed=False):x}') + except Exception as e: + error(f"convert {arg} to bytes fail: {str(e)}") + + def do_xnumber2bytes(self, arg): + """Convert an integer to bytes + + Args: + arg (string): Integer data + """ + if not arg: + error("number2bytes ") + return + try: + data = int(arg, 0) + message(f'b"{data.to_bytes(4, byteorder="little", signed=False).hex()}"') + except Exception as e: + error(f"convert {arg} to bytes fail: {str(e)}") From 7e374f536a22856cec0f22fbea37fffb417cff6c Mon Sep 17 00:00:00 2001 From: Song Fangyuan <130735683+SFangYy@users.noreply.github.com> Date: Tue, 2 Dec 2025 18:43:10 +0800 Subject: [PATCH 10/14] feat(pdb): add ELF symbol utilities, and trap controls for xspdb (#5239) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Description This commit introduces three scripting modules under XSPdb’s xscmd: - ELF: symbol table loading and address<->symbol translation, plus PC symbol block change tracing to ease debug navigation. - Trap: “good trap” detection, trap-break toggling, and trap info printing. Minor text/robustness polishes are included without changing public behavior. ## Features ### Core APIs - api_get_elf_symbol_dict(elf_file, search_dirs=["./ready-to-run"]): Invoke readelf -sW to build symbol dictionaries: by address, by name, and sorted lists for bisection. - api_update_local_elf_symbol_dict(): Rebuild symbol tables for the current executable (self.exec_bin_file). - api_turn_on_pc_symbol_block_change(on=True): Toggle PC symbol-block change tracing and auto-load symbols as needed. - api_is_hit_good_trap(show_log=False): Query whether a “good trap” is observed; optional log. - api_is_hit_good_loop(show_log=False): Heuristic “good loop” check based on committed instruction 0x6f. - api_break_on_trap(on: bool): Toggle break-on-trap checker. - api_get_trap_info(): Return trap info dict {pc, code, hasTrap, cycleCnt, hasWFI}. ### Debugging Commands - xtrace_pc_symbol_block_change: Enable/disable symbol block change tracing. When enabled, block transitions are printed automatically during stepping. - Tab-completion for on/off provided. - xgood_trap_disable [true|false]: Disable/enable the “good trap” checker. - xtrap_break_on / xtrap_break_off: Enable/disable “break on trap” behavior. - xtrap_info: Print current trap state (pc/code/hasTrap/cycleCnt/hasWFI). - cmd_com: Helper module; provides file name/path completion for other commands. ## Usage Examples ```bash $ make pdb-run (XiangShan) xload_difftest_ref_so /path/to/riscv64-nemu-interpreter-so (XiangShan) xdifftest_turn_on_with_ref /path/to/riscv64-nemu-interpreter-so (XiangShan) xistep 10 (XiangShan) xtrace_pc_symbol_block_change on (XiangShan) xtrace_pc_symbol_block_change off (XiangShan) xgood_trap_disable true (XiangShan) xtrap_break_on (XiangShan) xtrap_info Co-authored-by: Zhicheng Yao --- scripts/xspdb/xscmd/cmd_com.py | 39 +++++++++ scripts/xspdb/xscmd/cmd_elf.py | 100 +++++++++++++++++++++++ scripts/xspdb/xscmd/cmd_trap.py | 135 +++++++++++++++++++++++++++++++- 3 files changed, 273 insertions(+), 1 deletion(-) create mode 100644 scripts/xspdb/xscmd/cmd_com.py diff --git a/scripts/xspdb/xscmd/cmd_com.py b/scripts/xspdb/xscmd/cmd_com.py new file mode 100644 index 00000000000..6942dd912a4 --- /dev/null +++ b/scripts/xspdb/xscmd/cmd_com.py @@ -0,0 +1,39 @@ +#*************************************************************************************** +# Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) +# Copyright (c) 2025 Institute of Computing Technology, Chinese Academy of Sciences +# +# XiangShan is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# +# See the Mulan PSL v2 for more details. +#*************************************************************************************** +import os + +class CmdComm: + + def api_complite_localfile(self, text): + """Auto-complete local files + + Args: + text (string): File name + + Returns: + list(string): Completion list + """ + text = text.strip() + if not text: + # os.listdir doesn't include '.' or '..', but keep a clear filter for safety + return [f for f in os.listdir('.') if f not in ('.', '..')] + path = "" + fname = text + if "/" in text: + path, fname = text.rsplit("/", 1) + ret = [os.path.join(path, f) for f in os.listdir(path if path else ".") if f.startswith(fname)] + return [f + ("/" if os.path.isdir(f) else "") for f in ret] + diff --git a/scripts/xspdb/xscmd/cmd_elf.py b/scripts/xspdb/xscmd/cmd_elf.py index fabc29214d0..aa954bcca34 100644 --- a/scripts/xspdb/xscmd/cmd_elf.py +++ b/scripts/xspdb/xscmd/cmd_elf.py @@ -89,6 +89,41 @@ def api_get_elf_symbol_dict(self, elf_file, search_dirs=["./ready-to-run"]): error(f"Failed to read ELF file: {e.output.decode()}") return None + def api_is_efl_file(self, file_path): + """Check if the file is an ELF file + + Args: + file_path (string): Path to the file + """ + if not os.path.exists(file_path): + error(f"{file_path} not found") + return False + with open(file_path, "rb") as f: + header = f.read(4) + if header == b"\x7fELF": + return True + else: + error(f"{file_path} is not an ELF file") + return False + return True + + def api_update_local_elf_symbol_dict(self): + """Update the symbol dictionary from loaded ELF file""" + if not self.exec_bin_file: + error("exec_bin_file not loaded, please xload it first") + return False + if not os.path.exists(self.exec_bin_file): + error(f"{self.exec_bin_file} not found") + return False + self.elf_current_exe_bin_is_efl = self.api_is_efl_file(self.exec_bin_file) + if not self.elf_current_exe_bin_is_efl: + error(f"{self.exec_bin_file} is not an ELF file") + return False + self.elf_symbol_dict = self.api_get_elf_symbol_dict(self.exec_bin_file) + count = len(self.elf_symbol_dict.get("addr", {})) + info(f"Loaded {count} symbols from {self.exec_bin_file}") + return True + def api_echo_pc_symbol_block_change(self, current_pc, last_block_addr, last_pc): block_addr = last_block_addr if current_pc < 0: @@ -123,6 +158,30 @@ def api_echo_pc_symbol_block_change(self, current_pc, last_block_addr, last_pc): f"-> {symbol_name}({hex(symbol_addr)})+{hex(delta_curr)}") return symbol_addr + def api_turn_on_pc_symbol_block_change(self, value = True): + """Enable or disable tracing PC symbol block change + Args: + value (bool): True to enable, False to disable + """ + self.flag_trace_pc_symbol_block_change = value + if value: + self.api_update_local_elf_symbol_dict() + + def do_xtrace_pc_symbol_block_change(self, arg): + """Enable or disable tracing PC symbol block change + + Args: + arg (string): "on" to enable, "off" to disable + """ + if arg == "on": + self.api_turn_on_pc_symbol_block_change(True) + message("PC symbol block change tracing enabled") + elif arg == "off": + self.api_turn_on_pc_symbol_block_change(False) + message("PC symbol block change tracing disabled") + else: + message("Usage: xtrace_pc_symbol_block_change [on|off]") + def complete_xtrace_pc_symbol_block_change(self, text, line, begidx, endidx): """Complete the command for tracing PC symbol block change @@ -133,3 +192,44 @@ def complete_xtrace_pc_symbol_block_change(self, text, line, begidx, endidx): endidx (int): Ending index """ return [x for x in ["on", "off"] if x.startswith(text)] if text else ["on", "off"] + + def api_address_to_symbol(self, addr): + """Convert address to symbol name + + Args: + addr (int): Address to convert + """ + if self.elf_current_exe_bin_is_efl is False: + return None + if self.elf_symbol_dict is None: + self.api_update_local_elf_symbol_dict() + if self.elf_symbol_dict is None: + return None + symbol_index = bisect.bisect_left(self.elf_symbol_dict["sorted_addr"], addr) - 1 + if symbol_index < 0: + return None + if symbol_index >= len(self.elf_symbol_dict["sorted_addr"]): + return None + symbol_addr = self.elf_symbol_dict["sorted_addr"][symbol_index] + symbol = self.elf_symbol_dict.get("addr", {}).get(symbol_addr) + if symbol: + return f"({','.join([s['name'] for s in symbol])}: {hex(symbol_addr)}) + {hex(addr - symbol_addr)}" + return None + + def api_symbol_to_address(self, symbol): + """Convert symbol name to address + + Args: + symbol (string): Symbol name to convert + """ + if self.elf_current_exe_bin_is_efl is False: + return None + if self.elf_symbol_dict is None: + self.api_update_local_elf_symbol_dict() + if self.elf_symbol_dict is None: + return None + addr = self.elf_symbol_dict.get("name", {}).get(symbol) + if addr: + return addr["addr"] + return None + diff --git a/scripts/xspdb/xscmd/cmd_trap.py b/scripts/xspdb/xscmd/cmd_trap.py index 176a3a5484e..47bdba26c13 100644 --- a/scripts/xspdb/xscmd/cmd_trap.py +++ b/scripts/xspdb/xscmd/cmd_trap.py @@ -22,10 +22,47 @@ class CmdTrap: """ def __init__(self): - assert hasattr(self, "dut"), "this class must be used in XSPdb, canot be used alone" + assert hasattr(self, "dut"), "this class must be used in XSPdb, cannot be used alone" self.condition_good_trap = {} self.break_on_trap = {} + def api_init_good_trap(self): + """Initialize the good trap""" + checker = self.condition_good_trap.get("checker") + if checker: + return + if hasattr(self.difftest_stat.trap, "get_code_address"): + checker = self.xsp.ComUseCondCheck(self.dut.xclock) + target_trap_vali = self.xsp.ComUseDataArray(1) + target_trap_code = self.xsp.ComUseDataArray(8) + target_trap_vali.FromBytes(int(0).to_bytes(1, byteorder='little', signed=False)) + target_trap_code.FromBytes(int(0).to_bytes(8, byteorder='little', signed=False)) #FIXME: is the good trap code zero ? + source_trap_code = self.xsp.ComUseDataArray(self.difftest_stat.trap.get_code_address(), 8) + source_trap_vali = self.xsp.ComUseDataArray(self.difftest_stat.trap.get_hasTrap_address(), 1) + checker.SetCondition("good_trap", source_trap_code.BaseAddr(), target_trap_code.BaseAddr(), self.xsp.ComUseCondCmp_EQ, 8, + source_trap_vali.BaseAddr(), target_trap_vali.BaseAddr(), 1) + checker.SetValidCmpMode("good_trap", self.xsp.ComUseCondCmp_NE) + else: + warn("trap.get_code_address not found, please build the latest difftest-python") + return + trap_key = "good_trap" + self.dut.xclock.RemoveStepRisCbByDesc(trap_key) + self.dut.xclock.StepRis(checker.GetCb(), checker.CSelf(), trap_key) + self.condition_good_trap["checker"] = checker + + def api_disable_good_trap(self, disable): + """disable good trap + Args: + disable (bool): Whether to disable good trap + """ + if disable: + checker = self.condition_good_trap.get("checker") + if checker: + self.dut.xclock.RemoveStepRisCbByDesc("good_trap") + self.condition_good_trap.clear() + else: + self.api_init_good_trap() + def api_is_hit_good_trap(self, show_log=False): """Check if the good trap is hit @@ -56,6 +93,36 @@ def api_is_hit_good_loop(self, show_log=False): return True return False + def api_break_on_trap(self, on): + """Set breakpoint on trap + + Args: + on (bool): Whether to set breakpoint on trap + """ + check = self.break_on_trap.get("checker") + if not check: + check = self.xsp.ComUseCondCheck(self.dut.xclock) + target_trap_vali = self.xsp.ComUseDataArray(1) + target_trap_vali.SetZero() + source_trap_vali = self.xsp.ComUseDataArray(self.difftest_stat.trap.get_hasTrap_address(), 1) + check.SetCondition("break_on_trap", source_trap_vali.BaseAddr(), target_trap_vali.BaseAddr(), self.xsp.ComUseCondCmp_EQ, 1) + self.break_on_trap["checker"] = check + trap_key = "break_on_trap" + self.break_on_trap["on"] = on + if on: + self.dut.xclock.RemoveStepRisCbByDesc(trap_key) + self.dut.xclock.StepRis(check.GetCb(), check.CSelf(), trap_key) + else: + self.dut.xclock.RemoveStepRisCbByDesc(trap_key) + + def api_is_trap_break_on(self): + """Check if the trap is break on + + Returns: + bool: Whether the trap is break on + """ + return self.break_on_trap.get("on", False) + def api_is_hit_trap_break(self, show_log=False): """Check if the trap is break @@ -70,3 +137,69 @@ def api_is_hit_trap_break(self, show_log=False): message(f"{GREEN}HIT TRAP BREAK pc: 0x{trap.pc:x} code: 0x{trap.code:x} hasWFI: {trap.hasWFI}{RESET}") return True return False + + def api_get_trap_info(self): + """Get trap information + + Returns: + dict: Trap information + """ + trap = self.difftest_stat.trap + return { + "pc": trap.pc, + "code": trap.code, + "hasTrap": trap.hasTrap, + "cycleCnt": trap.cycleCnt, + "hasWFI": trap.hasWFI + } + + def do_xgood_trap_disable(self, arg): + """Disable good trap + + Args: + arg (bool): Whether to disable good trap + """ + disable = True + if arg.strip(): + if arg.lower() == "false": + disable = False + elif arg.lower() == "true": + disable = True + else: + warn(f"arg {arg} is not true or false\n usage: xgood_trap_disable [true|false]") + return + self.api_disable_good_trap(disable) + if disable: + info("good trap is disabled") + else: + info("good trap is enabled") + + def complete_xgood_trap_disable(self, text, line, begidx, endidx): + return [x for x in ["true", "false"] if x.startswith(text)] if text else ["true", "false"] + + def do_xtrap_break_on(self, arg): + """Set breakpoint on trap + + Args: + arg (None): No arguments + """ + self.api_break_on_trap(True) + info("trap break on") + + def do_xtrap_break_off(self, arg): + """Unset breakpoint on trap + Args: + arg (None): No arguments + """ + self.api_break_on_trap(False) + info("trap break off") + + def do_xtrap_info(self, arg): + """Print trap information + + Args: + arg (None): No arguments + """ + info = self.api_get_trap_info() + message(f"trap pc: 0x{info['pc']:x} code: 0x{info['code']:x} hasTrap: {info['hasTrap']} cycle: 0x{info['cycleCnt']:x} hasWFI: {info['hasWFI']}") + From 8b252a0d77b6898d37ba31e544effc1418ae39d5 Mon Sep 17 00:00:00 2001 From: SFangYy Date: Thu, 27 Nov 2025 17:20:54 +0800 Subject: [PATCH 11/14] feat(pdb): add register management commands and register-file load/parse Co-authored-by: Zhicheng Yao --- scripts/xspdb/xscmd/cmd_regs.py | 180 ++++++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 scripts/xspdb/xscmd/cmd_regs.py diff --git a/scripts/xspdb/xscmd/cmd_regs.py b/scripts/xspdb/xscmd/cmd_regs.py new file mode 100644 index 00000000000..3c765f24f19 --- /dev/null +++ b/scripts/xspdb/xscmd/cmd_regs.py @@ -0,0 +1,180 @@ +#*************************************************************************************** +# Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) +# Copyright (c) 2025 Institute of Computing Technology, Chinese Academy of Sciences +# +# XiangShan is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# +# See the Mulan PSL v2 for more details. +#*************************************************************************************** + + +import os +from . import message, error + +class CmdRegs: + """Register operations""" + + def __init__(self): + assert hasattr(self, "dut"), "this class must be used in XSPdb, canot be used alone" + self.fregs = ["ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", + "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", + "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", + "fs8", "fs9", "fs10", "fs11","ft8", "ft9", "ft10", "ft11"] + self.iregs = ["zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", + "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", + "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10","s11", "t3", "t4", "t5", "t6"] + self.iregs_mapk = {k: i for i, k in enumerate(self.iregs)} + self.iregs_mapv = {i: k for i, k in enumerate(self.iregs)} + self.fregs_mapk = {k: i for i, k in enumerate(self.fregs)} + self.fregs_mapv = {i: k for i, k in enumerate(self.fregs)} + self.mpc_iregs = self.iregs.copy() + self.mpc_iregs[0] = "mpc" + + def do_xlist_freg_map(self, arg): + """List floating-point register mappings + + Args: + arg (None): No arguments + """ + for i, r in enumerate(self.fregs): + message(f"x{i}: {r}", end=" ") + message("") + + def do_xlist_flash_fregs(self, arg): + """List Flash floating-point registers + + Args: + arg (None): No arguments + """ + for r in self.api_get_flash_init_fregs(): + message(f"{r[0]}: {hex(r[1])}", end=" ") + message("") + + def do_xlist_flash_iregs(self, arg): + """List Flash internal registers + + Args: + arg (None): No arguments + """ + for r in self.api_get_flash_init_iregs(): + message(f"{r[0]}: {hex(r[1])}", end=" ") + message("") + + def do_xset_fregs(self, arg): + """Set Flash floating-point registers (general) + + Args: + arg (string): Register values + """ + if not arg: + message("usage: xset_fregs , format: {\"reg_name\": value} or [value1, value2, ...]") + return + try: + self.api_set_flash_float_regs(eval(arg)) + except Exception as e: + error(f"set_fregs fail: {str(e)}") + + def do_xset_ireg(self, arg): + """Set a single Flash internal register (Integer) + + Args: + arg (string): Register name and value + """ + if not arg: + message("usage: xset_ireg ") + return + args = arg.strip().split() + if len(args) < 2: + message("usage: xset_ireg ") + return + try: + self.api_set_flash_int_regs({args[0]: int(args[1], 0)}) + except Exception as e: + error(f"set_ireg fail: {str(e)}") + + def do_xset_iregs(self, arg): + """Set Flash internal registers (Integer) + + Args: + arg (string): Register values + """ + if not arg: + message("usage: xset_iregs , format: {\"reg_name\": value} or [value1, value2, ...]") + return + try: + self.api_set_flash_int_regs(eval(arg)) + except Exception as e: + error(f"set_iregs fail: {str(e)}") + + def do_xset_freg(self, arg): + """Set a Flash floating-point register + + Args: + arg (string): Register name and value + """ + if not arg: + message("usage: xset_freg ") + return + args = arg.strip().split() + if len(args) < 2: + message("usage: xset_freg ") + return + try: + self.api_set_flash_float_regs({args[0]: int(args[1], 0)}) + except Exception as e: + error(f"set_freg fail: {str(e)}") + + def complete_xset_ireg(self, text, line, begidx, endidx): + return [k for k in ["mpc", "ra", "sp", "gp", "tp", "t0", "t1", "t2", + "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", + "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10","s11", "t3", "t4", "t5", "t6"] if k.startswith(text)] + + def complete_xset_freg(self, text, line, begidx, endidx): + return [k for k in self.fregs if k.startswith(text)] + + def do_xload_reg_file(self, arg): + """Load a register file + + Args: + arg (file): Register file + """ + if not arg: + error("load_reg_file ") + return + if not os.path.exists(arg): + error("file %s not found" % arg) + return + iregs, fregs = self.api_convert_reg_file(arg) + self.api_set_flash_int_regs(iregs) + self.api_set_flash_float_regs(fregs) + + def complete_xload_reg_file(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) + + def do_xparse_reg_file(self, arg): + """Parse a register file + + Args: + arg (file): Register file + """ + if not arg: + error("parse_reg_file ") + return + if not os.path.exists(arg): + error("file %s not found" % arg) + return + iregs, fregs = self.api_convert_reg_file(arg) + message("iregs:\n", str(iregs)) + message("fregs:\n", str(fregs)) + + def complete_xparse_reg_file(self, text, line, begidx, endidx): + return self.api_complite_localfile(text) From 22cf02f884a901f334ad1594c0abc5d901c4b2c1 Mon Sep 17 00:00:00 2001 From: SFangYy Date: Thu, 27 Nov 2025 17:34:21 +0800 Subject: [PATCH 12/14] feat(pdb): add asm and dasm command for xspdb Co-authored-by: Zhicheng Yao --- scripts/xspdb/xscmd/__init__.py | 2 +- scripts/xspdb/xscmd/cmd_asm.py | 188 +++++++++++++++++++++++++ scripts/xspdb/xscmd/cmd_dasm.py | 240 ++++++++++++++++++++++++++++++++ 3 files changed, 429 insertions(+), 1 deletion(-) create mode 100644 scripts/xspdb/xscmd/cmd_asm.py create mode 100644 scripts/xspdb/xscmd/cmd_dasm.py diff --git a/scripts/xspdb/xscmd/__init__.py b/scripts/xspdb/xscmd/__init__.py index 14088575879..cce874b8794 100644 --- a/scripts/xspdb/xscmd/__init__.py +++ b/scripts/xspdb/xscmd/__init__.py @@ -16,4 +16,4 @@ from xspdb.xscmd.util import message, error, warn, info, GREEN, RESET, YELLOW -from xspdb.xscmd.util import get_completions, find_executable_in_dirs +from xspdb.xscmd.util import get_completions, find_executable_in_dirs, dasm_bytes diff --git a/scripts/xspdb/xscmd/cmd_asm.py b/scripts/xspdb/xscmd/cmd_asm.py new file mode 100644 index 00000000000..dbdf6e3d5ef --- /dev/null +++ b/scripts/xspdb/xscmd/cmd_asm.py @@ -0,0 +1,188 @@ +#*************************************************************************************** +# Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) +# Copyright (c) 2025 Institute of Computing Technology, Chinese Academy of Sciences +# +# XiangShan is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# +# See the Mulan PSL v2 for more details. +#*************************************************************************************** + + +import os +import subprocess +import tempfile +import fnmatch +from . import info, error, message, warn, find_executable_in_dirs, YELLOW, RESET + + +class CmdASM: + """Assembly command class for disassembling data""" + + def api_asm_str(self, asm_str, entry_address=0x80000000, debug=True, target_secs=[], search_dirs=["./ready-to-run"]): + """Assemble RISC-V assembly code and return a dict mapping section start addresses to bytes (little-endian). + Uses riscv64-unknown-elf-gcc and objcopy. + + Args: + asm (str): RISC-V assembly code (can contain multiple .section/.text/.data). + entry_addr (int): Entry address for the first section (default 0x80000000). + debug (bool): Whether to enable debug info (default False). + + Returns: + Dict[int, bytes]: {address: bytes} for each section. + """ + cmd_gcc = "" + cmd_objdump = "" + cmd_objcopy = "" + # Check if gcc and objdump are available + cmd_prefix = ["riscv64-unknown-elf-", "riscv64-linux-gnu-"] + for prefix in cmd_prefix: + if not cmd_gcc: + cmd_gcc = find_executable_in_dirs(prefix+"gcc", search_dirs = search_dirs) + if not cmd_objdump: + cmd_objdump = find_executable_in_dirs(prefix+"objdump", search_dirs = search_dirs) + if not cmd_objcopy: + cmd_objcopy = find_executable_in_dirs(prefix+"objcopy", search_dirs = search_dirs) + if not cmd_gcc: + error(f"gcc with prefix[{'or'.join(cmd_prefix)}] not found, please install it") + return None + if not cmd_objdump: + error(f"objdump with prefix[{'or'.join(cmd_prefix)}] not found, please install it") + return None + if not cmd_objcopy: + error(f"objcopy with prefix[{'or'.join(cmd_prefix)}] not found, please install it") + return None + with tempfile.TemporaryDirectory() as tmpdir: + asm_file = os.path.join(tmpdir, "input.S") + elf_file = os.path.join(tmpdir, "output.elf") + map_file = os.path.join(tmpdir, "output.map") + # Write asm to file + with open(asm_file, "w") as f: + raw_asm = asm_str.replace("\\t", "\t").replace(";$", "\n").replace(";", "\n\t") + if "__start" not in raw_asm: + raw_asm = ".global _start\n_start:\n\t" + raw_asm + if debug: + info("User Input ASM:\n"+raw_asm) + f.write(raw_asm) + # Assemble to ELF + gcc_cmd = [ + cmd_gcc, + "-nostdlib", "-Ttext", hex(entry_address), + "-Wl,-Map=" + map_file, + asm_file, "-o", elf_file + ] + subprocess.check_call(gcc_cmd) + if debug: + objdump_dis_cmd = [ + cmd_objdump, + "-d", + elf_file + ] + objdump_dis_out = subprocess.check_output(objdump_dis_cmd, encoding="utf-8") + info("Final Decompiled ASM:\n"+objdump_dis_out) + # Get section info using objdump + objdump_cmd = [cmd_objdump, "-h", elf_file] + objdump_out = subprocess.check_output(objdump_cmd, encoding="utf-8") + # Parse section addresses and sizes + section_info = {} + for line in objdump_out.splitlines(): + parts = line.split() + if len(parts) >= 6 and parts[1].startswith('.'): + name = parts[1] + size = int(parts[2], 16) + addr = int(parts[3], 16) + if size > 0: + section_info[name] = (addr, size) + # Extract each section as binary + if debug: + info(f"Sections in ELF:\n{objdump_out}\n") + result = {} + for sec, (addr, size) in section_info.items(): + sec_bin = os.path.join(tmpdir, f"{sec[1:]}.bin") + objcopy_cmd = [ + cmd_objcopy, + f"-j{sec}", + "-O", "binary", + elf_file, sec_bin + ] + subprocess.check_call(objcopy_cmd) + with open(sec_bin, "rb") as f: + data = f.read() + if len(target_secs) > 0: + if not any(fnmatch.fnmatch(sec, pattern) for pattern in target_secs): + if debug: + info(f"Section {sec} not matched, skip") + continue + result[sec] = (addr, data) + if debug: + message = "" + for name, (addr, data) in result.items(): + message += f"Section[{name}] at {hex(addr)}: {data}\n" + info(f"Sections Parsed:\n{message}" + (f"Find {len(result)} sections." if message else f"{YELLOW}No sections found{RESET}")) + return result + + def do_xasm(self, arg, debug=True): + """Assemble RISC-V assembly code and return a dict mapping section start addresses to bytes (little-endian). + Uses riscv64-unknown-elf-gcc and objcopy. + + Args: + entry_addr (int): Entry address for the first section (default self.mem_base (0x80000000)). eg <0x80000000> + target_secs (list): List of section names to extract (default empty, all sections). eg [.text*, .data*], support wildcards. + asm_data (str): RISC-V assembly code (can contain multiple .section/.text/.data). + Returns: + Dict: {name: (address,bytes)} for each section. + + Examples: + xasm <0x80000000> [.text*,.data] addi a0, a0, 1 + xasm <0x80000000> addi a0, a0, 1 + xasm [.text*] addi a0, a0, 1 + """ + if not arg: + message("usage: xasm [] [[sections,...]] ") + return + target_secs = [] + arg = arg.strip() + entry_address = self.mem_base + try: + if arg.startswith("<"): + cmds = arg.split(">", 1) + entry_address = int(cmds[0].replace("<", ""), 0) + arg = cmds[1].strip() + if arg.startswith("["): + cmds = arg.split("]", 1) + target_secs = [s.strip() for s in cmds[0].replace("[", "").split(",")] + arg = cmds[1].strip() + asm_str = arg + if not asm_str: + message("usage: xasm [] ") + return + return self.api_asm_str(asm_str, entry_address=entry_address, debug=debug, target_secs=target_secs) + except Exception as e: + error(f"asm {arg} fail: {str(e)}") + return + + def do_xasm_insert(self, arg): + """Assemble RISC-V assembly code and insert it into the target address (with no debug message). + + Args: same as xasm + """ + sections = self.do_xasm(arg, debug=False) + if not sections: + warn("No sections found, ignore insert") + return + bytes_count = 0 + for sec, (addr, data) in sections.items(): + if len(data) == 0: + warn(f"Empty section: {sec}, skip") + continue + info(f"Insert Section[{sec}] at {hex(addr)}: with {len(data)} bytes") + if not self.api_write_bytes(addr, data): + break + bytes_count += len(data) + info(f"Total {bytes_count} bytes inserted") diff --git a/scripts/xspdb/xscmd/cmd_dasm.py b/scripts/xspdb/xscmd/cmd_dasm.py new file mode 100644 index 00000000000..44eea234c31 --- /dev/null +++ b/scripts/xspdb/xscmd/cmd_dasm.py @@ -0,0 +1,240 @@ +#*************************************************************************************** +# Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) +# Copyright (c) 2025 Institute of Computing Technology, Chinese Academy of Sciences +# +# XiangShan is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# +# See the Mulan PSL v2 for more details. +#*************************************************************************************** + + +from . import dasm_bytes, error, info, message + +class CmdDASM: + + def api_is_flash_address(self, address): + """Check if the address is in Flash range + + Args: + address (int): Target address + + Returns: + bool: True if the address is in Flash range, False otherwise + """ + return self.flash_base <= address < self.flash_ends + + def api_merge_asm_list_overlap_append(self, a, b): + if len(b) == 0: + return a + if len(a) == 0: + return b + b_head = b[0][0] + a_end_index = -1 + a_size = len(a) + while abs(a_end_index) <= a_size: + if a[a_end_index][0] < b_head: + if a_end_index == -1: + return a + b + else: + return a[:a_end_index + 1] + b + a_end_index -= 1 + return b + + def api_all_data_to_asm(self, address, length): + """Convert memory data to assembly instructions + + Args: + address (int): Target memory address + length (int): Target memory length + + Returns: + list((address, hex, mnemonic, str)): Disassembly results + """ + end_address = address + length + if self.api_is_flash_address(address) and \ + not self.api_is_flash_address(end_address): + return self.api_merge_asm_list_overlap_append(self.api_flash_data_to_asm(address, self.flash_ends - address), + self.api_mem_data_to_asm(self.flash_ends, end_address - self.flash_ends)) + + if not self.api_is_flash_address(address) and \ + self.api_is_flash_address(end_address): + return self.api_merge_asm_list_overlap_append(self.api_mem_data_to_asm(address, self.flash_base - address), + self.api_flash_data_to_asm(self.flash_base, end_address - self.flash_base)) + + if self.api_is_flash_address(address): + return self.api_flash_data_to_asm(address, length) + else: + return self.api_mem_data_to_asm(address, length) + + def api_flash_data_to_asm(self, address, length): + """Convert Flash data to assembly instructions + + Args: + address (int): Target Flash address + length (int): Target Flash length + + Returns: + list((address, hex, mnemonic, str)): Disassembly results + """ + def _flash_read(addr): + return self.df.FlashRead(max(0, addr - self.flash_base)) + return self.api_read_data_as_asm(address, length, _flash_read) + + def api_mem_data_to_asm(self, address, length): + """Convert memory data to assembly instructions + + Args: + address (int): Target memory address + length (int): Target memory length + + Returns: + list((address, hex, mnemonic, str)): Disassembly results + """ + return self.api_read_data_as_asm(address, length, self.df.pmem_read) + + def api_dasm_from_bytes(self, bytes, start_address=0): + """Convert binary data to assembly instructions + + Args: + bytes (bytes): Binary data + start_address (int): Starting address + + Returns: + list((address, hex, mnemonic, str)): Disassembly results + """ + return dasm_bytes(bytes, start_address) + + def api_read_data_as_asm(self, address, length, read_func): + """Convert memory data to assembly instructions + + Args: + address (int): Target memory address + length (int): Target memory length + read_func (function): Function to read uint64 + + Returns: + list((address, hex, mnemonic, str)): Disassembly results + """ + dasm_list = [] + try: + sta_address = address - address % 2 # Starting memory address must be 2-byte aligned + end_address = sta_address + (2 + length//2 + length % 2) # Ending memory address must be 2-byte aligned; read at least 2 bytes + assert sta_address >=0 , "address need >=0 and not miss align" + assert length >=0, "length need >=0 " + + pmem_sta_address = sta_address - sta_address % 8 # Physical memory reads 8 bytes at a time; must be 8-byte aligned + pmem_end_address = end_address - end_address % 8 # Physical memory reads 8 bytes at a time; must be 8-byte aligned + count = 1 + pmem_end_address - pmem_sta_address + buffer = bytearray() + for index in range(count): + padd = pmem_sta_address + 8*index + buffer += read_func(padd).to_bytes(8, byteorder='little', signed=False) + # Calculate offset + offset = sta_address - pmem_sta_address + for instr in dasm_bytes(buffer[offset:], sta_address): + dasm_list.append(instr) + except Exception as e: + import traceback + error(f"disasm fail: {str(e)} {traceback.print_exc()}") + return dasm_list + + def do_xdasm(self, arg): + """Disassemble memory data + + Args: + arg (string): Memory address and length + """ + if not arg: + error("dasm
[length]") + return + args = arg.strip().split() + length = 10 + if len(args) < 2: + args.append(str(length)) + try: + address = int(args[0], 0) + length = int(args[1]) + for l in self.api_all_data_to_asm(address, length): + message("0x%x: %s\t%s\t%s" % (l[0], l[1], l[2], l[3])) + except Exception as e: + error(f"convert {args[0]} or {args[1]} to number fail: {str(e)}") + + def do_xdasmflash(self, arg): + """Disassemble Flash data + + Args: + arg (string): Flash address and length + """ + if not arg: + error("dasmflash
[length]") + return + args = arg.strip().split() + length = 10 + if len(args) < 2: + args.append(str(length)) + try: + address = int(args[0], 0) + length = int(args[1]) + for l in self.api_flash_data_to_asm(address, length): + message("0x%x: %s\t%s\t%s" % (l[0], l[1], l[2], l[3])) + except Exception as e: + error(f"convert {args[0]} or {args[1]} to number fail: {str(e)}") + + def do_xdasmbytes(self, arg): + """Disassemble binary data + + Args: + arg (string): Binary data + """ + if not arg: + error("dasmbytes [address]") + return + try: + params = arg.strip().split() + address = 0 + if len(params) > 1: + address = int(params[1], 0) + data_bytes = params[0].strip() + if not data_bytes.startswith("b'"): + new_data_bytes = "b'" + for i in range(0, len(data_bytes), 2): + new_data_bytes += "\\x%s" % params[0][i:i+2] + data_bytes = new_data_bytes + "'" + for i in self.api_dasm_from_bytes(eval(data_bytes), address): + message("0x%x: %s\t%s\t%s" % (i[0], i[1], i[2], i[3])) + except Exception as e: + error(f"convert {arg} to bytes fail: {str(e)}") + + def do_xdasmnumber(self, arg): + """Disassemble a number + + Args: + arg (string): Number data + """ + if not arg: + error("dasmbytes [address]") + return + try: + params = arg.strip().split() + address = 0 + if len(params) > 1: + address = int(params[1], 0) + for i in self.api_dasm_from_bytes(int(params[0], 0).to_bytes(4, byteorder="little", signed=False), address): + message("0x%x: %s\t%s\t%s" % (i[0], i[1], i[2], i[3])) + except Exception as e: + error(f"convert {arg} to bytes fail: {str(e)}") + + def do_xclear_dasm_cache(self, arg): + """Clear disassembly cache + + Args: + arg (None): No arguments + """ + self.info_cache_asm.clear() From 988aff1f62d47f5d5576feb0263fd066dc45a57a Mon Sep 17 00:00:00 2001 From: Song Fangyuan <130735683+SFangYy@users.noreply.github.com> Date: Wed, 3 Dec 2025 13:52:33 +0800 Subject: [PATCH 13/14] feat(pdb): add terminal ui for xspdb (#5299) --- scripts/xspdb/ui.py | 886 +++++++++++++++++++++++++++++++ scripts/xspdb/xscmd/cmd_batch.py | 2 + scripts/xspdb/xscmd/cmd_info.py | 243 +++++++++ scripts/xspdb/xspdb.py | 53 ++ 4 files changed, 1184 insertions(+) create mode 100644 scripts/xspdb/ui.py diff --git a/scripts/xspdb/ui.py b/scripts/xspdb/ui.py new file mode 100644 index 00000000000..fc9840599f3 --- /dev/null +++ b/scripts/xspdb/ui.py @@ -0,0 +1,886 @@ +#*************************************************************************************** +# Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) +# Copyright (c) 2025 Institute of Computing Technology, Chinese Academy of Sciences +# +# XiangShan is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# +# See the Mulan PSL v2 for more details. +#*************************************************************************************** + + +import os +import io +import sys +import fcntl +import signal +import traceback +import time +import select +import readline +import ctypes +import threading +libc = ctypes.CDLL(None) + +_libc_stdout = ctypes.c_void_p.in_dll(libc, "stdout") + +def flush_cpp_stdout(): + libc.fflush(_libc_stdout) + +try: + import urwid +except ImportError: + urwid = None + +from xspdb.xscmd.util import GREEN, RESET, YELLOW + +class XiangShanSimpleTUI: + def __init__(self, pdb, console_max_height=10, content_asm_fix_width=55, console_prefix="(xiangshan)"): + self.asm_content = urwid.SimpleListWalker([]) + self.summary_info = urwid.SimpleListWalker([]) + self.pdb = pdb + self.pdb.on_update_tstep = self.update_console_ouput + self.console_input_cap = u"%s"%console_prefix + self.console_input = urwid.Edit(self.console_input_cap) + self.console_input_busy = ["(wait. )", "(wait.. )", "(wait...)"] + self.console_input_busy_index = -1 + self.console_default_txt = "\n\n\n\n" + self.console_outbuffer = self.console_default_txt; + self.console_output = ANSIText(self.console_outbuffer) + self.console_max_height = console_max_height + self.console_page_cache = None + self.console_page_cache_index = 0 + self.console_page_scroll_enable = True + self.content_asm_fix_width = content_asm_fix_width + self.cmd_history_index = readline.get_current_history_length() + 1 + self.last_key = None + self.last_line = None + self.complete_remain = [] + self.complete_maxshow = 100 + self.complete_tips = "\nAvailable commands:\n" + self._pdio = io.StringIO() + self.cpp_stderr_buffer = None + self.cpp_stdout_buffer = None + self.cmd_is_excuting = False + self.exit_error = None + self.batch_mode_depth = 0 + self.batch_mode_active = False + self._ui_lock = threading.Lock() # Add lock for thread safety + self.loop = None # Initialize loop to None + + self.file_list = urwid.ListBox(self.asm_content) + self.summary_pile = urwid.ListBox(self.summary_info) + + self.file_box = urwid.LineBox( + urwid.Filler(self.file_list, valign='top', height=('relative', 100)), + title=u"Memory Disassembly" + ) + self.summary_box = urwid.LineBox( + urwid.Filler(self.summary_pile, valign='top', height=("relative", 100)), + title=u"Summary Information" + ) + + top_pane = urwid.Columns([ + (self.content_asm_fix_width, self.file_box), + ("weight", 20, self.summary_box), + ], dividechars=0) + + console_box = urwid.LineBox( + urwid.Pile([ + ("flow", self.console_output), + ('flow', self.console_input), + ]), + title="Console") + + self.root = urwid.Frame( + body=urwid.Pile([ + ('weight', 1, top_pane) + ]), + footer=console_box, + focus_part="footer" + ) + # Note: skip update_asm_abs_info() during init, will be called after loop starts + # self.update_asm_abs_info() + # Note: need to update console output in the end + self._handle_stdout_error() + self.console_output.set_text(self._get_output(f"{GREEN}Tips: \n Press Esc button(or cmd exit) to exit tui. \n Ctrl+up/down/left/right to adjust the panels.{RESET}\n")) + + def update_top_pane(self): + """ + Update the layout of top_pane to reflect the new value of content_asm_fix_width. + """ + try: + # Ensure width is within reasonable bounds + self.content_asm_fix_width = max(10, min(self.content_asm_fix_width, 200)) + + self.root.body.contents[0] = ( + urwid.Columns([ + (self.content_asm_fix_width, self.file_box), + ("weight", 20, self.summary_box), + ], dividechars=0), + ('weight', 1) + ) + + # Safely redraw screen if loop exists + if hasattr(self, 'loop') and self.loop is not None: + try: + self.loop.draw_screen() + except: + # If draw_screen fails, we'll skip it - the screen will update later + pass + except Exception as e: + # If update fails, reset to a safe width and try again + try: + self.content_asm_fix_width = 55 # Reset to default + self.root.body.contents[0] = ( + urwid.Columns([ + (self.content_asm_fix_width, self.file_box), + ("weight", 20, self.summary_box), + ], dividechars=0), + ('weight', 1) + ) + except: + # If this also fails, just ignore - the layout will remain as is + pass + + def _redirect_stdout(self, on): + if not hasattr(self, "cpp_stdout_w"): + return + if on: + if not self.cpp_stderr_is_redirected: + os.dup2(self.cpp_stdout_w, 1) + self.cpp_stderr_is_redirected = True + else: + if self.cpp_stderr_is_redirected: + os.dup2(self.original_cpp_stdout, 1) + self.cpp_stderr_is_redirected = False + + def _redirect_stdout_on(self): + self.original_cpp_stdout = os.dup(1) + out_r, self.cpp_stdout_w = os.pipe() + self.cpp_stdout_buffer = os.fdopen(out_r, 'r') + self.cpp_stderr_is_redirected = False + # ignore redirect here: os.dup2(self.cpp_stdout_w, 1) + flags = fcntl.fcntl(self.cpp_stdout_buffer, fcntl.F_GETFL) + fcntl.fcntl(self.cpp_stdout_buffer, fcntl.F_SETFL, flags | os.O_NONBLOCK) + + def _redirect_stderr_on(self): + self.original_cpp_stderr = os.dup(2) + err_r, err_w = os.pipe() + os.dup2(err_w, 2) + self.cpp_stderr_buffer = os.fdopen(err_r, 'r') + flags = fcntl.fcntl(self.cpp_stderr_buffer, fcntl.F_GETFL) + fcntl.fcntl(self.cpp_stderr_buffer, fcntl.F_SETFL, flags | os.O_NONBLOCK) + + def _handle_stdout_error(self): + self._redirect_stderr_on() + self._redirect_stdout_on() + if getattr(self.pdb, "stdout", None): + self.old_stdout = self.pdb.stdout + self.pdb.stdout = self._pdio + self.sys_stdout = sys.stdout + sys.stdout = self._pdio + else: + self.old_stdout = sys.stdout + sys.stdout = self._pdio + if getattr(self.pdb, "stderr", None): + self.old_stderr = self.pdb.stderr + self.pdb.stderr = self._pdio + self.sys_stderr = sys.stderr + sys.stderr = self._pdio + else: + self.old_stderr = sys.stderr + sys.stderr = self._pdio + + def _redirect_stderr_off(self): + if self.cpp_stderr_buffer is not None: + os.dup2(self.original_cpp_stderr, 2) + os.close(self.original_cpp_stderr) + self.cpp_stderr_buffer.close() + self.cpp_stderr_buffer = None + + def _redirect_stdout_off(self): + if self.cpp_stdout_buffer is not None: + self._redirect_stdout(False) + os.close(self.original_cpp_stdout) + self.cpp_stdout_buffer.close() + self.cpp_stdout_buffer = None + + def _clear_stdout_error(self): + self._redirect_stderr_off() + self._redirect_stdout_off() + if getattr(self.pdb, "stdout", None): + self.pdb.stdout = self.old_stdout + sys.stdout = self.sys_stdout + else: + sys.stdout = self.old_stdout + if getattr(self.pdb, "stderr", None): + self.pdb.stderr = self.old_stderr + sys.stderr = self.sys_stderr + else: + sys.stderr = self.old_stderr + + def _get_pdb_out(self): + self._pdio.flush() + output = self._pdio.getvalue() + if self.cpp_stderr_buffer is not None: + try: + while True: + rlist, _, _ = select.select([self.cpp_stderr_buffer], [], [], 0) + if not rlist: + break + data = os.read(self.cpp_stderr_buffer.fileno(), 4096) + if not data: + break + output += data.decode(errors="replace") + except BlockingIOError: + pass + except Exception: + pass + if self.cpp_stdout_buffer is not None: + try: + while True: + rlist, _, _ = select.select([self.cpp_stdout_buffer], [], [], 0) + if not rlist: + break + data = os.read(self.cpp_stdout_buffer.fileno(), 4096) + if not data: + break + output += data.decode(errors="replace") + except BlockingIOError: + pass + except Exception: + pass + self._pdio.truncate(0) + self._pdio.seek(0) + return output + + def _get_output(self, txt="", clear=False): + if clear: + self.console_outbuffer = txt + if txt: + buffer = (self.console_outbuffer[-1] if self.console_outbuffer else "") + txt.replace("\t", " ") + # FIXME: why need remove duplicated '\n' ? + buffer = buffer.replace('\r', "\n").replace("\n\n", "\n") + if self.console_outbuffer: + self.console_outbuffer = self.console_outbuffer[:-1] + buffer + else: + self.console_outbuffer = buffer + self.console_outbuffer = "\n".join(self.console_outbuffer.split("\n")[-self.console_max_height:]) + + # Apply line wrapping based on console width + lines = self.console_outbuffer.split("\n") + width = self._get_console_width() + if width is not None: + wrapped = [] + for line in lines: + if not line: + wrapped.append("") + continue + wrapped.extend(self._wrap_console_line(line, width)) + lines = wrapped + return "\n".join(lines[-self.console_max_height:]) + + def _get_console_width(self): + """Get the current console width for line wrapping.""" + if not hasattr(self, 'loop') or self.loop is None: + return None + if not hasattr(self.loop, 'screen') or self.loop.screen is None: + return None + try: + cols, _ = self.loop.screen.get_cols_rows() + except Exception: + return None + width = max(1, cols - 2) + return width + + def _wrap_console_line(self, line, width): + """Wrap a single line to fit within the specified width, preserving ANSI codes.""" + if width <= 0: + return [line] + pattern = ANSIText.ANSI_ESCAPE_RE + tokens = [] + idx = 0 + for match in pattern.finditer(line): + if match.start() > idx: + tokens.append(("text", line[idx:match.start()])) + tokens.append(("ansi", match.group(0))) + idx = match.end() + if idx < len(line): + tokens.append(("text", line[idx:])) + if not tokens: + return [""] + result = [] + current = [] + current_len = 0 + for kind, value in tokens: + if kind == "ansi": + current.append(value) + continue + segment = value + while segment: + remaining = width - current_len + if remaining <= 0: + result.append("".join(current)) + current = [] + current_len = 0 + remaining = width + take = segment[:remaining] + current.append(take) + current_len += len(take) + segment = segment[remaining:] + if current_len >= width: + result.append("".join(current)) + current = [] + current_len = 0 + if current or not result: + result.append("".join(current)) + return result + + def cmd_history_get(self, index): + current_history_length = readline.get_current_history_length() + if index < 1 or index > current_history_length: + return None + return readline.get_history_item(index) + + def handle_input(self, key): + line = self.console_input.get_edit_text().lstrip() + if key == 'enter': + if self.console_page_cache is not None: + return + cmd = line + self.console_input.set_edit_text('') + self.process_command(cmd) + if cmd: + pre_cmd_index = readline.get_current_history_length() + if not (pre_cmd_index > 0 and readline.get_history_item(pre_cmd_index) == cmd): + readline.add_history(cmd) + self.cmd_history_index = readline.get_current_history_length() + 1 + elif key == 'esc': + if self.console_output_page_scroll("exit_page"): + return + self.exit() + elif key == 'ctrl up': + try: + self.console_max_height = max(3, self.console_max_height + 1) # Minimum height of 3 + new_text = self.console_outbuffer.split("\n") + new_text.insert(0, "") + self.console_outbuffer = "\n".join(new_text) + self.console_output.set_text(self._get_output()) + except Exception: + # If this fails, just ignore the keypress + pass + elif key == 'ctrl down': + try: + self.console_max_height = max(3, self.console_max_height - 1) # Minimum height of 3 + new_text = self.console_outbuffer.split("\n") + if len(new_text) > 1: # Ensure we don't remove all text + new_text = new_text[1:] + self.console_outbuffer = "\n".join(new_text) + self.console_output.set_text(self._get_output()) + except Exception: + # If this fails, just ignore the keypress + pass + elif key == 'ctrl left': + try: + self.content_asm_fix_width = max(10, self.content_asm_fix_width - 1) # Minimum width + self.update_top_pane() + except Exception: + # If this fails, just ignore the keypress + pass + elif key == 'ctrl right': + try: + self.content_asm_fix_width = min(200, self.content_asm_fix_width + 1) # Maximum width + self.update_top_pane() + except Exception: + # If this fails, just ignore the keypress + pass + elif key == 'ctrl f': + current = self.pdb.api_get_info_force_mid_address() + if current is None: + self.pdb.api_set_info_force_mid_address(self.pdb.api_get_last_info_mid_address()) + else: + self.pdb.api_set_info_force_mid_address(None) + self.update_asm_abs_info() + elif key == "ctrl u": + self.pdb.api_increase_info_force_address(-2) + self.update_asm_abs_info() + elif key == "ctrl n": + self.pdb.api_increase_info_force_address(2) + self.update_asm_abs_info() + elif key == "tab": + try: + self.complete_cmd(line) + except Exception as e: + self.console_output.set_text(self._get_output(f"{YELLOW}Complete cmd Error: {str(e)}\n{traceback.format_exc()}{RESET}\n")) + elif key == "up": + if self.console_output_page_scroll(1): + return + try: + self.cmd_history_index -= 1 + self.cmd_history_index = max(0, self.cmd_history_index) + hist_cmd = self.cmd_history_get(self.cmd_history_index) + if hist_cmd is not None: + self.console_input.set_edit_text(hist_cmd) + self.console_input.set_edit_pos(len(hist_cmd)) + except Exception: + # If history access fails, just ignore the keypress + pass + elif key == "down": + if self.console_output_page_scroll(-1): + return + try: + self.cmd_history_index += 1 + self.cmd_history_index = min(self.cmd_history_index, readline.get_current_history_length() + 1) + hist_cmd = self.cmd_history_get(self.cmd_history_index) + if hist_cmd is not None: + self.console_input.set_edit_text(hist_cmd) + self.console_input.set_edit_pos(len(hist_cmd)) + except Exception: + # If history access fails, just ignore the keypress + pass + self.last_key = key + self.last_line = line + + def complete_cmd(self, line): + if self.last_key == "tab" and self.last_line == line: + end_text = "" + cmd = self.complete_remain + if not cmd: + return + if len(cmd) > self.complete_maxshow: + end_text = f"\n...({len(cmd) - self.complete_maxshow} more)" + self.console_output.set_text(self._get_output() + self.complete_tips + " ".join(cmd[:self.complete_maxshow]) + end_text) + self.complete_remain = cmd[self.complete_maxshow:] + return + self.complete_remain = [] + state = 0 + cmp = [] + cmd, args, _ = self.pdb.parseline(line) + if " " in line: + complete_func = getattr(self.pdb, f"complete_{cmd}", None) + if complete_func: + arg = args + if " " in args: + arg = args.split()[-1] + idbg = line.find(arg) + cmp = complete_func(arg, line, idbg, len(line)) + else: + while True: + cmp_item = self.pdb.complete(line, state) + if not cmp_item: + break + state += 1 + cmp.append(cmp_item) + if cmp: + prefix = os.path.commonprefix(cmp) + full_cmd = line[:line.rfind(" ") + 1] if " " in line else "" + if prefix: + full_cmd += prefix + else: + full_cmd = line + self.console_input.set_edit_text(full_cmd) + self.console_input.set_edit_pos(len(full_cmd)) + end_text = "" + if len(cmp) > self.complete_maxshow: + self.complete_remain = cmp[self.complete_maxshow:] + end_text = f"\n...({len(self.complete_remain)} more)" + self.console_output.set_text(self._get_output() + self.complete_tips + " ".join(cmp[:self.complete_maxshow]) + end_text) + + def console_output_page_scroll(self, deta): + if self.console_page_cache is None: + return False + if deta == "exit_page": + if self.console_page_cache is not None: + self.console_page_cache = None + self.console_page_cache_index = 0 + self.console_output.set_text(self._get_output()) + self.console_input.set_caption(self.console_input_cap) + self.root.focus_part = 'footer' + else: + try: + self.console_page_cache_index += deta + self.console_page_cache_index = min(self.console_page_cache_index, len(self.console_page_cache) - self.console_max_height) + self.console_page_cache_index = max(self.console_page_cache_index, 0) + except Exception: + # If scrolling fails, reset to safe state + self.console_page_cache_index = 0 + self.update_console_ouput(False) + return True + + def update_console_ouput(self, redirect_stdout=True): + flush_cpp_stdout() + if redirect_stdout: + self._redirect_stdout(False) + + try: + if self.console_page_cache is not None: + pindex = self.console_page_cache_index + cache_len = len(self.console_page_cache) + # Ensure pindex is within bounds + pindex = max(0, min(pindex, cache_len - 1)) + end_index = min(cache_len, pindex + self.console_max_height) + text_data = "\n" + "\n".join(self.console_page_cache[pindex:end_index]) + else: + text_data = self._get_pdb_out() + text_lines = text_data.split("\n") + # just check the last output check + if len(text_lines) > self.console_max_height and redirect_stdout == False and self.console_page_scroll_enable == True: + self.console_page_cache = text_lines + self.console_page_cache_index = 0 + text_data = "\n" + "\n".join(text_lines[:self.console_max_height]) + self.console_input.set_caption(f"") + self.root.focus_part = None + + try: + self.console_output.set_text(self._get_output(text_data)) + except Exception: + # If setting text fails, try with empty string + self.console_output.set_text("") + + if self.console_input_busy_index >= 0: + self.console_input_busy_index += 1 + n = self.console_input_busy_index % len(self.console_input_busy) + self.console_input.set_caption(self.console_input_busy[n]) + + try: + if hasattr(self, 'loop') and self.loop is not None: + self.loop.screen.clear() + self.loop.draw_screen() + except Exception: + # If screen operations fail, just ignore + pass + except Exception as e: + # Complete fallback - just try to keep UI responsive + try: + self.console_output.set_text(self._get_output(f"{YELLOW}Console update error: {e}{RESET}\n")) + except: + pass + + if redirect_stdout: + self._redirect_stdout(True) + + def batch_mode_enter(self): + if self.batch_mode_depth == 0: + self.console_page_scroll_enable = False + self.batch_mode_active = True + self.batch_mode_depth += 1 + + def batch_mode_exit(self): + self.batch_mode_depth -= 1 + if self.batch_mode_depth == 0: + self.console_page_scroll_enable = True + self.batch_mode_active = False + self.batch_mode_depth = max(0, self.batch_mode_depth) + + def is_working_in_batch_mode(self): + if self.batch_mode_active: + return True + return self.pdb.is_working_in_batch_mode() + + def record_cmd(self, cmd): + self.pdb.record_cmd(cmd) + + def process_command(self, cmd): + if cmd == "clear": + self.console_output.set_text(self._get_output(self.console_default_txt, clear=True)) + return + if cmd.startswith("xload_script"): + args = cmd.strip().split() + if len(args) < 2: + self.console_output.set_text(self._get_output("Usage: xload_script [gap_time]\n")) + return + script_file = args[1] + gap_time = 0.2 + if len(args) > 2: + gap_time = float(args[2]) + if not os.path.exists(script_file): + self.console_output.set_text(self._get_output(f"Error: Script file {script_file} not found.\n")) + return + self.console_output.set_text(self._get_output(cmd + "\n")) + self.record_cmd(cmd) + self.batch_mode_enter() + self.pdb.api_exec_script(script_file, gap_time=gap_time) + self._exec_batch_cmds() + self.batch_mode_exit() + elif cmd.startswith("xload_log"): + args = cmd.strip().split() + if len(args) < 2: + self.console_output.set_text(self._get_output("Usage: xload_log [gap_time]\n")) + return + log_file = args[1] + gap_time = 0 + if len(args) > 2: + gap_time = float(args[2]) + if not os.path.exists(log_file): + self.console_output.set_text(self._get_output(f"Error: Log file {log_file} not found.\n")) + return + self.console_output.set_text(self._get_output(cmd + "\n")) + self.record_cmd(cmd) + self.batch_mode_enter() + self.pdb.api_exec_script(log_file, gap_time=gap_time, + target_prefix=self.pdb.log_cmd_prefix, + target_subfix=self.pdb.log_cmd_suffix, + ) + self._exec_batch_cmds() + self.batch_mode_exit() + else: + self.record_cmd(cmd) + self._exec_cmd(cmd) + + def _exec_cmd(self, cmd): + if cmd == "xcontinue_batch": + self._exec_batch_cmds() + return + if cmd in ["continue", "c", "count"]: + self.pdb.tui_ret = self.pdb.onecmd(cmd) + self.exit() + return self.pdb.tui_ret + if cmd in ["exit", "quit", "q"]: + self.exit() + return self.pdb.tui_ret + self.console_output.set_text(self._get_output(cmd + "\n")) + cap = self.console_input.caption + self.console_input_busy_index = 0 + self.console_input.set_caption(self.console_input_busy[self.console_input_busy_index]) + self.loop.draw_screen() + self.cmd_is_excuting = True + original_sigint = signal.getsignal(signal.SIGINT) + def _sigint_handler(s, f): + self.pdb._sigint_handler(s, f) + signal.signal(signal.SIGINT, _sigint_handler) + self._redirect_stdout(True) + ret = self.pdb.onecmd(cmd, log_cmd=False) + flush_cpp_stdout() + self._redirect_stdout(False) + signal.signal(signal.SIGINT, original_sigint) + self.cmd_is_excuting = False + self.console_input_busy_index = -1 + self.console_input.set_caption(cap) + self.update_asm_abs_info() + self.update_console_ouput(False) + return ret + + def update_asm_abs_info(self): + self.asm_content.clear() + asm_size = self.get_part_size("asm") + for l in self.pdb.api_asm_info(asm_size): + self.asm_content.append( + urwid.Text(l) + ) + self.summary_info.clear() + abs_size = self.get_part_size("abs") + for x in self.pdb.api_abs_info(abs_size): + self.summary_info.append( + urwid.Text(x) + ) + + def get_part_size(self, type): + w, h = urwid.raw_display.Screen().get_cols_rows() + header_h = self.root.header.rows((w,)) if self.root.header else 0 + footer_h = self.root.footer.rows((w,)) if self.root.footer else 0 + h = h - header_h - footer_h + w = w - 2 + if type == "asm": + return self.content_asm_fix_width, h - 2 + return w - self.content_asm_fix_width, h - 2 + + def exit(self, loop=None, arg=None): + clear_success = False + try: + if self.exit_error is None: + self._clear_stdout_error() + clear_success = True + except Exception as e: + import traceback + try: + self.console_output.set_text(self._get_output("%s\n%s\n"%(str(e), + traceback.format_exc()))) + except: + # If even console output fails, just print to stdout + print(f"Exit error: {e}") + traceback.print_exc() + self.exit_error = e + if clear_success: + raise urwid.ExitMainLoop() + + def _exec_batch_cmds(self): + def break_handler(c): + self.console_output.set_text(self._get_output(f"{YELLOW}Batch cmd excution is breaked, after {c} cmds{YELLOW}")) + self.loop.draw_screen() + return False + cmd_count = self.pdb._exec_batch_cmds(lambda x, _: self._exec_cmd(x), + break_handler=break_handler) + if cmd_count is False: + return + self.console_output.set_text(self._get_output(f"")) + self.loop.draw_screen() + + def check_exec_batch_cmds(self, loop, user_data=None): + try: + if not self.is_working_in_batch_mode(): + return + self._exec_batch_cmds() + except Exception as e: + import traceback + print(f"Error in check_exec_batch_cmds: {e}") + traceback.print_exc() + +# Color configuration (using ANSI color names) +palette = [ + ('success_green', 'light green', 'black'), + ('norm_red', 'light red', 'black'), + ('error_red', 'light red', 'black'), + ('body', 'white', 'black'), + ('divider', 'white', 'black'), + ('border', 'white', 'black'), + # Add ANSI color mappings + ('black', 'black', 'black'), + ('dark red', 'dark red', 'black'), + ('dark green', 'dark green', 'black'), + ('brown', 'brown', 'black'), + ('dark blue', 'dark blue', 'black'), + ('dark magenta', 'dark magenta','black'), + ('dark cyan', 'dark cyan', 'black'), + ('light gray', 'light gray', 'black'), + ('dark gray', 'dark gray', 'black'), + ('light red', 'light red', 'black'), + ('light green', 'light green', 'black'), + ('yellow', 'yellow', 'black'), + ('light blue', 'light blue', 'black'), + ('light magenta', 'light magenta','black'), + ('light cyan', 'light cyan', 'black'), + ('white', 'white', 'black'), +] + + +def enter_simple_tui(pdb): + if urwid is None: + print("urwid not found, please install urwid first.") + return + try: + app = XiangShanSimpleTUI(pdb) + loop = urwid.MainLoop( + app.root, + palette=palette, + unhandled_input=app.handle_input, + handle_mouse=False + ) + app.loop = loop + original_sigint = signal.getsignal(signal.SIGINT) + def _sigint_handler(s, f): + loop.set_alarm_in(0.0, app.exit) + signal.signal(signal.SIGINT, _sigint_handler) + # Initialize UI content after loop is created + def init_ui_content(loop, user_data): + try: + app.update_asm_abs_info() + except Exception as e: + import traceback + print(f"[Error] Failed to initialize UI content: {e}") + traceback.print_exc() + loop.set_alarm_in(0.0, init_ui_content) + loop.set_alarm_in(0.1, app.check_exec_batch_cmds) + loop.run() + signal.signal(signal.SIGINT, original_sigint) + except Exception as e: + import traceback + print(f"\n[XUI Error] {e}") + traceback.print_exc() + raise + +import re +class ANSIText(urwid.Text): + """ + A subclass of urwid.Text that supports ANSI color codes. + """ + ANSI_COLOR_MAP = { + '30': 'black', + '31': 'dark red', + '32': 'dark green', + '33': 'brown', + '34': 'dark blue', + '35': 'dark magenta', + '36': 'dark cyan', + '37': 'light gray', + '90': 'dark gray', + '91': 'light red', + '92': 'light green', + '93': 'yellow', + '94': 'light blue', + '95': 'light magenta', + '96': 'light cyan', + '97': 'white', + } + + ANSI_ESCAPE_RE = re.compile(r'\x1b\[(\d+)(;\d+)*m') + + def __init__(self, text='', align='left'): + super().__init__('', align=align, wrap='any') + self.set_text(text) + + def set_text(self, text): + """ + Parse the ANSI text and set it with urwid attributes. + """ + parsed_text = self._parse_ansi(text) + super().set_text(parsed_text) + + def _parse_ansi(self, text): + """ + Parse ANSI escape sequences and convert them to urwid attributes. + """ + segments = [] + current_attr = None + pos = 0 + + for match in self.ANSI_ESCAPE_RE.finditer(text): + start, end = match.span() + if start > pos: + segments.append((current_attr, text[pos:start])) + ansi_codes = match.group(0) + current_attr = self._ansi_to_attr(ansi_codes) + pos = end + + if pos < len(text): + segments.append((current_attr, text[pos:])) + + return segments + + def _ansi_to_attr(self, ansi_code): + """ + Convert ANSI escape codes to urwid attributes. + """ + codes = ansi_code[2:-1].split(';') + if len(codes) == 0: + return None # Reset attributes + + fg_code = codes[0] + fg_color = self.ANSI_COLOR_MAP.get(fg_code, None) + if fg_color: + return fg_color + return None + + def get_line_translation(self, maxcol: int, ta=None): + """Get line translation with error handling.""" + try: + return super().get_line_translation(maxcol, ta) + except Exception: + # Fallback: if translation fails, update cache and try again + if not self._cache_maxcol or self._cache_maxcol != maxcol or \ + not hasattr(self, "_cache_translation"): + try: + self._update_cache_translation(maxcol, ta) + except Exception: + # If cache update also fails, return empty translation + return [(0, None, None)] + try: + return self._cache_translation + except Exception: + return [(0, None, None)] diff --git a/scripts/xspdb/xscmd/cmd_batch.py b/scripts/xspdb/xscmd/cmd_batch.py index fac28fa3cd7..f3ab0c3609e 100644 --- a/scripts/xspdb/xscmd/cmd_batch.py +++ b/scripts/xspdb/xscmd/cmd_batch.py @@ -117,3 +117,5 @@ def do_xload_script(self, arg): def complete_xload_script(self, text, line, begidx, endidx): return self.api_complite_localfile(text) + def is_working_in_batch_mode(self): + return self.batch_depth > 0 diff --git a/scripts/xspdb/xscmd/cmd_info.py b/scripts/xspdb/xscmd/cmd_info.py index 3c323495fee..a9d70bf7107 100644 --- a/scripts/xspdb/xscmd/cmd_info.py +++ b/scripts/xspdb/xscmd/cmd_info.py @@ -32,6 +32,46 @@ def __init__(self): self.info_force_address = None self.info_last_address = None + def api_increase_info_force_address(self, deta): + """Increase the force mid address for disassembly Info (the disassembly info in the TUI window) + + Args: + deta (int): Address to increase + """ + if self.info_force_address is None: + return + self.info_force_address += deta + return self.info_force_address + + def api_get_last_info_mid_address(self): + """Get the last mid address of disassembly info + + Returns: + int: Address + """ + if self.info_last_address is not None: + return self.info_last_address + return self.mem_base + + def api_set_info_force_mid_address(self, val): + """Set the force mid address for disassembly Info (the disassembly info in the TUI window) + + Args: + val (int): Address to force disassembly + """ + if val is not None: + self.info_force_address = val + else: + self.info_force_address = None + + def api_get_info_force_mid_address(self): + """Get the force mid address for disassembly Info (the disassembly info in the TUI window) + + Returns: + int: Address + """ + return self.info_force_address + def api_info_get_last_commit_pc(self): """Get the last commit PC @@ -44,3 +84,206 @@ def api_info_get_last_commit_pc(self): valid_pc_list = [x[0] for x in self.api_commit_pc_list() if (x[1] or self.api_is_difftest_diff_run())] return max(valid_pc_list) if valid_pc_list else self.mem_base + def do_xset_dasm_info_force_mid_address(self, arg): + """Set the force mid address for disassembly Info (the disassembly info in the TUI window) + + Args: + arg (number or empty): Address to force disassembly + """ + if not arg.strip(): + info("reset dasm info force address to None") + self.api_set_info_force_mid_address(None) + return + try: + self.api_set_info_force_mid_address(int(arg, 0)) + info(f"force address set to 0x{self.info_force_address:x}") + except ValueError: + error(f"Invalid address: {arg}") + + def api_asm_info(self, size): + """Get the current memory disassembly + + Args: + size (int, int): Width, height = size + + Returns: + list[string]: Disassembly list + """ + # size: w, h + _, h = size + base_addr = self.mem_base + pc_list = self.api_commit_pc_list() + # ignore valid check when difftest is run + valid_pc_list = [x[0] for x in pc_list if (x[1] or self.api_is_difftest_diff_run())] + pc_last = base_addr + + if self.info_cached_cmpclist: + new_pc = [x[0] for x, y in zip(pc_list, self.info_cached_cmpclist) if x[0] != y[0] and x[1] != 0] + if new_pc: + pc_last = max(new_pc) + + if pc_last == base_addr and valid_pc_list: + pc_last = max(valid_pc_list) + + if self.info_force_address: + pc_last = self.info_force_address - self.info_force_address % 2 + + self.info_last_address = pc_last + self.info_cached_cmpclist = pc_list.copy() + # Check the cache first; if not found, generate it + cache_index = pc_last - pc_last % self.info_cache_bsz + asm_data = self.info_cache_asm.get(cache_index, + self.api_all_data_to_asm(cache_index, self.info_cache_bsz)) + self.info_cache_asm[cache_index] = asm_data + + # Need to check boundaries; if near a boundary, fetch adjacent cache blocks + cache_index_ext = base_addr + if pc_last % self.info_cache_bsz < h: + cache_index_ext = cache_index - self.info_cache_bsz + elif self.info_cache_bsz - pc_last % self.info_cache_bsz < h: + cache_index_ext = cache_index + self.info_cache_bsz + + # Boundary is valid + if cache_index_ext > base_addr: + asm_data_ext = self.info_cache_asm.get(cache_index_ext, + self.api_all_data_to_asm(cache_index_ext, self.info_cache_bsz)) + self.info_cache_asm[cache_index_ext] = asm_data_ext + if cache_index_ext < cache_index: + asm_data = self.api_merge_asm_list_overlap_append(asm_data_ext, asm_data) + else: + asm_data = self.api_merge_asm_list_overlap_append(asm_data, asm_data_ext) + + # Quickly locate the position of pc_last + address_list = [x[0] for x in asm_data] + pc_last_index = bisect.bisect_left(address_list, pc_last) + start_line = max(0, pc_last_index - h//2) + asm_lines = [] + for l in asm_data[start_line:start_line + h]: + find_pc = l[0] in valid_pc_list + line = "%s|0x%x: %s %s %s" % (">" if find_pc else " ", l[0], l[1], l[2], l[3]) + if find_pc and l[0] == pc_last: + line = ("norm_red", line) + if self.info_force_address is not None: + end_addr = l[0] + (2 if "c." in l[2] else 4) + if l[0] <= self.info_force_address < end_addr: + line = ("light blue", line) + asm_lines.append(line) + return asm_lines + + def api_abs_info(self, size): + """Get the current status summary information, such as general-purpose registers + + Args: + size (int, int): Width, height = size + + Returns: + list[string]: Status list + """ + # size: w, h + # FIXME + abs_list = [] + # Int regs + abs_list += ["IntReg:"] + def ireg_map(): + if not hasattr(self.xsp, "GetFromU64Array"): + return [('error_red',">")] + return " ".join(["%3s: 0x%x" % (self.iregs[i], + self.xsp.GetFromU64Array(self.difftest_stat.regs_int.value, i)) + for i in range(32)]) + abs_list += [ireg_map()] + # Float regs + abs_list += ["\nFloatReg:"] + def freg_map(): + return " ".join(["%3s: 0x%x" % (self.fregs[i], + self.xsp.GetFromU64Array(self.difftest_stat.regs_fp.value, i)) + for i in range(32)]) + abs_list += [freg_map()] + # Commit PCs + abs_list += ["\nCommit PC:"] + abs_list += [" ".join(["0x%x%s" % (x[0], "" if x[1] else "*") for x in self.api_commit_pc_list()])] + abs_list += ["max commit: 0x%x" % max([x[0] for x in self.api_commit_pc_list()])] + # Add other content to display here + + # csr + abs_list += ["\nCSR:"] + abs_list += ["mstatus: 0x%x " % self.difftest_stat.csr.mstatus + + "mcause: 0x%x " % self.difftest_stat.csr.mcause + + "mepc: 0x%x " % self.difftest_stat.csr.mepc + + "mtval: 0x%x " % self.difftest_stat.csr.mtval + + "mtvec: 0x%x " % self.difftest_stat.csr.mtvec + + "privilegeMode: %d " % self.difftest_stat.csr.privilegeMode + + "mie: 0x%x " % self.difftest_stat.csr.mie + + "mip: 0x%x " % self.difftest_stat.csr.mip + + "satp: 0x%x " % self.difftest_stat.csr.satp + + "sstatus: 0x%x " % self.difftest_stat.csr.sstatus + + "scause: 0x%x " % self.difftest_stat.csr.scause + + "sepc: 0x%x " % self.difftest_stat.csr.sepc + + "stval: 0x%x " % self.difftest_stat.csr.stval + + "stvec: 0x%x " % self.difftest_stat.csr.stvec + ] + # fcsr + abs_list += ["\nFCSR: 0x%x" % self.difftest_stat.fcsr.fcsr] + + # DASM info base address + if self.info_force_address is not None: + abs_list += [("light blue", f"\nDASM Window Force Mid Address: 0x{self.info_force_address:x}")] + + if self.api_is_difftest_diff_run(): + abs_list += [("light red", f"\nDifftest is running with ref: {self.api_get_ref_so_path()}")] + + # Bin file + abs_list += ["\nLoaded Bin:"] + abs_list += [f"file: {self.exec_bin_file}"] + + # Watch List + if self.info_watch_list: + abs_list += ["\nWatch List:"] + for k , v in self.info_watch_list.items(): + abs_list += [f"{k}({v.W()}): 0x{v.value:x}"] + + if self.flash_bin_file: + abs_list += ["\nFlash Bin:"] + abs_list += [f"file: {self.flash_bin_file}"] + + # Watched commit pc + commit_pc_cheker = self.condition_watch_commit_pc.get("checker") + if commit_pc_cheker: + stat_txt = "(Disabled)" if commit_pc_cheker.IsDisable() else "" + abs_list += [f"\nWatched Commit PC{stat_txt}:"] + watch_pc = OrderedDict() + for k, v in commit_pc_cheker.ListCondition().items(): + pc = k.split("_")[2] + if pc in watch_pc: + watch_pc[pc].append(v) + else: + watch_pc[pc] = [v] + for pc, v in watch_pc.items(): + checked = sum(v) > 0 + if checked: + abs_list += [("error_red", f"{pc}: {checked}")] + else: + abs_list += [f"{pc}: {checked}"] + + if self.api_is_hit_good_trap(): + abs_list += ["\nProgram:"] + abs_list += [("success_green", "HIT GOOD TRAP")] + elif self.api_is_hit_good_loop(): + abs_list += ["\nProgram:"] + abs_list += [("success_green", "HIT GOOD LOOP")] + + if self.api_is_trap_break_on(): + abs_list += ["\nTrap Info:"] + trap_info = self.api_get_trap_info() + abs_list += [f"pc: 0x{trap_info['pc']:x} code: 0x{trap_info['code']:x} hasTrap: {trap_info['hasTrap']} cycle: 0x{trap_info['cycleCnt']:x} hasWFI: {trap_info['hasWFI']}"] + + if self.api_is_xbreak_on(): + abs_list += ["\nXBreaks:"] + for br in self.api_xbreak_list(): + if br[4]: + abs_list += [("error_red", f"{br[0]}(0x{br[1]:x}) {br[2]} 0x{br[3]:x} hinted: {br[4]}")] + else: + abs_list += [f"{br[0]}(0x{br[1]:x}) {br[2]} 0x{br[3]:x} hinted: {br[4]}"] + + # TBD + # abs_list += [("error_red", "\nFIXME:\nMore Data to be done\n")] + return abs_list diff --git a/scripts/xspdb/xspdb.py b/scripts/xspdb/xspdb.py index 7e713395d9c..b496163c9d1 100644 --- a/scripts/xspdb/xspdb.py +++ b/scripts/xspdb/xspdb.py @@ -14,6 +14,7 @@ # See the Mulan PSL v2 for more details. #*************************************************************************************** + import os import sys import inspect @@ -29,6 +30,7 @@ from logging import DEBUG, INFO, WARNING, ERROR from xspdb.xscmd.util import load_module_from_file, load_package_from_dir, set_xspdb_log_level, set_xspdb_debug_level, logging_level_map from xspdb.xscmd.util import message, info, error, warn, build_prefix_tree, register_commands, YELLOW, RESET, xspdb_set_log, xspdb_set_log_file, log_message +from xspdb.ui import enter_simple_tui class XSPdb(pdb.Pdb): def __init__(self, dut, default_file=None, @@ -274,6 +276,57 @@ def parseline(self, line): cmd, arg, line = super().parseline(line) return cmd or "", arg, line + def do_xui(self, arg): + """Enter the Text UI interface + + Args: + arg (None): No arguments + """ + if self.in_tui: + error("Already in TUI") + return + try: + self.tui_ret = None + self.in_tui = True + enter_simple_tui(self) + self.in_tui = False + self.on_update_tstep = None + self.interrupt = False + info("XUI Exited.") + return self.tui_ret + except Exception as e: + import traceback + self.in_tui = False + error(f"XUI Error: {e}") + traceback.print_exc() + return False + + def do_xcmds(self, arg): + """Print all xcmds + + Args: + arg (None): No arguments + """ + cmd_count = 0 + max_cmd_len = 0 + cmds = [] + for cmd in dir(self): + if not cmd.startswith("do_x"): + continue + cmd_name = cmd[3:] + max_cmd_len = max(max_cmd_len, len(cmd_name)) + cmd_desc = f"{YELLOW}Description not found{RESET}" + try: + cmd_desc = getattr(self, cmd).__doc__.split("\n")[0] + except Exception as e: + pass + cmds.append((cmd, cmd_name, cmd_desc)) + cmd_count += 1 + cmds.sort(key=lambda x: x[0]) + for c in cmds: + message(("%-"+str(max_cmd_len+2)+"s: %s (from %s)") % (c[1], c[2], self.register_map.get(c[0], self.__class__.__name__))) + info(f"Total {cmd_count} xcommands") + def do_xcmds(self, arg): """Print all xcmds From bb37b9f2833a531a26e0af5ada7e6886864d8ec6 Mon Sep 17 00:00:00 2001 From: SFangYy Date: Tue, 2 Dec 2025 22:42:39 +0800 Subject: [PATCH 14/14] fix(pdb): resolve API incompatibility with difftest update This PR updates the xspdb to adapt to the API changes. --------- Co-authored-by: Zhicheng Yao --- scripts/xspdb/xscmd/cmd_difftest.py | 18 +++++++++++++++--- scripts/xspdb/xspdb.py | 8 ++++---- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/scripts/xspdb/xscmd/cmd_difftest.py b/scripts/xspdb/xscmd/cmd_difftest.py index f41a6e25ab1..677e650b070 100644 --- a/scripts/xspdb/xscmd/cmd_difftest.py +++ b/scripts/xspdb/xscmd/cmd_difftest.py @@ -70,11 +70,10 @@ def api_init_ref(self, force=False): self.df.finish_device() self.df.GoldenMemFinish() self.df.difftest_finish() - self.df.difftest_init() + self.api_init_mem() + self.df.difftest_init(True, self.exec_bin_file) self.difftest_stat = self.df.GetDifftest(0).dut self.df.init_device() - self.df.GoldenMemInit() - self.df.init_nemuproxy(0) self.difftest_ref_is_inited = True return True @@ -326,6 +325,19 @@ def api_difftest_get_instance(self, instance=0): """ return self.df.GetDifftest(instance) + def do_xdifftest_turn_on(self, arg): + """Turn on the difftest diff + + Args: + arg (string): Turn on or off + """ + if arg.strip() == "on": + self.api_set_difftest_diff(True) + elif arg.strip() == "off": + self.api_set_difftest_diff(False) + else: + error("usage: xdifftest_turn_on ") + def complete_xdifftest_turn_on(self, text, line, begidx, endidx): return [x for x in ["on", "off"] if x.startswith(text)] if text else ["on", "off"] diff --git a/scripts/xspdb/xspdb.py b/scripts/xspdb/xspdb.py index b496163c9d1..5f9b8a51b01 100644 --- a/scripts/xspdb/xspdb.py +++ b/scripts/xspdb/xspdb.py @@ -86,7 +86,7 @@ def __init__(self, dut, default_file=None, self.df.InitFlash("") self.xspdb_init_bin = "xspdb_flash_init.bin" self.flash_bin_file = None - self.df.difftest_init() + self.df.difftest_init(False, self.mem_size) self.difftest_stat = df.GetDifftest(0).dut self.difftest_flash = df.GetFlash() self.register_map = OrderedDict() @@ -101,17 +101,17 @@ def __init__(self, dut, default_file=None, def check_is_need_trace(self): if getattr(self, "__xspdb_need_fast_trace__", False) is True: setattr(self, "__xspdb_need_fast_trace__" ,False) - info("Force set trace") + info("Force set trace") self.set_trace() if self.interrupt is True: if getattr(self, "__xspdb_set_traced__", None) is None: - self.setattr(self, "__xspdb_set_traced__", True) + self.setattr(self, "__xspdb_set_traced__", True) info("Find interrupt, set trace") self.set_trace() return False def __init_pdb(self, args): - if args.log: + if args.log: self.api_log_enable_log(True) if args.log_file: self.api_log_set_log_file(args.log_file)