5 | 5 | #include <tuple> |
6 | 6 | #include <utility> |
7 | 7 |
8 | | -#include <vector> |
9 | 8 | #include <map> |
10 | 9 | #include <memory> |
11 | | -#include <unordered_map> |
12 | 10 | #include <set> |
| 11 | +#include <unordered_map> |
| 12 | +#include <vector> |
13 | 13 |
14 | | -// #include "iss/util.hpp" |
15 | | -// #include "iss/types.hxx" |
16 | | -#include "udb/xregister.hpp" |
17 | 14 | #include "udb/csr.hpp" |
18 | | -#include "udb/memory.hpp" |
19 | 15 | #include "udb/enum.hxx" |
| 16 | +#include "udb/memory.hpp" |
20 | 17 | #include "udb/version.hpp" |
21 | | -// #include "iss/bitfield_types.hxx" |
22 | | -// #include "iss/csr_types.hxx" |
23 | | -// #include "iss/inst.hpp" |
24 | | - |
| 18 | +#include "udb/xregister.hpp" |
25 | 19 |
26 | 20 | #ifdef assert |
27 | 21 | #undef assert |
28 | 22 | #endif |
29 | 23 |
30 | 24 | namespace udb { |
31 | 25 |
32 | | - // probably unwise to change these |
33 | | - static constexpr uint64_t LOG_MEM_REGION_SZ = 12; // 4k regions |
34 | | - static constexpr uint64_t LOG_EXECMAP_CHUNK_SZ = 12; |
| 26 | +// probably unwise to change these |
| 27 | +static constexpr uint64_t LOG_MEM_REGION_SZ = 12; // 4k regions |
| 28 | +static constexpr uint64_t LOG_EXECMAP_CHUNK_SZ = 12; |
35 | 29 |
36 | | - // derived values - do not modify |
37 | | - static constexpr uint64_t MEM_REGION_SZ = 1UL << LOG_MEM_REGION_SZ; |
38 | | - static constexpr uint64_t MEM_REGION_MASK = ~(MEM_REGION_SZ - 1); |
| 30 | +// derived values - do not modify |
| 31 | +static constexpr uint64_t MEM_REGION_SZ = 1UL << LOG_MEM_REGION_SZ; |
| 32 | +static constexpr uint64_t MEM_REGION_MASK = ~(MEM_REGION_SZ - 1); |
39 | 33 |
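As a quick illustration (not part of the header; the helper names are made up for the example): MEM_REGION_MASK drops the low LOG_MEM_REGION_SZ bits of a physical address to give the 4 KiB region base, while MEM_REGION_SZ - 1 keeps the offset inside that region.

    // Illustrative helpers only; they assume the constants above are in scope.
    inline uint64_t region_base(uint64_t paddr) {
      return paddr & MEM_REGION_MASK;      // e.g. 0x80000123 -> 0x80000000
    }
    inline uint64_t region_offset(uint64_t paddr) {
      return paddr & (MEM_REGION_SZ - 1);  // e.g. 0x80000123 -> 0x123
    }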
40 | | - static const constexpr uint64_t NS_BIT_OFFSET = 52; // non-secure bit |
41 | | - static const constexpr uint64_t NS_MASK = 1UL << NS_BIT_OFFSET; |
| 34 | +static const constexpr uint64_t NS_BIT_OFFSET = 52; // non-secure bit |
| 35 | +static const constexpr uint64_t NS_MASK = 1UL << NS_BIT_OFFSET; |
42 | 36 |
43 | | - // hash used to initialize each 64-bit word in memory |
44 | | - // assumes addr is the aligned physical address plus NS bit |
45 | | - inline uint64_t mem_init_hash(uint64_t addr) { |
46 | | - uint8_t ns = addr >> NS_BIT_OFFSET; // NOLINT |
47 | | - return ((addr ^ (addr >> 4)) & 0x0f0f0f0f0f0f0f0eUL) | // NOLINT |
48 | | - (0x1010101010101010UL << ns); // NOLINT |
49 | | - } |
| 37 | +// hash used to initialize each 64-bit word in memory |
| 38 | +// assumes addr is the aligned physical address plus NS bit |
| 39 | +inline uint64_t mem_init_hash(uint64_t addr) { |
| 40 | + uint8_t ns = addr >> NS_BIT_OFFSET; // NOLINT |
| 41 | + return ((addr ^ (addr >> 4)) & 0x0f0f0f0f0f0f0f0eUL) | // NOLINT |
| 42 | + (0x1010101010101010UL << ns); // NOLINT |
| 43 | +} |
| 44 | + |
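For reference, a small self-contained check of the hash above; the formula is restated locally so the snippet compiles on its own, and the expected value is worked out from the expression as written rather than taken from the project.

    #include <cstdint>
    #include <cstdio>

    // Local copy of the formula above, for illustration only.
    static uint64_t mem_init_hash_demo(uint64_t addr) {
      uint8_t ns = addr >> 52;  // NS_BIT_OFFSET
      return ((addr ^ (addr >> 4)) & 0x0f0f0f0f0f0f0f0eULL) |
             (0x1010101010101010ULL << ns);
    }

    int main() {
      // addr = 0x1000 with the NS bit clear:
      //   addr ^ (addr >> 4)          = 0x1000 ^ 0x0100 = 0x1100
      //   0x1100 & 0x0f0f0f0f0f0f0f0e = 0x0100
      //   0x0100 | (0x1010... << 0)   = 0x1010101010101110
      std::printf("%016llx\n",
                  (unsigned long long)mem_init_hash_demo(0x1000));
    }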
| 45 | +class AbstractTracer { |
| 46 | +public: |
| 47 | + AbstractTracer() = default; |
50 | 48 |
51 | | - class AbstractTracer { |
52 | | - public: |
53 | | - AbstractTracer() = default; |
| 49 | + virtual void trace_exception() {} |
54 | 50 |
55 | | - virtual void trace_exception() {} |
| 51 | + virtual void trace_mem_read_phys(uint64_t paddr, unsigned len) {} |
| 52 | + virtual void trace_mem_write_phys(uint64_t paddr, unsigned len, |
| 53 | + uint64_t data) {} |
| 54 | +}; |
56 | 55 |
57 | | - virtual void trace_mem_read_phys(uint64_t paddr, unsigned len) {} |
58 | | - virtual void trace_mem_write_phys(uint64_t paddr, unsigned len, uint64_t data) {} |
| 56 | +class HartBase { |
| 57 | + // object that is thrown when an instruction encounters an exception |
| 58 | + class AbortInstruction : public std::exception { |
| 59 | + public: |
| 60 | + const char *what() const noexcept override { return "Instruction Abort"; } |
59 | 61 | }; |
60 | 62 |
61 | | - class HartBase { |
62 | | - // object that is thrown when an instruction encounters an exception |
63 | | - class AbortInstruction : public std::exception { |
64 | | - public: |
65 | | - const char* what() const noexcept override { return "Instruction Abort"; } |
66 | | - }; |
67 | | - public: |
68 | | - |
69 | | - HartBase(unsigned hart_id, Memory& mem, const nlohmann::json& cfg) |
70 | | - : m_hart_id(hart_id), |
71 | | - m_mem(mem), |
72 | | - m_tracer(nullptr), |
73 | | - m_current_priv_mode(PrivilegeMode::M) |
74 | | - { |
75 | | - } |
| 63 | +public: |
| 64 | + HartBase(unsigned hart_id, Memory &mem, const nlohmann::json &cfg) |
| 65 | + : m_hart_id(hart_id), m_mem(mem), m_tracer(nullptr), |
| 66 | + m_current_priv_mode(PrivilegeMode::M) {} |
76 | 67 |
77 | | - void attach_tracer(AbstractTracer* t) { |
78 | | - udb_assert(m_tracer == nullptr, "m_tracer NULL ptr"); |
79 | | - m_tracer = t; |
80 | | - } |
| 68 | + void attach_tracer(AbstractTracer *t) { |
| 69 | + udb_assert(m_tracer == nullptr, "m_tracer NULL ptr"); |
| 70 | + m_tracer = t; |
| 71 | + } |
81 | 72 |
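As an illustration of the tracer hooks (the LoggingTracer class below is hypothetical and assumes this header is included): derive from AbstractTracer, override the callbacks of interest, and hand the instance to attach_tracer before execution starts; attach_tracer asserts if a tracer is already installed.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical tracer that logs physical memory traffic to stderr.
    class LoggingTracer : public udb::AbstractTracer {
    public:
      void trace_mem_read_phys(uint64_t paddr, unsigned len) override {
        std::fprintf(stderr, "R paddr=0x%llx len=%u\n",
                     (unsigned long long)paddr, len);
      }
      void trace_mem_write_phys(uint64_t paddr, unsigned len,
                                uint64_t data) override {
        std::fprintf(stderr, "W paddr=0x%llx len=%u data=0x%llx\n",
                     (unsigned long long)paddr, len,
                     (unsigned long long)data);
      }
    };

    // Usage, where hart is some concrete HartBase implementation:
    //   LoggingTracer tracer;
    //   hart.attach_tracer(&tracer);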
82 | | - virtual void set_pc(uint64_t new_pc) = 0; |
83 | | - virtual void set_next_pc(uint64_t next_pc) = 0; |
84 | | - virtual uint64_t pc() const = 0; |
85 | | - virtual void advance_pc() = 0; |
86 | | - |
87 | | - PrivilegeMode mode() const { return m_current_priv_mode; } |
88 | | - void set_mode(const PrivilegeMode& next_mode) { m_current_priv_mode = next_mode; } |
89 | | - |
90 | | - // access a physical address. All translations and physical checks |
91 | | - // should have already occurred |
92 | | - template <unsigned Len> |
93 | | - Bits<Len> read_physical_memory(uint64_t paddr) |
94 | | - { |
95 | | - if (m_tracer != nullptr) { |
96 | | - m_tracer->trace_mem_read_phys(paddr, Len); |
97 | | - } |
98 | | - if constexpr (Len == 8) { |
99 | | - return m_mem.read<uint8_t>(paddr); |
100 | | - } else if constexpr (Len == 16) { |
101 | | - return m_mem.read<uint16_t>(paddr); |
102 | | - } else if constexpr (Len == 32) { |
103 | | - return m_mem.read<uint32_t>(paddr); |
104 | | - } else if constexpr (Len == 64) { |
105 | | - return m_mem.read<uint64_t>(paddr); |
106 | | - } else { |
107 | | - udb_assert(false, "TODO"); |
108 | | - return 0; |
109 | | - } |
110 | | - } |
| 73 | + virtual void set_pc(uint64_t new_pc) = 0; |
| 74 | + virtual void set_next_pc(uint64_t next_pc) = 0; |
| 75 | + virtual uint64_t pc() const = 0; |
| 76 | + virtual void advance_pc() = 0; |
111 | 77 |
112 | | - void assert(bool arg, const char* str) { |
113 | | - udb_assert(arg, str); |
114 | | - } |
| 78 | + PrivilegeMode mode() const { return m_current_priv_mode; } |
| 79 | + void set_mode(const PrivilegeMode &next_mode) { |
| 80 | + m_current_priv_mode = next_mode; |
| 81 | + } |
115 | 82 |
116 | | - // write a physical address. All translations and physical checks |
117 | | - // should have already occurred |
118 | | - template <unsigned Len> |
119 | | - void write_physical_memory(uint64_t paddr, const Bits<Len>& value) |
120 | | - { |
121 | | - if (m_tracer != nullptr) { |
122 | | - m_tracer->trace_mem_write_phys(paddr, Len, value.get()); |
123 | | - } |
124 | | - if constexpr (Len == 8) { |
125 | | - m_mem.write<uint8_t>(paddr, value); |
126 | | - } else if constexpr (Len == 16) { |
127 | | - m_mem.write<uint16_t>(paddr, value); |
128 | | - } else if constexpr (Len == 32) { |
129 | | - m_mem.write<uint32_t>(paddr, value); |
130 | | - } else if constexpr (Len == 64) { |
131 | | - m_mem.write<uint64_t>(paddr, value); |
132 | | - } else { |
133 | | - udb_assert(false, "TODO"); |
134 | | - } |
| 83 | + // access a physical address. All translations and physical checks |
| 84 | + // should have already occurred |
| 85 | + template <unsigned Len> Bits<Len> read_physical_memory(uint64_t paddr) { |
| 86 | + if (m_tracer != nullptr) { |
| 87 | + m_tracer->trace_mem_read_phys(paddr, Len); |
| 88 | + } |
| 89 | + if constexpr (Len == 8) { |
| 90 | + return m_mem.read<uint8_t>(paddr); |
| 91 | + } else if constexpr (Len == 16) { |
| 92 | + return m_mem.read<uint16_t>(paddr); |
| 93 | + } else if constexpr (Len == 32) { |
| 94 | + return m_mem.read<uint32_t>(paddr); |
| 95 | + } else if constexpr (Len == 64) { |
| 96 | + return m_mem.read<uint64_t>(paddr); |
| 97 | + } else { |
| 98 | + udb_assert(false, "TODO"); |
| 99 | + return 0; |
135 | 100 | } |
| 101 | + } |
| 102 | + |
| 103 | + void assert(bool arg, const char *str) { udb_assert(arg, str); } |
136 | 104 |
137 | | - [[noreturn]] void abort_current_instruction() |
138 | | - { |
139 | | - if (m_tracer != nullptr) { |
140 | | - m_tracer->trace_exception(); |
141 | | - } |
142 | | - throw AbortInstruction(); |
| 105 | + // write a physical address. All translations and physical checks |
| 106 | + // should have already occurred |
| 107 | + template <unsigned Len> |
| 108 | + void write_physical_memory(uint64_t paddr, const Bits<Len> &value) { |
| 109 | + if (m_tracer != nullptr) { |
| 110 | + m_tracer->trace_mem_write_phys(paddr, Len, value.get()); |
143 | 111 | } |
| 112 | + if constexpr (Len == 8) { |
| 113 | + m_mem.write<uint8_t>(paddr, value); |
| 114 | + } else if constexpr (Len == 16) { |
| 115 | + m_mem.write<uint16_t>(paddr, value); |
| 116 | + } else if constexpr (Len == 32) { |
| 117 | + m_mem.write<uint32_t>(paddr, value); |
| 118 | + } else if constexpr (Len == 64) { |
| 119 | + m_mem.write<uint64_t>(paddr, value); |
| 120 | + } else { |
| 121 | + udb_assert(false, "TODO"); |
| 122 | + } |
| 123 | + } |
144 | 124 |
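A short usage sketch for the two physical-memory accessors (illustrative; it assumes this header is included, that Bits lives in namespace udb, and that Bits<64> converts to and from plain integers as the code above suggests):

    #include <cstdint>

    using namespace udb;

    // Read-modify-write of one 64-bit word through a concrete hart; both
    // accesses are reported to the attached tracer, if any.
    void set_low_byte(HartBase &hart, uint64_t paddr, uint8_t byte) {
      Bits<64> word = hart.read_physical_memory<64>(paddr);
      Bits<64> updated = (word.get() & ~0xffULL) | byte;
      hart.write_physical_memory<64>(paddr, updated);
    }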
145 | | - // |
146 | | - // virtual memory caching builtins |
147 | | - // |
| 125 | + [[noreturn]] void abort_current_instruction() { |
| 126 | + if (m_tracer != nullptr) { |
| 127 | + m_tracer->trace_exception(); |
| 128 | + } |
| 129 | + throw AbortInstruction(); |
| 130 | + } |
148 | 131 |
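For context, a sketch of a driving loop around abort_current_instruction (illustrative; the step callback stands in for whatever decodes and executes one instruction): AbortInstruction is a private nested type, so code outside HartBase can only catch it through its std::exception base.

    #include <cstdint>
    #include <exception>
    #include <functional>

    // Hypothetical driver: 'step' executes one instruction and may end up
    // calling hart.abort_current_instruction() when the instruction traps.
    void run(udb::HartBase &hart, uint64_t n_insts,
             const std::function<void(udb::HartBase &)> &step) {
      for (uint64_t i = 0; i < n_insts; ++i) {
        try {
          step(hart);
        } catch (const std::exception &) {
          // trace_exception() has already fired; execution simply continues
          // with the next instruction.
        }
      }
    }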
149 | | - void invalidate_all_translations() {} |
150 | | - void invalidate_asid_translations(Bits<16> asid) {} |
151 | | - void invalidate_vaddr_translations(uint64_t vaddr) {} |
152 | | - void invalidate_asid_vaddr_translations(Bits<16> asid, uint64_t vaddr) {} |
| 132 | + // |
| 133 | + // virtual memory caching builtins |
| 134 | + // |
153 | 135 |
154 | | - void sfence_all() {} |
155 | | - void sfence_asid(Bits<16> asid) {} |
156 | | - void sfence_vaddr(uint64_t vaddr) {} |
157 | | - void sfence_asid_vaddr(Bits<16> asid, uint64_t vaddr) {} |
| 136 | + void invalidate_all_translations() {} |
| 137 | + void invalidate_asid_translations(Bits<16> asid) {} |
| 138 | + void invalidate_vaddr_translations(uint64_t vaddr) {} |
| 139 | + void invalidate_asid_vaddr_translations(Bits<16> asid, uint64_t vaddr) {} |
158 | 140 |
| 141 | + void sfence_all() {} |
| 142 | + void sfence_asid(Bits<16> asid) {} |
| 143 | + void sfence_vaddr(uint64_t vaddr) {} |
| 144 | + void sfence_asid_vaddr(Bits<16> asid, uint64_t vaddr) {} |
159 | 145 |
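As an aside, the four sfence builtins above map naturally onto the SFENCE.VMA operand encoding (rs1 == x0 selects all virtual addresses, rs2 == x0 selects all address spaces); the dispatch helper below is illustrative only and assumes Bits<16> accepts a plain integer.

    #include <cstdint>

    // Illustrative SFENCE.VMA dispatch; not part of this header.
    void do_sfence_vma(udb::HartBase &hart, unsigned rs1, unsigned rs2) {
      const bool all_vaddr = (rs1 == 0);  // rs1 = x0: every virtual address
      const bool all_asid  = (rs2 == 0);  // rs2 = x0: every address space
      if (all_vaddr && all_asid) {
        hart.sfence_all();
      } else if (all_vaddr) {
        hart.sfence_asid(hart.xreg(rs2));
      } else if (all_asid) {
        hart.sfence_vaddr(hart.xreg(rs1));
      } else {
        hart.sfence_asid_vaddr(hart.xreg(rs2), hart.xreg(rs1));
      }
    }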
160 | | - // Return true if the address at paddr has the PMA attribute 'attr' |
161 | | - bool check_pma(const uint64_t& paddr, const PmaAttribute& attr) const |
162 | | - { |
163 | | - return true; |
164 | | - } |
| 146 | + // Return true if the address at paddr has the PMA attribute 'attr' |
| 147 | + bool check_pma(const uint64_t &paddr, const PmaAttribute &attr) const { |
| 148 | + return true; |
| 149 | + } |
165 | 150 |
166 | | - // xlen of M-mode, i.e., MXLEN |
167 | | - virtual unsigned mxlen() = 0; |
| 151 | + // xlen of M-mode, i.e., MXLEN |
| 152 | + virtual unsigned mxlen() = 0; |
168 | 153 |
169 | | - virtual uint64_t xreg(unsigned num) const = 0; |
170 | | - virtual void set_xreg(unsigned num, uint64_t value) = 0; |
| 154 | + virtual uint64_t xreg(unsigned num) const = 0; |
| 155 | + virtual void set_xreg(unsigned num, uint64_t value) = 0; |
171 | 156 |
172 | | - virtual CsrBase* csr(unsigned address) = 0; |
173 | | - virtual const CsrBase* csr(unsigned address) const = 0; |
| 157 | + virtual CsrBase *csr(unsigned address) = 0; |
| 158 | + virtual const CsrBase *csr(unsigned address) const = 0; |
174 | 159 |
175 | | - virtual CsrBase* csr(const std::string &address) = 0; |
176 | | - virtual const CsrBase* csr(const std::string &address) const = 0; |
| 160 | + virtual CsrBase *csr(const std::string &address) = 0; |
| 161 | + virtual const CsrBase *csr(const std::string &address) const = 0; |
177 | 162 |
178 | | - virtual void printState(FILE* out = stdout) const = 0; |
| 163 | + virtual void printState(FILE *out = stdout) const = 0; |
179 | 164 |
180 | | - virtual bool implemented_Q_(const ExtensionName& ext) = 0; |
181 | | - virtual bool implemented_Q_(const ExtensionName& ext, const VersionRequirement& req) = 0; |
| 165 | + virtual bool implemented_Q_(const ExtensionName &ext) = 0; |
| 166 | + virtual bool implemented_Q_(const ExtensionName &ext, |
| 167 | + const VersionRequirement &req) = 0; |
182 | 168 |
183 | | - template <unsigned M> |
184 | | - Bits<64> read_hpm_counter(const Bits<M>& hpm_num) { return 0; } |
| 169 | + template <unsigned M> Bits<64> read_hpm_counter(const Bits<M> &hpm_num) { |
| 170 | + return 0; |
| 171 | + } |
185 | 172 |
186 | | - Bits<64> read_mcycle() { return 0; } |
| 173 | + Bits<64> read_mcycle() { return 0; } |
187 | 174 |
188 | | - Bits<64> sw_write_mcycle(const Bits<64>& cycle) { return 0; } |
| 175 | + Bits<64> sw_write_mcycle(const Bits<64> &cycle) { return 0; } |
189 | 176 |
190 | | - unsigned hartid() const { return m_hart_id; } |
| 177 | + unsigned hartid() const { return m_hart_id; } |
191 | 178 |
192 | | - protected: |
193 | | - const unsigned m_hart_id; |
194 | | - Memory& m_mem; |
195 | | - AbstractTracer* m_tracer; |
196 | | - PrivilegeMode m_current_priv_mode; |
197 | | - }; |
| 179 | +protected: |
| 180 | + const unsigned m_hart_id; |
| 181 | + Memory &m_mem; |
| 182 | + AbstractTracer *m_tracer; |
| 183 | + PrivilegeMode m_current_priv_mode; |
| 184 | +}; |
198 | 185 |
199 | | - // static_assert(HartBase<64>::sext(15, 3) == 0xffffffffffffffffull); |
200 | | - // static_assert(HartBase<64>::sext(14, 3) == 0xfffffffffffffffeull); |
201 | | - // static_assert(HartBase<64>::sext(7, 3) == 7); |
202 | | -} |
| 186 | +// static_assert(HartBase<64>::sext(15, 3) == 0xffffffffffffffffull); |
| 187 | +// static_assert(HartBase<64>::sext(14, 3) == 0xfffffffffffffffeull); |
| 188 | +// static_assert(HartBase<64>::sext(7, 3) == 7); |
| 189 | +} // namespace udb |