@@ -141,24 +141,108 @@ bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
     return false;
 }
 
+static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical,
+                                       int *prot, target_ulong address)
+{
+    CPUState *cs = env_cpu(env);
+    target_ulong index, phys;
+    uint64_t dir_base, dir_width;
+    uint64_t base;
+    int level;
+
+    if ((address >> 63) & 0x1) {
+        base = env->CSR_PGDH;
+    } else {
+        base = env->CSR_PGDL;
+    }
+    base &= TARGET_PHYS_MASK;
+
+    for (level = 4; level > 0; level--) {
+        get_dir_base_width(env, &dir_base, &dir_width, level);
+
+        if (dir_width == 0) {
+            continue;
+        }
+
+        /* get next level page directory */
+        index = (address >> dir_base) & ((1 << dir_width) - 1);
+        phys = base | index << 3;
+        base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
+        if (FIELD_EX64(base, TLBENTRY, HUGE)) {
+            /* base is a huge pte */
+            break;
+        }
+    }
+
+    /* pte */
+    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
+        /* Huge Page. base is pte */
+        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
+        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
+        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
+            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
+            base = FIELD_DP64(base, TLBENTRY, G, 1);
+        }
+    } else {
+        /* Normal Page. base points to pte */
+        get_dir_base_width(env, &dir_base, &dir_width, 0);
+        index = (address >> dir_base) & ((1 << dir_width) - 1);
+        phys = base | index << 3;
+        base = ldq_phys(cs->as, phys);
+    }
+
+    /* TODO: check plv and other bits? */
+
+    /* base is pte, in normal pte format */
+    if (!FIELD_EX64(base, TLBENTRY, V)) {
+        return TLBRET_NOMATCH;
+    }
+
+    if (!FIELD_EX64(base, TLBENTRY, D)) {
+        *prot = PAGE_READ;
+    } else {
+        *prot = PAGE_READ | PAGE_WRITE;
+    }
+
+    /* get TARGET_PAGE_SIZE aligned physical address */
+    base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1);
+    /* mask RPLV, NX, NR bits */
+    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
+    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
+    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
+    /* mask other attribute bits */
+    *physical = base & TARGET_PAGE_MASK;
+
+    return 0;
+}
+
 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                  int *prot, target_ulong address,
-                                 MMUAccessType access_type, int mmu_idx)
+                                 MMUAccessType access_type, int mmu_idx,
+                                 int is_debug)
 {
     int index, match;
 
     match = loongarch_tlb_search(env, address, &index);
     if (match) {
         return loongarch_map_tlb_entry(env, physical, prot,
                                        address, access_type, index, mmu_idx);
+    } else if (is_debug) {
+        /*
+         * For debugger memory access, we want to do the map when there is a
+         * legal mapping, even if the mapping is not yet in the TLB. Return 0
+         * if there is a valid map, else non-zero.
+         */
+        return loongarch_page_table_walker(env, physical, prot, address);
     }
 
     return TLBRET_NOMATCH;
 }
 #else
 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                  int *prot, target_ulong address,
-                                 MMUAccessType access_type, int mmu_idx)
+                                 MMUAccessType access_type, int mmu_idx,
+                                 int is_debug)
 {
     return TLBRET_NOMATCH;
 }
@@ -178,7 +262,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
 
 int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                          int *prot, target_ulong address,
-                         MMUAccessType access_type, int mmu_idx)
+                         MMUAccessType access_type, int mmu_idx, int is_debug)
 {
     int user_mode = mmu_idx == MMU_USER_IDX;
     int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
@@ -222,7 +306,7 @@ int get_physical_address(CPULoongArchState *env, hwaddr *physical,
 
     /* Mapped address */
     return loongarch_map_address(env, physical, prot, address,
-                                 access_type, mmu_idx);
+                                 access_type, mmu_idx, is_debug);
 }
 
 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
@@ -232,7 +316,7 @@ hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     int prot;
 
     if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
-                             cpu_mmu_index(cs, false)) != 0) {
+                             cpu_mmu_index(cs, false), 1) != 0) {
         return -1;
     }
     return phys_addr;
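
Editor's note: the per-level indexing done by the walker above is plain bit arithmetic, independent of the QEMU helpers. The standalone sketch below mirrors the patch's `index = (address >> dir_base) & ((1 << dir_width) - 1)` and `phys = base | index << 3` steps with assumed example values (dir_base = 12, dir_width = 9, i.e. 4 KiB pages and 512-entry directories, and made-up base/address values); in the real code these come from get_dir_base_width() and the page-walk configuration CSRs, and this snippet is not part of the patch.

/* Illustrative only -- not part of the patch above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Assumed example layout: 4 KiB pages, 512-entry directories. */
    uint64_t dir_base = 12;           /* lowest VA bit covered by this level */
    uint64_t dir_width = 9;           /* number of index bits at this level  */
    uint64_t base = 0x90000000ULL;    /* example directory base (physical)   */
    uint64_t address = 0x12345678ULL; /* example virtual address             */

    /* Same arithmetic as the walker: pick this level's index bits... */
    uint64_t index = (address >> dir_base) & ((1ULL << dir_width) - 1);
    /* ...and turn them into the byte address of the 8-byte entry. */
    uint64_t entry_pa = base | (index << 3);

    printf("index = %" PRIu64 ", entry at 0x%" PRIx64 "\n", index, entry_pa);
    return 0;
}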