|
21 | 21 | #include "cpu.h"
|
22 | 22 | #include "internals.h"
|
23 | 23 | #include "exec/exec-all.h"
|
| 24 | +#include "exec/ram_addr.h" |
24 | 25 | #include "exec/cpu_ldst.h"
|
25 | 26 | #include "exec/helper-proto.h"
|
26 | 27 |
|
/*
 * Resolve virtual address @ptr to the host address of its MTE allocation
 * tag storage.
 *
 * Returns a host pointer into the tag memory region covering @ptr, or
 * NULL when the access proceeds unchecked: user-only builds (tag storage
 * not implemented), pages whose memory attribute is not Tagged, pages not
 * backed by host ram (MMIO), or no tag memory region at the translated
 * address (a board configuration problem).
 *
 * Page-access, watchpoint and second-page faults are raised here (with
 * priority over any tag check exception); @ra == 0 selects a no-fault
 * probe whose page is asserted to be valid.
 */
/*
 * NOTE(review): the second line of this signature is outside the visible
 * diff context; reconstructed from the body's uses of ptr and ptr_access —
 * confirm against the full file.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    /* Tag storage not implemented. */
    return NULL;
#else
    uintptr_t index;
    CPUIOTLBEntry *iotlbentry;
    int in_page, flags;
    ram_addr_t ptr_ra;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address. This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe1. The page is expected to be
     * valid. Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /*
     * Find the iotlbentry for ptr. This *must* be present in the TLB
     * because we just found the mapping.
     * TODO: Perhaps there should be a cputlb helper that returns a
     * matching tlb entry + iotlb entry.
     */
    index = tlb_index(env, ptr_mmu_idx, ptr);
# ifdef CONFIG_DEBUG_TCG
    {
        /* Debug-only sanity check: the tlb entry really covers ptr. */
        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
                                   ? entry->addr_read
                                   : tlb_addr_write(entry));
        g_assert(tlb_hit(comparator, ptr));
    }
# endif
    iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * The Normal memory access can extend to the next page. E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    /* in_page = bytes remaining on ptr's page. */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        void *ignore;
        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
                                    ptr_mmu_idx, ra == 0, &ignore, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        /* ra == 0 callers (no-fault probes) must not reach a watchpoint. */
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
                             iotlbentry->attrs, wp, ra);
    }

    /*
     * Find the physical address within the normal mem space.
     * The memory region lookup must succeed because TLB_MMIO was
     * not set in the cputlb lookup above.
     */
    mr = memory_region_from_host(host, &ptr_ra);
    tcg_debug_assert(mr != NULL);
    tcg_debug_assert(memory_region_is_ram(mr));
    ptr_paddr = ptr_ra;
    /* Walk up the region tree, accumulating offsets, to the absolute addr. */
    do {
        ptr_paddr += mr->addr;
        mr = mr->container;
    } while (mr);

    /*
     * Convert to the physical address in tag space.
     * One tag byte covers two allocation granules, hence the extra +1
     * in the shift.
     */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE,
                                 iotlbentry->attrs);

    /*
     * Note that @mr will never be NULL. If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region. For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}
|
80 | 211 |
|
81 | 212 | uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
|
|
0 commit comments