|
18 | 18 | #ifdef CPPTRACE_HAS_MACH_VM |
19 | 19 | #include <mach/mach_vm.h> |
20 | 20 | #endif |
| 21 | + #elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) |
| 22 | + #include <sys/sysctl.h> |
| 23 | + #if defined(__FreeBSD__) |
| 24 | + #include <sys/user.h> |
| 25 | + #endif |
21 | 26 | #else |
22 | 27 | #include <fstream> |
23 | 28 | #include <ios> |
@@ -107,6 +112,131 @@ namespace detail { |
107 | 112 | } |
108 | 113 | return perms; |
109 | 114 | } |
| 115 | + #elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) |
| 116 | + #if !defined(__OpenBSD__) |
| 117 | + // Fetch VM mappings via sysctl with retry on ENOMEM. |
| 118 | + // FreeBSD uses a len * 4/3 heuristic in kinfo_getvmmap, we try to be more robust by retrying. |
| 119 | + // On all BSD I have investigated, sysctl returns ENOMEM when the buffer is too small (mappings grew between the |
| 120 | + // size query and the data fetch). When this happens, oldlenp gives the amount copied, not the amount needed, so we |
| 121 | + // must re-query the size from scratch. |
| 122 | + std::vector<char> sysctl_vmmap(const int* mib, unsigned int miblen) { |
| 123 | + constexpr int max_retries = 3; |
| 124 | + for(int attempt = 0; attempt < max_retries; attempt++) { |
| 125 | + size_t len = 0; |
| 126 | + if(sysctl(mib, miblen, nullptr, &len, nullptr, 0) != 0) { |
| 127 | + throw internal_error("sysctl vmmap size query failed: {}", strerror(errno)); |
| 128 | + } |
| 129 | + auto original_len = len; |
| 130 | + // https://github.com/lattera/freebsd/blob/401a161083850a9a4ce916f37520c084cff1543b/lib/libutil/kinfo_getvmmap.c#L32C2-L32C20 |
| 131 | + len = len * 4 / 3; |
| 132 | + len -= len % sizeof(struct kinfo_vmentry); |
| 133 | + len = std::max(len, original_len); |
| 134 | + std::vector<char> buf(len); |
| 135 | + if(sysctl(mib, miblen, buf.data(), &len, nullptr, 0) == 0) { |
| 136 | + buf.resize(len); |
| 137 | + return buf; |
| 138 | + } |
| 139 | + if(errno != ENOMEM) { |
| 140 | + throw internal_error("sysctl vmmap failed: {}", strerror(errno)); |
| 141 | + } |
| 142 | + } |
| 143 | + throw internal_error("sysctl vmmap failed after {} retries due to growing memory mappings", max_retries); |
| 144 | + } |
| 145 | + #endif |
| 146 | + #if defined(__FreeBSD__) |
| 147 | + int get_page_protections(void* page) { |
| 148 | + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()}; |
| 149 | + auto buf = sysctl_vmmap(mib, 4); |
| 150 | + auto addr = reinterpret_cast<uintptr_t>(page); |
| 151 | + char* pos = buf.data(); |
| 152 | + char* end = pos + buf.size(); |
| 153 | + while(pos < end) { |
| 154 | + auto* entry = reinterpret_cast<struct kinfo_vmentry*>(pos); |
| 155 | + if(entry->kve_structsize == 0) break; |
| 156 | + if(addr >= entry->kve_start && addr < entry->kve_end) { |
| 157 | + int perms = 0; |
| 158 | + if(entry->kve_protection & KVME_PROT_READ) perms |= PROT_READ; |
| 159 | + if(entry->kve_protection & KVME_PROT_WRITE) perms |= PROT_WRITE; |
| 160 | + if(entry->kve_protection & KVME_PROT_EXEC) perms |= PROT_EXEC; |
| 161 | + return perms; |
| 162 | + } |
| 163 | + pos += entry->kve_structsize; |
| 164 | + } |
| 165 | + throw internal_error( |
| 166 | + "Failed to find mapping for {>16:0h} via sysctl KERN_PROC_VMMAP", |
| 167 | + reinterpret_cast<uintptr_t>(page) |
| 168 | + ); |
| 169 | + } |
| 170 | + #elif defined(__NetBSD__) |
| 171 | + int get_page_protections(void* page) { |
| 172 | + int mib[5] = { |
| 173 | + CTL_VM, VM_PROC, VM_PROC_MAP, getpid(), |
| 174 | + static_cast<int>(sizeof(struct kinfo_vmentry)) |
| 175 | + }; |
| 176 | + auto buf = sysctl_vmmap(mib, 5); |
| 177 | + auto addr = reinterpret_cast<uintptr_t>(page); |
| 178 | + auto count = buf.size() / sizeof(struct kinfo_vmentry); |
| 179 | + auto* entries = reinterpret_cast<struct kinfo_vmentry*>(buf.data()); |
| 180 | + for(size_t i = 0; i < count; i++) { |
| 181 | + if(addr >= entries[i].kve_start && addr < entries[i].kve_end) { |
| 182 | + int perms = 0; |
| 183 | + if(entries[i].kve_protection & KVME_PROT_READ) perms |= PROT_READ; |
| 184 | + if(entries[i].kve_protection & KVME_PROT_WRITE) perms |= PROT_WRITE; |
| 185 | + if(entries[i].kve_protection & KVME_PROT_EXEC) perms |= PROT_EXEC; |
| 186 | + return perms; |
| 187 | + } |
| 188 | + } |
| 189 | + throw internal_error( |
| 190 | + "Failed to find mapping for {>16:0h} via sysctl VM_PROC_MAP", |
| 191 | + reinterpret_cast<uintptr_t>(page) |
| 192 | + ); |
| 193 | + } |
| 194 | + #elif defined(__OpenBSD__) |
// OpenBSD's KERN_PROC_VMMAP returns at most VMMAP_MAXLEN (64KB) per call and rejects larger buffers with EINVAL.
// The API is paginated, setting kve_start in the first buffer entry tells the kernel where to resume.
// Resolves the protection bits (PROT_*) of the mapping containing `page`; throws
// internal_error on sysctl failure or if no mapping covers the address.
int get_page_protections(void* page) {
    int mib[3] = {CTL_KERN, KERN_PROC_VMMAP, getpid()};
    size_t buf_size = 0;
    // Size query: kernel reports how many bytes of entries are available right now.
    if(sysctl(mib, 3, nullptr, &buf_size, nullptr, 0) != 0) {
        throw internal_error("sysctl vmmap size query failed: {}", strerror(errno));
    }
    // Round down to a whole number of entries so the fetch length is always valid.
    buf_size -= buf_size % sizeof(struct kinfo_vmentry);
    auto addr = reinterpret_cast<uintptr_t>(page);
    std::vector<char> buf(buf_size);
    unsigned long next_start = 0;
    while(true) {
        // In-band resume cursor: the kernel reads kve_start of the first entry to know
        // where to continue the paginated walk (0 means start from the beginning).
        // NOTE(review): assumes the size query returned at least one entry's worth of
        // bytes — if buf_size were 0 this write would be out of bounds; confirm.
        reinterpret_cast<struct kinfo_vmentry*>(buf.data())->kve_start = next_start;
        size_t len = buf_size;
        if(sysctl(mib, 3, buf.data(), &len, nullptr, 0) != 0) {
            throw internal_error("sysctl vmmap failed: {}", strerror(errno));
        }
        // No entries copied: we've walked past the end of the map.
        if(len == 0) {
            break;
        }
        auto count = len / sizeof(struct kinfo_vmentry);
        auto* entries = reinterpret_cast<struct kinfo_vmentry*>(buf.data());
        for(size_t i = 0; i < count; i++) {
            if(addr >= entries[i].kve_start && addr < entries[i].kve_end) {
                int perms = 0;
                if(entries[i].kve_protection & KVE_PROT_READ) perms |= PROT_READ;
                if(entries[i].kve_protection & KVE_PROT_WRITE) perms |= PROT_WRITE;
                if(entries[i].kve_protection & KVE_PROT_EXEC) perms |= PROT_EXEC;
                return perms;
            }
        }
        // A short page means the kernel had nothing more to return — last page.
        if(len < buf_size) {
            break;
        }
        // basic sanity check for forward progress (guards against an infinite loop if
        // the kernel ever resumed at or before the previous cursor)
        VERIFY(next_start == 0 || entries[count - 1].kve_end > next_start);
        next_start = entries[count - 1].kve_end;
    }
    throw internal_error(
        "Failed to find mapping for {>16:0h} via sysctl KERN_PROC_VMMAP",
        reinterpret_cast<uintptr_t>(page)
    );
}
| 239 | + #endif |
110 | 240 | #else |
111 | 241 | // Code for reading /proc/self/maps |
112 | 242 | // Unfortunately this is the canonical and only way to get memory permissions on linux |
|
0 commit comments