
Commit da19291

RFC: Add tests for some CHERI-specific behaviors
Parent: 095e8f1

1 file changed: src/test/func/cheri/cheri.cc (+153, -0)
@@ -0,0 +1,153 @@
#include <iostream>

#if defined(SNMALLOC_PASS_THROUGH) || !defined(__CHERI_PURE_CAPABILITY__)
// This test does not make sense in pass-through or w/o CHERI
int main()
{
  return 0;
}
#else

// #  define SNMALLOC_TRACING

#  include <cheri/cherireg.h>
#  include <snmalloc/snmalloc.h>
#  include <stddef.h>

#  if defined(__FreeBSD__)
#    include <sys/mman.h>
#  endif

using namespace snmalloc;
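/*
 * Helpers to query CHERI capability metadata via compiler builtins:
 * cap_len_is checks the capability's bounds length, and cap_vmem_perm_is
 * checks whether the software-defined VMEM permission is present.
 */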
bool cap_len_is(void* cap, size_t expected)
{
  return __builtin_cheri_length_get(cap) == expected;
}

bool cap_vmem_perm_is(void* cap, bool expected)
{
#  if defined(CHERI_PERM_SW_VMEM)
  return !!(__builtin_cheri_perms_get(cap) & CHERI_PERM_SW_VMEM) == expected;
#  else
#    warning "Don't know how to check VMEM permission bit"
#  endif
}

int main()
{
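  /*
   * On FreeBSD, sanity-check that the base page size reported by
   * getpagesizes() matches snmalloc's OS_PAGE_SIZE.
   */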
#  if defined(__FreeBSD__)
  {
    size_t pagesize[8];
    int err = getpagesizes(pagesize, sizeof(pagesize) / sizeof(pagesize[0]));
    SNMALLOC_CHECK(err > 0);
    SNMALLOC_CHECK(pagesize[0] == OS_PAGE_SIZE);
  }
#  endif

  auto alloc = get_scoped_allocator();
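  /*
   * A small allocation should come back with its capability bounds matching
   * the requested size and without the VMEM software permission.
   */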
message("Grab small object");
52+
{
53+
static const size_t sz = 128;
54+
void* o1 = alloc->alloc(sz);
55+
SNMALLOC_CHECK(cap_len_is(o1, sz));
56+
SNMALLOC_CHECK(cap_vmem_perm_is(o1, false));
57+
alloc->dealloc(o1);
58+
}
59+
60+
/*
61+
* This large object is sized to end up in our alloc's local buddy allocators
62+
* when it's released.
63+
*/
64+
message("Grab large object");
65+
ptraddr_t alarge;
66+
{
67+
static const size_t sz = 1024 * 1024;
68+
void* olarge = alloc->alloc(sz);
69+
alarge = address_cast(olarge);
70+
SNMALLOC_CHECK(cap_len_is(olarge, sz));
71+
SNMALLOC_CHECK(cap_vmem_perm_is(olarge, false));
72+
73+
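    /*
     * Dirty two separate pages so that, when this memory is reused below,
     * we can tell whether it comes back zeroed (and, on FreeBSD, whether
     * the pages were taken out of core).
     */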
    static_cast<uint8_t*>(olarge)[128] = 'x';
    static_cast<uint8_t*>(olarge)[128 + OS_PAGE_SIZE] = 'y';

#  if defined(__FreeBSD__)
    static constexpr int irm =
      MINCORE_INCORE | MINCORE_REFERENCED | MINCORE_MODIFIED;
    char ic[2];
    int err = mincore(olarge, 2 * OS_PAGE_SIZE, ic);
    SNMALLOC_CHECK(err == 0);
    SNMALLOC_CHECK((ic[0] & irm) == irm);
    SNMALLOC_CHECK((ic[1] & irm) == irm);
    message("Large object in core; good");
#  endif

    alloc->dealloc(olarge);
  }
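  /*
   * Reallocating the same size should hand back the address just released
   * (from the local buddy allocator), and requesting zeroed memory must
   * clear the bytes dirtied above.
   */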
message("Grab large object again, verify reuse");
91+
{
92+
static const size_t sz = 1024 * 1024;
93+
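    /*
     * Capture errno across the allocation; a successful call should leave
     * it at zero.
     */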
    errno = 0;
    void* olarge = alloc->alloc<YesZero>(sz);
    int err = errno;

    SNMALLOC_CHECK(alarge == address_cast(olarge));
    SNMALLOC_CHECK(err == 0);

#  if defined(__FreeBSD__)
    /*
     * Verify that the zeroing was done by mmap, which should mean that the
     * first two pages are not in core.  This implies that snmalloc
     * successfully re-derived a Chunk- or Arena-bounded pointer and used
     * that, and its VMEM permission, to tear pages out of the address
     * space.
     */
    static constexpr int irm =
      MINCORE_INCORE | MINCORE_REFERENCED | MINCORE_MODIFIED;
    char ic[2];
    err = mincore(olarge, 2 * OS_PAGE_SIZE, ic);
    SNMALLOC_CHECK(err == 0);
    SNMALLOC_CHECK((ic[0] & irm) == 0);
    SNMALLOC_CHECK((ic[1] & irm) == 0);
    message("Large object not in core; good");
#  endif

    SNMALLOC_CHECK(static_cast<uint8_t*>(olarge)[128] == '\0');
    SNMALLOC_CHECK(static_cast<uint8_t*>(olarge)[128 + OS_PAGE_SIZE] == '\0');
    SNMALLOC_CHECK(cap_len_is(olarge, sz));
    SNMALLOC_CHECK(cap_vmem_perm_is(olarge, false));

    alloc->dealloc(olarge);
  }

  /*
   * Grab another CoreAlloc pointer from the pool and examine it.
   *
   * CoreAllocs come from the metadata pools of snmalloc, and so do not flow
   * through the usual allocation machinery.
   */
  message("Grab CoreAlloc from pool for inspection");
  {
    static_assert(
      std::is_same_v<decltype(alloc.alloc), LocalAllocator<StandardConfig>>);
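    /*
     * Acquire a CoreAlloc directly from the pool (via a local LocalCache)
     * and check that this pool-allocated metadata object is also bounded
     * and lacks the VMEM permission.
     */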
    LocalCache lc{&StandardConfig::unused_remote};
    auto* ca = AllocPool<StandardConfig>::acquire(&lc);

    SNMALLOC_CHECK(cap_len_is(ca, sizeof(*ca)));
    SNMALLOC_CHECK(cap_vmem_perm_is(ca, false));

    /*
     * Putting ca back into the pool would require unhooking our local
     * cache, and that requires accessing privates.  Since it's pretty
     * harmless to do so here at the end of our test, just leak it.
     */
  }

  message("CHERI checks OK");
  return 0;
}

#endif
