Commit 553925e

[libc] implement secure random buffer filling with vDSO
1 parent 09e94d0 commit 553925e

11 files changed: 414 additions & 4 deletions


libc/src/__support/OSUtil/linux/CMakeLists.txt

Lines changed: 22 additions & 0 deletions
@@ -53,3 +53,25 @@ add_object_library(
    libc.src.errno.errno
    libc.src.sys.auxv.getauxval
)

add_object_library(
  random
  HDRS
    random.h
  SRCS
    random.cpp
  DEPENDS
    libc.src.sys.random.getrandom
    libc.src.sys.mman.mmap
    libc.src.sys.mman.munmap
    libc.src.unistd.sysconf
    libc.src.errno.errno
    libc.src.__support.common
    libc.src.__support.OSUtil.linux.vdso
    libc.src.__support.threads.callonce
    libc.src.__support.threads.linux.raw_mutex
    libc.src.__support.threads.thread
    libc.src.sched.sched_getaffinity
    libc.src.sched.__sched_getcpucount
)

libc/src/__support/OSUtil/linux/aarch64/vdso.h

Lines changed: 2 additions & 0 deletions
@@ -23,6 +23,8 @@ LIBC_INLINE constexpr cpp::string_view symbol_name(VDSOSym sym) {
    return "__kernel_clock_gettime";
  case VDSOSym::ClockGetRes:
    return "__kernel_clock_getres";
  case VDSOSym::GetRandom:
    return "__kernel_getrandom";
  default:
    return "";
  }
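Background for the symbol added above: the kernel's vDSO getrandom entry follows a two-phase protocol. A first call with opaque_len set to ~0UL only reports how per-thread opaque state must be allocated; the caller then mmaps state buffers with exactly the protection and flags the kernel prescribes and passes one such state to every later call. Below is a minimal standalone sketch of that handshake based on the upstream Linux documentation, not code from this commit; vgetrandom_fn and allocate_one_state are invented names, and the entry point (__kernel_getrandom here) is assumed to be already resolved.

#include <stddef.h>
#include <sys/mman.h>
#include <sys/types.h>

// Mirrors the layout the kernel reports for the parameter query (and the
// Params struct in random.cpp below).
struct vgetrandom_opaque_params {
  unsigned size_of_opaque_state;
  unsigned mmap_prot;
  unsigned mmap_flags;
  unsigned reserved[13];
};

using vgetrandom_fn = ssize_t (*)(void *buf, size_t len, unsigned flags,
                                  void *opaque_state, size_t opaque_len);

// Returns one kernel-usable opaque state, or nullptr on failure.
void *allocate_one_state(vgetrandom_fn vgetrandom) {
  vgetrandom_opaque_params params{};
  // Phase 1: opaque_len == ~0UL asks only for the allocation parameters.
  if (vgetrandom(nullptr, 0, 0, &params, ~0UL) != 0)
    return nullptr;
  // Phase 2: map the state with the kernel-prescribed protection and flags.
  void *state = mmap(nullptr, params.size_of_opaque_state, params.mmap_prot,
                     params.mmap_flags, -1, 0);
  return state == MAP_FAILED ? nullptr : state;
}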
libc/src/__support/OSUtil/linux/random.cpp

Lines changed: 323 additions & 0 deletions
@@ -0,0 +1,323 @@
#include "src/__support/OSUtil/linux/random.h"
#include "src/__support/CPP/mutex.h"
#include "src/__support/CPP/new.h"
#include "src/__support/OSUtil/linux/syscall.h"
#include "src/__support/OSUtil/linux/vdso.h"
#include "src/__support/libc_assert.h"
#include "src/__support/memory_size.h"
#include "src/__support/threads/callonce.h"
#include "src/__support/threads/linux/callonce.h"
#include "src/__support/threads/linux/raw_mutex.h"
#include "src/errno/libc_errno.h"
#include "src/sched/sched_getaffinity.h"
#include "src/sched/sched_getcpucount.h"
#include "src/stdlib/atexit.h"
#include "src/sys/mman/mmap.h"
#include "src/sys/mman/munmap.h"
#include "src/sys/random/getrandom.h"
#include "src/unistd/sysconf.h"
#include <asm/param.h>
#include <linux/mman.h>  // For MREMAP_MAYMOVE.
#include <sys/syscall.h> // For SYS_mremap.

namespace LIBC_NAMESPACE_DECL {
namespace {
// errno protection
struct ErrnoProtect {
  int backup;
  ErrnoProtect() : backup(libc_errno) { libc_errno = 0; }
  ~ErrnoProtect() { libc_errno = backup; }
};

// parameters for allocating per-thread random state
struct Params {
  unsigned size_of_opaque_state;
  unsigned mmap_prot;
  unsigned mmap_flags;
  unsigned reserved[13];
};

// for registering thread-specific atexit callbacks
using Destructor = void(void *);
extern "C" int __cxa_thread_atexit_impl(Destructor *, void *, void *);
extern "C" [[gnu::weak, gnu::visibility("hidden")]] void *__dso_handle =
    nullptr;

class MMapContainer {
  void **ptr = nullptr;
  void **usage = nullptr;
  void **boundary = nullptr;

  internal::SafeMemSize capacity() const {
    return internal::SafeMemSize{
        static_cast<size_t>(reinterpret_cast<ptrdiff_t>(boundary) -
                            reinterpret_cast<ptrdiff_t>(ptr))};
  }

  internal::SafeMemSize bytes() const {
    return capacity() * internal::SafeMemSize{sizeof(void *)};
  }

  bool initialize() {
    internal::SafeMemSize page_size{static_cast<size_t>(sysconf(_SC_PAGESIZE))};
    if (!page_size.valid())
      return false;
    ptr = reinterpret_cast<void **>(mmap(nullptr, page_size,
                                         PROT_READ | PROT_WRITE,
                                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    if (ptr == MAP_FAILED)
      return false;
    usage = ptr;
    boundary = ptr + page_size / sizeof(void *);
    return true;
  }

  bool grow(size_t additional) {
    if (ptr == nullptr)
      return initialize();

    size_t old_capacity = capacity();

    internal::SafeMemSize target_bytes{additional};
    internal::SafeMemSize new_bytes = bytes();
    target_bytes = target_bytes + size();
    target_bytes = target_bytes * internal::SafeMemSize{sizeof(void *)};

    if (!target_bytes.valid())
      return false;
    while (new_bytes < target_bytes) {
      new_bytes = new_bytes * internal::SafeMemSize{static_cast<size_t>(2)};
      if (!new_bytes.valid())
        return false;
    }

    // TODO: migrate to syscall wrapper once it's available
    auto result =
        syscall_impl<intptr_t>(SYS_mremap, ptr, static_cast<size_t>(bytes()),
                               static_cast<size_t>(new_bytes), MREMAP_MAYMOVE);

    if (result < 0 && result > -EXEC_PAGESIZE)
      return false;
    ptr = reinterpret_cast<void **>(result);
    usage = ptr + old_capacity;
    boundary = ptr + new_bytes / sizeof(void *);
    return true;
  }

public:
  MMapContainer() = default;
  ~MMapContainer() {
    if (!ptr)
      return;
    munmap(ptr, bytes());
  }

  bool ensure_space(size_t additional) {
    if (usage + additional >= boundary && !grow(additional))
      return false;
    return true;
  }

  void push_unchecked(void *value) {
    LIBC_ASSERT(usage != boundary && "pushing into full container");
    *usage++ = value;
  }

  using iterator = void **;
  using value_type = void *;
  iterator begin() const { return ptr; }
  iterator end() const { return usage; }

  bool empty() const { return begin() == end(); }
  void *pop() {
    LIBC_ASSERT(!empty() && "popping from empty container");
    return *--usage;
  }
  internal::SafeMemSize size() const {
    return internal::SafeMemSize{static_cast<size_t>(
        reinterpret_cast<ptrdiff_t>(usage) - reinterpret_cast<ptrdiff_t>(ptr))};
  }
};

class StateFactory {
  RawMutex mutex{};
  MMapContainer allocations{};
  MMapContainer freelist{};
  Params params{};
  size_t states_per_page = 0;
  size_t pages_per_allocation = 0;
  size_t page_size = 0;

  bool prepare() {
    vdso::TypedSymbol<vdso::VDSOSym::GetRandom> vgetrandom;

    if (!vgetrandom)
      return false;

    // get the allocation configuration suggested by the kernel
    if (vgetrandom(nullptr, 0, 0, &params, ~0UL))
      return false;

    cpu_set_t cs{};

    if (LIBC_NAMESPACE::sched_getaffinity(0, sizeof(cs), &cs))
      return false;

    internal::SafeMemSize count{static_cast<size_t>(
        LIBC_NAMESPACE::__sched_getcpucount(sizeof(cs), &cs))};

    internal::SafeMemSize allocation_size =
        internal::SafeMemSize{
            static_cast<size_t>(params.size_of_opaque_state)} *
        count;

    page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    allocation_size = allocation_size.align_up(page_size);
    if (!allocation_size.valid())
      return false;

    states_per_page = page_size / params.size_of_opaque_state;
    pages_per_allocation = allocation_size / page_size;

    return true;
  }

  bool allocate_new_states() {
    if (!allocations.ensure_space(1))
      return false;

    // we always ensure the freelist can contain all the allocated states
    internal::SafeMemSize total_size =
        internal::SafeMemSize{page_size} *
        internal::SafeMemSize{pages_per_allocation} *
        (internal::SafeMemSize{static_cast<size_t>(1)} + allocations.size());

    if (!total_size.valid() ||
        !freelist.ensure_space(total_size - freelist.size()))
      return false;

    auto *new_allocation =
        static_cast<char *>(mmap(nullptr, page_size * pages_per_allocation,
                                 params.mmap_prot, params.mmap_flags, -1, 0));
    if (new_allocation == MAP_FAILED)
      return false;

    for (size_t i = 0; i < pages_per_allocation; ++i) {
      auto *page = new_allocation + i * page_size;
      for (size_t j = 0; j < states_per_page; ++j)
        freelist.push_unchecked(page + j * params.size_of_opaque_state);
    }
    return true;
  }

  static StateFactory *instance() {
    alignas(StateFactory) static char storage[sizeof(StateFactory)]{};
    static CallOnceFlag flag = callonce_impl::NOT_CALLED;
    static bool valid = false;
    callonce(&flag, []() {
      auto *factory = new (storage) StateFactory();
      valid = factory->prepare();
      if (valid)
        atexit([]() {
          auto factory = reinterpret_cast<StateFactory *>(storage);
          factory->~StateFactory();
          valid = false;
        });
    });
    return valid ? reinterpret_cast<StateFactory *>(storage) : nullptr;
  }

  void *acquire() {
    cpp::lock_guard guard{mutex};
    if (freelist.empty() && !allocate_new_states())
      return nullptr;
    return freelist.pop();
  }
  void release(void *state) {
    cpp::lock_guard guard{mutex};
    // there should be no need to check this pushing
    freelist.push_unchecked(state);
  }
  ~StateFactory() {
    for (auto *allocation : allocations)
      munmap(allocation, page_size * pages_per_allocation);
  }

public:
  static void *acquire_global() {
    auto *factory = instance();
    if (!factory)
      return nullptr;
    return factory->acquire();
  }
  static void release_global(void *state) {
    auto *factory = instance();
    if (!factory)
      return;
    factory->release(state);
  }
  static size_t size_of_opaque_state() {
    return instance()->params.size_of_opaque_state;
  }
};

void *acquire_tls() {
  static thread_local void *state = nullptr;
  // previous acquire failed, do not try again
  if (state == MAP_FAILED)
    return nullptr;
  // first acquirement
  if (state == nullptr) {
    state = StateFactory::acquire_global();
    // if still fails, remember the failure
    if (state == nullptr) {
      state = MAP_FAILED;
      return nullptr;
    } else {
      // register the release callback.
      if (__cxa_thread_atexit_impl(
              [](void *s) { StateFactory::release_global(s); }, state,
              __dso_handle)) {
        StateFactory::release_global(state);
        state = MAP_FAILED;
        return nullptr;
      }
    }
  }
  return state;
}

template <class F> void random_fill_impl(F gen, void *buf, size_t size) {
  auto *buffer = reinterpret_cast<uint8_t *>(buf);
  while (size > 0) {
    ssize_t len = gen(buffer, size);
    if (len == -1) {
      if (libc_errno == EINTR)
        continue;
      break;
    }
    size -= len;
    buffer += len;
  }
}
} // namespace

void random_fill(void *buf, size_t size) {
  ErrnoProtect protect;
  void *state = acquire_tls();
  if (state) {
    random_fill_impl(
        [state](void *buf, size_t size) {
          vdso::TypedSymbol<vdso::VDSOSym::GetRandom> vgetrandom;
          return vgetrandom(buf, size, 0, state,
                            StateFactory::size_of_opaque_state());
        },
        buf, size);
  } else {
    random_fill_impl(
        [](void *buf, size_t size) {
          return LIBC_NAMESPACE::getrandom(buf, size, 0);
        },
        buf, size);
  }
}

} // namespace LIBC_NAMESPACE_DECL
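The fallback branch of random_fill above reduces to the classic getrandom(2) retry loop. For reference, here is a self-contained sketch of that policy written against the public <sys/random.h> wrapper (fill_with_getrandom is a hypothetical name, not part of this commit): partial reads advance the buffer, EINTR retries, and any other error abandons the fill, leaving the remaining bytes untouched.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/random.h>
#include <sys/types.h>

// Hypothetical standalone helper mirroring random_fill_impl's retry policy.
static void fill_with_getrandom(void *buf, size_t size) {
  uint8_t *buffer = static_cast<uint8_t *>(buf);
  while (size > 0) {
    ssize_t len = getrandom(buffer, size, 0);
    if (len < 0) {
      if (errno == EINTR)
        continue; // Interrupted by a signal: retry the same request.
      break;      // Unrecoverable error: give up on the remaining bytes.
    }
    // getrandom may return fewer bytes than requested; advance and loop.
    buffer += len;
    size -= static_cast<size_t>(len);
  }
}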
libc/src/__support/OSUtil/linux/random.h

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
//===-- Utilities for getting secure randomness -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_RANDOMNESS_H
#define LLVM_LIBC_SRC___SUPPORT_RANDOMNESS_H

#include "src/__support/common.h"

#define __need_size_t
#include <stddef.h>

namespace LIBC_NAMESPACE_DECL {
void random_fill(void *buf, size_t size);
} // namespace LIBC_NAMESPACE_DECL
#endif // LLVM_LIBC_SRC___SUPPORT_RANDOMNESS_H
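A hypothetical caller, to illustrate the intended use of the new helper (example_use is an invented name; no call sites are added by this commit). Note that random_fill reports no failure to its caller: on an unrecoverable error it stops early and leaves the rest of the buffer unmodified, matching random_fill_impl above.

#include "src/__support/OSUtil/linux/random.h"

namespace LIBC_NAMESPACE_DECL {
// Fill a fixed-size key buffer with secure random bytes.
void example_use() {
  char key[32];
  random_fill(key, sizeof(key));
}
} // namespace LIBC_NAMESPACE_DECL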
