arch/x86/include/asm/cpu_entry_area.h (10 additions, 6 deletions)

@@ -94,7 +94,6 @@ struct cpu_entry_area {
 	 */
 	struct cea_exception_stacks estacks;
 #endif
-#ifdef CONFIG_CPU_SUP_INTEL
 	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
@@ -105,25 +104,30 @@ struct cpu_entry_area {
	 * Reserve enough fixmap PTEs.
	 */
 	struct debug_store_buffers cpu_debug_buffers;
-#endif
 };

-#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
+#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))

 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

 extern void setup_cpu_entry_areas(void);
 extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

+/* Single page reserved for the readonly IDT mapping: */
 #define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
 #define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

 #define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

-#define CPU_ENTRY_AREA_MAP_SIZE \
-	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
+#ifdef CONFIG_X86_32
+#define CPU_ENTRY_AREA_MAP_SIZE	(CPU_ENTRY_AREA_PER_CPU +		\
+				 (CPU_ENTRY_AREA_SIZE * NR_CPUS) -	\
+				 CPU_ENTRY_AREA_BASE)
+#else
+#define CPU_ENTRY_AREA_MAP_SIZE	P4D_SIZE
+#endif
+

 extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
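Note what the MAP_SIZE split above buys: on 64-bit the mapping window grows from a tightly packed NR_CPUS * CPU_ENTRY_AREA_SIZE to a full P4D_SIZE, and the unused span is what init_cea_offsets() in arch/x86/mm/cpu_entry_area.c (further down) randomizes over. A rough userspace sketch of the slot arithmetic, with stand-in constants (the 39-page area size and the 512 GiB P4D span are illustrative, not read from kernel headers):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define P4D_SIZE	(512UL << 30)		/* 512 GiB, illustrative */
#define CEA_SIZE	(39UL * PAGE_SIZE)	/* stand-in for sizeof(struct cpu_entry_area) */

int main(void)
{
	/* Mirrors max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE: */
	unsigned long max_cea = (P4D_SIZE - PAGE_SIZE) / CEA_SIZE;

	printf("randomization slots: %lu\n", max_cea);	/* roughly 3.4 million here */
	return 0;
}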
arch/x86/include/asm/kasan.h (3 additions, 0 deletions)

@@ -28,9 +28,12 @@
 #ifdef CONFIG_KASAN
 void __init kasan_early_init(void);
 void __init kasan_init(void);
+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_init(void) { }
+static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
+						   int nid) { }
 #endif

 #endif
arch/x86/include/asm/pgtable_32_types.h (4 additions, 4 deletions)

@@ -44,11 +44,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
  * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
  * to avoid include recursion hell
  */
-#define CPU_ENTRY_AREA_PAGES	(NR_CPUS * 40)
+#define CPU_ENTRY_AREA_PAGES	(NR_CPUS * 39)

-#define CPU_ENTRY_AREA_BASE \
-	((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \
-	 & PMD_MASK)
+/* The +1 is for the readonly IDT page: */
+#define CPU_ENTRY_AREA_BASE \
+	((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)

 #define LDT_BASE_ADDR \
 	((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
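The 32-bit constant shrinks because the per-CPU entry area is 39 pages, and the single extra page in the base calculation is the shared readonly IDT mapping; the old NR_CPUS * 40 figure over-reserved by folding roughly one IDT page per CPU into the multiplier. A standalone restatement of the new exact-fit invariant, the same condition the BUILD_BUG_ON() in arch/x86/mm/cpu_entry_area.c below enforces (NR_CPUS and the 39-page size are illustrative stand-ins):

#include <assert.h>

#define PAGE_SIZE		4096UL
#define NR_CPUS			8	/* illustrative */
#define CPU_ENTRY_AREA_SIZE	(39UL * PAGE_SIZE)
#define CPU_ENTRY_AREA_PAGES	(NR_CPUS * 39)

/* One readonly IDT page followed by NR_CPUS packed per-CPU areas: */
#define CPU_ENTRY_AREA_MAP_SIZE	(PAGE_SIZE + NR_CPUS * CPU_ENTRY_AREA_SIZE)

/* The +1 is the IDT page: */
static_assert((CPU_ENTRY_AREA_PAGES + 1) * PAGE_SIZE == CPU_ENTRY_AREA_MAP_SIZE,
	      "32-bit CPU entry area must be exactly NR_CPUS * 39 + 1 pages");

int main(void) { return 0; }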
arch/x86/mm/cpu_entry_area.c (57 additions, 2 deletions)

@@ -4,23 +4,72 @@
 #include <linux/percpu.h>
 #include <linux/kallsyms.h>
 #include <linux/kcore.h>
+#include <linux/prandom.h>

 #include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
 #include <asm/fixmap.h>
 #include <asm/desc.h>
+#include <asm/kasan.h>
+#include <asm/setup.h>

 static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
+
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return per_cpu(_cea_offset, cpu);
+}
+
+static __init void init_cea_offsets(void)
+{
+	unsigned int max_cea;
+	unsigned int i, j;
+
+	if (!kaslr_enabled()) {
+		for_each_possible_cpu(i)
+			per_cpu(_cea_offset, i) = i;
+		return;
+	}
+
+	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+
+	/* O(sodding terrible) */
+	for_each_possible_cpu(i) {
+		unsigned int cea;
+
+again:
+		cea = prandom_u32_max(max_cea);
+
+		for_each_possible_cpu(j) {
+			if (cea_offset(j) == cea)
+				goto again;
+
+			if (i == j)
+				break;
+		}
+
+		per_cpu(_cea_offset, i) = cea;
+	}
+}
+#else /* !X86_64 */
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return cpu;
+}
+static inline void init_cea_offsets(void) { }
 #endif

 /* Is called from entry code, so must be noinstr */
 noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

 	return (struct cpu_entry_area *) va;
@@ -135,6 +184,9 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
 	pgprot_t tss_prot = PAGE_KERNEL;
 #endif

+	kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
+					early_cpu_to_node(cpu));
+
 	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

 	cea_map_percpu_pages(&cea->entry_stack_page,
@@ -186,7 +238,8 @@ static __init void setup_cpu_entry_area_ptes(void)
 #ifdef CONFIG_X86_32
 	unsigned long start, end;

-	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
+	/* The +1 is for the readonly IDT: */
+	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

 	start = CPU_ENTRY_AREA_BASE;
@@ -202,6 +255,8 @@ void __init setup_cpu_entry_areas(void)
 {
 	unsigned int cpu;

+	init_cea_offsets();
+
 	setup_cpu_entry_area_ptes();

 	for_each_possible_cpu(cpu)
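init_cea_offsets() draws a random slot for each CPU and redraws whenever the slot collides with one already handed out, which is quadratic in the CPU count, as the O(sodding terrible) comment concedes; with millions of slots and at most thousands of CPUs, retries are rare and the loop runs once at boot. A minimal userspace model of that rejection loop (rand() stands in for prandom_u32_max(), a plain array for the per-CPU variable; all names here are hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NCPUS	8	/* stand-in for the possible-CPU mask */
#define MAX_CEA	1024	/* stand-in for (MAP_SIZE - PAGE_SIZE) / AREA_SIZE */

static unsigned int cea_slot[NCPUS];

int main(void)
{
	srand((unsigned int)time(NULL));

	for (int i = 0; i < NCPUS; i++) {
		unsigned int cea;
again:
		cea = (unsigned int)rand() % MAX_CEA;	/* ~ prandom_u32_max(MAX_CEA) */

		/* Redraw if any CPU that already has a slot owns this one. */
		for (int j = 0; j < i; j++) {
			if (cea_slot[j] == cea)
				goto again;
		}

		cea_slot[i] = cea;
		printf("cpu %d -> slot %u\n", i, cea);
	}
	return 0;
}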
arch/x86/mm/kasan_init_64.c (12 additions, 3 deletions)

@@ -291,6 +291,18 @@ void __init kasan_early_init(void)
 	kasan_map_early_shadow(init_top_pgt);
 }

+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
+{
+	unsigned long shadow_start, shadow_end;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(va);
+	shadow_start = round_down(shadow_start, PAGE_SIZE);
+	shadow_end = (unsigned long)kasan_mem_to_shadow(va + size);
+	shadow_end = round_up(shadow_end, PAGE_SIZE);
+
+	kasan_populate_shadow(shadow_start, shadow_end, nid);
+}
+
 void __init kasan_init(void)
 {
 	int i;
@@ -354,9 +366,6 @@ void __init kasan_init(void)
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		shadow_cpu_entry_begin);

-	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
-			      (unsigned long)shadow_cpu_entry_end, 0);
-
 	kasan_populate_early_shadow(shadow_cpu_entry_end,
 			kasan_mem_to_shadow((void *)__START_KERNEL_map));

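kasan_populate_shadow_for_vaddr() replaces the single populate call that kasan_init() previously made for the packed [shadow_cpu_entry_begin, shadow_cpu_entry_end) range: once the areas are scattered, each one's shadow must be populated individually at setup time. A standalone sketch of the address arithmetic under the usual x86-64 KASAN layout (one shadow byte per 8 bytes of memory; the shadow offset and the example address are illustrative):

#include <stdio.h>

#define PAGE_SIZE		 4096UL
#define KASAN_SHADOW_SCALE_SHIFT 3			/* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET	 0xdffffc0000000000UL	/* illustrative offset */

static unsigned long kasan_mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

static unsigned long round_down_pg(unsigned long x)
{
	return x & ~(PAGE_SIZE - 1);
}

static unsigned long round_up_pg(unsigned long x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	/* Made-up randomized area address and a 39-page area size: */
	unsigned long va = 0xfffffe0000002000UL;
	unsigned long size = 39UL * PAGE_SIZE;

	unsigned long start = round_down_pg(kasan_mem_to_shadow(va));
	unsigned long end = round_up_pg(kasan_mem_to_shadow(va + size));

	/* 39 pages need 39 * 4096 / 8 = 19968 shadow bytes, page-rounded: */
	printf("shadow range: [%#lx, %#lx), %lu pages\n",
	       start, end, (end - start) / PAGE_SIZE);
	return 0;
}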