Skip to content
This repository was archived by the owner on Apr 9, 2025. It is now read-only.
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions arch/x86/include/asm/kasan.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,12 @@
#ifdef CONFIG_KASAN
void __init kasan_early_init(void);
void __init kasan_init(void);
/* Populate KASAN shadow pages for an arbitrary VA range (defined in kasan_init_64.c). */
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
#else
/* No-op stubs so callers need no #ifdef when KASAN is disabled. */
static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
int nid) { }
#endif

#endif
Expand Down
5 changes: 5 additions & 0 deletions arch/x86/kernel/cpu/common.c
Original file line number Diff line number Diff line change
Expand Up @@ -2274,6 +2274,11 @@ void __init arch_cpu_finalize_init(void)
fpu__init_cpu();

alternative_instructions();
unsigned long start = PFN_ALIGN(_text);
unsigned long end = PFN_ALIGN(_etext);
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
(end - start) >> 10);
set_memory_ro(start, (end - start) >> PAGE_SHIFT);

if (IS_ENABLED(CONFIG_X86_64)) {
/*
Expand Down
9 changes: 7 additions & 2 deletions arch/x86/mm/cpu_entry_area.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/kasan.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

Expand Down Expand Up @@ -39,10 +40,11 @@ static __init void init_cea_offsets(void)

/* O(sodding terrible) */
for_each_possible_cpu(i) {
unsigned int cea;
static unsigned int cea = 1;

again:
cea = prandom_u32_max(max_cea);
//cea = prandom_u32_max(max_cea);
cea++;

for_each_possible_cpu(j) {
if (cea_offset(j) == cea)
Expand Down Expand Up @@ -196,6 +198,9 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
pgprot_t tss_prot = PAGE_KERNEL;
#endif

kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
early_cpu_to_node(cpu));

cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

cea_map_percpu_pages(&cea->entry_stack_page,
Expand Down
53 changes: 38 additions & 15 deletions arch/x86/mm/kasan_init_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -316,10 +316,33 @@ void __init kasan_early_init(void)
kasan_map_early_shadow(init_top_pgt);
}

/*
 * Map @va to its KASAN shadow address, rounded down to the containing
 * page boundary.
 */
static unsigned long kasan_mem_to_shadow_align_down(unsigned long va)
{
	void *shadow_va = kasan_mem_to_shadow((void *)va);

	return round_down((unsigned long)shadow_va, PAGE_SIZE);
}

/*
 * Map @va to its KASAN shadow address, rounded up to the next page
 * boundary.
 */
static unsigned long kasan_mem_to_shadow_align_up(unsigned long va)
{
	void *shadow_va = kasan_mem_to_shadow((void *)va);

	return round_up((unsigned long)shadow_va, PAGE_SIZE);
}

/*
 * Populate KASAN shadow pages covering the virtual range
 * [@va, @va + @size) on node @nid.  The shadow range is widened
 * outward to whole-page boundaries before population.
 */
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
{
	unsigned long start = kasan_mem_to_shadow_align_down((unsigned long)va);
	unsigned long end = kasan_mem_to_shadow_align_up((unsigned long)va + size);

	kasan_populate_shadow(start, end, nid);
}

void __init kasan_init(void)
{
unsigned long shadow_cea_begin, shadow_cea_per_cpu_begin, shadow_cea_end;
int i;
void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

Expand Down Expand Up @@ -360,16 +383,10 @@ void __init kasan_init(void)
map_range(&pfn_mapped[i]);
}

shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
shadow_cpu_entry_begin = (void *)round_down(
(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
CPU_ENTRY_AREA_MAP_SIZE);
shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
shadow_cpu_entry_end = (void *)round_up(
(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
shadow_cea_begin = kasan_mem_to_shadow_align_down(CPU_ENTRY_AREA_BASE);
shadow_cea_per_cpu_begin = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_PER_CPU);
shadow_cea_end = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_BASE +
CPU_ENTRY_AREA_MAP_SIZE);

kasan_populate_early_shadow(
kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
Expand All @@ -391,12 +408,18 @@ void __init kasan_init(void)

kasan_populate_early_shadow(
kasan_mem_to_shadow((void *)VMALLOC_END + 1),
shadow_cpu_entry_begin);
(void *)shadow_cea_begin);

kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
(unsigned long)shadow_cpu_entry_end, 0);
/*
* Populate the shadow for the shared portion of the CPU entry area.
* Shadows for the per-CPU areas are mapped on-demand, as each CPU's
* area is randomly placed somewhere in the 512GiB range and mapping
* the entire 512GiB range is prohibitively expensive.
*/
kasan_populate_shadow(shadow_cea_begin,
shadow_cea_per_cpu_begin, 0);

kasan_populate_early_shadow(shadow_cpu_entry_end,
kasan_populate_early_shadow((void *)shadow_cea_end,
kasan_mem_to_shadow((void *)__START_KERNEL_map));

kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
Expand Down
1 change: 1 addition & 0 deletions init/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -1363,6 +1363,7 @@ endif
config BOOT_CONFIG
bool "Boot config support"
select BLK_DEV_INITRD
select ARCH_KEEP_MEMBLOCK
help
Extra boot config allows system admin to pass a config file as
complemental extension of kernel cmdline when booting.
Expand Down
4 changes: 2 additions & 2 deletions mm/memtest.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,10 @@ static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size
last_bad = 0;

for (p = start; p < end; p++)
*p = pattern;
WRITE_ONCE(*p, pattern);

for (p = start; p < end; p++, start_phys_aligned += incr) {
if (*p == pattern)
if (READ_ONCE(*p) == pattern)
continue;
if (start_phys_aligned == last_bad + incr) {
last_bad += incr;
Expand Down