Skip to content

Commit 39d114d

Browse files
aryabinin
authored and ctmarinas committed
arm64: add KASAN support
This patch adds arch specific code for kernel address sanitizer (see Documentation/kasan.txt). 1/8 of kernel addresses reserved for shadow memory. There was no big enough hole for this, so virtual addresses for shadow were stolen from vmalloc area. At early boot stage the whole shadow region populated with just one physical page (kasan_zero_page). Later, this page reused as readonly zero shadow for some memory that KASan currently don't track (vmalloc). After mapping the physical memory, pages for shadow memory are allocated and mapped. Functions like memset/memmove/memcpy do a lot of memory accesses. If bad pointer passed to one of these function it is important to catch this. Compiler's instrumentation cannot do this since these functions are written in assembly. KASan replaces memory functions with manually instrumented variants. Original functions declared as weak symbols so strong definitions in mm/kasan/kasan.c could replace them. Original functions have aliases with '__' prefix in name, so we could call non-instrumented variant if needed. Some files built without kasan instrumentation (e.g. mm/slub.c). Original mem* function replaced (via #define) with prefixed variants to disable memory access checks for such files. Signed-off-by: Andrey Ryabinin <[email protected]> Tested-by: Linus Walleij <[email protected]> Reviewed-by: Catalin Marinas <[email protected]> Signed-off-by: Catalin Marinas <[email protected]>
1 parent fd2203d commit 39d114d

File tree

18 files changed

+288
-6
lines changed

18 files changed

+288
-6
lines changed

arch/arm64/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ config ARM64
4848
select HAVE_ARCH_AUDITSYSCALL
4949
select HAVE_ARCH_BITREVERSE
5050
select HAVE_ARCH_JUMP_LABEL
51+
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
5152
select HAVE_ARCH_KGDB
5253
select HAVE_ARCH_SECCOMP_FILTER
5354
select HAVE_ARCH_TRACEHOOK

arch/arm64/Makefile

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,13 @@ else
5555
TEXT_OFFSET := 0x00080000
5656
endif
5757

58+
# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
59+
# in 32-bit arithmetic
60+
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
61+
(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
62+
+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
63+
- (1 << (64 - 32 - 3)) )) )
64+
5865
export TEXT_OFFSET GZFLAGS
5966

6067
core-y += arch/arm64/kernel/ arch/arm64/mm/

arch/arm64/include/asm/kasan.h

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
#ifndef __ASM_KASAN_H
2+
#define __ASM_KASAN_H
3+
4+
#ifndef __ASSEMBLY__
5+
6+
#ifdef CONFIG_KASAN
7+
8+
#include <asm/memory.h>
9+
10+
/*
11+
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
12+
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
13+
*/
14+
#define KASAN_SHADOW_START (VA_START)
15+
#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
16+
17+
/*
18+
* This value is used to map an address to the corresponding shadow
19+
* address by the following formula:
20+
* shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
21+
*
22+
* (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END]
23+
* cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET
24+
* should satisfy the following equation:
25+
* KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61)
26+
*/
27+
#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3)))
28+
29+
void kasan_init(void);
30+
31+
#else
32+
static inline void kasan_init(void) { }
33+
#endif
34+
35+
#endif
36+
#endif

arch/arm64/include/asm/pgtable.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,14 @@
4141
* fixed mappings and modules
4242
*/
4343
#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
44+
45+
#ifndef CONFIG_KASAN
4446
#define VMALLOC_START (VA_START)
47+
#else
48+
#include <asm/kasan.h>
49+
#define VMALLOC_START (KASAN_SHADOW_END + SZ_64K)
50+
#endif
51+
4552
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
4653

4754
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))

arch/arm64/include/asm/string.h

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,17 +36,33 @@ extern __kernel_size_t strnlen(const char *, __kernel_size_t);
3636

3737
#define __HAVE_ARCH_MEMCPY
3838
extern void *memcpy(void *, const void *, __kernel_size_t);
39+
extern void *__memcpy(void *, const void *, __kernel_size_t);
3940

4041
#define __HAVE_ARCH_MEMMOVE
4142
extern void *memmove(void *, const void *, __kernel_size_t);
43+
extern void *__memmove(void *, const void *, __kernel_size_t);
4244

4345
#define __HAVE_ARCH_MEMCHR
4446
extern void *memchr(const void *, int, __kernel_size_t);
4547

4648
#define __HAVE_ARCH_MEMSET
4749
extern void *memset(void *, int, __kernel_size_t);
50+
extern void *__memset(void *, int, __kernel_size_t);
4851

4952
#define __HAVE_ARCH_MEMCMP
5053
extern int memcmp(const void *, const void *, size_t);
5154

55+
56+
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
57+
58+
/*
59+
* For files that are not instrumented (e.g. mm/slub.c) we
60+
* should use not instrumented version of mem* functions.
61+
*/
62+
63+
#define memcpy(dst, src, len) __memcpy(dst, src, len)
64+
#define memmove(dst, src, len) __memmove(dst, src, len)
65+
#define memset(s, c, n) __memset(s, c, n)
66+
#endif
67+
5268
#endif

arch/arm64/kernel/Makefile

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
77
CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
88
CFLAGS_armv8_deprecated.o := -I$(src)
99

10+
KASAN_SANITIZE_efi-stub.o := n
11+
1012
CFLAGS_REMOVE_ftrace.o = -pg
1113
CFLAGS_REMOVE_insn.o = -pg
1214
CFLAGS_REMOVE_return_address.o = -pg

arch/arm64/kernel/arm64ksyms.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,9 @@ EXPORT_SYMBOL(strnlen);
5151
EXPORT_SYMBOL(memset);
5252
EXPORT_SYMBOL(memcpy);
5353
EXPORT_SYMBOL(memmove);
54+
EXPORT_SYMBOL(__memset);
55+
EXPORT_SYMBOL(__memcpy);
56+
EXPORT_SYMBOL(__memmove);
5457
EXPORT_SYMBOL(memchr);
5558
EXPORT_SYMBOL(memcmp);
5659

arch/arm64/kernel/head.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -444,6 +444,9 @@ __mmap_switched:
444444
str_l x21, __fdt_pointer, x5 // Save FDT pointer
445445
str_l x24, memstart_addr, x6 // Save PHYS_OFFSET
446446
mov x29, #0
447+
#ifdef CONFIG_KASAN
448+
bl kasan_early_init
449+
#endif
447450
b start_kernel
448451
ENDPROC(__mmap_switched)
449452

arch/arm64/kernel/image.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,12 @@ __efistub_strcmp = __pi_strcmp;
8080
__efistub_strncmp = __pi_strncmp;
8181
__efistub___flush_dcache_area = __pi___flush_dcache_area;
8282

83+
#ifdef CONFIG_KASAN
84+
__efistub___memcpy = __pi_memcpy;
85+
__efistub___memmove = __pi_memmove;
86+
__efistub___memset = __pi_memset;
87+
#endif
88+
8389
__efistub__text = _text;
8490
__efistub__end = _end;
8591
__efistub__edata = _edata;

arch/arm64/kernel/module.c

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
#include <linux/bitops.h>
2222
#include <linux/elf.h>
2323
#include <linux/gfp.h>
24+
#include <linux/kasan.h>
2425
#include <linux/kernel.h>
2526
#include <linux/mm.h>
2627
#include <linux/moduleloader.h>
@@ -34,9 +35,18 @@
3435

3536
void *module_alloc(unsigned long size)
3637
{
37-
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
38-
GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
39-
NUMA_NO_NODE, __builtin_return_address(0));
38+
void *p;
39+
40+
p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
41+
GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
42+
NUMA_NO_NODE, __builtin_return_address(0));
43+
44+
if (p && (kasan_module_alloc(p, size) < 0)) {
45+
vfree(p);
46+
return NULL;
47+
}
48+
49+
return p;
4050
}
4151

4252
enum aarch64_reloc_op {

arch/arm64/kernel/setup.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@
5454
#include <asm/elf.h>
5555
#include <asm/cpufeature.h>
5656
#include <asm/cpu_ops.h>
57+
#include <asm/kasan.h>
5758
#include <asm/sections.h>
5859
#include <asm/setup.h>
5960
#include <asm/smp_plat.h>
@@ -434,6 +435,9 @@ void __init setup_arch(char **cmdline_p)
434435

435436
paging_init();
436437
relocate_initrd();
438+
439+
kasan_init();
440+
437441
request_standard_resources();
438442

439443
early_ioremap_reset();

arch/arm64/lib/memcpy.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,10 @@
6868
stp \ptr, \regB, [\regC], \val
6969
.endm
7070

71+
.weak memcpy
72+
ENTRY(__memcpy)
7173
ENTRY(memcpy)
7274
#include "copy_template.S"
7375
ret
7476
ENDPIPROC(memcpy)
77+
ENDPROC(__memcpy)

arch/arm64/lib/memmove.S

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,12 +57,14 @@ C_h .req x12
5757
D_l .req x13
5858
D_h .req x14
5959

60+
.weak memmove
61+
ENTRY(__memmove)
6062
ENTRY(memmove)
6163
cmp dstin, src
62-
b.lo memcpy
64+
b.lo __memcpy
6365
add tmp1, src, count
6466
cmp dstin, tmp1
65-
b.hs memcpy /* No overlap. */
67+
b.hs __memcpy /* No overlap. */
6668

6769
add dst, dstin, count
6870
add src, src, count
@@ -195,3 +197,4 @@ ENTRY(memmove)
195197
b.ne .Ltail63
196198
ret
197199
ENDPIPROC(memmove)
200+
ENDPROC(__memmove)

arch/arm64/lib/memset.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,8 @@ dst .req x8
5454
tmp3w .req w9
5555
tmp3 .req x9
5656

57+
.weak memset
58+
ENTRY(__memset)
5759
ENTRY(memset)
5860
mov dst, dstin /* Preserve return value. */
5961
and A_lw, val, #255
@@ -214,3 +216,4 @@ ENTRY(memset)
214216
b.ne .Ltail_maybe_long
215217
ret
216218
ENDPIPROC(memset)
219+
ENDPROC(__memset)

arch/arm64/mm/Makefile

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,3 +4,6 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
44
context.o proc.o pageattr.o
55
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
66
obj-$(CONFIG_ARM64_PTDUMP) += dump.o
7+
8+
obj-$(CONFIG_KASAN) += kasan_init.o
9+
KASAN_SANITIZE_kasan_init.o := n

0 commit comments

Comments
 (0)