// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel page tables and dumps the
 * information about the used sections of memory to
 * /sys/kernel/debug/kernel_page_tables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ptdump.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <linux/const.h>
#include <linux/kasan.h>
#include <asm/page.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>

#include "ptdump.h"

/*
 * To visualise what is happening:
 *
 * - PTRS_PER_P** = how many entries there are in the corresponding P**
 * - P**_SHIFT = how many bits of the address we use to index into the
 *   corresponding P**
 * - P**_SIZE is how much memory we can access through the table - not the
 *   size of the table itself.
 * P**={PGD, PUD, PMD, PTE}
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it then
 * dumps out a description of the range - i.e. PTEs that are virtually
 * contiguous with the same PTE flags are chunked together. This is to make
 * it clear how different areas of the kernel virtual memory are used.
 */
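/*
 * A minimal worked sketch of the indexing above (illustrative only; it
 * mirrors what the generic helpers pgd_index()/pud_index()/pmd_index()/
 * pte_index() compute, and is not used by this file):
 *
 *	pgd_idx = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
 *	pud_idx = (addr >> PUD_SHIFT)   & (PTRS_PER_PUD - 1);
 *	pmd_idx = (addr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1);
 *	pte_idx = (addr >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1);
 *
 * One entry at a given level spans P**_SIZE == 1UL << P**_SHIFT bytes.
 */
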
struct pg_state {
	struct ptdump_state ptdump;
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	int level;
	u64 current_flags;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static struct addr_marker address_markers[] = {
	{ 0, "Start of kernel VM" },
#ifdef MODULES_VADDR
	{ 0, "modules start" },
	{ 0, "modules end" },
#endif
	{ 0, "vmalloc() Area" },
	{ 0, "vmalloc() End" },
#ifdef CONFIG_PPC64
	{ 0, "isa I/O start" },
	{ 0, "isa I/O end" },
	{ 0, "phb I/O start" },
	{ 0, "phb I/O end" },
	{ 0, "I/O remap start" },
	{ 0, "I/O remap end" },
	{ 0, "vmemmap start" },
#else
	{ 0, "Early I/O remap start" },
	{ 0, "Early I/O remap end" },
#ifdef CONFIG_HIGHMEM
	{ 0, "Highmem PTEs start" },
	{ 0, "Highmem PTEs end" },
#endif
	{ 0, "Fixmap start" },
	{ 0, "Fixmap end" },
#endif
#ifdef CONFIG_KASAN
	{ 0, "kasan shadow mem start" },
	{ 0, "kasan shadow mem end" },
#endif
	{ -1, NULL },
};

static struct ptdump_range ptdump_range[] __ro_after_init = {
	{TASK_SIZE_MAX, ~0UL},
	{0, 0}
};

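/*
 * The marker start addresses are filled in at boot by populate_markers():
 * several of them (e.g. ioremap_bot) are runtime values rather than
 * compile-time constants, so they cannot be static initialisers here.
 */
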
#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_putc(m, c)		\
({					\
	if (m)				\
		seq_putc(m, c);		\
})

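/*
 * The wrappers above exist because the W+X checker (ptdump_check_wx()
 * below) walks the page tables with st->seq == NULL; in that case all
 * output is suppressed and only the statistics in pg_state are updated.
 */
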
void pt_dump_size(struct seq_file *m, unsigned long size)
{
	static const char units[] = " KMGTPE";
	const char *unit = units;

	/* Work out the most appropriate unit to use */
	while (!(size & 1023) && unit[1]) {
		size >>= 10;
		unit++;
	}
	pt_dump_seq_printf(m, "%9lu%c ", size, *unit);
}

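/*
 * Worked example for pt_dump_size(): 0x1000000 bytes prints as "16M"
 * (two exact divisions by 1024), while 0x3b0000 bytes prints as "3776K" -
 * the loop stops as soon as the size is not a whole multiple of 1024.
 */
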
static void dump_flag_info(struct pg_state *st, const struct flag_info
		*flag, u64 pte, int num)
{
	unsigned int i;

	for (i = 0; i < num; i++, flag++) {
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			pt_dump_seq_printf(st->seq, " %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				pt_dump_seq_printf(st->seq, " %s", s);
		}
		st->current_flags &= ~flag->mask;
	}
	if (st->current_flags != 0)
		pt_dump_seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
}

static void dump_addr(struct pg_state *st, unsigned long addr)
{
#ifdef CONFIG_PPC64
#define REG		"0x%016lx"
#else
#define REG		"0x%08lx"
#endif

	pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
	pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
	pt_dump_size(st->seq, addr - st->start_address);
}

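/*
 * A dumped line (this one from a 32-bit KASAN shadow region) then looks
 * roughly like:
 *
 *	0xf8000000-0xf8ffffff 0x07000000 16M huge rw present dirty accessed
 *
 * i.e. the virtual range, the first physical address of the range, its
 * size, and the decoded flags appended by dump_flag_info().
 */
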
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	pte_t pte = __pte(st->current_flags);

	if (!st->check_wx)
		return;

	if (!pte_write(pte) || !pte_exec(pte))
		return;

	WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX),
		  "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page_update_state(struct pg_state *st, unsigned long addr, int level, u64 val)
{
	u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
	u64 pa = val & PTE_RPN_MASK;

	st->level = level;
	st->current_flags = flag;
	st->start_address = addr;
	st->start_pa = pa;

	while (addr >= st->marker[1].start_address) {
		st->marker++;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
}

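/*
 * Note the while loop above: a single address step can cross several
 * markers at once (e.g. a large unmapped gap spanning multiple regions),
 * in which case every intervening section header is printed.
 */
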
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
	u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);

	/* At first no level is set */
	if (st->level == -1) {
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		note_page_update_state(st, addr, level, val);
	/*
	 * Dump the section of virtual memory when:
	 * - the PTE flags from one entry to the next differ.
	 * - we change levels in the tree.
	 * - the address is in a different section of memory and is thus
	 *   used for a different purpose, regardless of the flags.
	 */
	} else if (flag != st->current_flags || level != st->level ||
		   addr >= st->marker[1].start_address) {

		/* Check the PTE flags */
		if (st->current_flags) {
			note_prot_wx(st, addr);
			dump_addr(st, addr);

			/* Dump all the flags */
			if (pg_level[st->level].flag)
				dump_flag_info(st, pg_level[st->level].flag,
					       st->current_flags,
					       pg_level[st->level].num);

			pt_dump_seq_putc(st->seq, '\n');
		}

		/*
		 * Address indicates we have passed the end of the
		 * current section of virtual memory
		 */
		note_page_update_state(st, addr, level, val);
	}
}

static void populate_markers(void)
{
	int i = 0;

#ifdef CONFIG_PPC64
	address_markers[i++].start_address = PAGE_OFFSET;
#else
	address_markers[i++].start_address = TASK_SIZE;
#endif
#ifdef MODULES_VADDR
	address_markers[i++].start_address = MODULES_VADDR;
	address_markers[i++].start_address = MODULES_END;
#endif
	address_markers[i++].start_address = VMALLOC_START;
	address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
	address_markers[i++].start_address = ISA_IO_BASE;
	address_markers[i++].start_address = ISA_IO_END;
	address_markers[i++].start_address = PHB_IO_BASE;
	address_markers[i++].start_address = PHB_IO_END;
	address_markers[i++].start_address = IOREMAP_BASE;
	address_markers[i++].start_address = IOREMAP_END;
	/* What is the ifdef about? */
#ifdef CONFIG_PPC_BOOK3S_64
	address_markers[i++].start_address = H_VMEMMAP_START;
#else
	address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
	address_markers[i++].start_address = ioremap_bot;
	address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_HIGHMEM
	address_markers[i++].start_address = PKMAP_BASE;
	address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif
	address_markers[i++].start_address = FIXADDR_START;
	address_markers[i++].start_address = FIXADDR_TOP;
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_KASAN
	address_markers[i++].start_address = KASAN_SHADOW_START;
	address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
}

static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
		.level = -1,
		.ptdump = {
			.note_page = note_page,
			.range = ptdump_range,
		}
	};

	/* Traverse kernel page tables */
	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ptdump);

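/*
 * Usage sketch (assuming CONFIG_PTDUMP_DEBUGFS=y and debugfs mounted in
 * the usual place; addresses below are illustrative only):
 *
 *	# cat /sys/kernel/debug/kernel_page_tables
 *	---[ Start of kernel VM ]---
 *	0xc000000000000000-0xc00000000fffffff 0x0000000000000000       256M ...
 */
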
static void __init build_pgtable_complete_mask(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].flag)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].flag[j].mask;
}

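/*
 * pg_level[].mask, accumulated above, is the union of every flag mask
 * decoded at that level; note_page() masks each entry down to those bits
 * before deciding whether two entries can share a dumped range.
 */
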
bool ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.level = -1,
		.check_wx = true,
		.ptdump = {
			.note_page = note_page,
			.range = ptdump_range,
		}
	};

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !mmu_has_feature(MMU_FTR_KERNEL_RO))
		return true;

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);

	if (st.wx_pages) {
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);

		return false;
	} else {
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");

		return true;
	}
}

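/*
 * Note: on Book3S 64 without MMU_FTR_KERNEL_RO the MMU cannot enforce
 * kernel write protection, so the W+X check above is skipped and treated
 * as passing.
 */
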
static int __init ptdump_init(void)
{
#ifdef CONFIG_PPC64
	if (!radix_enabled())
		ptdump_range[0].start = KERN_VIRT_START;
	else
		ptdump_range[0].start = PAGE_OFFSET;

	ptdump_range[0].end = PAGE_OFFSET + (PGDIR_SIZE * PTRS_PER_PGD);
#endif

	populate_markers();
	build_pgtable_complete_mask();

	if (IS_ENABLED(CONFIG_PTDUMP_DEBUGFS))
		debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);

	return 0;
}
device_initcall(ptdump_init);