RISC-V Patches for the 6.2 Merge Window, Part 1
* Support for the T-Head PMU via the perf subsystem
* ftrace support for rv32
* Support for non-volatile memory devices
* Various fixes and cleanups

-----BEGIN PGP SIGNATURE-----

iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmOZ6WsTHHBhbG1lckBk
YWJiZWx0LmNvbQAKCRAuExnzX7sYiWGcD/wLGiHq3ekQhl5D+CaA1WlJ5XzQFfY2
bv1ZCZGdjuiv66jiMlmEsbpfUCk3bSAIjCO3MHQNDmTuPJztCHVJXOHbZFWItzzO
soW4nXHKW1sGHa7hDLGQUPkltA48OdPoyqEDvlnpyEWFT+2xHwdFEURWE85FXGeq
ZzFSKUQqX/V52n9TS4M4QtmNnQatR3TgIs8ttzD4JqwWFBbp4/iBfIGt6n3W24XH
9lKWikO4YOYUPl0KVIakM4d8NmX7g+7vhCKWavLke1fF/IQOlyWwA0eM8ryj33OG
L1nFkqfF3mCw9i72WHftlc0rAgVqcYS8ntnQkPNpt2zPp3xFjDwEy+XiZrRE+sAp
m5Ma2Tkw7G3ueBtXwP1yo+EKa7PrVFbCRD/rEpLJAC6+9ktvc7cYs39E08O+wrwT
qkYThDolovqMOqfOq6afEGy5lfIa5U00vxK+3MXiE3eLEjHSJhwTXadUbwyMjJWE
zOwA6p5NfDFzklESSNTtIBY85Zlh/g2q6GWCy7yBQnlaSdbpDxcnAlSZipq66Iqm
9ytdZiHid4BIRQxr5qyXTB184BvFnWNRs9NGhCj38uLEnuxwSChzwoh/WPDxLNte
U9ouvwJO5U2qAZsMGJhY8W2s/9WvWpSqRhSMA/nnNV1Hh+URFz8rFXAln6kNn//v
j+cYGCyjLnO1hg==
=4Ak2
-----END PGP SIGNATURE-----

Merge tag 'riscv-for-linus-6.2-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:

 - Support for the T-Head PMU via the perf subsystem

 - ftrace support for rv32

 - Support for non-volatile memory devices

 - Various fixes and cleanups

* tag 'riscv-for-linus-6.2-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (52 commits)
  Documentation: RISC-V: patch-acceptance: s/implementor/implementer
  Documentation: RISC-V: Mention the UEFI Standards
  Documentation: RISC-V: Allow patches for non-standard behavior
  Documentation: RISC-V: Fix a typo in patch-acceptance
  riscv: Fixup compile error with !MMU
  riscv: Fix P4D_SHIFT definition for 3-level page table mode
  riscv: Apply a static assert to riscv_isa_ext_id
  RISC-V: Add some comments about the shadow and overflow stacks
  RISC-V: Align the shadow stack
  RISC-V: Ensure Zicbom has a valid block size
  RISC-V: Introduce riscv_isa_extension_check
  RISC-V: Improve use of isa2hwcap[]
  riscv: Don't duplicate _ALTERNATIVE_CFG* macros
  riscv: alternatives: Drop the underscores from the assembly macro names
  riscv: alternatives: Don't name unused macro parameters
  riscv: Don't duplicate __ALTERNATIVE_CFG in __ALTERNATIVE_CFG_2
  riscv: mm: call best_map_size many times during linear-mapping
  riscv: Move cast inside kernel_mapping_[pv]a_to_[vp]a
  riscv: Fix crash during early errata patching
  riscv: boot: add zstd support
  ...
commit eb67d239f3
@@ -595,3 +595,32 @@ X2TLB
 -----
 
 Indicates whether the crashed kernel enabled SH extended mode.
+
+RISCV64
+=======
+
+VA_BITS
+-------
+
+The maximum number of bits for virtual addresses. Used to compute the
+virtual memory ranges.
+
+PAGE_OFFSET
+-----------
+
+Indicates the virtual kernel start address of the direct-mapped RAM region.
+
+phys_ram_base
+-------------
+
+Indicates the start physical RAM address.
+
+MODULES_VADDR|MODULES_END|VMALLOC_START|VMALLOC_END|VMEMMAP_START|VMEMMAP_END|KERNEL_LINK_ADDR
+----------------------------------------------------------------------------------------------
+
+Used to get the correct ranges:
+
+  * MODULES_VADDR ~ MODULES_END : Kernel module space.
+  * VMALLOC_START ~ VMALLOC_END : vmalloc() / ioremap() space.
+  * VMEMMAP_START ~ VMEMMAP_END : vmemmap space, used for struct page array.
+  * KERNEL_LINK_ADDR : start address of Kernel link and BPF
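As context for the documentation above: these exported values land in the vmcoreinfo ELF note as plain "NUMBER(name)=value" text lines, which dump tools parse to recover the kernel's virtual memory layout. A minimal, hedged sketch of such a consumer — the helper below is illustrative only, not makedumpfile's actual code:

    /* Illustrative only: pull one NUMBER() entry out of a vmcoreinfo blob. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int vmcoreinfo_number(const char *info, const char *name,
                                 unsigned long *out)
    {
            char key[64];
            const char *p;

            snprintf(key, sizeof(key), "NUMBER(%s)=", name);
            p = strstr(info, key);
            if (!p)
                    return -1;
            *out = strtoul(p + strlen(key), NULL, 0); /* values are 0x-prefixed */
            return 0;
    }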
@@ -21,7 +21,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: | TODO |
     |       sparc: | TODO |
@@ -20,16 +20,22 @@ Submit Checklist Addendum
 -------------------------
 We'll only accept patches for new modules or extensions if the
 specifications for those modules or extensions are listed as being
-"Frozen" or "Ratified" by the RISC-V Foundation.  (Developers may, of
-course, maintain their own Linux kernel trees that contain code for
-any draft extensions that they wish.)
+unlikely to be incompatibly changed in the future.  For
+specifications from the RISC-V foundation this means "Frozen" or
+"Ratified", for the UEFI forum specifications this means a published
+ECR.  (Developers may, of course, maintain their own Linux kernel trees
+that contain code for any draft extensions that they wish.)
 
-Additionally, the RISC-V specification allows implementors to create
+Additionally, the RISC-V specification allows implementers to create
 their own custom extensions.  These custom extensions aren't required
 to go through any review or ratification process by the RISC-V
 Foundation.  To avoid the maintenance complexity and potential
 performance impact of adding kernel code for implementor-specific
-RISC-V extensions, we'll only to accept patches for extensions that
-have been officially frozen or ratified by the RISC-V Foundation.
-(Implementors, may, of course, maintain their own Linux kernel trees
-containing code for any custom extensions that they wish.)
+RISC-V extensions, we'll only consider patches for extensions that either:
+
+- Have been officially frozen or ratified by the RISC-V Foundation, or
+- Have been implemented in hardware that is widely available, per standard
+  Linux practice.
+
+(Implementers, may, of course, maintain their own Linux kernel trees containing
+code for any custom extensions that they wish.)
@@ -25,6 +25,7 @@ config RISCV
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_MMIOWB
+	select ARCH_HAS_PMEM_API
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_DIRECT_MAP if MMU
 	select ARCH_HAS_SET_MEMORY if MMU
@@ -72,6 +73,8 @@ config RISCV
 	select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
+	select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
 	select HAVE_ARCH_KASAN if MMU && 64BIT
@@ -99,6 +102,7 @@ config RISCV
 	select HAVE_KPROBES if !XIP_KERNEL
 	select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
 	select HAVE_KRETPROBES if !XIP_KERNEL
+	select HAVE_RETHOOK if !XIP_KERNEL
 	select HAVE_MOVE_PMD
 	select HAVE_MOVE_PUD
 	select HAVE_PCI
@@ -123,12 +127,18 @@ config RISCV
 	select PCI_MSI if PCI
 	select RISCV_INTC
 	select RISCV_TIMER if RISCV_SBI
 	select SIFIVE_PLIC
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
 	select TRACE_IRQFLAGS_SUPPORT
 	select UACCESS_MEMCPY if !MMU
 	select ZONE_DMA32 if 64BIT
+	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
 
 config ARCH_MMAP_RND_BITS_MIN
 	default 18 if 64BIT
@@ -274,11 +284,6 @@ config ARCH_RV64I
 	bool "RV64I"
 	select 64BIT
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
-	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
-	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
-	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
-	select HAVE_FUNCTION_GRAPH_TRACER
-	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
 	select SWIOTLB if MMU
 
 endchoice
@@ -502,7 +507,7 @@ config KEXEC_FILE
 	select KEXEC_CORE
 	select KEXEC_ELF
 	select HAVE_IMA_KEXEC if IMA
-	depends on 64BIT
+	depends on 64BIT && MMU
 	help
 	  This is new version of kexec system call. This system call is
 	  file based and takes file descriptors as system call argument
@@ -691,6 +696,8 @@ menu "CPU Power Management"
 
 source "drivers/cpuidle/Kconfig"
 
+source "drivers/cpufreq/Kconfig"
+
 endmenu # "CPU Power Management"
 
 source "arch/riscv/kvm/Kconfig"
@@ -66,4 +66,17 @@ config ERRATA_THEAD_CMO
 
 	  If you don't know what to do here, say "Y".
 
+config ERRATA_THEAD_PMU
+	bool "Apply T-Head PMU errata"
+	depends on ERRATA_THEAD && RISCV_PMU_SBI
+	default y
+	help
+	  The T-Head C9xx cores implement a PMU overflow extension very
+	  similar to the core SSCOFPMF extension.
+
+	  This will apply the overflow errata to handle the non-standard
+	  behaviour via the regular SBI PMU driver and interface.
+
+	  If you don't know what to do here, say "Y".
+
 endmenu # "CPU errata selection"
@@ -3,7 +3,6 @@ menu "SoC selection"
 config SOC_MICROCHIP_POLARFIRE
 	bool "Microchip PolarFire SoCs"
 	select MCHP_CLK_MPFS
-	select SIFIVE_PLIC
 	help
 	  This enables support for Microchip PolarFire SoC platforms.
 
@@ -18,7 +17,6 @@ config SOC_SIFIVE
 	select SERIAL_SIFIVE_CONSOLE if TTY
 	select CLK_SIFIVE
 	select CLK_SIFIVE_PRCI
-	select SIFIVE_PLIC
 	select ERRATA_SIFIVE if !XIP_KERNEL
 	help
 	  This enables support for SiFive SoC platform hardware.
@@ -27,7 +25,6 @@ config SOC_STARFIVE
 	bool "StarFive SoCs"
 	select PINCTRL
 	select RESET_CONTROLLER
-	select SIFIVE_PLIC
 	help
 	  This enables support for StarFive SoC platform hardware.
 
@@ -39,7 +36,6 @@ config SOC_VIRT
 	select POWER_RESET_SYSCON_POWEROFF
 	select GOLDFISH
 	select RTC_DRV_GOLDFISH if RTC_CLASS
-	select SIFIVE_PLIC
 	select PM_GENERIC_DOMAINS if PM
 	select PM_GENERIC_DOMAINS_OF if PM && OF
 	select RISCV_SBI_CPUIDLE if CPU_IDLE && RISCV_SBI
@@ -52,7 +48,6 @@ config SOC_CANAAN
 	select CLINT_TIMER if RISCV_M_MODE
 	select SERIAL_SIFIVE if TTY
 	select SERIAL_SIFIVE_CONSOLE if TTY
-	select SIFIVE_PLIC
 	select ARCH_HAS_RESET_CONTROLLER
 	select PINCTRL
 	select COMMON_CLK
@@ -56,6 +56,9 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
 $(obj)/Image.lzo: $(obj)/Image FORCE
 	$(call if_changed,lzo)
 
+$(obj)/Image.zst: $(obj)/Image FORCE
+	$(call if_changed,zstd)
+
 $(obj)/loader.bin: $(obj)/loader FORCE
 	$(call if_changed,objcopy)
 
@@ -39,6 +39,7 @@ CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_SPARSEMEM_MANUAL=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -123,6 +124,7 @@ CONFIG_MICROSEMI_PHY=y
 CONFIG_INPUT_MOUSEDEV=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_VIRTIO_CONSOLE=y
@@ -162,6 +164,7 @@ CONFIG_RPMSG_CHAR=y
 CONFIG_RPMSG_CTRL=y
 CONFIG_RPMSG_VIRTIO=y
 CONFIG_ARCH_R9A07G043=y
+CONFIG_LIBNVDIMM=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -47,6 +47,22 @@ static bool errata_probe_cmo(unsigned int stage,
 	return true;
 }
 
+static bool errata_probe_pmu(unsigned int stage,
+			     unsigned long arch_id, unsigned long impid)
+{
+	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
+		return false;
+
+	/* target-c9xx cores report arch_id and impid as 0 */
+	if (arch_id != 0 || impid != 0)
+		return false;
+
+	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+		return false;
+
+	return true;
+}
+
 static u32 thead_errata_probe(unsigned int stage,
 			      unsigned long archid, unsigned long impid)
 {
@@ -58,6 +74,9 @@ static u32 thead_errata_probe(unsigned int stage,
 	if (errata_probe_cmo(stage, archid, impid))
 		cpu_req_errata |= BIT(ERRATA_THEAD_CMO);
 
+	if (errata_probe_pmu(stage, archid, impid))
+		cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
+
 	return cpu_req_errata;
 }
 
@@ -33,7 +33,7 @@
 	.endif
 	.endm
 
-.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
+.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
 886 :
 	.option push
 	.option norvc
@@ -44,30 +44,14 @@
 	ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c
 .endm
 
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
-	__ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)
-
-.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
-				new_c_2, vendor_id_2, errata_id_2, enable_2
-886 :
-	.option push
-	.option norvc
-	.option norelax
-	\old_c
-	.option pop
-887 :
-	ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
+.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
+				new_c_2, vendor_id_2, errata_id_2, enable_2
+	ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1
 	ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
 .endm
 
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
-				CONFIG_k_1, \
-				new_c_2, vendor_id_2, errata_id_2, \
-				CONFIG_k_2) \
-	__ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, \
-				IS_ENABLED(CONFIG_k_1), \
-				new_c_2, vendor_id_2, errata_id_2, \
-				IS_ENABLED(CONFIG_k_2)
+#define __ALTERNATIVE_CFG(...)		ALTERNATIVE_CFG __VA_ARGS__
+#define __ALTERNATIVE_CFG_2(...)	ALTERNATIVE_CFG_2 __VA_ARGS__
 
 #else /* !__ASSEMBLY__ */
 
@@ -109,63 +93,44 @@
 	"887 :\n" \
 	ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
 
+#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, enable_1,	\
+				new_c_2, vendor_id_2, errata_id_2, enable_2)	\
+	__ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, errata_id_1, enable_1)	\
+	ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2)
+
+#endif /* __ASSEMBLY__ */
+
 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
 	__ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
 
-#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
-				enable_1, \
-				new_c_2, vendor_id_2, errata_id_2, \
-				enable_2) \
-	"886 :\n" \
-	".option push\n" \
-	".option norvc\n" \
-	".option norelax\n" \
-	old_c "\n" \
-	".option pop\n" \
-	"887 :\n" \
-	ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1) \
-	ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2)
-
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
-				CONFIG_k_1, \
-				new_c_2, vendor_id_2, errata_id_2, \
-				CONFIG_k_2) \
-	__ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
-				IS_ENABLED(CONFIG_k_1), \
-				new_c_2, vendor_id_2, errata_id_2, \
-				IS_ENABLED(CONFIG_k_2))
-
-#endif /* __ASSEMBLY__ */
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1,	\
+				new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2)		\
+	__ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1),	\
+				new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2))
 
 #else /* CONFIG_RISCV_ALTERNATIVE */
 #ifdef __ASSEMBLY__
 
-.macro __ALTERNATIVE_CFG old_c
+.macro ALTERNATIVE_CFG old_c
 	\old_c
 .endm
 
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
-	__ALTERNATIVE_CFG old_c
+#define _ALTERNATIVE_CFG(old_c, ...) \
+	ALTERNATIVE_CFG old_c
 
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
-				CONFIG_k_1, \
-				new_c_2, vendor_id_2, errata_id_2, \
-				CONFIG_k_2) \
-	__ALTERNATIVE_CFG old_c
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+	ALTERNATIVE_CFG old_c
 
 #else /* !__ASSEMBLY__ */
 
 #define __ALTERNATIVE_CFG(old_c) \
 	old_c "\n"
 
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
+#define _ALTERNATIVE_CFG(old_c, ...) \
 	__ALTERNATIVE_CFG(old_c)
 
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
-				CONFIG_k_1, \
-				new_c_2, vendor_id_2, errata_id_2, \
-				CONFIG_k_2) \
-	__ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+	__ALTERNATIVE_CFG(old_c)
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_RISCV_ALTERNATIVE */
@@ -193,13 +158,9 @@
  * on the following sample code and then replace ALTERNATIVE() with
  * ALTERNATIVE_2() to append its customized content.
  */
-#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1,		\
-				errata_id_1, CONFIG_k_1,		\
-				new_content_2, vendor_id_2,		\
-				errata_id_2, CONFIG_k_2)		\
-	_ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1,	\
-				errata_id_1, CONFIG_k_1,		\
-				new_content_2, vendor_id_2,		\
-				errata_id_2, CONFIG_k_2)
+#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1,	\
+				new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)	\
+	_ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1,	\
+				new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)
 
 #endif
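The comment above ALTERNATIVE_2() points at sample usage; as a hedged illustration only (the vendor and erratum identifiers below are placeholders, not real definitions), patching one default instruction two different ways for two config-gated erratas would look roughly like:

    /* Hypothetical sketch: MY_VENDOR_ID and MY_ERRATUM_* are placeholders. */
    asm volatile(ALTERNATIVE_2(
            "nop",                                  /* default code */
            "fence.i", MY_VENDOR_ID, MY_ERRATUM_A,  /* variant for erratum A */
                    CONFIG_ERRATA_MY_A,
            "fence",   MY_VENDOR_ID, MY_ERRATUM_B,  /* variant for erratum B */
                    CONFIG_ERRATA_MY_B)
            : : : "memory");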
@@ -17,6 +17,13 @@ static inline void local_flush_icache_all(void)
 
 static inline void flush_dcache_page(struct page *page)
 {
+	/*
+	 * HugeTLB pages are always fully mapped and only head page will be
+	 * set PG_dcache_clean (see comments in flush_icache_pte()).
+	 */
+	if (PageHuge(page))
+		page = compound_head(page);
+
 	if (test_bit(PG_dcache_clean, &page->flags))
 		clear_bit(PG_dcache_clean, &page->flags);
 }
@@ -6,6 +6,7 @@
 #define ASM_ERRATA_LIST_H
 
 #include <asm/alternative.h>
+#include <asm/csr.h>
 #include <asm/vendorid_list.h>
 
 #ifdef CONFIG_ERRATA_SIFIVE
@@ -17,7 +18,8 @@
 #ifdef CONFIG_ERRATA_THEAD
 #define	ERRATA_THEAD_PBMT 0
 #define	ERRATA_THEAD_CMO 1
-#define	ERRATA_THEAD_NUMBER 2
+#define	ERRATA_THEAD_PMU 2
+#define	ERRATA_THEAD_NUMBER 3
 #endif
 
 #define	CPUFEATURE_SVPBMT 0
@@ -142,6 +144,18 @@ asm volatile(ALTERNATIVE_2( \
 		"r"((unsigned long)(_start) + (_size))	\
 	: "a0")
 
+#define THEAD_C9XX_RV_IRQ_PMU			17
+#define THEAD_C9XX_CSR_SCOUNTEROF		0x5c5
+
+#define ALT_SBI_PMU_OVERFLOW(__ovl)					\
+asm volatile(ALTERNATIVE(						\
+	"csrr %0, " __stringify(CSR_SSCOUNTOVF),			\
+	"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),		\
+		THEAD_VENDOR_ID, ERRATA_THEAD_PMU,			\
+		CONFIG_ERRATA_THEAD_PMU)				\
+	: "=r" (__ovl) :						\
+	: "memory")
+
 #endif /* __ASSEMBLY__ */
 
 #endif
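The driver side of this macro appears in the riscv_pmu_sbi.c hunk near the end of this series: reading the overflow status compiles to a csrr of sscountovf and gets patched at boot to the T-Head scounterof CSR on affected parts. The call site is simply:

    unsigned long overflow;

    /* Patched via the alternatives framework; see pmu_sbi_ovf_handler(). */
    ALT_SBI_PMU_OVERFLOW(overflow);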
@@ -5,4 +5,10 @@
 #include <asm-generic/hugetlb.h>
 #include <asm/page.h>
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+	clear_bit(PG_dcache_clean, &page->flags);
+}
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+
 #endif /* _ASM_RISCV_HUGETLB_H */
@@ -59,8 +59,9 @@ enum riscv_isa_ext_id {
 	RISCV_ISA_EXT_ZIHINTPAUSE,
 	RISCV_ISA_EXT_SSTC,
 	RISCV_ISA_EXT_SVINVAL,
-	RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
+	RISCV_ISA_EXT_ID_MAX
 };
+static_assert(RISCV_ISA_EXT_ID_MAX <= RISCV_ISA_EXT_MAX);
 
 /*
  * This enum represents the logical ID for each RISC-V ISA extension static
@@ -135,4 +135,9 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
 
 #include <asm-generic/io.h>
 
+#ifdef CONFIG_MMU
+#define arch_memremap_wb(addr, size)	\
+	((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+#endif
+
 #endif /* _ASM_RISCV_IO_H */
@@ -39,6 +39,7 @@ crash_setup_regs(struct pt_regs *newregs,
 #define ARCH_HAS_KIMAGE_ARCH
 
 struct kimage_arch {
+	void *fdt; /* For CONFIG_KEXEC_FILE */
 	unsigned long fdt_addr;
 };
 
@@ -62,6 +63,10 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 				     const Elf_Shdr *relsec,
 				     const Elf_Shdr *symtab);
 #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+
+struct kimage;
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
 #endif
 
 #endif
@@ -40,8 +40,6 @@ void arch_remove_kprobe(struct kprobe *p);
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
 bool kprobe_breakpoint_handler(struct pt_regs *regs);
 bool kprobe_single_step_handler(struct pt_regs *regs);
-void __kretprobe_trampoline(void);
-void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
 
 #endif /* CONFIG_KPROBES */
 #endif /* _ASM_RISCV_KPROBES_H */
@@ -19,6 +19,8 @@ typedef struct {
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
 	cpumask_t icache_stale_mask;
+	/* A local tlb flush is needed before user execution can resume. */
+	cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;
 
@@ -123,20 +123,20 @@ extern phys_addr_t phys_ram_base;
 	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
 
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
-#define kernel_mapping_pa_to_va(y)	({					\
-	unsigned long _y = y;							\
-	(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ?			\
-		(void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) :	\
-		(void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET);	\
+#define kernel_mapping_pa_to_va(y)	({					\
+	unsigned long _y = (unsigned long)(y);					\
+	(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ?			\
+		(void *)(_y + kernel_map.va_kernel_xip_pa_offset) :		\
+		(void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET);	\
 	})
 #define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)
 
 #define linear_mapping_va_to_pa(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
 #define kernel_mapping_va_to_pa(y) ({						\
-	unsigned long _y = y;							\
-	(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ?	\
-		((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) :	\
-		((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET);	\
+	unsigned long _y = (unsigned long)(y);					\
+	(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ?	\
+		(_y - kernel_map.va_kernel_xip_pa_offset) :			\
+		(_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET);		\
 	})
 
 #define __va_to_pa_nodebug(x)	({						\
@@ -25,7 +25,11 @@ extern bool pgtable_l5_enabled;
 #define PGDIR_MASK      (~(PGDIR_SIZE - 1))
 
 /* p4d is folded into pgd in case of 4-level page table */
-#define P4D_SHIFT      39
+#define P4D_SHIFT_L3   30
+#define P4D_SHIFT_L4   39
+#define P4D_SHIFT_L5   39
+#define P4D_SHIFT      (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
+		(pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
 #define P4D_SIZE       (_AC(1, UL) << P4D_SHIFT)
 #define P4D_MASK       (~(P4D_SIZE - 1))
 
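Because the paging mode is picked at boot on RISC-V, P4D_SHIFT can no longer be a compile-time constant. A quick summary of what the new definition evaluates to, using only the values defined above:

    /*
     * Sv39 (l4 off, l5 off): P4D_SHIFT == P4D_SHIFT_L3 == 30
     * Sv48 (l4 on,  l5 off): P4D_SHIFT == P4D_SHIFT_L4 == 39
     * Sv57 (l5 on):          P4D_SHIFT == P4D_SHIFT_L5 == 39
     */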
@@ -415,9 +415,12 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
 	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
 	 */
-	local_flush_tlb_page(address);
+	flush_tlb_page(vma, address);
 }
 
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp)
 {
@@ -327,4 +327,9 @@ int sbi_err_map_linux_errno(int err);
 static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
 static inline void sbi_init(void) {}
 #endif /* CONFIG_RISCV_SBI */
+
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+unsigned long riscv_cached_marchid(unsigned int cpu_id);
+unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+
 #endif /* _ASM_RISCV_SBI_H */
@@ -22,6 +22,24 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
+
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()			do { } while (0)
 #define local_flush_tlb_page(addr)		do { } while (0)
@@ -10,7 +10,7 @@
 
 /*
  * All systems with an MMU have a VDSO, but systems without an MMU don't
- * support shared libraries and therefor don't have one.
+ * support shared libraries and therefore don't have one.
  */
 #ifdef CONFIG_MMU
 
@@ -1,4 +1,22 @@
 #ifndef _ASM_RISCV_VMALLOC_H
 #define _ASM_RISCV_VMALLOC_H
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+	return true;
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+	return true;
+}
+
+#endif
+
 #endif /* _ASM_RISCV_VMALLOC_H */
@@ -15,19 +15,23 @@ struct ucontext {
 	struct ucontext	*uc_link;
 	stack_t		uc_stack;
 	sigset_t	uc_sigmask;
-	/* There's some padding here to allow sigset_t to be expanded in the
+	/*
+	 * There's some padding here to allow sigset_t to be expanded in the
 	 * future.  Though this is unlikely, other architectures put uc_sigmask
 	 * at the end of this structure and explicitly state it can be
-	 * expanded, so we didn't want to box ourselves in here. */
+	 * expanded, so we didn't want to box ourselves in here.
+	 */
 	__u8		  __unused[1024 / 8 - sizeof(sigset_t)];
-	/* We can't put uc_sigmask at the end of this structure because we need
+	/*
+	 * We can't put uc_sigmask at the end of this structure because we need
 	 * to be able to expand sigcontext in the future.  For example, the
 	 * vector ISA extension will almost certainly add ISA state.  We want
 	 * to ensure all user-visible ISA state can be saved and restored via a
 	 * ucontext, so we're putting this at the end in order to allow for
 	 * infinite extensibility.  Since we know this will be extended and we
 	 * assume sigset_t won't be extended an extreme amount, we're
-	 * prioritizing this. */
+	 * prioritizing this.
+	 */
 	struct sigcontext uc_mcontext;
 };
 
@@ -81,6 +81,7 @@ obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_KEXEC_CORE)	+= kexec_relocate.o crash_save_regs.o machine_kexec.o
 obj-$(CONFIG_KEXEC_FILE)	+= elf_kexec.o machine_kexec_file.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_CRASH_CORE)	+= crash_core.o
 
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 
@@ -70,8 +70,6 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
 	return -1;
 }
 
-#ifdef CONFIG_PROC_FS
-
 struct riscv_cpuinfo {
 	unsigned long mvendorid;
 	unsigned long marchid;
@@ -79,6 +77,30 @@ struct riscv_cpuinfo {
 };
 static DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
+{
+	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+	return ci->mvendorid;
+}
+EXPORT_SYMBOL(riscv_cached_mvendorid);
+
+unsigned long riscv_cached_marchid(unsigned int cpu_id)
+{
+	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+	return ci->marchid;
+}
+EXPORT_SYMBOL(riscv_cached_marchid);
+
+unsigned long riscv_cached_mimpid(unsigned int cpu_id)
+{
+	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+	return ci->mimpid;
+}
+EXPORT_SYMBOL(riscv_cached_mimpid);
+
 static int riscv_cpuinfo_starting(unsigned int cpu)
 {
 	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
@@ -113,7 +135,9 @@ static int __init riscv_cpuinfo_init(void)
 
 	return 0;
 }
-device_initcall(riscv_cpuinfo_init);
+arch_initcall(riscv_cpuinfo_init);
+
+#ifdef CONFIG_PROC_FS
 
 #define __RISCV_ISA_EXT_DATA(UPROP, EXTID) \
 	{	\
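These accessors return values cached once at hart bring-up, so callers can identify the CPU on a hot path without an SBI call. The riscv_pmu_sbi.c hunk later in this merge uses exactly this pattern to detect T-Head C9xx cores:

    /* Consumer pattern, as used in pmu_sbi_setup_irqs() below. */
    if (riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
        riscv_cached_marchid(0) == 0 && riscv_cached_mimpid(0) == 0) {
            /* T-Head C9xx: fall back to the vendor PMU-overflow erratum. */
    }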
@@ -9,6 +9,7 @@
 #include <linux/bitmap.h>
 #include <linux/ctype.h>
 #include <linux/libfdt.h>
+#include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <asm/alternative.h>
@@ -68,21 +69,38 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
 }
 EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
 
+static bool riscv_isa_extension_check(int id)
+{
+	switch (id) {
+	case RISCV_ISA_EXT_ZICBOM:
+		if (!riscv_cbom_block_size) {
+			pr_err("Zicbom detected in ISA string, but no cbom-block-size found\n");
+			return false;
+		} else if (!is_power_of_2(riscv_cbom_block_size)) {
+			pr_err("cbom-block-size present, but is not a power-of-2\n");
+			return false;
+		}
+		return true;
+	}
+
+	return true;
+}
+
 void __init riscv_fill_hwcap(void)
 {
 	struct device_node *node;
 	const char *isa;
 	char print_str[NUM_ALPHA_EXTS + 1];
 	int i, j, rc;
-	static unsigned long isa2hwcap[256] = {0};
+	unsigned long isa2hwcap[26] = {0};
 	unsigned long hartid;
 
-	isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
-	isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M;
-	isa2hwcap['a'] = isa2hwcap['A'] = COMPAT_HWCAP_ISA_A;
-	isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F;
-	isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D;
-	isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C;
+	isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
+	isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
+	isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
+	isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
+	isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
+	isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
 
 	elf_hwcap = 0;
 
@@ -189,15 +207,20 @@ void __init riscv_fill_hwcap(void)
 #define SET_ISA_EXT_MAP(name, bit)						\
 			do {							\
 				if ((ext_end - ext == sizeof(name) - 1) &&	\
-				     !memcmp(ext, name, sizeof(name) - 1))	\
+				     !memcmp(ext, name, sizeof(name) - 1) &&	\
+				     riscv_isa_extension_check(bit))		\
 					set_bit(bit, this_isa);			\
 			} while (false)						\
 
 			if (unlikely(ext_err))
 				continue;
 			if (!ext_long) {
-				this_hwcap |= isa2hwcap[(unsigned char)(*ext)];
-				set_bit(*ext - 'a', this_isa);
+				int nr = *ext - 'a';
+
+				if (riscv_isa_extension_check(nr)) {
+					this_hwcap |= isa2hwcap[nr];
+					set_bit(nr, this_isa);
+				}
 			} else {
 				SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
 				SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
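The net effect of riscv_isa_extension_check() is that a device tree advertising Zicbom without a sane cbom-block-size never gets the extension bit set, so later feature queries fail cleanly instead of cache-management alternatives being patched in with a bogus block size. A hedged sketch of a downstream consumer:

    /* Sketch: Zicbom is only visible here if the block size validated. */
    if (riscv_isa_extension_available(NULL, ZICBOM))
            ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);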
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/crash_core.h>
+#include <linux/pagemap.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+	VMCOREINFO_NUMBER(VA_BITS);
+	VMCOREINFO_NUMBER(phys_ram_base);
+
+	vmcoreinfo_append_str("NUMBER(PAGE_OFFSET)=0x%lx\n", PAGE_OFFSET);
+	vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
+	vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
+	vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
+	vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
+#ifdef CONFIG_64BIT
+	vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
+	vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
+#endif
+	vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+}
@@ -21,6 +21,18 @@
 #include <linux/memblock.h>
 #include <asm/setup.h>
 
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+	kvfree(image->arch.fdt);
+	image->arch.fdt = NULL;
+
+	vfree(image->elf_headers);
+	image->elf_headers = NULL;
+	image->elf_headers_sz = 0;
+
+	return kexec_image_post_load_cleanup_default(image);
+}
+
 static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
 				struct kexec_elf_info *elf_info, unsigned long old_pbase,
 				unsigned long new_pbase)
@@ -298,6 +310,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
 		pr_err("Error add DTB kbuf ret=%d\n", ret);
 		goto out_free_fdt;
 	}
+	/* Cache the fdt buffer address for memory cleanup */
+	image->arch.fdt = fdt;
 	pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem);
 	goto out;
 
@@ -248,7 +248,7 @@ ret_from_syscall_rejected:
 	andi t0, t0, _TIF_SYSCALL_WORK
 	bnez t0, handle_syscall_trace_exit
 
-ret_from_exception:
+SYM_CODE_START_NOALIGN(ret_from_exception)
 	REG_L s0, PT_STATUS(sp)
 	csrc CSR_STATUS, SR_IE
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -262,13 +262,13 @@ ret_from_exception:
 	andi s0, s0, SR_SPP
 #endif
 	bnez s0, resume_kernel
+SYM_CODE_END(ret_from_exception)
 
-resume_userspace:
 	/* Interrupts must be disabled here so flags are checked atomically */
 	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
 	andi s1, s0, _TIF_WORK_MASK
-	bnez s1, work_pending
-
+	bnez s1, resume_userspace_slow
+resume_userspace:
 #ifdef CONFIG_CONTEXT_TRACKING_USER
 	call user_enter_callable
 #endif
@@ -368,19 +368,12 @@ resume_kernel:
 	j restore_all
 #endif
 
-work_pending:
+resume_userspace_slow:
 	/* Enter slow path for supplementary processing */
-	la ra, ret_from_exception
-	andi s1, s0, _TIF_NEED_RESCHED
-	bnez s1, work_resched
-work_notifysig:
-	/* Handle pending signals and notify-resume requests */
-	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
 	move a0, sp /* pt_regs */
 	move a1, s0 /* current_thread_info->flags */
-	tail do_notify_resume
-work_resched:
-	tail schedule
+	call do_work_pending
+	j resume_userspace
 
 /* Slow paths for ptrace. */
 handle_syscall_trace_enter:
@@ -15,8 +15,8 @@
 
 	.macro SAVE_ABI_STATE
 	addi	sp, sp, -16
-	sd	s0, 0(sp)
-	sd	ra, 8(sp)
+	REG_S	s0, 0*SZREG(sp)
+	REG_S	ra, 1*SZREG(sp)
 	addi	s0, sp, 16
 	.endm
 
@@ -25,24 +25,26 @@
 	 * register if a0 was not saved.
 	 */
 	.macro SAVE_RET_ABI_STATE
-	addi	sp, sp, -32
-	sd	s0, 16(sp)
-	sd	ra, 24(sp)
-	sd	a0, 8(sp)
-	addi	s0, sp, 32
+	addi	sp, sp, -4*SZREG
+	REG_S	s0, 2*SZREG(sp)
+	REG_S	ra, 3*SZREG(sp)
+	REG_S	a0, 1*SZREG(sp)
+	REG_S	a1, 0*SZREG(sp)
+	addi	s0, sp, 4*SZREG
 	.endm
 
 	.macro RESTORE_ABI_STATE
-	ld	ra, 8(sp)
-	ld	s0, 0(sp)
+	REG_L	ra, 1*SZREG(sp)
+	REG_L	s0, 0*SZREG(sp)
 	addi	sp, sp, 16
 	.endm
 
 	.macro RESTORE_RET_ABI_STATE
-	ld	ra, 24(sp)
-	ld	s0, 16(sp)
-	ld	a0, 8(sp)
-	addi	sp, sp, 32
+	REG_L	ra, 3*SZREG(sp)
+	REG_L	s0, 2*SZREG(sp)
+	REG_L	a0, 1*SZREG(sp)
+	REG_L	a1, 0*SZREG(sp)
+	addi	sp, sp, 4*SZREG
 	.endm
 
 ENTRY(ftrace_stub)
@@ -71,9 +73,9 @@ ENTRY(return_to_handler)
 	mv	a0, t6
 #endif
 	call	ftrace_return_to_handler
-	mv	a1, a0
+	mv	a2, a0
 	RESTORE_RET_ABI_STATE
-	jalr	a1
+	jalr	a2
 ENDPROC(return_to_handler)
 #endif
 
@@ -82,16 +84,16 @@ ENTRY(MCOUNT_NAME)
 	la	t4, ftrace_stub
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	la	t0, ftrace_graph_return
-	ld	t1, 0(t0)
+	REG_L	t1, 0(t0)
 	bne	t1, t4, do_ftrace_graph_caller
 
 	la	t3, ftrace_graph_entry
-	ld	t2, 0(t3)
+	REG_L	t2, 0(t3)
 	la	t6, ftrace_graph_entry_stub
 	bne	t2, t6, do_ftrace_graph_caller
 #endif
 	la	t3, ftrace_trace_function
-	ld	t5, 0(t3)
+	REG_L	t5, 0(t3)
 	bne	t5, t4, do_trace
 	ret
 
@@ -101,10 +103,10 @@ ENTRY(MCOUNT_NAME)
  * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
  */
 do_ftrace_graph_caller:
-	addi	a0, s0, -8
+	addi	a0, s0, -SZREG
 	mv	a1, ra
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
-	ld	a2, -16(s0)
+	REG_L	a2, -2*SZREG(s0)
 #endif
 	SAVE_ABI_STATE
 	call	prepare_ftrace_return
@@ -117,7 +119,7 @@ do_ftrace_graph_caller:
  * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
  */
 do_trace:
-	ld	a1, -8(s0)
+	REG_L	a1, -SZREG(s0)
 	mv	a0, ra
 
 	SAVE_ABI_STATE
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_KPROBES)		+= kprobes.o decode-insn.o simulate-insn.o
-obj-$(CONFIG_KPROBES)		+= kprobes_trampoline.o
+obj-$(CONFIG_RETHOOK)		+= rethook.o rethook_trampoline.o
 obj-$(CONFIG_KPROBES_ON_FTRACE)	+= ftrace.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o decode-insn.o simulate-insn.o
 CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
@@ -345,19 +345,6 @@ int __init arch_populate_kprobe_blacklist(void)
 	return ret;
 }
 
-void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
-{
-	return (void *)kretprobe_trampoline_handler(regs, NULL);
-}
-
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
-				      struct pt_regs *regs)
-{
-	ri->ret_addr = (kprobe_opcode_t *)regs->ra;
-	ri->fp = NULL;
-	regs->ra = (unsigned long) &__kretprobe_trampoline;
-}
-
 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
 {
 	return 0;
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic return hook for riscv.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/rethook.h>
+#include "rethook.h"
+
+/* This is called from arch_rethook_trampoline() */
+unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs)
+{
+	return rethook_trampoline_handler(regs, regs->s0);
+}
+
+NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
+
+void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount)
+{
+	rhn->ret_addr = regs->ra;
+	rhn->frame = regs->s0;
+
+	/* replace return addr with trampoline */
+	regs->ra = (unsigned long)arch_rethook_trampoline;
+}
+
+NOKPROBE_SYMBOL(arch_rethook_prepare);
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __RISCV_RETHOOK_H
+#define __RISCV_RETHOOK_H
+
+unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs);
+void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount);
+
+#endif
@@ -75,13 +75,13 @@
 	REG_L x31, PT_T6(sp)
 	.endm
 
-ENTRY(__kretprobe_trampoline)
+ENTRY(arch_rethook_trampoline)
 	addi sp, sp, -(PT_SIZE_ON_STACK)
 	save_all_base_regs
 
 	move a0, sp /* pt_regs */
 
-	call trampoline_probe_handler
+	call arch_rethook_trampoline_callback
 
 	/* use the result as the return-address */
 	move ra, a0
@@ -90,4 +90,4 @@ ENTRY(__kretprobe_trampoline)
 	addi sp, sp, PT_SIZE_ON_STACK
 
 	ret
-ENDPROC(__kretprobe_trampoline)
+ENDPROC(arch_rethook_trampoline)
@@ -313,19 +313,27 @@ static void do_signal(struct pt_regs *regs)
 }
 
 /*
- * notification of userspace execution resumption
- * - triggered by the _TIF_WORK_MASK flags
+ * Handle any pending work on the resume-to-userspace path, as indicated by
+ * _TIF_WORK_MASK.  Entered from assembly with IRQs off.
  */
-asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
-					   unsigned long thread_info_flags)
+asmlinkage __visible void do_work_pending(struct pt_regs *regs,
+					  unsigned long thread_info_flags)
 {
-	if (thread_info_flags & _TIF_UPROBE)
-		uprobe_notify_resume(regs);
-
-	/* Handle pending signal delivery */
-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
-		do_signal(regs);
-
-	if (thread_info_flags & _TIF_NOTIFY_RESUME)
-		resume_user_mode_work(regs);
+	do {
+		if (thread_info_flags & _TIF_NEED_RESCHED) {
+			schedule();
+		} else {
+			local_irq_enable();
+			if (thread_info_flags & _TIF_UPROBE)
+				uprobe_notify_resume(regs);
+			/* Handle pending signal delivery */
+			if (thread_info_flags & (_TIF_SIGPENDING |
+						 _TIF_NOTIFY_SIGNAL))
+				do_signal(regs);
+			if (thread_info_flags & _TIF_NOTIFY_RESUME)
+				resume_user_mode_work(regs);
+		}
+		local_irq_disable();
+		thread_info_flags = read_thread_flags();
+	} while (thread_info_flags & _TIF_WORK_MASK);
 }
@@ -16,6 +16,8 @@
 
 #ifdef CONFIG_FRAME_POINTER
 
+extern asmlinkage void ret_from_exception(void);
+
 void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 			     bool (*fn)(void *, unsigned long), void *arg)
 {
@@ -58,7 +60,14 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 		} else {
 			fp = frame->fp;
 			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
-						   (unsigned long *)(fp - 8));
+						   &frame->ra);
+			if (pc == (unsigned long)ret_from_exception) {
+				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+					break;
+
+				pc = ((struct pt_regs *)sp)->epc;
+				fp = ((struct pt_regs *)sp)->s0;
+			}
 		}
 
 	}
@@ -208,18 +208,18 @@ int is_valid_bugaddr(unsigned long pc)
 #endif /* CONFIG_GENERIC_BUG */
 
 #ifdef CONFIG_VMAP_STACK
+/*
+ * Extra stack space that allows us to provide panic messages when the kernel
+ * has overflowed its stack.
+ */
 static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
 		overflow_stack)__aligned(16);
 /*
- * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used
- * to get per-cpu overflow stack(get_overflow_stack).
+ * A temporary stack for use by handle_kernel_stack_overflow.  This is used so
+ * we can call into C code to get the per-hart overflow stack.  Usage of this
+ * stack must be protected by spin_shadow_stack.
  */
-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)];
-asmlinkage unsigned long get_overflow_stack(void)
-{
-	return (unsigned long)this_cpu_ptr(overflow_stack) +
-		OVERFLOW_STACK_SIZE;
-}
+long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
 
 /*
  * A pseudo spinlock to protect the shadow stack from being used by multiple
@@ -230,6 +230,12 @@ asmlinkage unsigned long get_overflow_stack(void)
  */
 unsigned long spin_shadow_stack;
 
+asmlinkage unsigned long get_overflow_stack(void)
+{
+	return (unsigned long)this_cpu_ptr(overflow_stack) +
+		OVERFLOW_STACK_SIZE;
+}
+
 asmlinkage void handle_bad_stack(struct pt_regs *regs)
 {
 	unsigned long tsk_stk = (unsigned long)current->stack;
@@ -13,6 +13,8 @@ obj-y += extable.o
 obj-$(CONFIG_MMU) += fault.o pageattr.o
 obj-y += cacheflush.o
 obj-y += context.o
+obj-y += pgtable.o
+obj-y += pmem.o
 
 ifeq ($(CONFIG_MMU),y)
 obj-$(CONFIG_SMP) += tlbflush.o
@@ -83,6 +83,13 @@ void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
 
+	/*
+	 * HugeTLB pages are always fully mapped, so only setting head page's
+	 * PG_dcache_clean flag is enough.
+	 */
+	if (PageHuge(page))
+		page = compound_head(page);
+
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		flush_icache_all();
 }
@@ -196,6 +196,16 @@ switch_mm_fast:
 
 	if (need_flush_tlb)
 		local_flush_tlb_all();
+#ifdef CONFIG_SMP
+	else {
+		cpumask_t *mask = &mm->context.tlb_stale_mask;
+
+		if (cpumask_test_cpu(cpu, mask)) {
+			cpumask_clear_cpu(cpu, mask);
+			local_flush_tlb_all_asid(cntx & asid_mask);
+		}
+	}
+#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
@@ -672,10 +672,11 @@ void __init create_pgd_mapping(pgd_t *pgdp,
 static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
 {
 	/* Upgrade to PMD_SIZE mappings whenever possible */
-	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
-		return PAGE_SIZE;
+	base &= PMD_SIZE - 1;
+	if (!base && size >= PMD_SIZE)
+		return PMD_SIZE;
 
-	return PMD_SIZE;
+	return PAGE_SIZE;
 }
 
 #ifdef CONFIG_XIP_KERNEL
@@ -926,15 +927,15 @@ static void __init pt_ops_set_early(void)
  */
 static void __init pt_ops_set_fixmap(void)
 {
-	pt_ops.alloc_pte = kernel_mapping_pa_to_va((uintptr_t)alloc_pte_fixmap);
-	pt_ops.get_pte_virt = kernel_mapping_pa_to_va((uintptr_t)get_pte_virt_fixmap);
+	pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap);
+	pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap);
 #ifndef __PAGETABLE_PMD_FOLDED
-	pt_ops.alloc_pmd = kernel_mapping_pa_to_va((uintptr_t)alloc_pmd_fixmap);
-	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va((uintptr_t)get_pmd_virt_fixmap);
-	pt_ops.alloc_pud = kernel_mapping_pa_to_va((uintptr_t)alloc_pud_fixmap);
-	pt_ops.get_pud_virt = kernel_mapping_pa_to_va((uintptr_t)get_pud_virt_fixmap);
-	pt_ops.alloc_p4d = kernel_mapping_pa_to_va((uintptr_t)alloc_p4d_fixmap);
-	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va((uintptr_t)get_p4d_virt_fixmap);
+	pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap);
+	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap);
+	pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap);
+	pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap);
+	pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap);
+	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap);
 #endif
 }
 
@@ -1110,9 +1111,9 @@ static void __init setup_vm_final(void)
 		if (end >= __pa(PAGE_OFFSET) + memory_limit)
 			end = __pa(PAGE_OFFSET) + memory_limit;
 
-		map_size = best_map_size(start, end - start);
 		for (pa = start; pa < end; pa += map_size) {
 			va = (uintptr_t)__va(pa);
+			map_size = best_map_size(pa, end - pa);
 
 			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
 					   pgprot_from_va(va));
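Why the per-chunk call matters: a linear-mapping region whose start is only page-aligned can now switch up to 2 MiB mappings once the cursor reaches a PMD boundary, instead of being stuck at 4 KiB for the whole region. A hedged worked example for a region [0x80201000, 0x80800000):

    /* Illustrative trace of the rewritten loop (PMD_SIZE == 2 MiB):      */
    /* pa = 0x80201000: not PMD-aligned       -> PAGE_SIZE (4 KiB)        */
    /* ... 4 KiB steps until the 2 MiB boundary at 0x80400000 ...         */
    /* pa = 0x80400000: aligned, 4 MiB left   -> PMD_SIZE (2 MiB)         */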
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/pgalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/pgtable.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+	return 0;
+}
+
+void p4d_clear_huge(p4d_t *p4d)
+{
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);
+
+	set_pud(pud, new_pud);
+	return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+	if (!pud_leaf(READ_ONCE(*pud)))
+		return 0;
+	pud_clear(pud);
+	return 1;
+}
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+	pmd_t *pmd = pud_pgtable(*pud);
+	int i;
+
+	pud_clear(pud);
+
+	flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		if (!pmd_none(pmd[i])) {
+			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);
+
+			pte_free_kernel(NULL, pte);
+		}
+	}
+
+	pmd_free(NULL, pmd);
+
+	return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);
+
+	set_pmd(pmd, new_pmd);
+	return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+	if (!pmd_leaf(READ_ONCE(*pmd)))
+		return 0;
+	pmd_clear(pmd);
+	return 1;
+}
+
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+
+	pmd_clear(pmd);
+
+	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+	pte_free_kernel(NULL, pte);
+	return 1;
+}
+
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -22,7 +22,7 @@ EXPORT_SYMBOL(__virt_to_phys);
 phys_addr_t __phys_addr_symbol(unsigned long x)
 {
 	unsigned long kernel_start = kernel_map.virt_addr;
-	unsigned long kernel_end = (unsigned long)_end;
+	unsigned long kernel_end = kernel_start + kernel_map.size;
 
 	/*
 	 * Boundary checking aginst the kernel image mapping.
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/libnvdimm.h>
+
+#include <asm/cacheflush.h>
+
+void arch_wb_cache_pmem(void *addr, size_t size)
+{
+	ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
+void arch_invalidate_pmem(void *addr, size_t size)
+{
+	ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
@@ -5,23 +5,7 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
-}
+#include <asm/tlbflush.h>
 
 void flush_tlb_all(void)
 {
@@ -31,6 +15,7 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -44,6 +29,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);
 
+		/*
+		 * TLB will be immediately flushed on harts concurrently
+		 * executing this MM context. TLB flush on other harts
+		 * is deferred until this MM context migrates there.
+		 */
+		cpumask_setall(pmask);
+		cpumask_clear_cpu(cpuid, pmask);
+		cpumask_andnot(pmask, pmask, cmask);
+
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 		} else if (size <= stride) {
@@ -538,31 +538,14 @@ config TI_PRUSS_INTC
 	  different processors within the SoC.
 
 config RISCV_INTC
-	bool "RISC-V Local Interrupt Controller"
+	bool
 	depends on RISCV
-	default y
-	help
-	   This enables support for the per-HART local interrupt controller
-	   found in standard RISC-V systems.  The per-HART local interrupt
-	   controller handles timer interrupts, software interrupts, and
-	   hardware interrupts. Without a per-HART local interrupt controller,
-	   a RISC-V system will be unable to handle any interrupts.
-
-	   If you don't know what to do here, say Y.
 
 config SIFIVE_PLIC
-	bool "SiFive Platform-Level Interrupt Controller"
+	bool
 	depends on RISCV
 	select IRQ_DOMAIN_HIERARCHY
 	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
-	help
-	   This enables support for the PLIC chip found in SiFive (and
-	   potentially other) RISC-V systems.  The PLIC controls devices
-	   interrupts and connects them to each core's local interrupt
-	   controller.  Aside from timer and software interrupts, all other
-	   interrupt sources are subordinate to the PLIC.
-
-	   If you don't know what to do here, say Y.
 
 config EXYNOS_IRQ_COMBINER
 	bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
@@ -20,6 +20,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/sched/clock.h>
 
+#include <asm/errata_list.h>
 #include <asm/sbi.h>
 #include <asm/hwcap.h>
 
@@ -47,6 +48,8 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
  * per_cpu in case of harts with different pmu counters
  */
 static union sbi_pmu_ctr_info *pmu_ctr_list;
+static bool riscv_pmu_use_irq;
+static unsigned int riscv_pmu_irq_num;
 static unsigned int riscv_pmu_irq;
 
 struct sbi_pmu_event_data {
@@ -580,7 +583,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
 	event = cpu_hw_evt->events[fidx];
 	if (!event) {
-		csr_clear(CSR_SIP, SIP_LCOFIP);
+		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
 		return IRQ_NONE;
 	}
 
@@ -588,13 +591,13 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 	pmu_sbi_stop_hw_ctrs(pmu);
 
 	/* Overflow status register should only be read after counter are stopped */
-	overflow = csr_read(CSR_SSCOUNTOVF);
+	ALT_SBI_PMU_OVERFLOW(overflow);
 
 	/*
 	 * Overflow interrupt pending bit should only be cleared after stopping
 	 * all the counters to avoid any race condition.
 	 */
-	csr_clear(CSR_SIP, SIP_LCOFIP);
+	csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
 
 	/* No overflow bit is set */
 	if (!overflow)
@@ -661,10 +664,10 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	/* Stop all the counters so that they can be enabled from perf */
 	pmu_sbi_stop_all(pmu);
 
-	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+	if (riscv_pmu_use_irq) {
 		cpu_hw_evt->irq = riscv_pmu_irq;
-		csr_clear(CSR_IP, BIT(RV_IRQ_PMU));
-		csr_set(CSR_IE, BIT(RV_IRQ_PMU));
+		csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
+		csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
 		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
 	}
 
@@ -673,9 +676,9 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 
 static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
 {
-	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+	if (riscv_pmu_use_irq) {
 		disable_percpu_irq(riscv_pmu_irq);
-		csr_clear(CSR_IE, BIT(RV_IRQ_PMU));
+		csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
 	}
 
 	/* Disable all counters access for user mode now */
@@ -691,7 +694,18 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
 	struct device_node *cpu, *child;
 	struct irq_domain *domain = NULL;
 
-	if (!riscv_isa_extension_available(NULL, SSCOFPMF))
+	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+		riscv_pmu_irq_num = RV_IRQ_PMU;
+		riscv_pmu_use_irq = true;
+	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
+		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
+		   riscv_cached_marchid(0) == 0 &&
+		   riscv_cached_mimpid(0) == 0) {
+		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
+		riscv_pmu_use_irq = true;
+	}
+
+	if (!riscv_pmu_use_irq)
 		return -EOPNOTSUPP;
 
 	for_each_of_cpu_node(cpu) {
@@ -713,7 +727,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
 		return -ENODEV;
 	}
 
-	riscv_pmu_irq = irq_create_mapping(domain, RV_IRQ_PMU);
+	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
 	if (!riscv_pmu_irq) {
 		pr_err("Failed to map PMU interrupt for node\n");
 		return -ENODEV;
@@ -38,7 +38,6 @@ arch/powerpc/kernel/entry_64.o
 arch/powerpc/kernel/fpu.o
 arch/powerpc/kernel/vector.o
 arch/powerpc/kernel/prom_init.o
-arch/riscv/kernel/head.o
 arch/s390/kernel/head64.o
 arch/sh/kernel/head_32.o
 arch/sparc/kernel/head_32.o