MIPS: uaccess: Take EVA into account in [__]clear_user
__clear_user() (and clear_user(), which uses it) always accesses the user mode address space, which results in EVA store instructions when EVA is enabled even if the current user address limit is KERNEL_DS. Fix this by adding a new symbol __bzero_kernel for the normal kernel address space bzero in EVA mode, and call that from __clear_user() if eva_kernel_access().

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Leonid Yegoshin <leonid.yegoshin@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10844/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
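For context, the failure mode described above typically shows up when kernel code widens the user address limit and then clears a kernel buffer through the user-access helpers. The sketch below is illustrative only and not part of the patch: the wrapper function is hypothetical, while get_fs()/set_fs(), KERNEL_DS and clear_user() are the kernel interfaces of this era. Before this change, __clear_user() always jumped to __bzero, which on a CONFIG_EVA kernel is built from EVA (user-view) store instructions, so the kernel pointer below would be accessed through the wrong address space.

    #include <linux/uaccess.h>

    /*
     * Hypothetical helper, for illustration only: clear a kernel buffer
     * through the user-access path, as callers running under
     * set_fs(KERNEL_DS) effectively did.
     */
    static void zero_kernel_buffer(void *kbuf, size_t len)
    {
            mm_segment_t old_fs = get_fs();

            set_fs(KERNEL_DS);      /* address limit now covers kernel addresses */

            /*
             * Pre-patch: this reaches __bzero, whose stores use the EVA
             * user address view even though kbuf is a kernel pointer.
             * Post-patch: eva_kernel_access() is true here, so
             * __clear_user() calls __bzero_kernel (normal kernel stores).
             */
            (void)clear_user((__force void __user *)kbuf, len);

            set_fs(old_fs);
    }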
commit d6a428fb58
parent 6f06a2c45d
arch/mips/include/asm/uaccess.h:
@@ -1235,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size)
 {
 	__kernel_size_t res;

-	might_fault();
-	__asm__ __volatile__(
-		"move\t$4, %1\n\t"
-		"move\t$5, $0\n\t"
-		"move\t$6, %2\n\t"
-		__MODULE_JAL(__bzero)
-		"move\t%0, $6"
-		: "=r" (res)
-		: "r" (addr), "r" (size)
-		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	if (eva_kernel_access()) {
+		__asm__ __volatile__(
+			"move\t$4, %1\n\t"
+			"move\t$5, $0\n\t"
+			"move\t$6, %2\n\t"
+			__MODULE_JAL(__bzero_kernel)
+			"move\t%0, $6"
+			: "=r" (res)
+			: "r" (addr), "r" (size)
+			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	} else {
+		might_fault();
+		__asm__ __volatile__(
+			"move\t$4, %1\n\t"
+			"move\t$5, $0\n\t"
+			"move\t$6, %2\n\t"
+			__MODULE_JAL(__bzero)
+			"move\t%0, $6"
+			: "=r" (res)
+			: "r" (addr), "r" (size)
+			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	}

 	return res;
 }
arch/mips/kernel/mips_ksyms.c:
@@ -17,6 +17,7 @@
 #include <asm/fpu.h>
 #include <asm/msa.h>

+extern void *__bzero_kernel(void *__s, size_t __count);
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_kernel_nocheck_asm(char *__to,
 	const char *__from, long __len);
@@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva);
 EXPORT_SYMBOL(__copy_in_user_eva);
 EXPORT_SYMBOL(__copy_to_user_eva);
 EXPORT_SYMBOL(__copy_user_inatomic_eva);
+EXPORT_SYMBOL(__bzero_kernel);
 #endif
 EXPORT_SYMBOL(__bzero);
 EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
arch/mips/lib/memset.S:
@@ -283,6 +283,8 @@ LEAF(memset)
 1:
 #ifndef CONFIG_EVA
 FEXPORT(__bzero)
+#else
+FEXPORT(__bzero_kernel)
+#endif
 	__BUILD_BZERO LEGACY_MODE
