From 80c7889de7a8246e44a9632a2b7d15b41ab3fe41 Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Wed, 8 Nov 2023 14:12:01 +0800
Subject: [PATCH 01/13] LoongArch: Support PREEMPT_DYNAMIC with static keys

Since commit 4e90d0522a688371402c ("riscv: support PREEMPT_DYNAMIC with
static keys"), the infrastructure is complete and we can simply select
HAVE_PREEMPT_DYNAMIC_KEY to enable PREEMPT_DYNAMIC on LoongArch, because
we already support static keys.

Signed-off-by: Huacai Chen
---
 arch/loongarch/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index e14396a2ddcb..86fdc8c55608 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -135,6 +135,7 @@ config LOONGARCH
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_DYNAMIC_KEY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RETHOOK
 	select HAVE_RSEQ

From 21eb2bfe2748b238f06983e4308cb30611371605 Mon Sep 17 00:00:00 2001
From: WANG Rui
Date: Wed, 8 Nov 2023 14:12:07 +0800
Subject: [PATCH 02/13] LoongArch: Disable modules from accessing external
 data directly

The distance between vmlinux and a module can be too large for
PC-relative addressing to reach external data directly, so modules must
access such data through the GOT instead. When compiling modules with
GCC, the corresponding option `-mdirect-extern-access` is disabled by
default. Clang's equivalent, `-fdirect-access-external-data`, is enabled
by default, so it needs to be explicitly disabled.

Signed-off-by: WANG Rui
Signed-off-by: Huacai Chen
---
 arch/loongarch/Makefile | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index fb0fada43197..c31a62521e6b 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -68,6 +68,8 @@ LDFLAGS_vmlinux		+= -static -n -nostdlib
 ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
 cflags-y		+= $(call cc-option,-mexplicit-relocs)
 KBUILD_CFLAGS_KERNEL	+= $(call cc-option,-mdirect-extern-access)
+KBUILD_AFLAGS_MODULE	+= $(call cc-option,-fno-direct-access-external-data)
+KBUILD_CFLAGS_MODULE	+= $(call cc-option,-fno-direct-access-external-data)
 KBUILD_AFLAGS_MODULE	+= $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
 KBUILD_CFLAGS_MODULE	+= $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
 else
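[Editor's note] To illustrate what the two code-generation strategies look
like, consider a module reading an external kernel symbol. The instruction
sequences in the comment are an illustrative assumption about the generated
code, not output quoted from either compiler:

/* A module reading an external kernel variable: */
extern unsigned long jiffies;

unsigned long read_jiffies(void)
{
	return jiffies;
}

/*
 * With direct external data access (Clang's default), the compiler can
 * emit a purely PC-relative sequence, which only reaches symbols within
 * roughly +/-2GB of the code:
 *
 *	pcalau12i	$t0, %pc_hi20(jiffies)
 *	ld.d		$a0, $t0, %pc_lo12(jiffies)
 *
 * With -fno-direct-access-external-data, the symbol's address is first
 * loaded from the module's own GOT, so any distance to vmlinux works:
 *
 *	pcalau12i	$t0, %got_pc_hi20(jiffies)
 *	ld.d		$t0, $t0, %got_pc_lo12(jiffies)
 *	ld.d		$a0, $t0, 0
 */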
From 71945968d8b128c955204baa33ec03bdd91bdc26 Mon Sep 17 00:00:00 2001
From: Nathan Chancellor
Date: Wed, 8 Nov 2023 14:12:15 +0800
Subject: [PATCH 03/13] LoongArch: Mark __percpu functions as always inline

A recent change to the optimization pipeline in LLVM reveals some
fragility around the inlining of LoongArch's __percpu functions, which
manifests as a BUILD_BUG() failure:

  In file included from kernel/sched/build_policy.c:17:
  In file included from include/linux/sched/cputime.h:5:
  In file included from include/linux/sched/signal.h:5:
  In file included from include/linux/rculist.h:11:
  In file included from include/linux/rcupdate.h:26:
  In file included from include/linux/irqflags.h:18:
  arch/loongarch/include/asm/percpu.h:97:3: error: call to '__compiletime_assert_51' declared with 'error' attribute: BUILD_BUG failed
     97 |                 BUILD_BUG();
        |                 ^
  include/linux/build_bug.h:59:21: note: expanded from macro 'BUILD_BUG'
     59 | #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
        |                     ^
  include/linux/build_bug.h:39:37: note: expanded from macro 'BUILD_BUG_ON_MSG'
     39 | #define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
        |                                     ^
  include/linux/compiler_types.h:425:2: note: expanded from macro 'compiletime_assert'
    425 |         _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
        |         ^
  include/linux/compiler_types.h:413:2: note: expanded from macro '_compiletime_assert'
    413 |         __compiletime_assert(condition, msg, prefix, suffix)
        |         ^
  include/linux/compiler_types.h:406:4: note: expanded from macro '__compiletime_assert'
    406 |                         prefix ## suffix();                     \
        |                         ^
  <scratch space>:86:1: note: expanded from here
     86 | __compiletime_assert_51
        | ^
  1 error generated.

If these functions are not inlined (which the compiler is free to do
even with functions marked with the standard 'inline' keyword), the
BUILD_BUG() in the default case cannot be eliminated since the compiler
cannot prove it is never used, resulting in a build failure due to the
error attribute.

Mark these functions as __always_inline to guarantee inlining so that
the BUILD_BUG() only triggers when the default case genuinely cannot be
eliminated due to an unexpected size.

Cc: 
Closes: https://github.com/ClangBuiltLinux/linux/issues/1955
Fixes: 46859ac8af52 ("LoongArch: Add multi-processor (SMP) support")
Link: https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e
Suggested-by: Nick Desaulniers
Signed-off-by: Nathan Chancellor
Signed-off-by: Huacai Chen
---
 arch/loongarch/include/asm/percpu.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index b9f567e66016..ed5da02b1cf6 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
 #define __my_cpu_offset __my_cpu_offset
 
 #define PERCPU_OP(op, asm_op, c_op)					\
-static inline unsigned long __percpu_##op(void *ptr,			\
+static __always_inline unsigned long __percpu_##op(void *ptr,		\
 			unsigned long val, int size)			\
 {									\
 	unsigned long ret;						\
@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
 PERCPU_OP(or, or, |)
 #undef PERCPU_OP
 
-static inline unsigned long __percpu_read(void *ptr, int size)
+static __always_inline unsigned long __percpu_read(void *ptr, int size)
 {
 	unsigned long ret;
 
@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
 	return ret;
 }
 
-static inline void __percpu_write(void *ptr, unsigned long val, int size)
+static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
 	switch (size) {
 	case 1:
@@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
 	}
 }
 
-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
-						int size)
+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+						   int size)
 {
 	switch (size) {
 	case 1:
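[Editor's note] The failure mode is easier to see in a reduced form. Below
is a stand-alone sketch (my own reduction, not code from the patch) of the
pattern percpu.h relies on; when optimizing, it only builds if the function
is really inlined and the switch is folded on a constant size:

/* Stand-in for the error attribute underneath BUILD_BUG(). */
extern void bad_size(void) __attribute__((error("BUILD_BUG failed")));

/* Without always_inline the compiler may emit this out of line, in which
 * case the default case survives and bad_size() fails the build. */
static __attribute__((always_inline)) inline unsigned long read_size(void *ptr, int size)
{
	switch (size) {
	case 1:
		return *(unsigned char *)ptr;
	case 8:
		return *(unsigned long *)ptr;
	default:
		bad_size();	/* eliminated only if `size` folds to 1 or 8 */
		return 0;
	}
}

unsigned long read_byte(void *p)
{
	return read_size(p, 1);	/* constant size: default case folds away */
}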
From affef66b65889a0ea0060e13e5f7fe569897d787 Mon Sep 17 00:00:00 2001
From: WANG Rui
Date: Wed, 8 Nov 2023 14:12:15 +0800
Subject: [PATCH 04/13] LoongArch: Relax memory ordering for atomic operations

Relax the implementation of the atomic operations while still satisfying
their memory ordering requirements: the *_relaxed variants now use the
plain AM instructions, while the fully ordered forms keep the full-barrier
(_db) variants. This helps improve performance on LA664+.

UnixBench with full threads (8):

                                               before        after    change
  Dhrystone 2 using register variables    203910714.2  203909539.8     0.00%
  Double-Precision Whetstone                  37930.9        37931     0.00%
  Execl Throughput                            29431.5      29545.8     0.39%
  File Copy 1024 bufsize 2000 maxblocks     6645759.5      6676320     0.46%
  File Copy 256 bufsize 500 maxblocks       2138772.4    2144182.4     0.25%
  File Copy 4096 bufsize 8000 maxblocks    11640698.4     11602703    -0.33%
  Pipe Throughput                           8849077.7    8917009.4     0.77%
  Pipe-based Context Switching              1255108.5    1287277.3     2.56%
  Process Creation                            50825.9      50442.1    -0.76%
  Shell Scripts (1 concurrent)                25795.8      25942.3     0.57%
  Shell Scripts (8 concurrent)                 3812.6       3835.2     0.59%
  System Call Overhead                      9248212.6    9353348.6     1.14%
  =======================================================================
  System Benchmarks Index Score                8076.6       8114.4     0.47%

Signed-off-by: WANG Rui
Signed-off-by: Huacai Chen
---
 arch/loongarch/include/asm/atomic.h | 88 ++++++++++++++++++++++-------
 1 file changed, 68 insertions(+), 20 deletions(-)
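[Editor's note] To make the macro changes below concrete, here is roughly
what the two generated flavours of arch_atomic_add_return expand to after
this patch (a hand expansion for illustration; whitespace and the exact asm
string layout differ in the real header):

/* ATOMIC_OP_RETURN(add, i, add, +, _db, ): fully ordered, used for the
 * plain arch_atomic_add_return() and aliased to the _acquire/_release
 * forms below, which is stronger than required but always correct. */
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	int result;

	__asm__ __volatile__(
	"amadd_db.w %1, %2, %0"		/* AM op with full-barrier semantics */
	: "+ZB" (v->counter), "=&r" (result)
	: "r" (i)
	: "memory");

	return result + i;
}

/* ATOMIC_OP_RETURN(add, i, add, +, , _relaxed): no ordering beyond the
 * atomicity of the AM operation itself. */
static inline int arch_atomic_add_return_relaxed(int i, atomic_t *v)
{
	int result;

	__asm__ __volatile__(
	"amadd.w %1, %2, %0"		/* plain AM op */
	: "+ZB" (v->counter), "=&r" (result)
	: "r" (i)
	: "memory");

	return result + i;
}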
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index e27f0c72d324..99af8b3160a8 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -36,19 +36,19 @@
 static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
 	__asm__ __volatile__(						\
-	"am"#asm_op"_db.w" " $zero, %1, %0	\n"			\
+	"am"#asm_op".w" " $zero, %1, %0	\n"				\
 	: "+ZB" (v->counter)						\
 	: "r" (I)							\
 	: "memory");							\
 }
 
-#define ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
-static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix)		\
+static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v)	\
 {									\
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-	"am"#asm_op"_db.w" " %1, %2, %0	\n"				\
+	"am"#asm_op#mb".w" " %1, %2, %0	\n"				\
 	: "+ZB" (v->counter), "=&r" (result)				\
 	: "r" (I)							\
 	: "memory");							\
@@ -56,13 +56,13 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 	return result c_op I;						\
 }
 
-#define ATOMIC_FETCH_OP(op, I, asm_op)					\
-static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix)			\
+static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v)	\
 {									\
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-	"am"#asm_op"_db.w" " %1, %2, %0	\n"				\
+	"am"#asm_op#mb".w" " %1, %2, %0	\n"				\
 	: "+ZB" (v->counter), "=&r" (result)				\
 	: "r" (I)							\
 	: "memory");							\
@@ -72,29 +72,53 @@
 
 #define ATOMIC_OPS(op, I, asm_op, c_op)					\
 	ATOMIC_OP(op, I, asm_op)					\
-	ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
-	ATOMIC_FETCH_OP(op, I, asm_op)
+	ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, )			\
+	ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed)		\
+	ATOMIC_FETCH_OP(op, I, asm_op, _db, )				\
+	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC_OPS(add, i, add, +)
 ATOMIC_OPS(sub, -i, add, +)
 
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
 #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
 
 #undef ATOMIC_OPS
 
 #define ATOMIC_OPS(op, I, asm_op)					\
 	ATOMIC_OP(op, I, asm_op)					\
-	ATOMIC_FETCH_OP(op, I, asm_op)
+	ATOMIC_FETCH_OP(op, I, asm_op, _db, )				\
+	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC_OPS(and, i, and)
 ATOMIC_OPS(or, i, or)
 ATOMIC_OPS(xor, i, xor)
 
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
 #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
@@ -172,18 +196,18 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
 {									\
 	__asm__ __volatile__(						\
-	"am"#asm_op"_db.d " " $zero, %1, %0	\n"			\
+	"am"#asm_op".d " " $zero, %1, %0	\n"			\
 	: "+ZB" (v->counter)						\
 	: "r" (I)							\
 	: "memory");							\
 }
 
-#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op)				\
-static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)	\
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix)		\
+static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v)	\
 {									\
 	long result;							\
 									\
 	__asm__ __volatile__(						\
-	"am"#asm_op"_db.d " " %1, %2, %0	\n"			\
+	"am"#asm_op#mb".d " " %1, %2, %0	\n"			\
 	: "+ZB" (v->counter), "=&r" (result)				\
 	: "r" (I)							\
 	: "memory");							\
@@ -191,13 +215,13 @@ static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)	\
 	return result c_op I;						\
 }
 
-#define ATOMIC64_FETCH_OP(op, I, asm_op)				\
-static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v)	\
+#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix)			\
+static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v)	\
 {									\
 	long result;							\
 									\
 	__asm__ __volatile__(						\
-	"am"#asm_op"_db.d " " %1, %2, %0	\n"			\
+	"am"#asm_op#mb".d " " %1, %2, %0	\n"			\
 	: "+ZB" (v->counter), "=&r" (result)				\
 	: "r" (I)							\
 	: "memory");							\
@@ -207,29 +231,53 @@
 
 #define ATOMIC64_OPS(op, I, asm_op, c_op)				\
 	ATOMIC64_OP(op, I, asm_op)					\
-	ATOMIC64_OP_RETURN(op, I, asm_op, c_op)				\
-	ATOMIC64_FETCH_OP(op, I, asm_op)
+	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, )			\
+	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed)		\
+	ATOMIC64_FETCH_OP(op, I, asm_op, _db, )				\
+	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC64_OPS(add, i, add, +)
 ATOMIC64_OPS(sub, -i, add, +)
 
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
 #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
 #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
 #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
 #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
 
 #undef ATOMIC64_OPS
 
 #define ATOMIC64_OPS(op, I, asm_op)					\
 	ATOMIC64_OP(op, I, asm_op)					\
-	ATOMIC64_FETCH_OP(op, I, asm_op)
+	ATOMIC64_FETCH_OP(op, I, asm_op, _db, )				\
+	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC64_OPS(and, i, and)
 ATOMIC64_OPS(or, i, or)
 ATOMIC64_OPS(xor, i, xor)
 
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
 #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
 #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
 #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
 
 #undef ATOMIC64_OPS

From a2ccf46333d7b2cf9658f0d82ac74097c1542fae Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Wed, 8 Nov 2023 14:12:15 +0800
Subject: [PATCH 05/13] LoongArch/smp: Call rcutree_report_cpu_starting()
 earlier

rcutree_report_cpu_starting() must be called before cpu_probe() to avoid
the following lockdep splat, triggered by calling __alloc_pages() when
CONFIG_PROVE_RCU_LIST=y:

 =============================
 WARNING: suspicious RCU usage
 6.6.0+ #980 Not tainted
 -----------------------------
 kernel/locking/lockdep.c:3761 RCU-list traversed in non-reader section!!

 other info that might help us debug this:

 RCU used illegally from offline CPU!
 rcu_scheduler_active = 1, debug_locks = 1
 1 lock held by swapper/1/0:
  #0: 900000000c82ef98 (&pcp->lock){+.+.}-{2:2}, at: get_page_from_freelist+0x894/0x1790

 CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.6.0+ #980
 Stack : 0000000000000001 9000000004f79508 9000000004893670 9000000100310000
         90000001003137d0 0000000000000000 90000001003137d8 9000000004f79508
         0000000000000000 0000000000000001 0000000000000000 90000000048a3384
         203a656d616e2065 ca43677b3687e616 90000001002c3480 0000000000000008
         000000000000009d 0000000000000000 0000000000000001 80000000ffffe0b8
         000000000000000d 0000000000000033 0000000007ec0000 13bbf50562dad831
         9000000005140748 0000000000000000 9000000004f79508 0000000000000004
         0000000000000000 9000000005140748 90000001002bad40 0000000000000000
         90000001002ba400 0000000000000000 9000000003573ec8 0000000000000000
         00000000000000b0 0000000000000004 0000000000000000 0000000000070000
         ...
 Call Trace:
 [<9000000003573ec8>] show_stack+0x38/0x150
 [<9000000004893670>] dump_stack_lvl+0x74/0xa8
 [<900000000360d2bc>] lockdep_rcu_suspicious+0x14c/0x190
 [<900000000361235c>] __lock_acquire+0xd0c/0x2740
 [<90000000036146f4>] lock_acquire+0x104/0x2c0
 [<90000000048a955c>] _raw_spin_lock_irqsave+0x5c/0x90
 [<900000000381cd5c>] rmqueue_bulk+0x6c/0x950
 [<900000000381fc0c>] get_page_from_freelist+0xd4c/0x1790
 [<9000000003821c6c>] __alloc_pages+0x1bc/0x3e0
 [<9000000003583b40>] tlb_init+0x150/0x2a0
 [<90000000035742a0>] per_cpu_trap_init+0xf0/0x110
 [<90000000035712fc>] cpu_probe+0x3dc/0x7a0
 [<900000000357ed20>] start_secondary+0x40/0xb0
 [<9000000004897138>] smpboot_entry+0x54/0x58

raw_smp_processor_id() is required in order to avoid calling into
lockdep before RCU has declared the CPU to be watched for readers.

See also commit 29368e093921 ("x86/smpboot: Move rcu_cpu_starting()
earlier"), commit de5d9dae150c ("s390/smp: move rcu_cpu_starting()
earlier") and commit 99f070b62322 ("powerpc/smp: Call rcu_cpu_starting()
earlier").

Signed-off-by: Huacai Chen
---
 arch/loongarch/kernel/smp.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index ef35c871244f..5bca12d16e06 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -504,8 +504,9 @@ asmlinkage void start_secondary(void)
 	unsigned int cpu;
 
 	sync_counter();
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 	set_my_cpu_offset(per_cpu_offset(cpu));
+	rcutree_report_cpu_starting(cpu);
 
 	cpu_probe();
 	constant_clockevent_init();
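[Editor's note] Condensing the hunk above back into context, the resulting
bring-up order is the important part (trailing steps elided; the comments
are mine, not from the patch):

asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	sync_counter();
	cpu = raw_smp_processor_id();		/* no lockdep/debug hooks yet */
	set_my_cpu_offset(per_cpu_offset(cpu));
	rcutree_report_cpu_starting(cpu);	/* RCU now watches this CPU */

	cpu_probe();				/* tlb_init() may call __alloc_pages() */
	constant_clockevent_init();
	/* ... */
}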
From add28024405ed600afaa02749989d4fd119f9057 Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Wed, 8 Nov 2023 14:12:15 +0800
Subject: [PATCH 06/13] LoongArch: Add more instruction opcodes and emit_*
 helpers

This patch adds more instruction opcodes and their corresponding emit_*
helpers, which will be used in later patches.

Signed-off-by: Hengqi Chen
Signed-off-by: Huacai Chen
---
 arch/loongarch/include/asm/inst.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 71e1ed4165c8..5350ae9ee380 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -65,6 +65,8 @@ enum reg2_op {
 	revbd_op	= 0x0f,
 	revh2w_op	= 0x10,
 	revhd_op	= 0x11,
+	extwh_op	= 0x16,
+	extwb_op	= 0x17,
 };
 
 enum reg2i5_op {
@@ -556,6 +558,8 @@ static inline void emit_##NAME(union loongarch_instruction *insn,	\
 DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op)
 DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op)
 DEF_EMIT_REG2_FORMAT(revbd, revbd_op)
+DEF_EMIT_REG2_FORMAT(extwh, extwh_op)
+DEF_EMIT_REG2_FORMAT(extwb, extwb_op)
 
 #define DEF_EMIT_REG2I5_FORMAT(NAME, OP)				\
 static inline void emit_##NAME(union loongarch_instruction *insn,	\
@@ -607,6 +611,9 @@ DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op)
 DEF_EMIT_REG2I12_FORMAT(andi, andi_op)
 DEF_EMIT_REG2I12_FORMAT(ori, ori_op)
 DEF_EMIT_REG2I12_FORMAT(xori, xori_op)
+DEF_EMIT_REG2I12_FORMAT(ldb, ldb_op)
+DEF_EMIT_REG2I12_FORMAT(ldh, ldh_op)
+DEF_EMIT_REG2I12_FORMAT(ldw, ldw_op)
 DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op)
 DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op)
 DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op)
@@ -685,9 +692,12 @@ static inline void emit_##NAME(union loongarch_instruction *insn,	\
 	insn->reg3_format.rk = rk;					\
 }
 
+DEF_EMIT_REG3_FORMAT(addw, addw_op)
 DEF_EMIT_REG3_FORMAT(addd, addd_op)
 DEF_EMIT_REG3_FORMAT(subd, subd_op)
 DEF_EMIT_REG3_FORMAT(muld, muld_op)
+DEF_EMIT_REG3_FORMAT(divd, divd_op)
+DEF_EMIT_REG3_FORMAT(modd, modd_op)
 DEF_EMIT_REG3_FORMAT(divdu, divdu_op)
 DEF_EMIT_REG3_FORMAT(moddu, moddu_op)
 DEF_EMIT_REG3_FORMAT(and, and_op)
@@ -699,6 +709,9 @@ DEF_EMIT_REG3_FORMAT(srlw, srlw_op)
 DEF_EMIT_REG3_FORMAT(srld, srld_op)
 DEF_EMIT_REG3_FORMAT(sraw, sraw_op)
 DEF_EMIT_REG3_FORMAT(srad, srad_op)
+DEF_EMIT_REG3_FORMAT(ldxb, ldxb_op)
+DEF_EMIT_REG3_FORMAT(ldxh, ldxh_op)
+DEF_EMIT_REG3_FORMAT(ldxw, ldxw_op)
 DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op)
 DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op)
 DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op)
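[Editor's note] For reference, DEF_EMIT_REG2I12_FORMAT stamps out one small
encoder per opcode; expanded by hand for the new ldb helper it is equivalent
to the following (based on the existing macro definition in inst.h):

static inline void emit_ldb(union loongarch_instruction *insn,
			    enum loongarch_gpr rd, enum loongarch_gpr rj,
			    int imm)
{
	/* reg2i12 format: opcode | 12-bit signed immediate | rj | rd */
	insn->reg2i12_format.opcode = ldb_op;
	insn->reg2i12_format.immediate = imm;
	insn->reg2i12_format.rd = rd;
	insn->reg2i12_format.rj = rj;
}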
From 7111afe8fb5f15e11b8eff90d7aed1c58e3b1167 Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Wed, 8 Nov 2023 14:12:16 +0800
Subject: [PATCH 07/13] LoongArch: BPF: Support sign-extension load
 instructions

Add support for sign-extension load instructions.

Signed-off-by: Hengqi Chen
Signed-off-by: Huacai Chen
---
 arch/loongarch/net/bpf_jit.c | 49 ++++++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 10 deletions(-)

diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index db9342b2d0e6..0c2bbca527ef 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -411,7 +411,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
 	off_t offset;
 	struct exception_table_entry *ex;
 
-	if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
+	if (!ctx->image || !ctx->prog->aux->extable)
+		return 0;
+
+	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
 		return 0;
 
 	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
@@ -450,7 +454,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 {
 	u8 tm = -1;
 	u64 func_addr;
-	bool func_addr_fixed;
+	bool func_addr_fixed, sign_extend;
 	int i = insn - ctx->prog->insnsi;
 	int ret, jmp_offset;
 	const u8 code = insn->code;
@@ -879,31 +883,56 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
 	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
 	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+	/* dst_reg = (s64)*(signed size *)(src_reg + off) */
+	case BPF_LDX | BPF_MEMSX | BPF_B:
+	case BPF_LDX | BPF_MEMSX | BPF_H:
+	case BPF_LDX | BPF_MEMSX | BPF_W:
+	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
+	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
+	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+		sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
+			      BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
 		switch (BPF_SIZE(code)) {
 		case BPF_B:
 			if (is_signed_imm12(off)) {
-				emit_insn(ctx, ldbu, dst, src, off);
+				if (sign_extend)
+					emit_insn(ctx, ldb, dst, src, off);
+				else
+					emit_insn(ctx, ldbu, dst, src, off);
 			} else {
 				move_imm(ctx, t1, off, is32);
-				emit_insn(ctx, ldxbu, dst, src, t1);
+				if (sign_extend)
+					emit_insn(ctx, ldxb, dst, src, t1);
+				else
+					emit_insn(ctx, ldxbu, dst, src, t1);
 			}
 			break;
 		case BPF_H:
 			if (is_signed_imm12(off)) {
-				emit_insn(ctx, ldhu, dst, src, off);
+				if (sign_extend)
+					emit_insn(ctx, ldh, dst, src, off);
+				else
+					emit_insn(ctx, ldhu, dst, src, off);
 			} else {
 				move_imm(ctx, t1, off, is32);
-				emit_insn(ctx, ldxhu, dst, src, t1);
+				if (sign_extend)
+					emit_insn(ctx, ldxh, dst, src, t1);
+				else
+					emit_insn(ctx, ldxhu, dst, src, t1);
 			}
 			break;
 		case BPF_W:
 			if (is_signed_imm12(off)) {
-				emit_insn(ctx, ldwu, dst, src, off);
-			} else if (is_signed_imm14(off)) {
-				emit_insn(ctx, ldptrw, dst, src, off);
+				if (sign_extend)
+					emit_insn(ctx, ldw, dst, src, off);
+				else
+					emit_insn(ctx, ldwu, dst, src, off);
 			} else {
 				move_imm(ctx, t1, off, is32);
-				emit_insn(ctx, ldxwu, dst, src, t1);
+				if (sign_extend)
+					emit_insn(ctx, ldxw, dst, src, t1);
+				else
+					emit_insn(ctx, ldxwu, dst, src, t1);
 			}
 			break;
 		case BPF_DW:
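[Editor's note] As a usage sketch, the kind of instruction this enables
looks as follows. BPF_LDX_MEMSX is assumed to be the insn-building macro
from include/linux/filter.h (kernel context); the snippet itself is
illustrative, not from the patch:

#include <linux/filter.h>

/* r0 = (s64)*(s8 *)(r1 + 0): with a 12-bit offset the JIT now emits the
 * sign-extending ld.b instead of the zero-extending ld.bu. */
static const struct bpf_insn sext_load[] = {
	BPF_LDX_MEMSX(BPF_B, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
};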
From f48012f161508c743e1b39c3521a2b285d19c6aa Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Wed, 8 Nov 2023 14:12:16 +0800
Subject: [PATCH 08/13] LoongArch: BPF: Support sign-extension mov instructions

Add support for sign-extension mov instructions.

Signed-off-by: Hengqi Chen
Signed-off-by: Huacai Chen
---
 arch/loongarch/net/bpf_jit.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 0c2bbca527ef..ac9edf02675c 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -472,8 +472,23 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 	/* dst = src */
 	case BPF_ALU | BPF_MOV | BPF_X:
 	case BPF_ALU64 | BPF_MOV | BPF_X:
-		move_reg(ctx, dst, src);
-		emit_zext_32(ctx, dst, is32);
+		switch (off) {
+		case 0:
+			move_reg(ctx, dst, src);
+			emit_zext_32(ctx, dst, is32);
+			break;
+		case 8:
+			move_reg(ctx, t1, src);
+			emit_insn(ctx, extwb, dst, t1);
+			break;
+		case 16:
+			move_reg(ctx, t1, src);
+			emit_insn(ctx, extwh, dst, t1);
+			break;
+		case 32:
+			emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
+			break;
+		}
 		break;
 
 	/* dst = imm */

From 4ebf9216e7dff0b38e350007da3b03afb15c816b Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Wed, 8 Nov 2023 14:12:16 +0800
Subject: [PATCH 09/13] LoongArch: BPF: Support unconditional bswap
 instructions

Add support for the unconditional bswap instruction. Since LoongArch is
always little-endian, just treat unconditional bswap the same as the
big-endian conversion.

Signed-off-by: Hengqi Chen
Signed-off-by: Huacai Chen
---
 arch/loongarch/net/bpf_jit.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index ac9edf02675c..a8be6d4b058c 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -731,6 +731,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 		break;
 
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
+	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
 		switch (imm) {
 		case 16:
 			emit_insn(ctx, revb2h, dst, dst);

From 9ddd2b8d1a8b566195c196fe4249d04cd75cc73c Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Wed, 8 Nov 2023 14:12:16 +0800
Subject: [PATCH 10/13] LoongArch: BPF: Support 32-bit offset jmp instructions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add support for the 32-bit offset jmp instruction. Currently we use the
`b` instruction for such jumps, which supports a range within ±128MB;
this should be large enough for BPF programs.

Signed-off-by: Hengqi Chen
Signed-off-by: Huacai Chen
---
 arch/loongarch/net/bpf_jit.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index a8be6d4b058c..050fcf233a34 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -848,7 +848,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 
 	/* PC += off */
 	case BPF_JMP | BPF_JA:
-		jmp_offset = bpf2la_offset(i, off, ctx);
+	case BPF_JMP32 | BPF_JA:
+		if (BPF_CLASS(code) == BPF_JMP)
+			jmp_offset = bpf2la_offset(i, off, ctx);
+		else
+			jmp_offset = bpf2la_offset(i, imm, ctx);
 		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
 			goto toofar;
 		break;
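[Editor's note] On the ±128MB claim: the `b` instruction encodes a signed
26-bit displacement counted in 4-byte units, i.e. ±2^27 bytes. A stand-alone
sanity check of that arithmetic (my illustration, not JIT code):

#include <stdbool.h>

/* A byte offset is encodable in LoongArch's b instruction if it is
 * 4-byte aligned and its word count fits in a signed 26-bit field:
 * [-2^25, 2^25) words = [-128MB, 128MB) bytes. */
static bool fits_b_insn(long byte_offset)
{
	long words = byte_offset >> 2;

	return (byte_offset & 3) == 0 &&
	       words >= -(1L << 25) && words < (1L << 25);
}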
From 2425c9e002d2a1fdca34261b2fa6713eafef2163 Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Wed, 8 Nov 2023 14:12:21 +0800
Subject: [PATCH 11/13] LoongArch: BPF: Support signed div instructions

Add support for signed div instructions.

Signed-off-by: Hengqi Chen
Signed-off-by: Huacai Chen
---
 arch/loongarch/net/bpf_jit.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 050fcf233a34..7c0d129b82a4 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -553,20 +553,36 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 	/* dst = dst / src */
 	case BPF_ALU | BPF_DIV | BPF_X:
 	case BPF_ALU64 | BPF_DIV | BPF_X:
-		emit_zext_32(ctx, dst, is32);
-		move_reg(ctx, t1, src);
-		emit_zext_32(ctx, t1, is32);
-		emit_insn(ctx, divdu, dst, dst, t1);
-		emit_zext_32(ctx, dst, is32);
+		if (!off) {
+			emit_zext_32(ctx, dst, is32);
+			move_reg(ctx, t1, src);
+			emit_zext_32(ctx, t1, is32);
+			emit_insn(ctx, divdu, dst, dst, t1);
+			emit_zext_32(ctx, dst, is32);
+		} else {
+			emit_sext_32(ctx, dst, is32);
+			move_reg(ctx, t1, src);
+			emit_sext_32(ctx, t1, is32);
+			emit_insn(ctx, divd, dst, dst, t1);
+			emit_sext_32(ctx, dst, is32);
+		}
 		break;
 
 	/* dst = dst / imm */
 	case BPF_ALU | BPF_DIV | BPF_K:
 	case BPF_ALU64 | BPF_DIV | BPF_K:
-		move_imm(ctx, t1, imm, is32);
-		emit_zext_32(ctx, dst, is32);
-		emit_insn(ctx, divdu, dst, dst, t1);
-		emit_zext_32(ctx, dst, is32);
+		if (!off) {
+			move_imm(ctx, t1, imm, is32);
+			emit_zext_32(ctx, dst, is32);
+			emit_insn(ctx, divdu, dst, dst, t1);
+			emit_zext_32(ctx, dst, is32);
+		} else {
+			move_imm(ctx, t1, imm, false);
+			emit_sext_32(ctx, t1, is32);
+			emit_sext_32(ctx, dst, is32);
+			emit_insn(ctx, divd, dst, dst, t1);
+			emit_sext_32(ctx, dst, is32);
+		}
 		break;
 
 	/* dst = dst % src */
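[Editor's note] Why the sign-extension dance: BPF's 32-bit sdiv is defined
on the low 32 bits of the registers, while the hardware div.d divides full
64-bit values, so both operands are sign-extended first and the result is
sign-extended back. A host-side model of what the emitted sequence computes
(my illustration; a non-zero divisor and no INT32_MIN / -1 overflow are
assumed, since C leaves those undefined, and verifier-inserted
zero-extensions are ignored):

#include <stdint.h>

/* Model of the JITed BPF_ALU | BPF_DIV | BPF_X with off != 0 (sdiv32). */
static int64_t model_sdiv32(int64_t dst, int64_t src)
{
	int64_t a = (int32_t)dst;	/* emit_sext_32(dst)        */
	int64_t b = (int32_t)src;	/* move_reg + emit_sext_32  */

	return (int32_t)(a / b);	/* div.d, then emit_sext_32 */
}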
From 7b6b13d32965ad7f1eb889d1a7058868a88eb29f Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Wed, 8 Nov 2023 14:12:21 +0800
Subject: [PATCH 12/13] LoongArch: BPF: Support signed mod instructions

Add support for signed mod instructions.

Signed-off-by: Hengqi Chen
Signed-off-by: Huacai Chen
---
 arch/loongarch/net/bpf_jit.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 7c0d129b82a4..169ff8b3915e 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -588,20 +588,36 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 	/* dst = dst % src */
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
-		emit_zext_32(ctx, dst, is32);
-		move_reg(ctx, t1, src);
-		emit_zext_32(ctx, t1, is32);
-		emit_insn(ctx, moddu, dst, dst, t1);
-		emit_zext_32(ctx, dst, is32);
+		if (!off) {
+			emit_zext_32(ctx, dst, is32);
+			move_reg(ctx, t1, src);
+			emit_zext_32(ctx, t1, is32);
+			emit_insn(ctx, moddu, dst, dst, t1);
+			emit_zext_32(ctx, dst, is32);
+		} else {
+			emit_sext_32(ctx, dst, is32);
+			move_reg(ctx, t1, src);
+			emit_sext_32(ctx, t1, is32);
+			emit_insn(ctx, modd, dst, dst, t1);
+			emit_sext_32(ctx, dst, is32);
+		}
 		break;
 
 	/* dst = dst % imm */
 	case BPF_ALU | BPF_MOD | BPF_K:
 	case BPF_ALU64 | BPF_MOD | BPF_K:
-		move_imm(ctx, t1, imm, is32);
-		emit_zext_32(ctx, dst, is32);
-		emit_insn(ctx, moddu, dst, dst, t1);
-		emit_zext_32(ctx, dst, is32);
+		if (!off) {
+			move_imm(ctx, t1, imm, is32);
+			emit_zext_32(ctx, dst, is32);
+			emit_insn(ctx, moddu, dst, dst, t1);
+			emit_zext_32(ctx, dst, is32);
+		} else {
+			move_imm(ctx, t1, imm, false);
+			emit_sext_32(ctx, t1, is32);
+			emit_sext_32(ctx, dst, is32);
+			emit_insn(ctx, modd, dst, dst, t1);
+			emit_sext_32(ctx, dst, is32);
+		}
 		break;
 
 	/* dst = -dst */
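[Editor's note] The operand handling mirrors the sdiv case above; the only
difference in the model is the final operator (mod.d, like C's %, truncates
toward zero, which matches BPF's smod semantics):

#include <stdint.h>

/* Model of the JITed BPF_ALU | BPF_MOD | BPF_X with off != 0 (smod32),
 * again assuming a non-zero divisor; parallels model_sdiv32() above. */
static int64_t model_smod32(int64_t dst, int64_t src)
{
	return (int32_t)((int64_t)(int32_t)dst % (int32_t)src);
}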
From 1d375d65466e5c8d7a9406826d80d475a22e8c6d Mon Sep 17 00:00:00 2001
From: Hengqi Chen
Date: Wed, 8 Nov 2023 14:12:21 +0800
Subject: [PATCH 13/13] selftests/bpf: Enable cpu v4 tests for LoongArch

Enable the cpu v4 tests for LoongArch. The LoongArch JIT does not have
BPF trampoline support yet, so the fentry-based test `test_ptr_struct_arg`
still fails; this will be addressed in a follow-up.

Test results attached below:

# ./test_progs -t verifier_sdiv,verifier_movsx,verifier_ldsx,verifier_gotol,verifier_bswap
#316/1 verifier_bswap/BSWAP, 16:OK
#316/2 verifier_bswap/BSWAP, 16 @unpriv:OK
#316/3 verifier_bswap/BSWAP, 32:OK
#316/4 verifier_bswap/BSWAP, 32 @unpriv:OK
#316/5 verifier_bswap/BSWAP, 64:OK
#316/6 verifier_bswap/BSWAP, 64 @unpriv:OK
#316 verifier_bswap:OK
#330/1 verifier_gotol/gotol, small_imm:OK
#330/2 verifier_gotol/gotol, small_imm @unpriv:OK
#330 verifier_gotol:OK
#338/1 verifier_ldsx/LDSX, S8:OK
#338/2 verifier_ldsx/LDSX, S8 @unpriv:OK
#338/3 verifier_ldsx/LDSX, S16:OK
#338/4 verifier_ldsx/LDSX, S16 @unpriv:OK
#338/5 verifier_ldsx/LDSX, S32:OK
#338/6 verifier_ldsx/LDSX, S32 @unpriv:OK
#338/7 verifier_ldsx/LDSX, S8 range checking, privileged:OK
#338/8 verifier_ldsx/LDSX, S16 range checking:OK
#338/9 verifier_ldsx/LDSX, S16 range checking @unpriv:OK
#338/10 verifier_ldsx/LDSX, S32 range checking:OK
#338/11 verifier_ldsx/LDSX, S32 range checking @unpriv:OK
#338 verifier_ldsx:OK
#349/1 verifier_movsx/MOV32SX, S8:OK
#349/2 verifier_movsx/MOV32SX, S8 @unpriv:OK
#349/3 verifier_movsx/MOV32SX, S16:OK
#349/4 verifier_movsx/MOV32SX, S16 @unpriv:OK
#349/5 verifier_movsx/MOV64SX, S8:OK
#349/6 verifier_movsx/MOV64SX, S8 @unpriv:OK
#349/7 verifier_movsx/MOV64SX, S16:OK
#349/8 verifier_movsx/MOV64SX, S16 @unpriv:OK
#349/9 verifier_movsx/MOV64SX, S32:OK
#349/10 verifier_movsx/MOV64SX, S32 @unpriv:OK
#349/11 verifier_movsx/MOV32SX, S8, range_check:OK
#349/12 verifier_movsx/MOV32SX, S8, range_check @unpriv:OK
#349/13 verifier_movsx/MOV32SX, S16, range_check:OK
#349/14 verifier_movsx/MOV32SX, S16, range_check @unpriv:OK
#349/15 verifier_movsx/MOV32SX, S16, range_check 2:OK
#349/16 verifier_movsx/MOV32SX, S16, range_check 2 @unpriv:OK
#349/17 verifier_movsx/MOV64SX, S8, range_check:OK
#349/18 verifier_movsx/MOV64SX, S8, range_check @unpriv:OK
#349/19 verifier_movsx/MOV64SX, S16, range_check:OK
#349/20 verifier_movsx/MOV64SX, S16, range_check @unpriv:OK
#349/21 verifier_movsx/MOV64SX, S32, range_check:OK
#349/22 verifier_movsx/MOV64SX, S32, range_check @unpriv:OK
#349/23 verifier_movsx/MOV64SX, S16, R10 Sign Extension:OK
#349/24 verifier_movsx/MOV64SX, S16, R10 Sign Extension @unpriv:OK
#349 verifier_movsx:OK
#361/1 verifier_sdiv/SDIV32, non-zero imm divisor, check 1:OK
#361/2 verifier_sdiv/SDIV32, non-zero imm divisor, check 1 @unpriv:OK
#361/3 verifier_sdiv/SDIV32, non-zero imm divisor, check 2:OK
#361/4 verifier_sdiv/SDIV32, non-zero imm divisor, check 2 @unpriv:OK
#361/5 verifier_sdiv/SDIV32, non-zero imm divisor, check 3:OK
#361/6 verifier_sdiv/SDIV32, non-zero imm divisor, check 3 @unpriv:OK
#361/7 verifier_sdiv/SDIV32, non-zero imm divisor, check 4:OK
#361/8 verifier_sdiv/SDIV32, non-zero imm divisor, check 4 @unpriv:OK
#361/9 verifier_sdiv/SDIV32, non-zero imm divisor, check 5:OK
#361/10 verifier_sdiv/SDIV32, non-zero imm divisor, check 5 @unpriv:OK
#361/11 verifier_sdiv/SDIV32, non-zero imm divisor, check 6:OK
#361/12 verifier_sdiv/SDIV32, non-zero imm divisor, check 6 @unpriv:OK
#361/13 verifier_sdiv/SDIV32, non-zero imm divisor, check 7:OK
#361/14 verifier_sdiv/SDIV32, non-zero imm divisor, check 7 @unpriv:OK
#361/15 verifier_sdiv/SDIV32, non-zero imm divisor, check 8:OK
#361/16 verifier_sdiv/SDIV32, non-zero imm divisor, check 8 @unpriv:OK
#361/17 verifier_sdiv/SDIV32, non-zero reg divisor, check 1:OK
#361/18 verifier_sdiv/SDIV32, non-zero reg divisor, check 1 @unpriv:OK
#361/19 verifier_sdiv/SDIV32, non-zero reg divisor, check 2:OK
#361/20 verifier_sdiv/SDIV32, non-zero reg divisor, check 2 @unpriv:OK
#361/21 verifier_sdiv/SDIV32, non-zero reg divisor, check 3:OK
#361/22 verifier_sdiv/SDIV32, non-zero reg divisor, check 3 @unpriv:OK
#361/23 verifier_sdiv/SDIV32, non-zero reg divisor, check 4:OK
#361/24 verifier_sdiv/SDIV32, non-zero reg divisor, check 4 @unpriv:OK
#361/25 verifier_sdiv/SDIV32, non-zero reg divisor, check 5:OK
#361/26 verifier_sdiv/SDIV32, non-zero reg divisor, check 5 @unpriv:OK
#361/27 verifier_sdiv/SDIV32, non-zero reg divisor, check 6:OK
#361/28 verifier_sdiv/SDIV32, non-zero reg divisor, check 6 @unpriv:OK
#361/29 verifier_sdiv/SDIV32, non-zero reg divisor, check 7:OK
#361/30 verifier_sdiv/SDIV32, non-zero reg divisor, check 7 @unpriv:OK
#361/31 verifier_sdiv/SDIV32, non-zero reg divisor, check 8:OK
#361/32 verifier_sdiv/SDIV32, non-zero reg divisor, check 8 @unpriv:OK
#361/33 verifier_sdiv/SDIV64, non-zero imm divisor, check 1:OK
#361/34 verifier_sdiv/SDIV64, non-zero imm divisor, check 1 @unpriv:OK
#361/35 verifier_sdiv/SDIV64, non-zero imm divisor, check 2:OK
#361/36 verifier_sdiv/SDIV64, non-zero imm divisor, check 2 @unpriv:OK
#361/37 verifier_sdiv/SDIV64, non-zero imm divisor, check 3:OK
#361/38 verifier_sdiv/SDIV64, non-zero imm divisor, check 3 @unpriv:OK
#361/39 verifier_sdiv/SDIV64, non-zero imm divisor, check 4:OK
#361/40 verifier_sdiv/SDIV64, non-zero imm divisor, check 4 @unpriv:OK
#361/41 verifier_sdiv/SDIV64, non-zero imm divisor, check 5:OK
#361/42 verifier_sdiv/SDIV64, non-zero imm divisor, check 5 @unpriv:OK
#361/43 verifier_sdiv/SDIV64, non-zero imm divisor, check 6:OK
#361/44 verifier_sdiv/SDIV64, non-zero imm divisor, check 6 @unpriv:OK
#361/45 verifier_sdiv/SDIV64, non-zero reg divisor, check 1:OK
#361/46 verifier_sdiv/SDIV64, non-zero reg divisor, check 1 @unpriv:OK
#361/47 verifier_sdiv/SDIV64, non-zero reg divisor, check 2:OK
#361/48 verifier_sdiv/SDIV64, non-zero reg divisor, check 2 @unpriv:OK
#361/49 verifier_sdiv/SDIV64, non-zero reg divisor, check 3:OK
#361/50 verifier_sdiv/SDIV64, non-zero reg divisor, check 3 @unpriv:OK
#361/51 verifier_sdiv/SDIV64, non-zero reg divisor, check 4:OK
#361/52 verifier_sdiv/SDIV64, non-zero reg divisor, check 4 @unpriv:OK
#361/53 verifier_sdiv/SDIV64, non-zero reg divisor, check 5:OK
#361/54 verifier_sdiv/SDIV64, non-zero reg divisor, check 5 @unpriv:OK
#361/55 verifier_sdiv/SDIV64, non-zero reg divisor, check 6:OK
#361/56 verifier_sdiv/SDIV64, non-zero reg divisor, check 6 @unpriv:OK
#361/57 verifier_sdiv/SMOD32, non-zero imm divisor, check 1:OK
#361/58 verifier_sdiv/SMOD32, non-zero imm divisor, check 1 @unpriv:OK
#361/59 verifier_sdiv/SMOD32, non-zero imm divisor, check 2:OK
#361/60 verifier_sdiv/SMOD32, non-zero imm divisor, check 2 @unpriv:OK
#361/61 verifier_sdiv/SMOD32, non-zero imm divisor, check 3:OK
#361/62 verifier_sdiv/SMOD32, non-zero imm divisor, check 3 @unpriv:OK
#361/63 verifier_sdiv/SMOD32, non-zero imm divisor, check 4:OK
#361/64 verifier_sdiv/SMOD32, non-zero imm divisor, check 4 @unpriv:OK
#361/65 verifier_sdiv/SMOD32, non-zero imm divisor, check 5:OK
#361/66 verifier_sdiv/SMOD32, non-zero imm divisor, check 5 @unpriv:OK
#361/67 verifier_sdiv/SMOD32, non-zero imm divisor, check 6:OK
#361/68 verifier_sdiv/SMOD32, non-zero imm divisor, check 6 @unpriv:OK
#361/69 verifier_sdiv/SMOD32, non-zero reg divisor, check 1:OK
#361/70 verifier_sdiv/SMOD32, non-zero reg divisor, check 1 @unpriv:OK
#361/71 verifier_sdiv/SMOD32, non-zero reg divisor, check 2:OK
#361/72 verifier_sdiv/SMOD32, non-zero reg divisor, check 2 @unpriv:OK
#361/73 verifier_sdiv/SMOD32, non-zero reg divisor, check 3:OK
#361/74 verifier_sdiv/SMOD32, non-zero reg divisor, check 3 @unpriv:OK
#361/75 verifier_sdiv/SMOD32, non-zero reg divisor, check 4:OK
#361/76 verifier_sdiv/SMOD32, non-zero reg divisor, check 4 @unpriv:OK
#361/77 verifier_sdiv/SMOD32, non-zero reg divisor, check 5:OK
#361/78 verifier_sdiv/SMOD32, non-zero reg divisor, check 5 @unpriv:OK
#361/79 verifier_sdiv/SMOD32, non-zero reg divisor, check 6:OK
#361/80 verifier_sdiv/SMOD32, non-zero reg divisor, check 6 @unpriv:OK
#361/81 verifier_sdiv/SMOD64, non-zero imm divisor, check 1:OK
#361/82 verifier_sdiv/SMOD64, non-zero imm divisor, check 1 @unpriv:OK
#361/83 verifier_sdiv/SMOD64, non-zero imm divisor, check 2:OK
#361/84 verifier_sdiv/SMOD64, non-zero imm divisor, check 2 @unpriv:OK
#361/85 verifier_sdiv/SMOD64, non-zero imm divisor, check 3:OK
#361/86 verifier_sdiv/SMOD64, non-zero imm divisor, check 3 @unpriv:OK
#361/87 verifier_sdiv/SMOD64, non-zero imm divisor, check 4:OK
#361/88 verifier_sdiv/SMOD64, non-zero imm divisor, check 4 @unpriv:OK
#361/89 verifier_sdiv/SMOD64, non-zero imm divisor, check 5:OK
#361/90 verifier_sdiv/SMOD64, non-zero imm divisor, check 5 @unpriv:OK
#361/91 verifier_sdiv/SMOD64, non-zero imm divisor, check 6:OK
#361/92 verifier_sdiv/SMOD64, non-zero imm divisor, check 6 @unpriv:OK
#361/93 verifier_sdiv/SMOD64, non-zero imm divisor, check 7:OK
#361/94 verifier_sdiv/SMOD64, non-zero imm divisor, check 7 @unpriv:OK
#361/95 verifier_sdiv/SMOD64, non-zero imm divisor, check 8:OK
#361/96 verifier_sdiv/SMOD64, non-zero imm divisor, check 8 @unpriv:OK
#361/97 verifier_sdiv/SMOD64, non-zero reg divisor, check 1:OK
#361/98 verifier_sdiv/SMOD64, non-zero reg divisor, check 1 @unpriv:OK
#361/99 verifier_sdiv/SMOD64, non-zero reg divisor, check 2:OK
#361/100 verifier_sdiv/SMOD64, non-zero reg divisor, check 2 @unpriv:OK
#361/101 verifier_sdiv/SMOD64, non-zero reg divisor, check 3:OK
#361/102 verifier_sdiv/SMOD64, non-zero reg divisor, check 3 @unpriv:OK
#361/103 verifier_sdiv/SMOD64, non-zero reg divisor, check 4:OK
#361/104 verifier_sdiv/SMOD64, non-zero reg divisor, check 4 @unpriv:OK
#361/105 verifier_sdiv/SMOD64, non-zero reg divisor, check 5:OK
#361/106 verifier_sdiv/SMOD64, non-zero reg divisor, check 5 @unpriv:OK
#361/107 verifier_sdiv/SMOD64, non-zero reg divisor, check 6:OK
#361/108 verifier_sdiv/SMOD64, non-zero reg divisor, check 6 @unpriv:OK
#361/109 verifier_sdiv/SMOD64, non-zero reg divisor, check 7:OK
#361/110 verifier_sdiv/SMOD64, non-zero reg divisor, check 7 @unpriv:OK
#361/111 verifier_sdiv/SMOD64, non-zero reg divisor, check 8:OK
#361/112 verifier_sdiv/SMOD64, non-zero reg divisor, check 8 @unpriv:OK
#361/113 verifier_sdiv/SDIV32, zero divisor:OK
#361/114 verifier_sdiv/SDIV32, zero divisor @unpriv:OK
#361/115 verifier_sdiv/SDIV64, zero divisor:OK
#361/116 verifier_sdiv/SDIV64, zero divisor @unpriv:OK
#361/117 verifier_sdiv/SMOD32, zero divisor:OK
#361/118 verifier_sdiv/SMOD32, zero divisor @unpriv:OK
#361/119 verifier_sdiv/SMOD64, zero divisor:OK
#361/120 verifier_sdiv/SMOD64, zero divisor @unpriv:OK
#361 verifier_sdiv:OK
Summary: 5/163 PASSED, 0 SKIPPED, 0 FAILED

# ./test_progs -t ldsx_insn
test_map_val_and_probed_memory:PASS:test_ldsx_insn__open 0 nsec
test_map_val_and_probed_memory:PASS:test_ldsx_insn__load 0 nsec
libbpf: prog 'test_ptr_struct_arg': failed to attach: ERROR: strerror_r(-524)=22
libbpf: prog 'test_ptr_struct_arg': failed to auto-attach: -524
test_map_val_and_probed_memory:FAIL:test_ldsx_insn__attach unexpected error: -524 (errno 524)
#116/1 ldsx_insn/map_val and probed_memory:FAIL
#116/2 ldsx_insn/ctx_member_sign_ext:OK
#116/3 ldsx_insn/ctx_member_narrow_sign_ext:OK
#116 ldsx_insn:FAIL

All error logs:
test_map_val_and_probed_memory:PASS:test_ldsx_insn__open 0 nsec
test_map_val_and_probed_memory:PASS:test_ldsx_insn__load 0 nsec
libbpf: prog 'test_ptr_struct_arg': failed to attach: ERROR: strerror_r(-524)=22
libbpf: prog 'test_ptr_struct_arg': failed to auto-attach: -524
test_map_val_and_probed_memory:FAIL:test_ldsx_insn__attach unexpected error: -524 (errno 524)
#116/1 ldsx_insn/map_val and probed_memory:FAIL
#116 ldsx_insn:FAIL
Summary: 0/2 PASSED, 0 SKIPPED, 1 FAILED

Signed-off-by: Hengqi Chen
Signed-off-by: Huacai Chen
---
 tools/testing/selftests/bpf/progs/test_ldsx_insn.c | 3 ++-
 tools/testing/selftests/bpf/progs/verifier_bswap.c | 3 ++-
 tools/testing/selftests/bpf/progs/verifier_gotol.c | 3 ++-
 tools/testing/selftests/bpf/progs/verifier_ldsx.c  | 3 ++-
 tools/testing/selftests/bpf/progs/verifier_movsx.c | 3 ++-
 tools/testing/selftests/bpf/progs/verifier_sdiv.c  | 3 ++-
 6 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
index 3ddcb3777912..2a2a942737d7 100644
--- a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
+++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
@@ -7,7 +7,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) ||	\
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) ||		\
-     defined(__TARGET_ARCH_s390)) && __clang_major__ >= 18
+     defined(__TARGET_ARCH_s390) || defined(__TARGET_ARCH_loongarch)) && \
+     __clang_major__ >= 18
 const volatile int skip = 0;
 #else
 const volatile int skip = 1;
diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c
index 107525fb4a6a..e61755656e8d 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bswap.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) ||	\
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) ||		\
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) &&	\
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) ||	\
+     defined(__TARGET_ARCH_loongarch)) &&				\
      __clang_major__ >= 18
 
 SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c
index 9f202eda952f..d1edbcff9a18 100644
--- a/tools/testing/selftests/bpf/progs/verifier_gotol.c
+++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) ||	\
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) ||		\
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) &&	\
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) ||	\
+     defined(__TARGET_ARCH_loongarch)) &&				\
     __clang_major__ >= 18
 
 SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
index 375525329637..d4427d8e1217 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) ||	\
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) ||		\
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) &&	\
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) ||	\
+     defined(__TARGET_ARCH_loongarch)) &&				\
     __clang_major__ >= 18
 
 SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index b2a04d1179d0..cbb9d6714f53 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) ||	\
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) ||		\
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) &&	\
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) ||	\
+     defined(__TARGET_ARCH_loongarch)) &&				\
     __clang_major__ >= 18
 
 SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
index 8fc5174808b2..2a2271cf0294 100644
--- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) ||	\
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) ||		\
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) &&	\
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) ||	\
+     defined(__TARGET_ARCH_loongarch)) &&				\
     __clang_major__ >= 18
 
 SEC("socket")
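[Editor's note] All six guards rely on the same convention: the BPF
selftests build passes a __TARGET_ARCH_* define derived from the target
architecture, and __clang_major__ gates on compiler support for the cpu v4
instructions. A minimal sketch of the pattern the programs share
(simplified by me; the real tests use the helpers from bpf_misc.h, and
emitting sdiv from plain C assumes clang with -mcpu=v4):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#if defined(__TARGET_ARCH_loongarch) && __clang_major__ >= 18

SEC("socket")
int sdiv_smoke(void *ctx)
{
	volatile int a = -10, b = 3;

	return a / b;	/* clang -mcpu=v4 can emit the new sdiv here */
}

#else

/* Targets without a cpu v4 capable JIT only emit a flag, and test_progs
 * reports the test as skipped instead of failing it. */
const volatile int skip = 1;

#endif

char _license[] SEC("license") = "GPL";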