From 455ca134a7b2121dd739d425d3be313fb52f0651 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 30 Apr 2024 11:17:20 +0200 Subject: [PATCH 01/10] x86/percpu: Introduce the pcpu_binary_op() macro Introduce the pcpu_binary_op() macro, a copy of the percpu_to_op() macro. Update percpu binary operators to use the new macro, since percpu_to_op() will be re-purposed as a raw percpu write accessor in a follow-up patch. No functional change intended. Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Link: https://lore.kernel.org/r/20240430091833.196482-1-ubizjak@gmail.com --- arch/x86/include/asm/percpu.h | 47 ++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 3bedee1801e2..cc40d8d9c272 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -163,6 +163,19 @@ do { \ : [var] "+m" (__my_cpu_var(_var))); \ }) +#define percpu_binary_op(size, qual, op, _var, _val) \ +do { \ + __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ + if (0) { \ + typeof(_var) pto_tmp__; \ + pto_tmp__ = (_val); \ + (void)pto_tmp__; \ + } \ + asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \ + : [var] "+m" (__my_cpu_var(_var)) \ + : [val] __pcpu_reg_imm_##size(pto_val__)); \ +} while (0) + /* * Generate a percpu add to memory instruction and optimize code * if one is added or subtracted. @@ -182,7 +195,7 @@ do { \ else if (pao_ID__ == -1) \ percpu_unary_op(size, qual, "dec", var); \ else \ - percpu_to_op(size, qual, "add", var, val); \ + percpu_binary_op(size, qual, "add", var, val); \ } while (0) #define percpu_from_op(size, qual, op, _var) \ @@ -492,12 +505,12 @@ do { \ #define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val) #define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val) #define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val) -#define raw_cpu_and_1(pcp, val) percpu_to_op(1, , "and", (pcp), val) -#define raw_cpu_and_2(pcp, val) percpu_to_op(2, , "and", (pcp), val) -#define raw_cpu_and_4(pcp, val) percpu_to_op(4, , "and", (pcp), val) -#define raw_cpu_or_1(pcp, val) percpu_to_op(1, , "or", (pcp), val) -#define raw_cpu_or_2(pcp, val) percpu_to_op(2, , "or", (pcp), val) -#define raw_cpu_or_4(pcp, val) percpu_to_op(4, , "or", (pcp), val) +#define raw_cpu_and_1(pcp, val) percpu_binary_op(1, , "and", (pcp), val) +#define raw_cpu_and_2(pcp, val) percpu_binary_op(2, , "and", (pcp), val) +#define raw_cpu_and_4(pcp, val) percpu_binary_op(4, , "and", (pcp), val) +#define raw_cpu_or_1(pcp, val) percpu_binary_op(1, , "or", (pcp), val) +#define raw_cpu_or_2(pcp, val) percpu_binary_op(2, , "or", (pcp), val) +#define raw_cpu_or_4(pcp, val) percpu_binary_op(4, , "or", (pcp), val) #define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val) #define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val) #define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val) @@ -505,12 +518,12 @@ do { \ #define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val) #define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val) #define this_cpu_add_4(pcp, val) percpu_add_op(4, volatile, (pcp), val) -#define this_cpu_and_1(pcp, val) percpu_to_op(1, volatile, "and", (pcp), val) -#define this_cpu_and_2(pcp, val) percpu_to_op(2, volatile, "and", (pcp), val) -#define this_cpu_and_4(pcp, val) percpu_to_op(4, volatile, "and", (pcp), val) -#define this_cpu_or_1(pcp, val) percpu_to_op(1, 
volatile, "or", (pcp), val) -#define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val) -#define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val) +#define this_cpu_and_1(pcp, val) percpu_binary_op(1, volatile, "and", (pcp), val) +#define this_cpu_and_2(pcp, val) percpu_binary_op(2, volatile, "and", (pcp), val) +#define this_cpu_and_4(pcp, val) percpu_binary_op(4, volatile, "and", (pcp), val) +#define this_cpu_or_1(pcp, val) percpu_binary_op(1, volatile, "or", (pcp), val) +#define this_cpu_or_2(pcp, val) percpu_binary_op(2, volatile, "or", (pcp), val) +#define this_cpu_or_4(pcp, val) percpu_binary_op(4, volatile, "or", (pcp), val) #define this_cpu_xchg_1(pcp, nval) this_percpu_xchg_op(pcp, nval) #define this_cpu_xchg_2(pcp, nval) this_percpu_xchg_op(pcp, nval) #define this_cpu_xchg_4(pcp, nval) this_percpu_xchg_op(pcp, nval) @@ -543,16 +556,16 @@ do { \ #define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp) #define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val) -#define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val) -#define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val) +#define raw_cpu_and_8(pcp, val) percpu_binary_op(8, , "and", (pcp), val) +#define raw_cpu_or_8(pcp, val) percpu_binary_op(8, , "or", (pcp), val) #define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val) #define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval) #define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval) #define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval) #define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val) -#define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val) -#define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val) +#define this_cpu_and_8(pcp, val) percpu_binary_op(8, volatile, "and", (pcp), val) +#define this_cpu_or_8(pcp, val) percpu_binary_op(8, volatile, "or", (pcp), val) #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val) #define this_cpu_xchg_8(pcp, nval) this_percpu_xchg_op(pcp, nval) #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval) From 08d564ad699ef32ceaf99d238b3d9c1f4ce5c998 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 30 Apr 2024 11:17:21 +0200 Subject: [PATCH 02/10] x86/percpu: Move some percpu macros around for readability Move some percpu macros around to make a follow-up patch more readable. No functional change intended. 
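A note on the macros being moved: percpu_to_op() and percpu_binary_op() carry an "if (0)" block that exists only so the compiler type-checks the value against the target variable; the branch is dead and generates no code. A minimal user-space sketch of the same idiom (CHECKED_STORE and its names are illustrative, not kernel code; typeof is the GNU C extension the kernel relies on):

  #include <stdio.h>

  /* The dead "if (0)" branch is compiled for type checking only,
   * then discarded; no code is generated for it.
   */
  #define CHECKED_STORE(var, val)                 \
  do {                                            \
          if (0) {                                \
                  typeof(var) tmp__;              \
                  tmp__ = (val); /* type check */ \
                  (void)tmp__;                    \
          }                                       \
          (var) = (val);                          \
  } while (0)

  int main(void)
  {
          unsigned long counter = 0;

          CHECKED_STORE(counter, 5);
          /* CHECKED_STORE(counter, (void *)0); would draw a warning */
          printf("counter = %lu\n", counter);

          return 0;
  }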
Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Link: https://lore.kernel.org/r/20240430091833.196482-2-ubizjak@gmail.com --- arch/x86/include/asm/percpu.h | 63 +++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index cc40d8d9c272..08113a2e5377 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -144,6 +144,29 @@ #define __pcpu_reg_imm_4(x) "ri" (x) #define __pcpu_reg_imm_8(x) "re" (x) +#ifdef CONFIG_USE_X86_SEG_SUPPORT + +#define __raw_cpu_read(qual, pcp) \ +({ \ + *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)); \ +}) + +#define __raw_cpu_write(qual, pcp, val) \ +do { \ + *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val); \ +} while (0) + +#else /* CONFIG_USE_X86_SEG_SUPPORT */ + +#define percpu_from_op(size, qual, op, _var) \ +({ \ + __pcpu_type_##size pfo_val__; \ + asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \ + : [val] __pcpu_reg_##size("=", pfo_val__) \ + : [var] "m" (__my_cpu_var(_var))); \ + (typeof(_var))(unsigned long) pfo_val__; \ +}) + #define percpu_to_op(size, qual, op, _var, _val) \ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ @@ -157,6 +180,17 @@ do { \ : [val] __pcpu_reg_imm_##size(pto_val__)); \ } while (0) +#endif /* CONFIG_USE_X86_SEG_SUPPORT */ + +#define percpu_stable_op(size, op, _var) \ +({ \ + __pcpu_type_##size pfo_val__; \ + asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \ + : [val] __pcpu_reg_##size("=", pfo_val__) \ + : [var] "i" (&(_var))); \ + (typeof(_var))(unsigned long) pfo_val__; \ +}) + #define percpu_unary_op(size, qual, op, _var) \ ({ \ asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \ @@ -198,24 +232,6 @@ do { \ percpu_binary_op(size, qual, "add", var, val); \ } while (0) -#define percpu_from_op(size, qual, op, _var) \ -({ \ - __pcpu_type_##size pfo_val__; \ - asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \ - : [val] __pcpu_reg_##size("=", pfo_val__) \ - : [var] "m" (__my_cpu_var(_var))); \ - (typeof(_var))(unsigned long) pfo_val__; \ -}) - -#define percpu_stable_op(size, op, _var) \ -({ \ - __pcpu_type_##size pfo_val__; \ - asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \ - : [val] __pcpu_reg_##size("=", pfo_val__) \ - : [var] "i" (&(_var))); \ - (typeof(_var))(unsigned long) pfo_val__; \ -}) - /* * Add return operation */ @@ -433,17 +449,6 @@ do { \ #define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp) #ifdef CONFIG_USE_X86_SEG_SUPPORT - -#define __raw_cpu_read(qual, pcp) \ -({ \ - *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)); \ -}) - -#define __raw_cpu_write(qual, pcp, val) \ -do { \ - *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val); \ -} while (0) - #define raw_cpu_read_1(pcp) __raw_cpu_read(, pcp) #define raw_cpu_read_2(pcp) __raw_cpu_read(, pcp) #define raw_cpu_read_4(pcp) __raw_cpu_read(, pcp) From a50ea641296699af1947336c2e75f6234f53548a Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 30 Apr 2024 11:17:22 +0200 Subject: [PATCH 03/10] x86/percpu: Unify percpu read-write accessors Redefine percpu_from_op() and percpu_to_op() as __raw_cpu_read() and __raw_cpu_write(). Unify __raw_cpu_{read,write}() macros between configs w/ and w/o USE_X86_SEG_SUPPORT in order to unify {raw,this}_cpu_{read,write}_N() accessors between configs. No functional change intended.
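To see what the unification buys, here is a user-space model of the two shapes that __raw_cpu_read() now takes under one name -- a plain C load when the compiler supports segment qualifiers, and an explicit MOV through inline asm otherwise. This is a sketch, not the kernel macros themselves: it assumes x86 with GCC, and ordinary memory stands in for the gs/fs-based per-CPU area:

  #include <stdio.h>

  static unsigned int var = 42;

  /* CONFIG_USE_X86_SEG_SUPPORT flavour: a plain (possibly
   * volatile-qualified) C load, visible to the optimizer.
   */
  static unsigned int read_c(void)
  {
          return *(volatile unsigned int *)&var;
  }

  /* !CONFIG_USE_X86_SEG_SUPPORT flavour: an explicit MOV via inline
   * asm, opaque to the optimizer; the kernel's "qual" argument (empty
   * or volatile) goes in front of the asm keyword.
   */
  static unsigned int read_asm(void)
  {
          unsigned int val;

          asm("movl %[var], %[val]"
              : [val] "=r" (val)
              : [var] "m" (var));
          return val;
  }

  int main(void)
  {
          printf("%u %u\n", read_c(), read_asm());
          return 0;
  }

Either way, callers now spell it __raw_cpu_read(size, qual, pcp), which is what lets the {raw,this}_cpu_{read,write}_N() definitions below collapse into a single set.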
Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Link: https://lore.kernel.org/r/20240430091833.196482-3-ubizjak@gmail.com --- arch/x86/include/asm/percpu.h | 72 ++++++++++++----------------------- 1 file changed, 25 insertions(+), 47 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 08113a2e5377..f360ac5fccde 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -146,28 +146,28 @@ #ifdef CONFIG_USE_X86_SEG_SUPPORT -#define __raw_cpu_read(qual, pcp) \ +#define __raw_cpu_read(size, qual, pcp) \ ({ \ *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)); \ }) -#define __raw_cpu_write(qual, pcp, val) \ +#define __raw_cpu_write(size, qual, pcp, val) \ do { \ *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val); \ } while (0) #else /* CONFIG_USE_X86_SEG_SUPPORT */ -#define percpu_from_op(size, qual, op, _var) \ +#define __raw_cpu_read(size, qual, _var) \ ({ \ __pcpu_type_##size pfo_val__; \ - asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \ + asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), "%[val]") \ : [val] __pcpu_reg_##size("=", pfo_val__) \ : [var] "m" (__my_cpu_var(_var))); \ (typeof(_var))(unsigned long) pfo_val__; \ }) -#define percpu_to_op(size, qual, op, _var, _val) \ +#define __raw_cpu_write(size, qual, _var, _val) \ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ if (0) { \ @@ -175,7 +175,7 @@ do { \ pto_tmp__ = (_val); \ (void)pto_tmp__; \ } \ - asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \ + asm qual(__pcpu_op2_##size("mov", "%[val]", __percpu_arg([var])) \ : [var] "+m" (__my_cpu_var(_var)) \ : [val] __pcpu_reg_imm_##size(pto_val__)); \ } while (0) @@ -448,54 +448,32 @@ do { \ */ #define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp) -#ifdef CONFIG_USE_X86_SEG_SUPPORT -#define raw_cpu_read_1(pcp) __raw_cpu_read(, pcp) -#define raw_cpu_read_2(pcp) __raw_cpu_read(, pcp) -#define raw_cpu_read_4(pcp) __raw_cpu_read(, pcp) -#define raw_cpu_write_1(pcp, val) __raw_cpu_write(, pcp, val) -#define raw_cpu_write_2(pcp, val) __raw_cpu_write(, pcp, val) -#define raw_cpu_write_4(pcp, val) __raw_cpu_write(, pcp, val) - -#define this_cpu_read_1(pcp) __raw_cpu_read(volatile, pcp) -#define this_cpu_read_2(pcp) __raw_cpu_read(volatile, pcp) -#define this_cpu_read_4(pcp) __raw_cpu_read(volatile, pcp) -#define this_cpu_write_1(pcp, val) __raw_cpu_write(volatile, pcp, val) -#define this_cpu_write_2(pcp, val) __raw_cpu_write(volatile, pcp, val) -#define this_cpu_write_4(pcp, val) __raw_cpu_write(volatile, pcp, val) +#define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp) +#define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp) +#define raw_cpu_read_4(pcp) __raw_cpu_read(4, , pcp) +#define raw_cpu_write_1(pcp, val) __raw_cpu_write(1, , pcp, val) +#define raw_cpu_write_2(pcp, val) __raw_cpu_write(2, , pcp, val) +#define raw_cpu_write_4(pcp, val) __raw_cpu_write(4, , pcp, val) + +#define this_cpu_read_1(pcp) __raw_cpu_read(1, volatile, pcp) +#define this_cpu_read_2(pcp) __raw_cpu_read(2, volatile, pcp) +#define this_cpu_read_4(pcp) __raw_cpu_read(4, volatile, pcp) +#define this_cpu_write_1(pcp, val) __raw_cpu_write(1, volatile, pcp, val) +#define this_cpu_write_2(pcp, val) __raw_cpu_write(2, volatile, pcp, val) +#define this_cpu_write_4(pcp, val) __raw_cpu_write(4, volatile, pcp, val) #ifdef CONFIG_X86_64 -#define raw_cpu_read_8(pcp) __raw_cpu_read(, pcp) -#define raw_cpu_write_8(pcp, val) 
__raw_cpu_write(, pcp, val) +#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp) +#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val) -#define this_cpu_read_8(pcp) __raw_cpu_read(volatile, pcp) -#define this_cpu_write_8(pcp, val) __raw_cpu_write(volatile, pcp, val) +#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp) +#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val) #endif -#define this_cpu_read_const(pcp) __raw_cpu_read(, pcp) +#ifdef CONFIG_USE_X86_SEG_SUPPORT +#define this_cpu_read_const(pcp) __raw_cpu_read(, , pcp) #else /* CONFIG_USE_X86_SEG_SUPPORT */ -#define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp) -#define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp) -#define raw_cpu_read_4(pcp) percpu_from_op(4, , "mov", pcp) -#define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val) -#define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val) -#define raw_cpu_write_4(pcp, val) percpu_to_op(4, , "mov", (pcp), val) - -#define this_cpu_read_1(pcp) percpu_from_op(1, volatile, "mov", pcp) -#define this_cpu_read_2(pcp) percpu_from_op(2, volatile, "mov", pcp) -#define this_cpu_read_4(pcp) percpu_from_op(4, volatile, "mov", pcp) -#define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val) -#define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val) -#define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val) - -#ifdef CONFIG_X86_64 -#define raw_cpu_read_8(pcp) percpu_from_op(8, , "mov", pcp) -#define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val) - -#define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp) -#define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val) -#endif - /* * The generic per-cpu infrastrucutre is not suitable for * reading const-qualified variables. From 539615de7004a46778020183622856f4ca14e4ac Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 30 Apr 2024 11:17:23 +0200 Subject: [PATCH 04/10] x86/percpu: Introduce the __raw_cpu_read_const() macro Introduce the __raw_cpu_read_const() macro to further reduce ifdeffery and differences between configs w/ and w/o USE_X86_SEG_SUPPORT. No functional change intended. Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Link: https://lore.kernel.org/r/20240430091833.196482-4-ubizjak@gmail.com --- arch/x86/include/asm/percpu.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index f360ac5fccde..d20255138426 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -156,6 +156,8 @@ do { \ *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val); \ } while (0) +#define __raw_cpu_read_const(pcp) __raw_cpu_read(, , pcp) + #else /* CONFIG_USE_X86_SEG_SUPPORT */ #define __raw_cpu_read(size, qual, _var) \ @@ -180,6 +182,12 @@ do { \ : [val] __pcpu_reg_imm_##size(pto_val__)); \ } while (0) +/* + * The generic per-cpu infrastrucutre is not suitable for + * reading const-qualified variables. 
+ */ +#define __raw_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; }) + #endif /* CONFIG_USE_X86_SEG_SUPPORT */ #define percpu_stable_op(size, op, _var) \ @@ -470,16 +478,7 @@ do { \ #define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val) #endif -#ifdef CONFIG_USE_X86_SEG_SUPPORT -#define this_cpu_read_const(pcp) __raw_cpu_read(, , pcp) -#else /* CONFIG_USE_X86_SEG_SUPPORT */ - -/* - * The generic per-cpu infrastrucutre is not suitable for - * reading const-qualified variables. - */ -#define this_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; }) -#endif /* CONFIG_USE_X86_SEG_SUPPORT */ +#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp) #define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp) #define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp) From 1fe67aee8ab3fdab4357afc983a9e9ff3892d694 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 30 Apr 2024 11:17:24 +0200 Subject: [PATCH 05/10] x86/percpu: Fix operand constraint modifier in __raw_cpu_write() __raw_cpu_write() with !USE_X86_SEG_SUPPORT config uses read/write operand constraint modifier "+" for its memory location. This signals the compiler that the location is both read and written by the asm. This is not true, because MOV insn only writes to the output. Correct the modifier to "=" to inform the compiler that the memory location is only written to. This also prevents the compiler from value tracking the undefined value from the uninitialized memory. Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Link: https://lore.kernel.org/r/20240430091833.196482-5-ubizjak@gmail.com --- arch/x86/include/asm/percpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index d20255138426..c77393cd0273 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -178,7 +178,7 @@ do { \ (void)pto_tmp__; \ } \ asm qual(__pcpu_op2_##size("mov", "%[val]", __percpu_arg([var])) \ - : [var] "+m" (__my_cpu_var(_var)) \ + : [var] "=m" (__my_cpu_var(_var)) \ : [val] __pcpu_reg_imm_##size(pto_val__)); \ } while (0) From 48908919c9062bf9472def7389dd7cd9c6a45b70 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 20 May 2024 10:09:24 +0200 Subject: [PATCH 06/10] x86/percpu: Rename percpu_stable_op() to __raw_cpu_read_stable() Rename percpu_stable_op() to __raw_cpu_read_stable() to be in line with other read/write percpu accessors. No functional change intended. 
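The renamed macro's trick is worth spelling out: unlike __raw_cpu_read(), the stable variant is a non-volatile asm whose only input is the variable's address (an "i" constraint in the kernel), so the compiler may merge repeated reads into one. A user-space sketch of the difference (x86-64 GCC assumed; plain memory stands in for the per-CPU segment, and the sketch uses an "r" input where the kernel uses "i", to keep it simple in user space):

  static unsigned long var;

  /* Model of this_cpu_read(): volatile asm with an "m" input, so
   * every call reloads the value from memory.
   */
  static inline unsigned long read_fresh(void)
  {
          unsigned long val;

          asm volatile("movq %[var], %[val]"
                       : [val] "=r" (val)
                       : [var] "m" (var));
          return val;
  }

  /* Model of this_cpu_read_stable(): only the address is an input and
   * the asm is not volatile, so the compiler may treat the result as
   * a pure function of a constant and cache it across uses -- safe
   * only for values that really are stable, such as per-thread data.
   */
  static inline unsigned long read_stable(void)
  {
          unsigned long val;

          asm("movq (%[ptr]), %[val]"
              : [val] "=r" (val)
              : [ptr] "r" (&var));
          return val;
  }

  int main(void)
  {
          var = 7;

          return (int)(read_fresh() + read_stable() + read_stable());
  }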
Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Uros Bizjak Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Link: https://lore.kernel.org/r/20240520080951.121049-1-ubizjak@gmail.com --- arch/x86/include/asm/percpu.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index c77393cd0273..39762fcfe328 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -190,10 +190,10 @@ do { \ -#define percpu_stable_op(size, op, _var) \ +#define __raw_cpu_read_stable(size, _var) \ ({ \ __pcpu_type_##size pfo_val__; \ - asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \ + asm(__pcpu_op2_##size("mov", __force_percpu_arg(a[var]), "%[val]") \ : [val] __pcpu_reg_##size("=", pfo_val__) \ : [var] "i" (&(_var))); \ (typeof(_var))(unsigned long) pfo_val__; \ @@ -480,9 +480,9 @@ do { \ #define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp) -#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp) -#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp) -#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp) +#define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp) +#define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp) +#define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp) #define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val) #define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val) @@ -535,7 +535,7 @@ do { \ * 32 bit must fall back to generic operations. */ #ifdef CONFIG_X86_64 -#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp) +#define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp) #define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val) #define raw_cpu_and_8(pcp, val) percpu_binary_op(8, , "and", (pcp), val) From 47c9dbd2fb5f98453840e18ebced9138ec8b4cc5 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 20 May 2024 10:09:25 +0200 Subject: [PATCH 07/10] x86/percpu: Move some percpu accessors around to reduce ifdeffery Move some percpu accessors around, mainly to reduce ifdeffery and improve readability by following dependencies between accessors. No functional change intended. Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Link: https://lore.kernel.org/r/20240520080951.121049-2-ubizjak@gmail.com --- arch/x86/include/asm/percpu.h | 40 +++++++++++++++++------------------ 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 39762fcfe328..0f0d8973f8df 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -445,17 +445,6 @@ do { \ #define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval) #endif -/* - * this_cpu_read() makes gcc load the percpu variable every time it is - * accessed while this_cpu_read_stable() allows the value to be cached. - * this_cpu_read_stable() is more efficient and can be used if its value - * is guaranteed to be valid across cpus. The current users include - * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are - * actually per-thread variables implemented as per-CPU variables and - * thus stable for the duration of the respective task.
- */ -#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp) - #define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp) #define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp) #define raw_cpu_read_4(pcp) __raw_cpu_read(4, , pcp) @@ -470,16 +459,6 @@ do { \ #define this_cpu_write_2(pcp, val) __raw_cpu_write(2, volatile, pcp, val) #define this_cpu_write_4(pcp, val) __raw_cpu_write(4, volatile, pcp, val) -#ifdef CONFIG_X86_64 -#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp) -#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val) - -#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp) -#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val) -#endif - -#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp) - #define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp) #define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp) #define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp) @@ -535,6 +514,12 @@ do { \ * 32 bit must fall back to generic operations. */ #ifdef CONFIG_X86_64 +#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp) +#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val) + +#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp) +#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val) + #define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp) #define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val) @@ -561,6 +546,19 @@ do { \ #define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp) #endif +#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp) + +/* + * this_cpu_read() makes gcc load the percpu variable every time it is + * accessed while this_cpu_read_stable() allows the value to be cached. + * this_cpu_read_stable() is more efficient and can be used if its value + * is guaranteed to be valid across cpus. The current users include + * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are + * actually per-thread variables implemented as per-CPU variables and + * thus stable for the duration of the respective task. + */ +#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp) + #define x86_this_cpu_constant_test_bit(_nr, _var) \ ({ \ unsigned long __percpu *addr__ = \ From 61d73e4f7d538f3907d954a531169e8164aef56b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 May 2024 10:22:39 +0200 Subject: [PATCH 08/10] x86/percpu: Clean up a bit - Fix misc typos - There's 4 variants of the same spelling right now: 'per-CPU', 'per CPU', 'percpu' and 'per-cpu' Standardize on 'per-CPU' only. - s/makes gcc load /makes the compiler load - Instead of: #ifdef CONFIG_XXXX #define YYYY FOO #else #define YYYY BAR #endif Use the slightly more readable form of: #ifdef CONFIG_XXXX # define YYYY FOO #else # define YYYY BAR #endif - Standardize & expand '#else' and '#endif' comments - Fix comment style - Capitalize x86 instruction names in comments No change in code. 
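Taken together, the conventions after this patch look like this (composite sketch assembled from the diff below; the pairing of the two examples is illustrative):

  #ifdef CONFIG_X86_64
  # define __percpu_seg gs
  #else
  # define __percpu_seg fs
  #endif

  #ifdef CONFIG_USE_X86_SEG_SUPPORT
  /* ... */
  #else /* !CONFIG_USE_X86_SEG_SUPPORT: */
  /* ... */
  #endif /* CONFIG_USE_X86_SEG_SUPPORT */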
Signed-off-by: Ingo Molnar Cc: Uros Bizjak Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org --- arch/x86/include/asm/percpu.h | 91 +++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 41 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0f0d8973f8df..b424cb1008f7 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -3,30 +3,30 @@ #define _ASM_X86_PERCPU_H #ifdef CONFIG_X86_64 -#define __percpu_seg gs -#define __percpu_rel (%rip) +# define __percpu_seg gs +# define __percpu_rel (%rip) #else -#define __percpu_seg fs -#define __percpu_rel +# define __percpu_seg fs +# define __percpu_rel #endif #ifdef __ASSEMBLY__ #ifdef CONFIG_SMP -#define __percpu %__percpu_seg: +# define __percpu %__percpu_seg: #else -#define __percpu +# define __percpu #endif #define PER_CPU_VAR(var) __percpu(var)__percpu_rel #ifdef CONFIG_X86_64_SMP -#define INIT_PER_CPU_VAR(var) init_per_cpu__##var +# define INIT_PER_CPU_VAR(var) init_per_cpu__##var #else -#define INIT_PER_CPU_VAR(var) var +# define INIT_PER_CPU_VAR(var) var #endif -#else /* ...!ASSEMBLY */ +#else /* !__ASSEMBLY__: */ #include #include @@ -37,19 +37,19 @@ #ifdef CONFIG_CC_HAS_NAMED_AS #ifdef __CHECKER__ -#define __seg_gs __attribute__((address_space(__seg_gs))) -#define __seg_fs __attribute__((address_space(__seg_fs))) +# define __seg_gs __attribute__((address_space(__seg_gs))) +# define __seg_fs __attribute__((address_space(__seg_fs))) #endif #ifdef CONFIG_X86_64 -#define __percpu_seg_override __seg_gs +# define __percpu_seg_override __seg_gs #else -#define __percpu_seg_override __seg_fs +# define __percpu_seg_override __seg_fs #endif #define __percpu_prefix "" -#else /* CONFIG_CC_HAS_NAMED_AS */ +#else /* !CONFIG_CC_HAS_NAMED_AS: */ #define __percpu_seg_override #define __percpu_prefix "%%"__stringify(__percpu_seg)":" @@ -80,7 +80,8 @@ #define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel -#else /* CONFIG_SMP */ +#else /* !CONFIG_SMP: */ + #define __percpu_seg_override #define __percpu_prefix "" #define __force_percpu_prefix "" @@ -96,7 +97,7 @@ #define __force_percpu_arg(x) __force_percpu_prefix "%" #x /* - * Initialized pointers to per-cpu variables needed for the boot + * Initialized pointers to per-CPU variables needed for the boot * processor need to use these macros to get the proper address * offset from __per_cpu_load on SMP. * @@ -106,13 +107,15 @@ extern typeof(var) init_per_cpu_var(var) #ifdef CONFIG_X86_64_SMP -#define init_per_cpu_var(var) init_per_cpu__##var +# define init_per_cpu_var(var) init_per_cpu__##var #else -#define init_per_cpu_var(var) var +# define init_per_cpu_var(var) var #endif -/* For arch-specific code, we can use direct single-insn ops (they - * don't give an lvalue though). */ +/* + * For arch-specific code, we can use direct single-insn ops (they + * don't give an lvalue though). + */ #define __pcpu_type_1 u8 #define __pcpu_type_2 u16 @@ -158,7 +161,7 @@ do { \ #define __raw_cpu_read_const(pcp) __raw_cpu_read(, , pcp) -#else /* CONFIG_USE_X86_SEG_SUPPORT */ +#else /* !CONFIG_USE_X86_SEG_SUPPORT: */ #define __raw_cpu_read(size, qual, _var) \ ({ \ @@ -183,7 +186,7 @@ do { \ } while (0) /* - * The generic per-cpu infrastrucutre is not suitable for + * The generic per-CPU infrastrucutre is not suitable for * reading const-qualified variables. 
*/ #define __raw_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; }) @@ -219,7 +222,7 @@ do { \ } while (0) /* - * Generate a percpu add to memory instruction and optimize code + * Generate a per-CPU add to memory instruction and optimize code * if one is added or subtracted. */ #define percpu_add_op(size, qual, var, val) \ @@ -266,9 +269,9 @@ do { \ }) /* - * this_cpu_xchg() is implemented using cmpxchg without a lock prefix. - * xchg is expensive due to the implied lock prefix. The processor - * cannot prefetch cachelines if xchg is used. + * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix. + * XCHG is expensive due to the implied LOCK prefix. The processor + * cannot prefetch cachelines if XCHG is used. */ #define this_percpu_xchg_op(_var, _nval) \ ({ \ @@ -278,8 +281,8 @@ do { \ }) /* - * cmpxchg has no such implied lock semantics as a result it is much - * more efficient for cpu local operations. + * CMPXCHG has no such implied lock semantics as a result it is much + * more efficient for CPU-local operations. */ #define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \ ({ \ @@ -314,6 +317,7 @@ do { \ }) #if defined(CONFIG_X86_32) && !defined(CONFIG_UML) + #define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \ ({ \ union { \ @@ -374,7 +378,8 @@ do { \ #define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval) #define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval) -#endif + +#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */ #ifdef CONFIG_X86_64 #define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval); @@ -443,7 +448,8 @@ do { \ #define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval) #define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval) -#endif + +#endif /* CONFIG_X86_64 */ #define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp) #define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp) @@ -510,8 +516,8 @@ do { \ #define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval) /* - * Per cpu atomic 64 bit operations are only available under 64 bit. - * 32 bit must fall back to generic operations. + * Per-CPU atomic 64-bit operations are only available under 64-bit kernels. + * 32-bit kernels must fall back to generic operations. */ #ifdef CONFIG_X86_64 #define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp) @@ -539,20 +545,23 @@ do { \ #define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval) #define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp) -#else -/* There is no generic 64 bit read stable operation for 32 bit targets. */ + +#else /* !CONFIG_X86_64: */ + +/* There is no generic 64-bit read stable operation for 32-bit targets. */ #define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; }) #define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp) -#endif + +#endif /* CONFIG_X86_64 */ #define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp) /* - * this_cpu_read() makes gcc load the percpu variable every time it is - * accessed while this_cpu_read_stable() allows the value to be cached. + * this_cpu_read() makes the compiler load the per-CPU variable every time + * it is accessed while this_cpu_read_stable() allows the value to be cached. * this_cpu_read_stable() is more efficient and can be used if its value - * is guaranteed to be valid across cpus. 
The current users include + * is guaranteed to be valid across CPUs. The current users include * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are * actually per-thread variables implemented as per-CPU variables and * thus stable for the duration of the respective task. @@ -626,12 +635,12 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off); #define early_per_cpu_ptr(_name) (_name##_early_ptr) #define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) -#define early_per_cpu(_name, _cpu) \ +#define early_per_cpu(_name, _cpu) \ *(early_per_cpu_ptr(_name) ? \ &early_per_cpu_ptr(_name)[_cpu] : \ &per_cpu(_name, _cpu)) -#else /* !CONFIG_SMP */ +#else /* !CONFIG_SMP: */ #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ DEFINE_PER_CPU(_type, _name) = _initvalue @@ -651,6 +660,6 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off); #define early_per_cpu_ptr(_name) NULL /* no early_per_cpu_map() */ -#endif /* !CONFIG_SMP */ +#endif /* CONFIG_SMP */ #endif /* _ASM_X86_PERCPU_H */ From 9130ea06163fc229665b9ec4666de9f4ef68284d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 20 May 2024 10:45:06 +0200 Subject: [PATCH 09/10] x86/percpu: Clean up vertical alignment details - Fix/unify misc vertical alignment inconsistencies - Make CPP macros look a bit more like C code by adding an empty line after local variable declaration blocks, and before final rvalue statements. No change in code. Signed-off-by: Ingo Molnar Cc: Uros Bizjak Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org --- arch/x86/include/asm/percpu.h | 321 ++++++++++++++++++---------------- 1 file changed, 171 insertions(+), 150 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index b424cb1008f7..c55a79d5feae 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -68,11 +68,12 @@ * sizeof(this_cpu_off) becames 4. */ #ifndef BUILD_VDSO32_64 -#define arch_raw_cpu_ptr(_ptr) \ -({ \ - unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \ - tcp_ptr__ += (__force unsigned long)(_ptr); \ - (typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \ +#define arch_raw_cpu_ptr(_ptr) \ +({ \ + unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \ + \ + tcp_ptr__ += (__force unsigned long)(_ptr); \ + (typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \ }) #else #define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; }) @@ -117,35 +118,35 @@ * don't give an lvalue though). 
*/ -#define __pcpu_type_1 u8 -#define __pcpu_type_2 u16 -#define __pcpu_type_4 u32 -#define __pcpu_type_8 u64 +#define __pcpu_type_1 u8 +#define __pcpu_type_2 u16 +#define __pcpu_type_4 u32 +#define __pcpu_type_8 u64 -#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff)) -#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff)) -#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff)) -#define __pcpu_cast_8(val) ((u64)(val)) +#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff)) +#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff)) +#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff)) +#define __pcpu_cast_8(val) ((u64)(val)) -#define __pcpu_op1_1(op, dst) op "b " dst -#define __pcpu_op1_2(op, dst) op "w " dst -#define __pcpu_op1_4(op, dst) op "l " dst -#define __pcpu_op1_8(op, dst) op "q " dst +#define __pcpu_op1_1(op, dst) op "b " dst +#define __pcpu_op1_2(op, dst) op "w " dst +#define __pcpu_op1_4(op, dst) op "l " dst +#define __pcpu_op1_8(op, dst) op "q " dst #define __pcpu_op2_1(op, src, dst) op "b " src ", " dst #define __pcpu_op2_2(op, src, dst) op "w " src ", " dst #define __pcpu_op2_4(op, src, dst) op "l " src ", " dst #define __pcpu_op2_8(op, src, dst) op "q " src ", " dst -#define __pcpu_reg_1(mod, x) mod "q" (x) -#define __pcpu_reg_2(mod, x) mod "r" (x) -#define __pcpu_reg_4(mod, x) mod "r" (x) -#define __pcpu_reg_8(mod, x) mod "r" (x) +#define __pcpu_reg_1(mod, x) mod "q" (x) +#define __pcpu_reg_2(mod, x) mod "r" (x) +#define __pcpu_reg_4(mod, x) mod "r" (x) +#define __pcpu_reg_8(mod, x) mod "r" (x) -#define __pcpu_reg_imm_1(x) "qi" (x) -#define __pcpu_reg_imm_2(x) "ri" (x) -#define __pcpu_reg_imm_4(x) "ri" (x) -#define __pcpu_reg_imm_8(x) "re" (x) +#define __pcpu_reg_imm_1(x) "qi" (x) +#define __pcpu_reg_imm_2(x) "ri" (x) +#define __pcpu_reg_imm_4(x) "ri" (x) +#define __pcpu_reg_imm_8(x) "re" (x) #ifdef CONFIG_USE_X86_SEG_SUPPORT @@ -166,15 +167,18 @@ do { \ #define __raw_cpu_read(size, qual, _var) \ ({ \ __pcpu_type_##size pfo_val__; \ + \ asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), "%[val]") \ : [val] __pcpu_reg_##size("=", pfo_val__) \ : [var] "m" (__my_cpu_var(_var))); \ + \ (typeof(_var))(unsigned long) pfo_val__; \ }) #define __raw_cpu_write(size, qual, _var, _val) \ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ + \ if (0) { \ typeof(_var) pto_tmp__; \ pto_tmp__ = (_val); \ @@ -196,9 +200,11 @@ do { \ #define __raw_cpu_read_stable(size, _var) \ ({ \ __pcpu_type_##size pfo_val__; \ + \ asm(__pcpu_op2_##size("mov", __force_percpu_arg(a[var]), "%[val]") \ : [val] __pcpu_reg_##size("=", pfo_val__) \ : [var] "i" (&(_var))); \ + \ (typeof(_var))(unsigned long) pfo_val__; \ }) @@ -211,6 +217,7 @@ do { \ #define percpu_binary_op(size, qual, op, _var, _val) \ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ + \ if (0) { \ typeof(_var) pto_tmp__; \ pto_tmp__ = (_val); \ @@ -230,6 +237,7 @@ do { \ const int pao_ID__ = (__builtin_constant_p(val) && \ ((val) == 1 || (val) == -1)) ? 
\ (int)(val) : 0; \ + \ if (0) { \ typeof(var) pao_tmp__; \ pao_tmp__ = (val); \ @@ -249,6 +257,7 @@ do { \ #define percpu_add_return_op(size, qual, _var, _val) \ ({ \ __pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \ + \ asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \ __percpu_arg([var])) \ : [tmp] __pcpu_reg_##size("+", paro_tmp__), \ @@ -264,7 +273,9 @@ do { \ #define raw_percpu_xchg_op(_var, _nval) \ ({ \ typeof(_var) pxo_old__ = raw_cpu_read(_var); \ + \ raw_cpu_write(_var, _nval); \ + \ pxo_old__; \ }) @@ -276,7 +287,9 @@ do { \ #define this_percpu_xchg_op(_var, _nval) \ ({ \ typeof(_var) pxo_old__ = this_cpu_read(_var); \ + \ do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \ + \ pxo_old__; \ }) @@ -288,12 +301,14 @@ do { \ ({ \ __pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \ __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \ + \ asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \ __percpu_arg([var])) \ : [oval] "+a" (pco_old__), \ [var] "+m" (__my_cpu_var(_var)) \ : [nval] __pcpu_reg_##size(, pco_new__) \ : "memory"); \ + \ (typeof(_var))(unsigned long) pco_old__; \ }) @@ -303,6 +318,7 @@ do { \ __pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \ __pcpu_type_##size pco_old__ = *pco_oval__; \ __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \ + \ asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \ __percpu_arg([var])) \ CC_SET(z) \ @@ -313,6 +329,7 @@ do { \ : "memory"); \ if (unlikely(!success)) \ *pco_oval__ = pco_old__; \ + \ likely(success); \ }) @@ -343,8 +360,8 @@ do { \ old__.var; \ }) -#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, , pcp, oval, nval) -#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, volatile, pcp, oval, nval) +#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, , pcp, oval, nval) +#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, volatile, pcp, oval, nval) #define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval) \ ({ \ @@ -373,6 +390,7 @@ do { \ : "memory"); \ if (unlikely(!success)) \ *_oval = old__.var; \ + \ likely(success); \ }) @@ -382,8 +400,8 @@ do { \ #endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */ #ifdef CONFIG_X86_64 -#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval); -#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval); +#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval); +#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval); #define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval); #define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval); @@ -413,8 +431,8 @@ do { \ old__.var; \ }) -#define raw_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, , pcp, oval, nval) -#define this_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, volatile, pcp, oval, nval) +#define raw_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, , pcp, oval, nval) +#define this_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, volatile, pcp, oval, nval) #define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval) \ ({ \ @@ -451,66 +469,66 @@ do { \ #endif /* CONFIG_X86_64 */ -#define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp) -#define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp) -#define raw_cpu_read_4(pcp) __raw_cpu_read(4, , pcp) -#define raw_cpu_write_1(pcp, val) 
__raw_cpu_write(1, , pcp, val) -#define raw_cpu_write_2(pcp, val) __raw_cpu_write(2, , pcp, val) -#define raw_cpu_write_4(pcp, val) __raw_cpu_write(4, , pcp, val) - -#define this_cpu_read_1(pcp) __raw_cpu_read(1, volatile, pcp) -#define this_cpu_read_2(pcp) __raw_cpu_read(2, volatile, pcp) -#define this_cpu_read_4(pcp) __raw_cpu_read(4, volatile, pcp) -#define this_cpu_write_1(pcp, val) __raw_cpu_write(1, volatile, pcp, val) -#define this_cpu_write_2(pcp, val) __raw_cpu_write(2, volatile, pcp, val) -#define this_cpu_write_4(pcp, val) __raw_cpu_write(4, volatile, pcp, val) - -#define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp) -#define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp) -#define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp) - -#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val) -#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val) -#define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val) -#define raw_cpu_and_1(pcp, val) percpu_binary_op(1, , "and", (pcp), val) -#define raw_cpu_and_2(pcp, val) percpu_binary_op(2, , "and", (pcp), val) -#define raw_cpu_and_4(pcp, val) percpu_binary_op(4, , "and", (pcp), val) -#define raw_cpu_or_1(pcp, val) percpu_binary_op(1, , "or", (pcp), val) -#define raw_cpu_or_2(pcp, val) percpu_binary_op(2, , "or", (pcp), val) -#define raw_cpu_or_4(pcp, val) percpu_binary_op(4, , "or", (pcp), val) -#define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val) -#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val) -#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val) - -#define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val) -#define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val) -#define this_cpu_add_4(pcp, val) percpu_add_op(4, volatile, (pcp), val) -#define this_cpu_and_1(pcp, val) percpu_binary_op(1, volatile, "and", (pcp), val) -#define this_cpu_and_2(pcp, val) percpu_binary_op(2, volatile, "and", (pcp), val) -#define this_cpu_and_4(pcp, val) percpu_binary_op(4, volatile, "and", (pcp), val) -#define this_cpu_or_1(pcp, val) percpu_binary_op(1, volatile, "or", (pcp), val) -#define this_cpu_or_2(pcp, val) percpu_binary_op(2, volatile, "or", (pcp), val) -#define this_cpu_or_4(pcp, val) percpu_binary_op(4, volatile, "or", (pcp), val) -#define this_cpu_xchg_1(pcp, nval) this_percpu_xchg_op(pcp, nval) -#define this_cpu_xchg_2(pcp, nval) this_percpu_xchg_op(pcp, nval) -#define this_cpu_xchg_4(pcp, nval) this_percpu_xchg_op(pcp, nval) - -#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val) -#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val) -#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val) -#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, , pcp, oval, nval) -#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, , pcp, oval, nval) -#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, , pcp, oval, nval) -#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, , pcp, ovalp, nval) -#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, , pcp, ovalp, nval) -#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, , pcp, ovalp, nval) - -#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val) -#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val) -#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val) -#define 
this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval) -#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval) -#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval) +#define raw_cpu_read_1(pcp) __raw_cpu_read(1, , pcp) +#define raw_cpu_read_2(pcp) __raw_cpu_read(2, , pcp) +#define raw_cpu_read_4(pcp) __raw_cpu_read(4, , pcp) +#define raw_cpu_write_1(pcp, val) __raw_cpu_write(1, , pcp, val) +#define raw_cpu_write_2(pcp, val) __raw_cpu_write(2, , pcp, val) +#define raw_cpu_write_4(pcp, val) __raw_cpu_write(4, , pcp, val) + +#define this_cpu_read_1(pcp) __raw_cpu_read(1, volatile, pcp) +#define this_cpu_read_2(pcp) __raw_cpu_read(2, volatile, pcp) +#define this_cpu_read_4(pcp) __raw_cpu_read(4, volatile, pcp) +#define this_cpu_write_1(pcp, val) __raw_cpu_write(1, volatile, pcp, val) +#define this_cpu_write_2(pcp, val) __raw_cpu_write(2, volatile, pcp, val) +#define this_cpu_write_4(pcp, val) __raw_cpu_write(4, volatile, pcp, val) + +#define this_cpu_read_stable_1(pcp) __raw_cpu_read_stable(1, pcp) +#define this_cpu_read_stable_2(pcp) __raw_cpu_read_stable(2, pcp) +#define this_cpu_read_stable_4(pcp) __raw_cpu_read_stable(4, pcp) + +#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val) +#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val) +#define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val) +#define raw_cpu_and_1(pcp, val) percpu_binary_op(1, , "and", (pcp), val) +#define raw_cpu_and_2(pcp, val) percpu_binary_op(2, , "and", (pcp), val) +#define raw_cpu_and_4(pcp, val) percpu_binary_op(4, , "and", (pcp), val) +#define raw_cpu_or_1(pcp, val) percpu_binary_op(1, , "or", (pcp), val) +#define raw_cpu_or_2(pcp, val) percpu_binary_op(2, , "or", (pcp), val) +#define raw_cpu_or_4(pcp, val) percpu_binary_op(4, , "or", (pcp), val) +#define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val) +#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val) +#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val) + +#define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val) +#define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val) +#define this_cpu_add_4(pcp, val) percpu_add_op(4, volatile, (pcp), val) +#define this_cpu_and_1(pcp, val) percpu_binary_op(1, volatile, "and", (pcp), val) +#define this_cpu_and_2(pcp, val) percpu_binary_op(2, volatile, "and", (pcp), val) +#define this_cpu_and_4(pcp, val) percpu_binary_op(4, volatile, "and", (pcp), val) +#define this_cpu_or_1(pcp, val) percpu_binary_op(1, volatile, "or", (pcp), val) +#define this_cpu_or_2(pcp, val) percpu_binary_op(2, volatile, "or", (pcp), val) +#define this_cpu_or_4(pcp, val) percpu_binary_op(4, volatile, "or", (pcp), val) +#define this_cpu_xchg_1(pcp, nval) this_percpu_xchg_op(pcp, nval) +#define this_cpu_xchg_2(pcp, nval) this_percpu_xchg_op(pcp, nval) +#define this_cpu_xchg_4(pcp, nval) this_percpu_xchg_op(pcp, nval) + +#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val) +#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val) +#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val) +#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, , pcp, oval, nval) +#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, , pcp, oval, nval) +#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, , pcp, oval, nval) +#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, , pcp, ovalp, 
nval) +#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, , pcp, ovalp, nval) +#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, , pcp, ovalp, nval) + +#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val) +#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val) +#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val) +#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval) +#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval) +#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval) #define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval) #define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval) #define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval) @@ -520,42 +538,43 @@ do { \ * 32-bit kernels must fall back to generic operations. */ #ifdef CONFIG_X86_64 -#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp) -#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val) - -#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp) -#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val) - -#define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp) - -#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val) -#define raw_cpu_and_8(pcp, val) percpu_binary_op(8, , "and", (pcp), val) -#define raw_cpu_or_8(pcp, val) percpu_binary_op(8, , "or", (pcp), val) -#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val) -#define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval) -#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval) -#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval) - -#define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val) -#define this_cpu_and_8(pcp, val) percpu_binary_op(8, volatile, "and", (pcp), val) -#define this_cpu_or_8(pcp, val) percpu_binary_op(8, volatile, "or", (pcp), val) -#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val) -#define this_cpu_xchg_8(pcp, nval) this_percpu_xchg_op(pcp, nval) -#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval) + +#define raw_cpu_read_8(pcp) __raw_cpu_read(8, , pcp) +#define raw_cpu_write_8(pcp, val) __raw_cpu_write(8, , pcp, val) + +#define this_cpu_read_8(pcp) __raw_cpu_read(8, volatile, pcp) +#define this_cpu_write_8(pcp, val) __raw_cpu_write(8, volatile, pcp, val) + +#define this_cpu_read_stable_8(pcp) __raw_cpu_read_stable(8, pcp) + +#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val) +#define raw_cpu_and_8(pcp, val) percpu_binary_op(8, , "and", (pcp), val) +#define raw_cpu_or_8(pcp, val) percpu_binary_op(8, , "or", (pcp), val) +#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val) +#define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval) +#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval) +#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval) + +#define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val) +#define this_cpu_and_8(pcp, val) percpu_binary_op(8, volatile, "and", (pcp), val) +#define this_cpu_or_8(pcp, val) 
percpu_binary_op(8, volatile, "or", (pcp), val) +#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val) +#define this_cpu_xchg_8(pcp, nval) this_percpu_xchg_op(pcp, nval) +#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval) #define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval) -#define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp) +#define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp) #else /* !CONFIG_X86_64: */ /* There is no generic 64-bit read stable operation for 32-bit targets. */ -#define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; }) +#define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; }) -#define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp) +#define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp) #endif /* CONFIG_X86_64 */ -#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp) +#define this_cpu_read_const(pcp) __raw_cpu_read_const(pcp) /* * this_cpu_read() makes the compiler load the per-CPU variable every time @@ -566,30 +585,31 @@ do { \ * actually per-thread variables implemented as per-CPU variables and * thus stable for the duration of the respective task. */ -#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp) +#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp) #define x86_this_cpu_constant_test_bit(_nr, _var) \ ({ \ unsigned long __percpu *addr__ = \ (unsigned long __percpu *)&(_var) + ((_nr) / BITS_PER_LONG); \ + \ !!((1UL << ((_nr) % BITS_PER_LONG)) & raw_cpu_read(*addr__)); \ }) -#define x86_this_cpu_variable_test_bit(_nr, _var) \ -({ \ - bool oldbit; \ - \ - asm volatile("btl %[nr], " __percpu_arg([var]) \ - CC_SET(c) \ - : CC_OUT(c) (oldbit) \ - : [var] "m" (__my_cpu_var(_var)), \ - [nr] "rI" (_nr)); \ - oldbit; \ +#define x86_this_cpu_variable_test_bit(_nr, _var) \ +({ \ + bool oldbit; \ + \ + asm volatile("btl %[nr], " __percpu_arg([var]) \ + CC_SET(c) \ + : CC_OUT(c) (oldbit) \ + : [var] "m" (__my_cpu_var(_var)), \ + [nr] "rI" (_nr)); \ + oldbit; \ }) -#define x86_this_cpu_test_bit(_nr, _var) \ - (__builtin_constant_p(_nr) \ - ? x86_this_cpu_constant_test_bit(_nr, _var) \ +#define x86_this_cpu_test_bit(_nr, _var) \ + (__builtin_constant_p(_nr) \ + ? x86_this_cpu_constant_test_bit(_nr, _var) \ : x86_this_cpu_variable_test_bit(_nr, _var)) @@ -620,46 +640,47 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off); { [0 ... 
NR_CPUS-1] = _initvalue }; \ __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map -#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ +#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ EXPORT_PER_CPU_SYMBOL(_name) -#define DECLARE_EARLY_PER_CPU(_type, _name) \ - DECLARE_PER_CPU(_type, _name); \ - extern __typeof__(_type) *_name##_early_ptr; \ +#define DECLARE_EARLY_PER_CPU(_type, _name) \ + DECLARE_PER_CPU(_type, _name); \ + extern __typeof__(_type) *_name##_early_ptr; \ extern __typeof__(_type) _name##_early_map[] -#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ - DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \ - extern __typeof__(_type) *_name##_early_ptr; \ +#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ + DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \ + extern __typeof__(_type) *_name##_early_ptr; \ extern __typeof__(_type) _name##_early_map[] -#define early_per_cpu_ptr(_name) (_name##_early_ptr) -#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) -#define early_per_cpu(_name, _cpu) \ - *(early_per_cpu_ptr(_name) ? \ - &early_per_cpu_ptr(_name)[_cpu] : \ +#define early_per_cpu_ptr(_name) (_name##_early_ptr) +#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) + +#define early_per_cpu(_name, _cpu) \ + *(early_per_cpu_ptr(_name) ? \ + &early_per_cpu_ptr(_name)[_cpu] : \ &per_cpu(_name, _cpu)) #else /* !CONFIG_SMP: */ -#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ +#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ DEFINE_PER_CPU(_type, _name) = _initvalue #define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue -#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ +#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ EXPORT_PER_CPU_SYMBOL(_name) -#define DECLARE_EARLY_PER_CPU(_type, _name) \ +#define DECLARE_EARLY_PER_CPU(_type, _name) \ DECLARE_PER_CPU(_type, _name) -#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ +#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ DECLARE_PER_CPU_READ_MOSTLY(_type, _name) -#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu) -#define early_per_cpu_ptr(_name) NULL +#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu) +#define early_per_cpu_ptr(_name) NULL /* no early_per_cpu_map() */ -#endif /* CONFIG_SMP */ +#endif /* !CONFIG_SMP */ #endif /* _ASM_X86_PERCPU_H */ From 47ff30cc1be7bf426c03ecc84371452109b416e4 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 20 May 2024 10:21:14 +0200 Subject: [PATCH 10/10] x86/percpu: Enable named address spaces for all capable GCC versions Enable named address spaces also for GCC 6, GCC 7 and GCC 8 releases. These compilers all produce kernel images that boot without problems. Use compile-time test to detect compiler support for named address spaces. The test passes with GCC 6 as the earliest compiler version where the support for named address spaces was introduced. 
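The probe is tiny enough to show standalone; it is the same pair of declarations the new Kconfig rule pipes into $(CC):

  /*
   * probe.c: compiles only when the compiler implements the x86 named
   * address space qualifiers; "$(CC) -x c probe.c -S -o /dev/null"
   * succeeding is what turns on CC_HAS_NAMED_AS below.
   */
  int __seg_fs fs;
  int __seg_gs gs;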
Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Ard Biesheuvel Link: https://lore.kernel.org/r/20240520082134.121320-1-ubizjak@gmail.com --- arch/x86/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9d16fee6bdb8..c9e0a54f469e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2430,7 +2430,8 @@ source "kernel/livepatch/Kconfig" endmenu config CC_HAS_NAMED_AS - def_bool CC_IS_GCC && GCC_VERSION >= 90100 + def_bool $(success,echo 'int __seg_fs fs; int __seg_gs gs;' | $(CC) -x c - -S -o /dev/null) + depends on CC_IS_GCC config CC_HAS_NAMED_AS_FIXED_SANITIZERS def_bool CC_IS_GCC && GCC_VERSION >= 130300