/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/current.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *  0: 0x8000000000000000	0x0000000000000000
 *  1: 0xfc00000000000000	0xf000000000000000
 * ...
 * 11: 0xfffffffffffffff8	0xfffffffffffffc00
 * 12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *  10 calls
 *   5 returns
 *   3 calls
 *   4 returns
 *   3 calls
 *   ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered that it obfuscates the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS				\
	incq	PER_CPU_VAR(__x86_call_count);
# define CALL_THUNKS_DEBUG_INC_RETS				\
	incq	PER_CPU_VAR(__x86_ret_count);
# define CALL_THUNKS_DEBUG_INC_STUFFS				\
	incq	PER_CPU_VAR(__x86_stuffs_count);
# define CALL_THUNKS_DEBUG_INC_CTXSW				\
	incq	PER_CPU_VAR(__x86_ctxsw_count);
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH					\
	xor	%eax, %eax;					\
	bts	$63, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL				\
	movb	$0xfc, %al;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH					\
	sarq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#define INCREMENT_CALL_DEPTH
#endif

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

/* Bytes reserved for each per-register retpoline thunk. */
#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 *
 * Pushes one entry onto the RSB via a CALL over an int3 speculation
 * trap; architectural execution continues at the local 772 label.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
	int3;					\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
/*
 * x86-64: nr/2 iterations of two stuffed slots each. The stack pointer
 * is readjusted every iteration to drop the two return addresses the
 * calls left behind, and the depth is fully credited once done.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;						\
	CREDIT_CALL_DEPTH				\
	CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop. Unroll all nr slots with .rept and drop the stacked
 * return addresses in a single stack adjustment.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	.rept nr;					\
	__FILL_RETURN_SLOT;				\
	.endr;						\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 *
 * The trailing LFENCE forces the CALL in __FILL_RETURN_SLOT to retire.
 */
#define __FILL_ONE_RETURN				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
	lfence;

#ifdef __ASSEMBLER__

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
	(defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * Emits a conditional CS prefix that is compatible with
 * -mindirect-branch-cs-prefix.
 *
 * The 0x2e (CS segment override) byte is emitted only when \reg is one
 * of r8..r15, matching what the compiler option emits for its own
 * indirect branches through those registers.
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * With retpolines enabled the branch goes through the per-register
 * __x86_indirect_thunk_<reg>; otherwise a plain indirect branch is used.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_MITIGATION_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	/* int3 stops straight-line speculation past the indirect jump. */
	jmp	*%\reg
	int3
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_MITIGATION_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm

 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  *
  * \ftr selects the full RSB stuff (__FILL_RETURN_BUFFER), \ftr2 the
  * single-slot variant (__FILL_ONE_RETURN); with neither feature set
  * the whole sequence is jumped over.
  */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm

/*
 * The CALL to srso_alias_untrain_ret() must be patched in directly at
 * the spot where untraining must be done, ie., srso_alias_untrain_ret()
 * must be the target of a CALL instruction instead of indirectly
 * jumping to a wrapper which then calls it. Therefore, this macro is
 * called outside of __UNTRAIN_RET below, for the time being, before the
 * kernel can support nested alternatives with arbitrary nesting.
 */
.macro CALL_UNTRAIN_RET
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
	/* Patched by feature: X86_FEATURE_UNRET vs. X86_FEATURE_SRSO_ALIAS. */
	ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
		          "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
.endm

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)
	VALIDATE_UNRET_END
	CALL_UNTRAIN_RET
	ALTERNATIVE_2 "",						\
		      "call entry_ibpb", \ibpb_feature,			\
		     __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm

/* Untrain on kernel entry; resets the call depth to "zero calls". */
#define UNTRAIN_RET \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)

/* As UNTRAIN_RET, but gated on the VM-exit IBPB feature flag. */
#define UNTRAIN_RET_VM \
	__UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)

/* Untrain from within a call; depth is reset accounting that call. */
#define UNTRAIN_RET_FROM_CALL \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)


/* Account one call in the per-CPU depth word (asm-side variant). */
.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
	ALTERNATIVE "",							\
		    __stringify(INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

/*
 * Macro to execute VERW insns that mitigate transient data sampling
 * attacks such as MDS or TSA. On affected systems a microcode update
 * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
 * CFLAGS.ZF.
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
 */
.macro __CLEAR_CPU_BUFFERS feature
#ifdef CONFIG_X86_64
	ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
#else
	/*
	 * In 32bit mode, the memory operand must be a %cs reference. The data
	 * segments may not be usable (vm86 mode), and the stack segment may not
	 * be flat (ESPFIX32).
	 */
	ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
#endif
.endm

/* VERW-based buffer clearing on kernel exit paths. */
#define CLEAR_CPU_BUFFERS \
	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF

/* VERW-based buffer clearing on VM-entry paths. */
#define VM_CLEAR_CPU_BUFFERS \
	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM

#ifdef CONFIG_X86_64
/* Clear the branch history buffer in software when the feature is set. */
.macro CLEAR_BRANCH_HISTORY
	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
.endm

/* Same clearing loop, gated on the VM-exit specific feature flag. */
.macro CLEAR_BRANCH_HISTORY_VMEXIT
	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
.endm
#else
/* 32-bit: no BHB clearing; the macros expand to nothing. */
#define CLEAR_BRANCH_HISTORY
#define CLEAR_BRANCH_HISTORY_VMEXIT
#endif

#else /* __ASSEMBLER__ */

/* Bytes reserved for each ITS return thunk. */
#define ITS_THUNK_SIZE	64

/* Fixed-stride per-register thunk arrays, defined in assembly. */
typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
typedef u8 its_thunk_t[ITS_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
extern its_thunk_t	 __x86_indirect_its_thunk_array[];

#ifdef CONFIG_MITIGATION_RETHUNK
extern void __x86_return_thunk(void);
#else
/* Stub so call sites need no #ifdef when return thunks are compiled out. */
static inline void __x86_return_thunk(void) {}
#endif

/*
 * Return thunk declarations, each paired with an empty inline stub so
 * call sites need no #ifdef when the corresponding mitigation is
 * compiled out. The previously trailing unconditional extern
 * declarations of retbleed_return_thunk(), srso_return_thunk() and
 * srso_alias_return_thunk() were redundant duplicates of the gated
 * declarations below and defeated the purpose of the stubs; they have
 * been removed.
 */
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
extern void retbleed_return_thunk(void);
#else
static inline void retbleed_return_thunk(void) {}
#endif

/* Always declared: patched in as a direct CALL target (see CALL_UNTRAIN_RET). */
extern void srso_alias_untrain_ret(void);

#ifdef CONFIG_MITIGATION_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
#else
static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif

#ifdef CONFIG_MITIGATION_ITS
extern void its_return_thunk(void);
#else
static inline void its_return_thunk(void) {}
#endif

/* Entry-path untraining and IBPB helpers, implemented in assembly. */
extern void entry_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
#endif

/* Function pointer selecting the active return thunk at boot. */
extern void (*x86_return_thunk)(void);

/* Warns when the default return thunk is used at runtime. */
extern void __warn_thunk(void);

#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
extern void call_depth_return_thunk(void);

/* C-side counterpart of the asm CALL_DEPTH_ACCOUNT macro, for inline asm. */
#define CALL_DEPTH_ACCOUNT					\
	ALTERNATIVE("",						\
		    __stringify(INCREMENT_CALL_DEPTH),		\
		    X86_FEATURE_CALL_DEPTH)

#ifdef CONFIG_CALL_THUNKS_DEBUG
/* Per-CPU counters incremented by the CALL_THUNKS_DEBUG_INC_* macros. */
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else /* !CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

static inline void call_depth_return_thunk(void) {}
#define CALL_DEPTH_ACCOUNT ""

#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

#ifdef CONFIG_MITIGATION_RETPOLINE

/* Declare the per-register thunk symbols for every GP register. */
#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Emits a conditional CS prefix that is compatible with
 * -mindirect-branch-cs-prefix.
 */
#define __CS_PREFIX(reg)				\
	".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n"	\
	".ifc \\rs," reg "\n"				\
	".byte 0x2e\n"					\
	".endif\n"					\
	".endr\n"

/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
 *
 * NOTE(review): this inner #ifdef is nested inside the identical outer
 * one at the top of this section, so the #else branch looks
 * unreachable — confirm before relying on it.
 */
#ifdef CONFIG_MITIGATION_RETPOLINE
#define CALL_NOSPEC	__CS_PREFIX("%V[thunk_target]")	\
			"call __x86_indirect_thunk_%V[thunk_target]\n"
#else
#define CALL_NOSPEC	"call *%[thunk_target]\n"
#endif

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"    	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif

/* The Spectre V2 mitigation variants, as selected at boot. */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants for userspace tasks. */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants. */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

/*
 * Write @val to @msr, but only on CPUs which have @feature set: the
 * WRMSR is patched in via alternatives, otherwise it is a NOP. The
 * operands are set up unconditionally; the "memory" clobber orders the
 * MSR write against surrounding memory accesses.
 */
static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

/* Value written to MSR_IA32_PRED_CMD to issue a prediction barrier. */
extern u64 x86_pred_cmd;

/* Tracks whether an IBPB is pending before returning to userspace. */
DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user);

/* Issue an IBPB on CPUs where X86_FEATURE_USE_IBPB is set. */
static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
/* Per-CPU cache of the current MSR_IA32_SPEC_CTRL value. */
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * The start/end pair must bracket the firmware call; preemption is
 * disabled in between so the SPEC_CTRL value cannot be switched away.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)

/* Static keys gating the conditional STIBP/IBPB context switch paths. */
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Static key gating CPU buffer clearing when entering idle. */
DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

/* Segment selector used as the VERW memory operand. */
extern u16 x86_verw_sel;

#include <asm/segment.h>

/**
 * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 *
 * On CPUs without the microcode-assisted VERW this is a harmless no-op.
 */
static __always_inline void x86_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
 * and TSA vulnerabilities.
 *
 * Clear CPU buffers if the corresponding static key is enabled
 * (cpu_buf_idle_clear, set when the idle-clear mitigation is active).
 */
static __always_inline void x86_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&cpu_buf_idle_clear))
		x86_clear_cpu_buffers();
}

#endif /* __ASSEMBLER__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
