/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )

#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
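
/*
 * Expansion sketch (editorial illustration, not part of the original
 * header): the trailing "1, 0" arguments let __TLBI_N() count how many
 * caller arguments were supplied and select the matching variant:
 *
 *	__tlbi(vmalle1is);	// __TLBI_N(vmalle1is, 1, 0)   -> n == 0
 *				// -> __TLBI_0: "tlbi vmalle1is"
 *	__tlbi(vale1is, va);	// __TLBI_N(vale1is, va, 1, 0) -> n == 1
 *				// -> __TLBI_1: "tlbi vale1is, %0" with va
 */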

#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)

/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
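
/*
 * Worked example (editorial illustration; the address and ASID values are
 * hypothetical): packing a user VA and ASID into a TLBI operand. Bits
 * [43:0] hold VA[55:12] and bits [63:48] hold the ASID:
 *
 *	__TLBI_VADDR(0x0000aaaabbbcc000, 0x17)
 *		(addr) >> 12		== 0x0000000aaaabbbcc
 *		& GENMASK_ULL(43, 0)	== 0x0000000aaaabbbcc
 *		| (0x17UL << 48)	== 0x0017000aaaabbbcc
 */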

/*
 * Get the translation granule of the system, which is determined by
 * PAGE_SIZE.  Used by the TTL field.
 *  - 4KB	: 1
 *  - 16KB	: 2
 *  - 64KB	: 3
 */
#define TLBI_TTL_TG_4K		1
#define TLBI_TTL_TG_16K		2
#define TLBI_TTL_TG_64K		3

static inline unsigned long get_trans_granule(void)
{
	switch (PAGE_SIZE) {
	case SZ_4K:
		return TLBI_TTL_TG_4K;
	case SZ_16K:
		return TLBI_TTL_TG_16K;
	case SZ_64K:
		return TLBI_TTL_TG_64K;
	default:
		return 0;
	}
}

/*
 * Level-based TLBI operations.
 *
 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
 * the level at which the invalidation must take place. If the level is
 * wrong, no invalidation may take place. In the case where the level
 * cannot be easily determined, the value TLBI_TTL_UNKNOWN will perform
 * a non-hinted invalidation. Any provided level outside the hint range
 * will also cause fall-back to non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)

#define TLBI_TTL_UNKNOWN	INT_MAX

#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) &&	\
	    level >= 0 && level <= 3) {					\
		u64 ttl = level & 3;					\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)

#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)
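
/*
 * Usage sketch (hypothetical call, not from the original header): a
 * last-level invalidation with a level hint. With 4K pages, level 3
 * gives ttl = (TLBI_TTL_TG_4K << 2) | 3 = 0b0111, placed in bits [47:44]
 * of the operand when ARM64_HAS_ARMv8_4_TTL is present; otherwise the
 * hint is not encoded and a plain TLBI is issued:
 *
 *	unsigned long va = __TLBI_VADDR(uaddr, asid);
 *	__tlbi_level(vale1is, va, 3);
 */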

/*
 * This macro creates a properly formatted VA operand for the TLB RANGE. The
 * value bit assignments are:
 *
 * +----------+------+-------+-------+-------+----------------------+
 * |   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 * +----------+------+-------+-------+-------+----------------------+
 * |63      48|47  46|45   44|43   39|38   37|36                   0|
 * +----------+------+-------+-------+-------+----------------------+
 *
 * The address range is determined by the formula:
 * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
 *
 * Note that the first argument, baddr, is pre-shifted: if LPA2 is in use,
 * BADDR holds addr[52:16]; otherwise BADDR holds the page number. See for
 * example ARM DDI 0487J.a section C5.5.60 "TLBI VAE1IS, TLBI VAE1ISNXS,
 * TLB Invalidate by VA, EL1, Inner Shareable".
 */
#define TLBIR_ASID_MASK		GENMASK_ULL(63, 48)
#define TLBIR_TG_MASK		GENMASK_ULL(47, 46)
#define TLBIR_SCALE_MASK	GENMASK_ULL(45, 44)
#define TLBIR_NUM_MASK		GENMASK_ULL(43, 39)
#define TLBIR_TTL_MASK		GENMASK_ULL(38, 37)
#define TLBIR_BADDR_MASK	GENMASK_ULL(36,  0)

#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl)		\
	({								\
		unsigned long __ta = 0;					\
		unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0;	\
		__ta |= FIELD_PREP(TLBIR_BADDR_MASK, baddr);		\
		__ta |= FIELD_PREP(TLBIR_TTL_MASK, __ttl);		\
		__ta |= FIELD_PREP(TLBIR_NUM_MASK, num);		\
		__ta |= FIELD_PREP(TLBIR_SCALE_MASK, scale);		\
		__ta |= FIELD_PREP(TLBIR_TG_MASK, get_trans_granule());	\
		__ta |= FIELD_PREP(TLBIR_ASID_MASK, asid);		\
		__ta;							\
	})
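
/*
 * Worked example (editorial illustration; the field values are
 * hypothetical): a range operand for 4K pages with scale = 0, num = 15,
 * level 3 and ASID 0x17. Per the formula above, it spans
 * (15 + 1) * 2^1 = 32 pages starting at BADDR:
 *
 *	ASID  = 0x17	-> bits [63:48]
 *	TG    = 1 (4K)	-> bits [47:46]
 *	SCALE = 0	-> bits [45:44]
 *	NUM   = 15	-> bits [43:39]
 *	TTL   = 3	-> bits [38:37]
 */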

/* These macros are used by the TLBI RANGE feature. */
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)

/*
 * Generate 'num' values from -1 to 31 with -1 rejected by the
 * __flush_tlb_range_op() loop below. Its return value is only
 * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
 * 'pages' is more than that, you must iterate over the overall
 * range.
 */
#define __TLBI_RANGE_NUM(pages, scale)					\
	({								\
		int __pages = min((pages),				\
				  __TLBI_RANGE_PAGES(31, (scale)));	\
		(__pages >> (5 * (scale) + 1)) - 1;			\
	})
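
/*
 * Worked arithmetic (editorial illustration): at scale 3, num 31, a single
 * range operation covers __TLBI_RANGE_PAGES(31, 3) = 32 << 16 = 2097152
 * pages, i.e. MAX_TLBI_RANGE_PAGES (8GB with 4K pages). For pages = 512 at
 * scale 1, __TLBI_RANGE_NUM() yields (512 >> 6) - 1 = 7, and the resulting
 * range operation covers (7 + 1) << 6 = 512 pages exactly.
 */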

/*
 *	TLB Invalidation
 *	================
 *
 * 	This header file implements the low-level TLB invalidation routines
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *      if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 * 		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->mm'.  Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is equal to false. tlb_level is the level at
 *		which the invalidation must take place. If the level is wrong,
 *		no invalidation may take place. In the case where the level
 *		cannot be easily determined, the value TLBI_TTL_UNKNOWN will
 *		perform a non-hinted invalidation.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}

static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					   unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
						(uaddr & PAGE_MASK) + PAGE_SIZE);
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}

static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	/*
	 * TLB flush deferral is not required on systems affected by
	 * ARM64_WORKAROUND_REPEAT_TLBI, as the __tlbi()/__tlbi_user()
	 * implementation will issue two consecutive TLBI instructions with a
	 * dsb(ish) in between, defeating the purpose of deferral (i.e. saving
	 * the overall 'dsb ish' cost).
	 */
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
		return false;

	return true;
}

static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	__flush_tlb_page_nosync(mm, uaddr);
}

/*
 * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
 * all the previously issued TLBIs targeting mm have completed. But since we
 * can be executing on a remote CPU, a DSB cannot guarantee this like it can
 * for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
 */
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

/*
 * To support batched TLB flushing while unmapping multiple pages, we only
 * issue the TLBI for each page in arch_tlbbatch_add_pending() and wait for
 * completion at the end in arch_tlbbatch_flush(). Since a TLBI has already
 * been issued for each page, only a DSB is needed to synchronise its
 * effect on the other CPUs.
 *
 * This saves the time that would otherwise be spent waiting on a DSB for
 * each page, compared with issuing a TLBI; DSB sequence per page.
 */
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	dsb(ish);
}
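
/*
 * Caller sketch (hypothetical pseudo-caller illustrating the contract;
 * the real logic lives in the core mm batching code): per-page TLBIs are
 * queued without a barrier, then one DSB completes them all:
 *
 *	if (arch_tlbbatch_should_defer(mm)) {
 *		for each page being unmapped		// pseudocode
 *			arch_tlbbatch_add_pending(&batch, mm, uaddr);
 *		arch_tlbbatch_flush(&batch);		// single dsb(ish)
 *	}
 */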

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_DVM_OPS	PTRS_PER_PTE

/*
 * __flush_tlb_range_op - Perform TLBI operation upon a range
 *
 * @op:	TLBI instruction that operates on a range (has 'r' prefix)
 * @start:	The start address of the range
 * @pages:	Range as the number of pages from 'start'
 * @stride:	Flush granularity
 * @asid:	The ASID of the task (0 for IPA instructions)
 * @tlb_level:	Translation Table level hint, if known
 * @tlbi_user:	If 'true', call an additional __tlbi_user()
 *		(typically for user ASIDs). 'false' for IPA instructions
 * @lpa2:	If 'true', the lpa2 scheme is used as set out below
 *
 * When the CPU does not support TLB range operations, flush the TLB
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
 * 1. If FEAT_LPA2 is in use, the start address of a range operation must be
 *    64KB aligned, so flush pages one by one until the alignment is reached
 *    using the non-range operations. This step is skipped if LPA2 is not in
 *    use.
 *
 * 2. The minimum range granularity is decided by 'scale', so multiple range
 *    TLBI operations may be required. Start from scale = 3, flush the largest
 *    possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
 *    requested range, then decrement scale and continue until one or zero pages
 *    are left. We must start from highest scale to ensure 64KB start alignment
 *    is maintained in the LPA2 case.
 *
 * 3. If there is 1 page remaining, flush it through non-range operations. Range
 *    operations can only span an even number of pages. We save this for last to
 *    ensure 64KB start alignment is maintained for the LPA2 case.
 */
#define __flush_tlb_range_op(op, start, pages, stride,			\
				asid, tlb_level, tlbi_user, lpa2)	\
do {									\
	typeof(start) __flush_start = start;				\
	typeof(pages) __flush_pages = pages;				\
	int num = 0;							\
	int scale = 3;							\
	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
	unsigned long addr;						\
									\
	while (__flush_pages > 0) {					\
		if (!system_supports_tlb_range() ||			\
		    __flush_pages == 1 ||				\
		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
			addr = __TLBI_VADDR(__flush_start, asid);	\
			__tlbi_level(op, addr, tlb_level);		\
			if (tlbi_user)					\
				__tlbi_user_level(op, addr, tlb_level);	\
			__flush_start += stride;			\
			__flush_pages -= stride >> PAGE_SHIFT;		\
			continue;					\
		}							\
									\
		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
		if (num >= 0) {						\
			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
						scale, num, tlb_level);	\
			__tlbi(r##op, addr);				\
			if (tlbi_user)					\
				__tlbi_user(r##op, addr);		\
			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
		}							\
		scale--;						\
	}								\
} while (0)
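
/*
 * Worked decomposition (editorial illustration, assuming 4K pages, LPA2
 * disabled and FEAT_TLBIRANGE present): flushing 545 pages issues three
 * operations:
 *
 *	scale 3, 2:	num == -1, nothing issued (545 >> 16 and 545 >> 11
 *			are both zero)
 *	scale 1:	num == 7  -> range op covers  8 * 2^6 = 512 pages
 *	scale 0:	num == 15 -> range op covers 16 * 2^1 =  32 pages
 *	remainder:	1 page, flushed with a single non-range TLBI
 */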

#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());

static inline bool __flush_tlb_range_limit_excess(unsigned long start,
		unsigned long end, unsigned long pages, unsigned long stride)
{
	/*
	 * When the system does not support TLB range-based flush
	 * operations, at most (MAX_DVM_OPS - 1) pages can be handled.
	 * With TLB range-based operations, up to MAX_TLBI_RANGE_PAGES
	 * pages can be handled.
	 */
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_DVM_OPS * stride)) ||
	    pages > MAX_TLBI_RANGE_PAGES)
		return true;

	return false;
}
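
/*
 * Scale of the cut-offs (editorial note, assuming a 4K granule): without
 * FEAT_TLBIRANGE, MAX_DVM_OPS = PTRS_PER_PTE = 512, so ranges of 512
 * strides or more fall back to a full flush; with range operations, the
 * fall-back only triggers beyond MAX_TLBI_RANGE_PAGES = 2097152 pages
 * (8GB).
 */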

static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	unsigned long asid, pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	dsb(ishst);
	asid = ASID(vma->vm_mm);

	if (last_level)
		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
				     tlb_level, true, lpa2_is_enabled());
	else
		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
				     tlb_level, true, lpa2_is_enabled());

	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	__flush_tlb_range_nosync(vma, start, end, stride,
				 last_level, tlb_level);
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 * Set the tlb_level to TLBI_TTL_UNKNOWN because we cannot get enough
	 * information here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	const unsigned long stride = PAGE_SIZE;
	unsigned long pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_all();
		return;
	}

	dsb(ishst);
	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */
