/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*/
#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H
#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
# error "64-bit atomics require XLEN to be at least 64"
# endif
#endif
#include <asm/cmpxchg.h>
/*
 * Fences used by the generic atomic fallback code to build the _acquire
 * and _release variants on top of the _relaxed ops.  The RISCV_*_BARRIER
 * strings expand to the appropriate "fence" instructions (see fence.h);
 * the "memory" clobber stops the compiler from reordering accesses
 * across the fence.
 *
 * Note: no trailing semicolon in either expansion — callers write the
 * semicolon themselves, so the macros stay usable in any statement
 * position (e.g. as the sole body of an if/else branch).
 */
#define __atomic_acquire_fence() \
__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
#define __atomic_release_fence() \
__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
/*
 * arch_atomic_read - read the 32-bit counter (relaxed ordering).
 * READ_ONCE() forbids the compiler from tearing or caching the load;
 * no hardware memory-ordering is implied.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE(v->counter);
}
/*
 * arch_atomic_set - write the 32-bit counter (relaxed ordering).
 * WRITE_ONCE() forbids store tearing; no hardware ordering is implied.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
}
/* Native 64-bit atomics: only when not using the asm-generic fallback. */
#ifndef CONFIG_GENERIC_ATOMIC64
/* Static initializer for an atomic64_t. */
#define ATOMIC64_INIT(i) { (i) }
/* Relaxed 64-bit read; READ_ONCE() prevents load tearing. */
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE(v->counter);
}
/* Relaxed 64-bit write; WRITE_ONCE() prevents store tearing. */
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
WRITE_ONCE(v->counter, i);
}
#endif
/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set. These don't return anything, so there's only
 * one version to worry about.
 */
/*
 * ATOMIC_OP - generate a void, relaxed atomic operation as a single AMO
 * instruction.  "zero" as the destination register discards the old
 * value; "+A" tells the compiler the counter is an addressable memory
 * operand that is both read and written.
 *
 * Fix: the closing brace must NOT end with a line-continuation
 * backslash — the original "} \" spliced the following #ifdef directive
 * into the macro replacement list, where the leading '#' is parsed as a
 * stringize operator without a parameter (a hard preprocessor error).
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " zero, %1, %0" \
: "+A" (v->counter) \
: "r" (I) \
: "memory"); \
}
/*
 * With CONFIG_GENERIC_ATOMIC64 the 64-bit ops come from asm-generic, so
 * only the 32-bit (.w) variant is instantiated; otherwise both the
 * 32-bit (.w/int) and native 64-bit (.d/s64) variants are generated.
 */
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_OP (op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_OP (op, asm_op, I, w, int, ) \
ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif
/* sub is built from amoadd with a negated operand (there is no amosub). */
ATOMIC_OPS(add, add, i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_OP
#undef ATOMIC_OPS
/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There's two flavors of these: the arithmetic ops have both fetch and return
 * versions, while the logical ops only have fetch versions.
 */
/*
 * ATOMIC_FETCH_OP - generate fetch_<op> in two orderings:
 *  - _relaxed: a plain AMO with no ordering bits;
 *  - fully ordered: the same AMO with the .aqrl suffix (both acquire
 *    and release bits set).
 * Both return the counter's value from *before* the operation (%1/ret
 * receives the old value loaded by the AMO).
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " %1, %2, %0" \
: "+A" (v->counter), "=r" (ret) \
: "r" (I) \
: "memory"); \
return ret; \
} \
static __always_inline \
c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
: "+A" (v->counter), "=r" (ret) \
: "r" (I) \
: "memory"); \
return ret; \
}
/*
 * ATOMIC_OP_RETURN - <op>_return built on top of fetch_<op>: re-applies
 * the C operator (c_op) with the operand (I) to the fetched old value
 * so the *new* value is returned.  Ordering follows the fetch variant
 * used (relaxed or fully ordered).
 */
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
}
/*
 * Instantiate fetch_<op>/<op>_return for add and sub (sub reuses the
 * add AMO with a negated operand).  The #defines below advertise the
 * _relaxed and fully ordered forms to the generic atomic fallback
 * layer, which derives the _acquire/_release variants from them using
 * __atomic_acquire_fence()/__atomic_release_fence().
 */
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
ATOMIC_FETCH_OP( op, asm_op, I, d, s64, 64) \
ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif
ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_add_return arch_atomic64_add_return
#define arch_atomic64_sub_return arch_atomic64_sub_return
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#endif
#undef ATOMIC_OPS
/*
 * The bitwise ops (and/or/xor) only have fetch_<op> forms — no
 * <op>_return — so ATOMIC_OPS is re-defined without ATOMIC_OP_RETURN
 * before instantiating them.
 */
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif
ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)
/* Advertise the arch implementations to the generic fallback layer. */
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
/*
 * LR/SC loop shared by the 32-bit ("w") and 64-bit ("d") variants:
 * load-reserve the counter; branch out to 1: if it equals the "unless"
 * value _u; otherwise add _a and store-conditional with the .rl bit
 * set, retrying from 0: on SC failure.  The trailing "fence rw, rw"
 * makes the success path a full barrier.
 */
#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
({ \
__asm__ __volatile__ ( \
"0: lr." sfx " %[p], %[c]\n" \
" beq %[p], %[u], 1f\n" \
" add %[rc], %[p], %[a]\n" \
" sc." sfx ".rl %[rc], %[rc], %[c]\n" \
" bnez %[rc], 0b\n" \
" fence rw, rw\n" \
"1:\n" \
: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
: [a]"r" (_a), [u]"r" (_u) \
: "memory"); \
})
/* This is required to provide a full barrier on success. */
/* Adds @a to *v only if *v != @u; returns the old value either way. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
/* 64-bit counterpart, using lr.d/sc.d. */
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 prev;
long rc;
_arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
/*
 * LR/SC loop: increment the counter unless its current value is
 * negative (bltz bails out to 1:).  "fence rw, rw" provides the full
 * barrier on the success path.
 */
#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
({ \
__asm__ __volatile__ ( \
"0: lr." sfx " %[p], %[c]\n" \
" bltz %[p], 1f\n" \
" addi %[rc], %[p], 1\n" \
" sc." sfx ".rl %[rc], %[rc], %[c]\n" \
" bnez %[rc], 0b\n" \
" fence rw, rw\n" \
"1:\n" \
: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
: \
: "memory"); \
})
/* Returns true if the increment happened (the old value was >= 0). */
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int prev, rc;
_arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
return !(prev < 0);
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
/*
 * LR/SC loop: decrement the counter unless its current value is
 * positive (bgtz bails out to 1:).  "fence rw, rw" provides the full
 * barrier on the success path.
 */
#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
({ \
__asm__ __volatile__ ( \
"0: lr." sfx " %[p], %[c]\n" \
" bgtz %[p], 1f\n" \
" addi %[rc], %[p], -1\n" \
" sc." sfx ".rl %[rc], %[rc], %[c]\n" \
" bnez %[rc], 0b\n" \
" fence rw, rw\n" \
"1:\n" \
: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
: \
: "memory"); \
})
/* Returns true if the decrement happened (the old value was <= 0). */
static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
int prev, rc;
_arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
return !(prev > 0);
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
/*
 * LR/SC loop: compute counter-1 and only store it if the result is not
 * negative (bltz on the decremented value bails out to 1:).  "fence
 * rw, rw" provides the full barrier on the success path.
 */
#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
({ \
__asm__ __volatile__ ( \
"0: lr." sfx " %[p], %[c]\n" \
" addi %[rc], %[p], -1\n" \
" bltz %[rc], 1f\n" \
" sc." sfx ".rl %[rc], %[rc], %[c]\n" \
" bnez %[rc], 0b\n" \
" fence rw, rw\n" \
"1:\n" \
: [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
: \
: "memory"); \
})
/*
 * Returns the decremented value; a negative return means the counter
 * was not modified (old value - 1 is reported either way).
 */
static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int prev, rc;
_arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
return prev - 1;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
/*
 * 64-bit counterparts of the conditional ops above, built on the same
 * LR/SC helper macros with the "d" (doubleword) suffix.  Only compiled
 * when the native 64-bit atomics are available.
 */
#ifndef CONFIG_GENERIC_ATOMIC64
/* Returns true if the increment happened (the old value was >= 0). */
static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
{
s64 prev;
long rc;
_arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
return !(prev < 0);
}
#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
/* Returns true if the decrement happened (the old value was <= 0). */
static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
{
s64 prev;
long rc;
_arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
return !(prev > 0);
}
#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
/* Returns old value - 1; negative means the counter was not modified. */
static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 prev;
long rc;
_arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
return prev - 1;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif
#endif /* _ASM_RISCV_ATOMIC_H */