/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <[email protected]>
 * Copyright (C) 2006 Kyle McMartin <[email protected]>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
/* Acquire the hashed spinlock covering object @l, with local interrupts
 * disabled for the duration; @f receives the saved IRQ state for the
 * matching _atomic_spin_unlock_irqrestore().  IRQs are disabled before
 * taking the lock so an interrupt cannot deadlock on the held lock. */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

/* Release the hashed spinlock for object @l and restore the IRQ state
 * previously saved in @f.  Mirror order of the lock macro: drop the
 * lock first, then re-enable interrupts. */
#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/*
 * arch_atomic_set - store @i into @v.
 *
 * The store itself is a plain aligned word write, but it is performed
 * under the hashed spinlock so it cannot race with the locked
 * read-modify-write operations generated below.
 */
static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long irqflags;

	_atomic_spin_lock_irqsave(v, irqflags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, irqflags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

/*
 * arch_atomic_read - return the current value of @v.
 *
 * Lockless: aligned word reads are atomic on this architecture (see the
 * note above), and READ_ONCE() forces a single, untorn load.
 */
static __inline__ int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/*
 * ATOMIC_OP - generate void arch_atomic_<op>(i, v), which applies
 * "v->counter c_op i" (e.g. "+=") under the hashed spinlock with
 * interrupts disabled.  No return value.
 */
#define ATOMIC_OP(op, c_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

/*
 * ATOMIC_OP_RETURN - generate int arch_atomic_<op>_return(i, v), which
 * applies "v->counter c_op i" under the hashed spinlock and returns the
 * *new* counter value (the result of the compound assignment).
 */
#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

/*
 * ATOMIC_FETCH_OP - generate int arch_atomic_fetch_<op>(i, v), which
 * applies "v->counter c_op i" under the hashed spinlock and returns the
 * *old* counter value (sampled before the update).
 */
#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

/*
 * ATOMIC64_OP - 64-bit counterpart of ATOMIC_OP: generate
 * void arch_atomic64_<op>(i, v) applying "v->counter c_op i" under the
 * hashed spinlock with interrupts disabled.  No return value.
 */
#define ATOMIC64_OP(op, c_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

/*
 * ATOMIC64_OP_RETURN - generate s64 arch_atomic64_<op>_return(i, v),
 * which applies "v->counter c_op i" under the hashed spinlock and
 * returns the *new* counter value.
 */
#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

/*
 * ATOMIC64_FETCH_OP - generate s64 arch_atomic64_fetch_<op>(i, v),
 * which applies "v->counter c_op i" under the hashed spinlock and
 * returns the *old* counter value (sampled before the update).
 */
#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#define arch_atomic64_add_return	arch_atomic64_add_return
#define arch_atomic64_sub_return	arch_atomic64_sub_return
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * arch_atomic64_set - store @i into the 64-bit atomic @v.
 *
 * Performed under the hashed spinlock so the store cannot race with
 * the locked read-modify-write operations generated above.
 */
static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long irqflags;

	_atomic_spin_lock_irqsave(v, irqflags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, irqflags);
}

#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))

/*
 * arch_atomic64_read - return the current value of the 64-bit atomic @v.
 *
 * Lockless, like arch_atomic_read(): READ_ONCE() forces a single,
 * untorn load of the aligned counter word.
 */
static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */
