[ SYSTEM ]: Linux srv.persadacompanies.com 4.18.0-553.56.1.el8_10.x86_64 #1 SMP Tue Jun 10 05:00:59 EDT 2025 x86_64
[ SERVER ]: Apache | PHP: 8.4.19
[ USER ]: persadamedika | IP: 45.64.1.108
GEFORCE FILE MANAGER
/
usr
/
src
/
kernels
/
4.18.0-553.64.1.el8_10.x86_64
/
include
/
asm-generic
/
bitops
/
UPLOAD:
NAME
SIZE
QUICK PERMS
ACTIONS
📄 __ffs.h
777 B
SET
[ EDIT ]
|
[ DEL ]
📄 __fls.h
920 B
SET
[ EDIT ]
|
[ DEL ]
📄 arch_hweight.h
555 B
SET
[ EDIT ]
|
[ DEL ]
📄 atomic.h
1,511 B
SET
[ EDIT ]
|
[ DEL ]
📄 builtin-__ffs.h
379 B
SET
[ EDIT ]
|
[ DEL ]
📄 builtin-__fls.h
436 B
SET
[ EDIT ]
|
[ DEL ]
📄 builtin-ffs.h
410 B
SET
[ EDIT ]
|
[ DEL ]
📄 builtin-fls.h
403 B
SET
[ EDIT ]
|
[ DEL ]
📄 const_hweight.h
1,711 B
SET
[ EDIT ]
|
[ DEL ]
📄 ext2-atomic-setbit.h
403 B
SET
[ EDIT ]
|
[ DEL ]
📄 ext2-atomic.h
600 B
SET
[ EDIT ]
|
[ DEL ]
📄 ffs.h
654 B
SET
[ EDIT ]
|
[ DEL ]
📄 ffz.h
325 B
SET
[ EDIT ]
|
[ DEL ]
📄 find.h
3,244 B
SET
[ EDIT ]
|
[ DEL ]
📄 fls.h
674 B
SET
[ EDIT ]
|
[ DEL ]
📄 fls64.h
860 B
SET
[ EDIT ]
|
[ DEL ]
📄 hweight.h
254 B
SET
[ EDIT ]
|
[ DEL ]
📄 le.h
2,239 B
SET
[ EDIT ]
|
[ DEL ]
📄 lock.h
2,505 B
SET
[ EDIT ]
|
[ DEL ]
📄 non-atomic.h
3,535 B
SET
[ EDIT ]
|
[ DEL ]
📄 sched.h
760 B
SET
[ EDIT ]
|
[ DEL ]
DELETE SELECTED
[ CLOSE ]
EDIT: lock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(unsigned int nr,
					volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	/*
	 * Fast path: if the bit is already set, the lock is taken; skip
	 * the (expensive) atomic RMW. A plain READ_ONCE suffices here
	 * because a stale 0 only means we fall through to the atomic op,
	 * which re-checks authoritatively.
	 */
	if (READ_ONCE(*p) & mask)
		return 1;

	/* Acquire ordering only matters on success (old bit was 0). */
	old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
	return !!(old & mask);
}

/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}

/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
 * the bits in the word are protected by this lock some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void __clear_bit_unlock(unsigned int nr,
				      volatile unsigned long *p)
{
	unsigned long old;

	p += BIT_WORD(nr);
	/*
	 * Non-atomic read-modify-write: safe only under the documented
	 * contract that the lock bit protects every other bit in this
	 * word, so no concurrent modification can race with us. The
	 * release is carried by the final store alone.
	 */
	old = READ_ONCE(*p);
	old &= ~BIT_MASK(nr);
	atomic_long_set_release((atomic_long_t *)p, old);
}

/**
 * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
 *                                     byte is negative, for unlock.
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This is a bit of a one-trick-pony for the filemap code, which clears
 * PG_locked and tests PG_waiters.
 */
#ifndef clear_bit_unlock_is_negative_byte
static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
						     volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
	/* BIT(7) is the sign bit of the word's bottom byte. */
	return !!(old & BIT(7));
}
/* Tell callers (and arch overrides) that a definition exists. */
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
#endif

#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */