bitops.h

    #ifndef _LINUX_BITOPS_H
    #define _LINUX_BITOPS_H
    #include <asm/types.h>
    
    #ifdef	__KERNEL__
    #define BIT(nr)			(1UL << (nr))
    #define BIT_ULL(nr)		(1ULL << (nr))
    #define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
    #define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
    #define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
    #define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
    #define BITS_PER_BYTE		8
    #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
    #endif
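    
    /*
     * For example, assuming BITS_PER_LONG == 64, bit 70 of a bitmap lives in
     * word BIT_WORD(70) == 1 and is selected within that word by
     * BIT_MASK(70) == BIT(6).
     */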
    
    /*
     * Create a contiguous bitmask starting at bit position @l and ending at
     * position @h. For example
     * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
     */
    #define GENMASK(h, l)		(((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
    #define GENMASK_ULL(h, l)	(((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
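    
    /*
     * For example, GENMASK(15, 8) expands to 0x0000ff00, and GENMASK_ULL(39, 21)
     * to 0x000000ffffe00000ULL as noted above.
     */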
    
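    /*
     * Software fallbacks for the hweight*() helpers, which return the Hamming
     * weight (number of set bits) of their argument.
     */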
    extern unsigned int __sw_hweight8(unsigned int w);
    extern unsigned int __sw_hweight16(unsigned int w);
    extern unsigned int __sw_hweight32(unsigned int w);
    extern unsigned long __sw_hweight64(__u64 w);
    
    /*
     * Include this here because some architectures need generic_ffs/fls in
     * scope
     */
    #include <asm/bitops.h>
    
    /*
     * Provide __deprecated wrappers for the new interface, avoid flag day changes.
     * We need the ugly external functions to break header recursion hell.
     */
    #ifndef smp_mb__before_clear_bit
    static inline void __deprecated smp_mb__before_clear_bit(void)
    {
    	extern void __smp_mb__before_atomic(void);
    	__smp_mb__before_atomic();
    }
    #endif
    
    #ifndef smp_mb__after_clear_bit
    static inline void __deprecated smp_mb__after_clear_bit(void)
    {
    	extern void __smp_mb__after_atomic(void);
    	__smp_mb__after_atomic();
    }
    #endif
    
    #define for_each_set_bit(bit, addr, size) \
    	for ((bit) = find_first_bit((addr), (size));		\
    	     (bit) < (size);					\
    	     (bit) = find_next_bit((addr), (size), (bit) + 1))
    
    /* same as for_each_set_bit() but starts searching from the current value of bit */
    #define for_each_set_bit_from(bit, addr, size) \
    	for ((bit) = find_next_bit((addr), (size), (bit));	\
    	     (bit) < (size);					\
    	     (bit) = find_next_bit((addr), (size), (bit) + 1))
    
    #define for_each_clear_bit(bit, addr, size) \
    	for ((bit) = find_first_zero_bit((addr), (size));	\
    	     (bit) < (size);					\
    	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
    
    /* same as for_each_clear_bit() but starts searching from the current value of bit */
    #define for_each_clear_bit_from(bit, addr, size) \
    	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
    	     (bit) < (size);					\
    	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
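    
    /*
     * Illustrative usage sketch (assumes the caller also has the bitmap
     * helpers from <linux/bitmap.h>):
     *
     *	DECLARE_BITMAP(map, 64);
     *	unsigned int bit;
     *
     *	bitmap_zero(map, 64);
     *	set_bit(3, map);
     *	set_bit(17, map);
     *	for_each_set_bit(bit, map, 64)
     *		pr_info("bit %u set\n", bit);
     *
     * The loop above visits bit 3, then bit 17.
     */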
    
    static __inline__ int get_bitmask_order(unsigned int count)
    {
    	int order;
    
    	order = fls(count);
    	return order;	/* We could be slightly more clever with -1 here... */
    }
    
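    /**
     * get_count_order - get order after rounding @count up to a power of 2
     * @count: value to take the order of
     *
     * For example, get_count_order(1) == 0, get_count_order(4) == 2 and
     * get_count_order(5) == 3.
     */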
    static __inline__ int get_count_order(unsigned int count)
    {
    	int order;
    
    	order = fls(count) - 1;
    	if (count & (count - 1))
    		order++;
    	return order;
    }
    
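    /*
     * hweight_long - number of set bits in an unsigned long, e.g.
     * hweight_long(0xf0UL) == 4.
     */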
    static inline unsigned long hweight_long(unsigned long w)
    {
    	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
    }
    
    /**
     * rol64 - rotate a 64-bit value left
     * @word: value to rotate
     * @shift: bits to roll
     */
    static inline __u64 rol64(__u64 word, unsigned int shift)
    {
    	return (word << shift) | (word >> (64 - shift));
    }
    
    /**
     * ror64 - rotate a 64-bit value right
     * @word: value to rotate
     * @shift: bits to roll
     */
    static inline __u64 ror64(__u64 word, unsigned int shift)
    {
    	return (word >> shift) | (word << (64 - shift));
    }
    
    /**
     * rol32 - rotate a 32-bit value left
     * @word: value to rotate
     * @shift: bits to roll
     */
    static inline __u32 rol32(__u32 word, unsigned int shift)
    {
    	return (word << shift) | (word >> (32 - shift));
    }
    
    /**
     * ror32 - rotate a 32-bit value right
     * @word: value to rotate
     * @shift: bits to roll
     */
    static inline __u32 ror32(__u32 word, unsigned int shift)
    {
    	return (word >> shift) | (word << (32 - shift));
    }
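    
    /*
     * For example, rol32(0x80000001, 1) == 0x00000003 and
     * ror32(0x00000003, 1) == 0x80000001. Note that a shift of 0 (or of the
     * full word width) is undefined with these implementations.
     */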
    
    /**
     * rol16 - rotate a 16-bit value left
     * @word: value to rotate
     * @shift: bits to roll
     */
    static inline __u16 rol16(__u16 word, unsigned int shift)
    {
    	return (word << shift) | (word >> (16 - shift));
    }
    
    /**
     * ror16 - rotate a 16-bit value right
     * @word: value to rotate
     * @shift: bits to roll
     */
    static inline __u16 ror16(__u16 word, unsigned int shift)
    {
    	return (word >> shift) | (word << (16 - shift));
    }
    
    /**
     * rol8 - rotate an 8-bit value left
     * @word: value to rotate
     * @shift: bits to roll
     */
    static inline __u8 rol8(__u8 word, unsigned int shift)
    {
    	return (word << shift) | (word >> (8 - shift));
    }
    
    /**
     * ror8 - rotate an 8-bit value right
     * @word: value to rotate
     * @shift: bits to roll
     */
    static inline __u8 ror8(__u8 word, unsigned int shift)
    {
    	return (word >> shift) | (word << (8 - shift));
    }
    
    /**
     * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
     * @value: value to sign extend
     * @index: 0 based bit index (0<=index<32) to sign bit
     */
    static inline __s32 sign_extend32(__u32 value, int index)
    {
    	__u8 shift = 31 - index;
    	return (__s32)(value << shift) >> shift;
    }
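    
    /*
     * For example, sign_extend32(0x7f, 7) == 127 and
     * sign_extend32(0x80, 7) == -128.
     */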
    
    static inline unsigned fls_long(unsigned long l)
    {
    	if (sizeof(l) == 4)
    		return fls(l);
    	return fls64(l);
    }
    
    /**
     * __ffs64 - find first set bit in a 64 bit word
     * @word: The 64 bit word
     *
     * On 64-bit arches this is a synonym for __ffs
     * The result is not defined if no bits are set, so check that @word
     * is non-zero before calling this.
     */
    static inline unsigned long __ffs64(u64 word)
    {
    #if BITS_PER_LONG == 32
    	if (((u32)word) == 0UL)
    		return __ffs((u32)(word >> 32)) + 32;
    #elif BITS_PER_LONG != 64
    #error BITS_PER_LONG not 32 or 64
    #endif
    	return __ffs((unsigned long)word);
    }
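    
    /*
     * For example, __ffs64(0x0000001000000000ULL) == 36 on both 32-bit and
     * 64-bit architectures.
     */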
    
    #ifdef __KERNEL__
    
    #ifndef set_mask_bits
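    /*
     * set_mask_bits - atomically replace the bits selected by @_mask at @ptr
     * with @_bits, i.e. *ptr = (*ptr & ~_mask) | _bits, retrying the
     * cmpxchg() until it succeeds. Evaluates to the new value. Illustrative
     * use (STATE_MASK and STATE_RUNNING are hypothetical constants):
     * set_mask_bits(&flags, STATE_MASK, STATE_RUNNING).
     */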
    #define set_mask_bits(ptr, _mask, _bits)	\
    ({								\
    	const typeof(*ptr) mask = (_mask), bits = (_bits);	\
    	typeof(*ptr) old, new;					\
    								\
    	do {							\
    		old = ACCESS_ONCE(*ptr);			\
    		new = (old & ~mask) | bits;			\
    	} while (cmpxchg(ptr, old, new) != old);		\
    								\
    	new;							\
    })
    #endif
    
    #ifndef find_last_bit
    /**
     * find_last_bit - find the last set bit in a memory region
     * @addr: The address to start the search at
     * @size: The maximum size to search
     *
     * Returns the bit number of the last set bit, or @size if no bits are set.
     */
    extern unsigned long find_last_bit(const unsigned long *addr,
    				   unsigned long size);
    #endif
    
    #endif /* __KERNEL__ */
    #endif