Sync with Linux commit ad0376eb1483b ("Merge tag 'edac_for_4.11_2'").

Signed-off-by: Peng Fan <peng.fan@nxp.com>
Cc: Tom Rini <trini@konsulko.com>

#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <div64.h>
#include <linux/bitops.h>
#include <linux/types.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs as an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
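
/*
 * Example (illustrative, names hypothetical): split a nanosecond count
 * into whole seconds and leftover nanoseconds; the remainder always
 * fits in 32 bits because the divisor does:
 *
 *	u32 ns_rem;
 *	u64 secs = div_u64_rem(timestamp_ns, 1000000000u, &ns_rem);
 */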

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	/*
	 * do_div() divides the dividend in place and evaluates to the
	 * remainder.
	 */
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */
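
/*
 * Note (an assumption based on the matching Linux layout): on 32bit
 * builds the extern declarations above are expected to be satisfied
 * out of line, e.g. by the generic C implementations in lib/div64.c.
 */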

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full
 * 64bit divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
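
/*
 * Example (illustrative): convert nanoseconds to microseconds; prefer
 * div_u64() over a plain "/" so 32bit builds do not pull in a full
 * 64/64 libgcc division routine:
 *
 *	u64 us = div_u64(ns, 1000);
 */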

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
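
/*
 * Note (illustrative, names hypothetical): the subtract-loop above only
 * pays off when the quotient is known to be tiny, e.g. normalizing a
 * timespec-style value where at most a few seconds of nanoseconds have
 * accumulated:
 *
 *	u64 ns_rem;
 *	sec += __iter_div_u64_rem(nsec, 1000000000u, &ns_rem);
 */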

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif
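
/*
 * Example (illustrative): the cast widens one operand before the
 * multiply, so the full 64bit product is kept rather than a truncated
 * 32bit one:
 *
 *	mul_u32_u32(0xffffffffu, 0xffffffffu) == 0xfffffffe00000001ull
 */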

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */
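
/*
 * Note (illustrative): with a split into 32bit halves ah:al, the
 * fallback above computes
 *
 *	((a * mul) >> shift) == ((al * mul) >> shift)
 *				+ ((ah * mul) << (32 - shift))
 *
 * which relies on shift being at most 32.
 */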

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95.  The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
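
/*
 * Worked example (illustrative) of the schoolbook decomposition above,
 * with a = ah:al and b = bh:bl as 32bit halves:
 *
 *	a * b = (ah*bh) << 64  +  (ah*bl + al*bh) << 32  +  al*bl
 *
 * rl/rm/rn/rh hold those four partial products; the carry chain folds
 * them into the 128bit pair rh:rl.
 */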

#endif
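
/*
 * Example (illustrative, names hypothetical): both variants of
 * mul_u64_u32_shr() above fit the classic clocksource conversion,
 * with mult/shift precomputed elsewhere:
 *
 *	u64 ns = mul_u64_u32_shr(cycles, mult, shift);
 */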

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
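
/*
 * Example (illustrative, names hypothetical): scale a 64bit value by a
 * 32bit ratio, e.g. bytes * rate_num / rate_den, without needing a
 * wider intermediate; the final result is assumed to fit in 64 bits:
 *
 *	u64 scaled = mul_u64_u32_div(bytes, rate_num, rate_den);
 */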

#endif /* _LINUX_MATH64_H */