static inline u32 vfp_shiftright32jamming(u32 val, unsigned int shift)
{
	if (shift) {
		if (shift < 32)
			val = val >> shift | ((val << (32 - shift)) != 0);
		else
			val = val != 0;
	}
	return val;
}
static inline u64 vfp_shiftright64jamming(u64 val, unsigned int shift)
{
	if (shift) {
		if (shift < 64)
			val = val >> shift | ((val << (64 - shift)) != 0);
		else
			val = val != 0;
	}
	return val;
}
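/*
 * Worked example of the "jamming" (sticky) shift: any 1 bits shifted
 * out are OR-ed into the result's least significant bit, so later
 * rounding still sees that the value was inexact:
 *
 *	vfp_shiftright32jamming(0x80000001, 4) == 0x08000001
 *	vfp_shiftright32jamming(0x80000000, 4) == 0x08000000
 */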
static inline u32 vfp_hi64to32jamming(u64 val)
{
	u32 v;

	/*
	 * %Q1/%R1 select the low/high 32-bit halves of the 64-bit
	 * operand.  The cmp sets carry iff the low half is non-zero,
	 * so the high half is returned either as-is (movcc) or with
	 * its LSB jammed (orrcs).
	 */
	asm(
	"cmp	%Q1, #1		@ vfp_hi64to32jamming\n\t"
	"movcc	%0, %R1\n\t"
	"orrcs	%0, %R1, #1"
	: "=r" (v) : "r" (val) : "cc");

	return v;
}
static inline void add128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml)
{
	asm(	"adds	%Q0, %Q2, %Q4\n\t"
		"adcs	%R0, %R2, %R4\n\t"
		"adcs	%Q1, %Q3, %Q5\n\t"
		"adc	%R1, %R3, %R5"
	    : "=r" (nl), "=r" (nh)
	    : "0" (nl), "1" (nh), "r" (ml), "r" (mh)
	    : "cc");
	*resh = nh;
	*resl = nl;
}
static inline void sub128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml)
{
	asm(	"subs	%Q0, %Q2, %Q4\n\t"
		"sbcs	%R0, %R2, %R4\n\t"
		"sbcs	%Q1, %Q3, %Q5\n\t"
		"sbc	%R1, %R3, %R5\n\t"
	    : "=r" (nl), "=r" (nh)
	    : "0" (nl), "1" (nh), "r" (ml), "r" (mh)
	    : "cc");
	*resh = nh;
	*resl = nl;
}
/* 64 x 64 -> 128 bit multiply built from four 32 x 32 partial products. */
static inline void mul64to128(u64 *resh, u64 *resl, u64 n, u64 m)
{
	u32 nh, nl, mh, ml;
	u64 rh, rma, rmb, rl;

	nl = n;
	ml = m;
	rl = (u64)nl * ml;

	nh = n >> 32;
	rma = (u64)nh * ml;

	mh = m >> 32;
	rmb = (u64)nl * mh;
	rma += rmb;

	rh = (u64)nh * mh;
	rh += ((u64)(rma < rmb) << 32) + (rma >> 32);

	rma <<= 32;
	rl += rma;
	rh += (rl < rma);

	*resl = rl;
	*resh = rh;
}
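/*
 * Illustrative sketch, not part of the original header: on a host
 * compiler that provides unsigned __int128 (a GCC extension, not
 * available on 32-bit ARM), the full product can be computed
 * directly, which is handy for checking the partial-product code
 * above.
 */
static inline void mul64to128_ref(u64 *resh, u64 *resl, u64 n, u64 m)
{
	unsigned __int128 r = (unsigned __int128)n * m;

	*resh = (u64)(r >> 64);
	*resl = (u64)r;
}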
static inline void shift64left(u64 *resh, u64 *resl, u64 n)
{
	*resh = n >> 63;
	*resl = n << 1;
}
/*
 * Returns the high 64 bits of the 128-bit product, with any non-zero
 * low bits jammed into the result's LSB.
 */
static inline u64 vfp_hi64multiply64(u64 n, u64 m)
{
	u64 rh, rl;

	mul64to128(&rh, &rl, n, m);
	return rh | (rl != 0);
}
static inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m)
{
	u64 mh, ml, remh, reml, termh, terml, z;

	if (nh >= m)
		return ~0ULL;
	mh = m >> 32;
	if (mh << 32 <= nh) {
		z = 0xffffffff00000000ULL;
	} else {
		z = nh;
		do_div(z, mh);
		z <<= 32;
	}
	mul64to128(&termh, &terml, m, z);
	sub128(&remh, &reml, nh, nl, termh, terml);
	ml = m << 32;
	while ((s64)remh < 0) {
		z -= 0x100000000ULL;
		add128(&remh, &reml, remh, reml, mh, ml);
	}
	remh = (remh << 32) | (reml >> 32);
	if (mh << 32 <= remh) {
		z |= 0xffffffff;
	} else {
		do_div(remh, mh);
		z |= remh;
	}
	return z;
}
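/*
 * The routine above avoids a true 128-by-64 division: it estimates
 * the high 32 quotient bits from the top 32 bits of m, corrects
 * downwards while the remainder is negative, then derives the low 32
 * bits the same way.  Illustrative sketch, not part of the original
 * header: on a host with the unsigned __int128 GCC extension the
 * exact truncated quotient can be computed directly for comparison;
 * the routine above returns only an approximation of it.
 */
static inline u64 div128to64_ref(u64 nh, u64 nl, u64 m)
{
	unsigned __int128 n = ((unsigned __int128)nh << 64) | nl;

	return (u64)(n / m);
}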
/*
 * Operations on unpacked elements.  The sign is kept in the top bit
 * of a 16-bit field, hence the 0x8000 mask.
 */
#define vfp_sign_negate(sign)	(sign ^ 0x8000)

/*
 * Single-precision
 */
struct vfp_single {
	s16	exponent;
	u16	sign;
	u32	significand;
};
#define VFP_SINGLE_MANTISSA_BITS	(23)
#define VFP_SINGLE_EXPONENT_BITS	(8)
#define VFP_SINGLE_LOW_BITS		(32 - VFP_SINGLE_MANTISSA_BITS - 2)
#define VFP_SINGLE_LOW_BITS_MASK	((1 << VFP_SINGLE_LOW_BITS) - 1)

/*
 * The bit in an unpacked float which indicates that it is a quiet NaN
 */
#define VFP_SINGLE_SIGNIFICAND_QNAN	(1 << (VFP_SINGLE_MANTISSA_BITS - 1 + VFP_SINGLE_LOW_BITS))
#define vfp_single_packed_sign(v)	((v) & 0x80000000)
#define vfp_single_packed_negate(v)	((v) ^ 0x80000000)
#define vfp_single_packed_abs(v)	((v) & ~0x80000000)
#define vfp_single_packed_exponent(v)	(((v) >> VFP_SINGLE_MANTISSA_BITS) & ((1 << VFP_SINGLE_EXPONENT_BITS) - 1))
#define vfp_single_packed_mantissa(v)	((v) & ((1 << VFP_SINGLE_MANTISSA_BITS) - 1))
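/*
 * Example with the packed representation: -1.0f is 0xbf800000, so
 * vfp_single_packed_sign() gives 0x80000000,
 * vfp_single_packed_exponent() gives 127 (the IEEE 754 bias) and
 * vfp_single_packed_mantissa() gives 0.
 */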
/*
 * Unpack a single-precision float.  Note that this returns the
 * magnitude of the single-precision float mantissa with the 1. if
 * necessary, aligned to bit 30.
 */
static inline void vfp_single_unpack(struct vfp_single *s, s32 val)
{
	u32 significand;

	s->sign = vfp_single_packed_sign(val) >> 16;
	s->exponent = vfp_single_packed_exponent(val);

	significand = (u32) val;
	significand = (significand << (32 - VFP_SINGLE_MANTISSA_BITS)) >> 2;
	if (s->exponent && s->exponent != 255)
		significand |= 0x40000000;
	s->significand = significand;
}

/*
 * Re-pack a single-precision float.  This assumes that the float is
 * already normalised such that the MSB is bit 30, _not_ bit 31.
 */
static inline s32 vfp_single_pack(struct vfp_single *s)
{
	u32 val;

	val = (s->sign << 16) +
	      (s->exponent << VFP_SINGLE_MANTISSA_BITS) +
	      (s->significand >> VFP_SINGLE_LOW_BITS);
	return (s32)val;
}
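/*
 * Example: unpacking 1.0f (0x3f800000) yields sign 0, exponent 127
 * and significand 0x40000000, i.e. "1.0" with the restored implicit
 * bit aligned to bit 30.  On re-packing, significand bit 30 lands in
 * bit 23 and so carries into the exponent field, which callers of
 * vfp_single_pack() must account for when computing the exponent.
 */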
#define VFP_NUMBER		(1<<0)
#define VFP_ZERO		(1<<1)
#define VFP_DENORMAL		(1<<2)
#define VFP_INFINITY		(1<<3)
#define VFP_NAN			(1<<4)
#define VFP_NAN_SIGNAL		(1<<5)

#define VFP_QNAN		(VFP_NAN)
#define VFP_SNAN		(VFP_NAN|VFP_NAN_SIGNAL)
static inline int vfp_single_type(struct vfp_single *s)
{
	int type = VFP_NUMBER;

	if (s->exponent == 255) {
		if (s->significand == 0)
			type = VFP_INFINITY;
		else if (s->significand & VFP_SINGLE_SIGNIFICAND_QNAN)
			type = VFP_QNAN;
		else
			type = VFP_SNAN;
	} else if (s->exponent == 0) {
		if (s->significand == 0)
			type |= VFP_ZERO;
		else
			type |= VFP_DENORMAL;
	}
	return type;
}
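/*
 * Example: the default quiet NaN 0x7fc00000 unpacks to exponent 255
 * and significand 0x20000000, which is exactly
 * VFP_SINGLE_SIGNIFICAND_QNAN (1 << 29), so vfp_single_type()
 * returns VFP_QNAN for it.
 */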
#define vfp_single_normaliseround(sd,vsd,fpscr,except,func) __vfp_single_normaliseround(sd,vsd,fpscr,except)
u32 __vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions);
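/*
 * The func argument is accepted but discarded here; a debug build of
 * this code also passes it through so the normalisation helper can
 * trace which operation produced the value.
 */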
/*
 * Double-precision
 */
struct vfp_double {
	s16	exponent;
	u16	sign;
	u64	significand;
};

/*
 * VFP_REG_ZERO is a special register number for vfp_get_double
 * which returns (double)0.0.  This is useful for the compare with
 * zero instructions.
 */
#ifdef CONFIG_VFPv3
#define VFP_REG_ZERO	32
#else
#define VFP_REG_ZERO	16
#endif
#define VFP_DOUBLE_MANTISSA_BITS	(52)
#define VFP_DOUBLE_EXPONENT_BITS	(11)
#define VFP_DOUBLE_LOW_BITS		(64 - VFP_DOUBLE_MANTISSA_BITS - 2)
#define VFP_DOUBLE_LOW_BITS_MASK	((1 << VFP_DOUBLE_LOW_BITS) - 1)

/*
 * The bit in an unpacked double which indicates that it is a quiet NaN
 */
#define VFP_DOUBLE_SIGNIFICAND_QNAN	(1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1 + VFP_DOUBLE_LOW_BITS))
#define vfp_double_packed_sign(v)	((v) & (1ULL << 63))
#define vfp_double_packed_negate(v)	((v) ^ (1ULL << 63))
#define vfp_double_packed_abs(v)	((v) & ~(1ULL << 63))
#define vfp_double_packed_exponent(v)	(((v) >> VFP_DOUBLE_MANTISSA_BITS) & ((1 << VFP_DOUBLE_EXPONENT_BITS) - 1))
#define vfp_double_packed_mantissa(v)	((v) & ((1ULL << VFP_DOUBLE_MANTISSA_BITS) - 1))
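/*
 * Example with the packed representation: 1.0 is 0x3ff0000000000000,
 * so vfp_double_packed_sign() gives 0, vfp_double_packed_exponent()
 * gives 1023 (the IEEE 754 bias) and vfp_double_packed_mantissa()
 * gives 0.
 */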
/*
 * Unpack a double-precision float.  Note that this returns the
 * magnitude of the double-precision float mantissa with the 1. if
 * necessary, aligned to bit 62.
 */
static inline void vfp_double_unpack(struct vfp_double *s, s64 val)
{
	u64 significand;

	s->sign = vfp_double_packed_sign(val) >> 48;
	s->exponent = vfp_double_packed_exponent(val);

	significand = (u64) val;
	significand = (significand << (64 - VFP_DOUBLE_MANTISSA_BITS)) >> 2;
	if (s->exponent && s->exponent != 2047)
		significand |= (1ULL << 62);
	s->significand = significand;
}
static inline int vfp_double_type(struct vfp_double *s)
{
	int type = VFP_NUMBER;

	if (s->exponent == 2047) {
		if (s->significand == 0)
			type = VFP_INFINITY;
		else if (s->significand & VFP_DOUBLE_SIGNIFICAND_QNAN)
			type = VFP_QNAN;
		else
			type = VFP_SNAN;
	} else if (s->exponent == 0) {
		if (s->significand == 0)
			type |= VFP_ZERO;
		else
			type |= VFP_DENORMAL;
	}
	return type;
}
#define VFP_NAN_FLAG	0x100

/*
 * VFP_EXCEPTION_ERROR: all bits set except VFP_NAN_FLAG, used to
 * signal that an instruction was not handled.
 */
#define VFP_EXCEPTION_ERROR	((u32)-1 & ~VFP_NAN_FLAG)
/*
 * Flags describing how an instruction operates:
 *  OP_SCALAR - the operation always acts on scalars, even in vector mode
 *  OP_SD     - the operation exceptionally writes a single-precision result
 *  OP_DD     - the operation exceptionally writes a double-precision result
 *  OP_SM     - the operation exceptionally reads a single-precision operand
 */
#define OP_SCALAR	(1 << 0)
#define OP_SD		(1 << 1)
#define OP_DD		(1 << 1)
#define OP_SM		(1 << 2)
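/*
 * Note that OP_SD and OP_DD deliberately share bit 1: an instruction
 * is decoded as either a single- or a double-precision operation,
 * never both, so the "exceptional result size" flag can be
 * overloaded between the two decode tables.
 */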