static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
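        /* shift the mantissa right by cnt bits, raising exp to match; bits
         * shifted out are gathered in lowmant so rounding still sees them */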
        reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
                           (reg->mant.m32[0] << (32 - cnt));
        if (reg->mant.m32[1] << (40 - cnt))
                reg->lowmant |= 1;
        asm volatile ("bfextu %1{%2,#8},%0" : "=d" (reg->lowmant)
                : "m" (reg->mant.m32[0]), "d" (64 - cnt));
        if (reg->mant.m32[1] << (40 - cnt))
                reg->lowmant |= 1;
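/* fp_overnormalize(): locate the highest set mantissa bit with bfffo, shift
 * the mantissa left so that bit becomes the MSB, and return the shift count. */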
static inline int fp_overnormalize(struct fp_ext *reg)
        asm ("bfffo %1{#0,#32},%0" : "=d" (shift) : "dm" (reg->mant.m32[0]));
        asm ("bfffo %1{#0,#32},%0" : "=d" (shift) : "dm" (reg->mant.m32[1]));
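/* The add.b/addx.l chain below (the body of fp_addmant) adds one mantissa
 * into another: add.b handles the low lowmant byte, the two addx.l steps
 * propagate the carry through the 32-bit words, and the final addx.l
 * captures the carry out of the top word. */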
        asm volatile ("add.b %1,%0" : "=d,g" (dest->lowmant)
                : "g,d" (src->lowmant), "0,0" (dest->lowmant));
        asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[1])
                : "d" (src->mant.m32[1]), "0" (dest->mant.m32[1]));
        asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[0])
                : "d" (src->mant.m32[0]), "0" (dest->mant.m32[0]));
        asm volatile ("addx.l %0,%0" : "=d" (carry) : "0" (0));
static inline int fp_addcarry(struct fp_ext *reg)
        if (++reg->exp == 0x7fff) {
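/* fp_submant(): dest = src1 - src2 over the full mantissa; sub.b handles
 * the low byte and the two subx.l steps propagate the borrow upward. */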
static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
                              struct fp_ext *src2)
        asm volatile ("sub.b %1,%0" : "=d,g" (dest->lowmant)
                : "g,d" (src2->lowmant), "0,0" (src1->lowmant));
        asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[1])
                : "d" (src2->mant.m32[1]), "0" (src1->mant.m32[1]));
        asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[0])
                : "d" (src2->mant.m32[0]), "0" (src1->mant.m32[0]));
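/* 64- and 96-bit helpers built on m68k multi-precision instructions:
 * mulu.l yields the full 64-bit product of two 32-bit words, divu.l divides
 * a 64-bit value by a 32-bit one, and the addx/subx forms chain the carry
 * (X flag) across adjacent 32-bit words. */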
#define fp_mul64(desth, destl, src1, src2) ({ \
        asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth) \
                : "dm" (src1), "0" (src2)); \
})
#define fp_div64(quot, rem, srch, srcl, div) \
        asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem) \
                : "dm" (div), "1" (srch), "0" (srcl))
#define fp_add64(dest1, dest2, src1, src2) ({ \
        asm ("add.l %1,%0" : "=d,dm" (dest2) \
                : "dm,d" (src2), "0,0" (dest2)); \
        asm ("addx.l %1,%0" : "=d" (dest1) \
                : "d" (src1), "0" (dest1)); \
})
#define fp_addx96(dest, src) ({ \
        asm volatile ("add.l %1,%0" : "=d,g" (dest->m32[2]) \
                : "g,d" (temp.m32[1]), "0,0" (dest->m32[2])); \
        asm volatile ("addx.l %1,%0" : "=d" (dest->m32[1]) \
                : "d" (temp.m32[0]), "0" (dest->m32[1])); \
        asm volatile ("addx.l %1,%0" : "=d" (dest->m32[0]) \
                : "d" (0), "0" (dest->m32[0])); \
})
#define fp_sub64(dest, src) ({ \
        asm ("sub.l %1,%0" : "=d,dm" (dest.m32[1]) \
                : "dm,d" (src.m32[1]), "0,0" (dest.m32[1])); \
        asm ("subx.l %1,%0" : "=d" (dest.m32[0]) \
                : "d" (src.m32[0]), "0" (dest.m32[0])); \
})
#define fp_sub96c(dest, srch, srcm, srcl) ({ \
        char carry; \
        asm ("sub.l %1,%0" : "=d,dm" (dest.m32[2]) \
                : "dm,d" (srcl), "0,0" (dest.m32[2])); \
        asm ("subx.l %1,%0" : "=d" (dest.m32[1]) \
                : "d" (srcm), "0" (dest.m32[1])); \
        asm ("subx.l %2,%1; scs %0" : "=d" (carry), "=d" (dest.m32[0]) \
                : "d" (srch), "1" (dest.m32[0])); \
        carry; \
})
static inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1,
                                   struct fp_ext *src2)
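/* fp_dividemant(): divide the mantissa of src by that of div, producing the
 * quotient in dest one 32-bit word at a time. */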
static inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
                                 struct fp_ext *div)
        unsigned long *mantp = dest->m32;
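        /* Fragments from the per-word quotient loop: fp_div64() derives the
         * correction factor fix from the divisor, and each fp_mul64() below
         * multiplies the estimated quotient word (first) by fix, keeping the
         * high half of the product as the next result word in *mantp. */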
        dummy = (dummy >> 1) | fix;
        fp_div64(fix, dummy, fix, 0, dummy);

        for (i = 0; i < 3; i++, mantp++) {
                fp_mul64(*mantp, dummy, first, fix);

                fp_mul64(*mantp, dummy, first, fix);
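/* fp_putmant128(): fold a 128-bit intermediate mantissa in src back into the
 * 64-bit mantissa of dest for a shift of 0, 1, 31 or 32 bits, collecting the
 * discarded low bits in lowmant as guard/sticky bits. */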
static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
                                 int shift)
        if (src->m32[3] || (src->m32[2] << 8))
                dest->lowmant |= 1;
        asm volatile ("lsl.l #1,%0"
                : "=d" (tmp) : "0" (src->m32[2]));
        asm volatile ("roxl.l #1,%0"
                : "=d" (dest->mant.m32[1]) : "0" (src->m32[1]));
        asm volatile ("roxl.l #1,%0"
                : "=d" (dest->mant.m32[0]) : "0" (src->m32[0]));
        if (src->m32[3] || (tmp << 8))
                dest->lowmant |= 1;
        asm volatile ("lsr.l #1,%1; roxr.l #1,%0"
                : "=d" (dest->mant.m32[0])
                : "d" (src->m32[0]), "0" (src->m32[1]));
        asm volatile ("roxr.l #1,%0"
                : "=d" (dest->mant.m32[1]) : "0" (src->m32[2]));
        asm volatile ("roxr.l #1,%0"
                : "=d" (tmp) : "0" (src->m32[3]));
        if (src->m32[3] << 7)
                dest->lowmant |= 1;
        if (src->m32[3] << 8)
                dest->lowmant |= 1;