Go to the documentation of this file.
112 #ifndef HEADER_BN_LCL_H
113 #define HEADER_BN_LCL_H
148 #define BN_window_bits_for_exponent_size(b) \
158 #define BN_window_bits_for_exponent_size(b) \
/*
 * Smallest cache-line width (in bytes) assumed by the cache-timing-resistant
 * ("CTIME") code paths, plus the matching alignment mask (WIDTH - 1, valid
 * because WIDTH is a power of two).
 * NOTE(review): the leading "170"/"171" are doxygen line numbers fused into
 * the text by the extraction, not part of the original header.
 */
170 #define MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH ( 64 )
171 #define MOD_EXP_CTIME_MIN_CACHE_LINE_MASK (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - 1)
185 #if MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 64
187 # define BN_window_bits_for_ctime_exponent_size(b) \
192 # define BN_MAX_WINDOW_BITS_FOR_CTIME_EXPONENT_SIZE (6)
194 #elif MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 32
196 # define BN_window_bits_for_ctime_exponent_size(b) \
200 # define BN_MAX_WINDOW_BITS_FOR_CTIME_EXPONENT_SIZE (5)
/*
 * Operand-size thresholds (in BN_ULONG words). Judging by their names these
 * select between the "normal" schoolbook routines and the recursive
 * (Karatsuba-style) multiply/square paths, and size the Montgomery context
 * setup — name-based reading, TODO confirm against the .c files that use them.
 * NOTE(review): the leading "207".."211" are fused doxygen line numbers.
 */
207 #define BN_MULL_SIZE_NORMAL (16)
208 #define BN_MUL_RECURSIVE_SIZE_NORMAL (16)
209 #define BN_SQR_RECURSIVE_SIZE_NORMAL (16)
210 #define BN_MUL_LOW_RECURSIVE_SIZE_NORMAL (32)
211 #define BN_MONT_CTX_SET_SIZE_WORD (64)
213 #if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
237 # if defined(__alpha) && (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
240 # define BN_UMULT_HIGH(a,b) (BN_ULONG)asm("umulh %a0,%a1,%v0",(a),(b))
241 # elif defined(__GNUC__) && __GNUC__>=2
242 # define BN_UMULT_HIGH(a,b) ({ \
243 register BN_ULONG ret; \
244 asm ("umulh %1,%2,%0" \
249 # elif defined(_ARCH_PPC) && defined(__64BIT__) && defined(SIXTY_FOUR_BIT_LONG)
250 # if defined(__GNUC__) && __GNUC__>=2
251 # define BN_UMULT_HIGH(a,b) ({ \
252 register BN_ULONG ret; \
253 asm ("mulhdu %0,%1,%2" \
258 # elif (defined(__x86_64) || defined(__x86_64__)) && \
259 (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
260 # if defined(__GNUC__) && __GNUC__>=2
261 # define BN_UMULT_HIGH(a,b) ({ \
262 register BN_ULONG ret,discard; \
264 : "=a"(discard),"=d"(ret) \
268 # define BN_UMULT_LOHI(low,high,a,b) \
270 : "=a"(low),"=d"(high) \
274 # elif (defined(_M_AMD64) || defined(_M_X64)) && defined(SIXTY_FOUR_BIT)
275 # if defined(_MSC_VER) && _MSC_VER>=1400
/*
 * MSVC x64 path (guarded above by _M_AMD64/_M_X64 && _MSC_VER >= 1400):
 * declare the compiler intrinsics and map the portable BN_UMULT_* macros
 * onto them.
 *   __umulh(a,b)        -> high 64 bits of the 128-bit product a*b
 *   _umul128(a,b,&h)    -> returns low 64 bits, stores high 64 bits in *h
 * NOTE(review): the leading "276".."281" are fused doxygen line numbers,
 * and the prototypes are split across lines by the extraction.
 */
276 unsigned __int64 __umulh (
unsigned __int64
a,
unsigned __int64
b);
277 unsigned __int64 _umul128 (
unsigned __int64
a,
unsigned __int64
b,
/* out-parameter: receives the high half of the product */
278 unsigned __int64 *
h);
279 # pragma intrinsic(__umulh,_umul128)
280 # define BN_UMULT_HIGH(a,b) __umulh((a),(b))
281 # define BN_UMULT_LOHI(low,high,a,b) ((low)=_umul128((a),(b),&(high)))
283 # elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
284 # if defined(__GNUC__) && __GNUC__>=2
285 # define BN_UMULT_HIGH(a,b) ({ \
286 register BN_ULONG ret; \
287 asm ("dmultu %1,%2" \
289 : "r"(a), "r"(b) : "l"); \
291 # define BN_UMULT_LOHI(low,high,a,b) \
292 asm ("dmultu %2,%3" \
293 : "=l"(low),"=h"(high) \
/*
 * Split a double-width value t into single BN_ULONG words:
 *   Lw(t) = low word  (t masked to BN_MASK2, i.e. BN_BITS2 bits)
 *   Hw(t) = high word (t shifted down by BN_BITS2, then masked)
 * NOTE(review): the leading "302"/"303" are fused doxygen line numbers.
 */
302 #define Lw(t) (((BN_ULONG)(t))&BN_MASK2)
303 #define Hw(t) (((BN_ULONG)((t)>>BN_BITS2))&BN_MASK2)
306 #define bn_clear_top2max(a) \
308 int ind = (a)->dmax - (a)->top; \
309 BN_ULONG *ftl = &(a)->d[(a)->top-1]; \
310 for (; ind != 0; ind--) \
314 #define bn_clear_top2max(a)
318 #define mul_add(r,a,w,c) { \
320 t=(BN_ULLONG)w * (a) + (r) + (c); \
325 #define mul(r,a,w,c) { \
327 t=(BN_ULLONG)w * (a) + (c); \
332 #define sqr(r0,r1,a) { \
334 t=(BN_ULLONG)(a)*(a); \
339 #elif defined(BN_UMULT_LOHI)
340 #define mul_add(r,a,w,c) { \
341 BN_ULONG high,low,ret,tmp=(a); \
343 BN_UMULT_LOHI(low,high,w,tmp); \
345 (c) = (ret<(c))?1:0; \
348 (c) += (ret<low)?1:0; \
352 #define mul(r,a,w,c) { \
353 BN_ULONG high,low,ret,ta=(a); \
354 BN_UMULT_LOHI(low,high,w,ta); \
357 (c) += (ret<low)?1:0; \
361 #define sqr(r0,r1,a) { \
363 BN_UMULT_LOHI(r0,r1,tmp,tmp); \
366 #elif defined(BN_UMULT_HIGH)
367 #define mul_add(r,a,w,c) { \
368 BN_ULONG high,low,ret,tmp=(a); \
370 high= BN_UMULT_HIGH(w,tmp); \
373 (c) = (ret<(c))?1:0; \
376 (c) += (ret<low)?1:0; \
380 #define mul(r,a,w,c) { \
381 BN_ULONG high,low,ret,ta=(a); \
383 high= BN_UMULT_HIGH(w,ta); \
386 (c) += (ret<low)?1:0; \
390 #define sqr(r0,r1,a) { \
393 (r1) = BN_UMULT_HIGH(tmp,tmp); \
/*
 * Half-word helpers for the no-wide-multiply fallback path:
 *   LBITS(a)   - low half (BN_BITS4 bits) of a BN_ULONG
 *   HBITS(a)   - high half of a BN_ULONG, shifted down
 *   L2HBITS(a) - low half of a moved up into the high-half position
 * Double-word (BN_ULLONG) variants:
 *   LLBITS(a)   - low BN_ULONG-sized word of a BN_ULLONG
 *   LHBITS(a)   - high word of a BN_ULLONG, shifted down
 *   LL2HBITS(a) - low word of a moved into the high-word position
 * NOTE(review): the leading "401".."407" are fused doxygen line numbers.
 */
401 #define LBITS(a) ((a)&BN_MASK2l)
402 #define HBITS(a) (((a)>>BN_BITS4)&BN_MASK2l)
403 #define L2HBITS(a) (((a)<<BN_BITS4)&BN_MASK2)
405 #define LLBITS(a) ((a)&BN_MASKl)
406 #define LHBITS(a) (((a)>>BN_BITS2)&BN_MASKl)
407 #define LL2HBITS(a) ((BN_ULLONG)((a)&BN_MASKl)<<BN_BITS2)
409 #define mul64(l,h,bl,bh) \
411 BN_ULONG m,m1,lt,ht; \
419 m=(m+m1)&BN_MASK2; if (m < m1) ht+=L2HBITS((BN_ULONG)1); \
422 lt=(lt+m1)&BN_MASK2; if (lt < m1) ht++; \
427 #define sqr64(lo,ho,in) \
437 h+=(m&BN_MASK2h1)>>(BN_BITS4-1); \
438 m =(m&BN_MASK2l)<<(BN_BITS4+1); \
439 l=(l+m)&BN_MASK2; if (l < m) h++; \
444 #define mul_add(r,a,bl,bh,c) { \
450 mul64(l,h,(bl),(bh)); \
453 l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
455 l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
460 #define mul(r,a,bl,bh,c) { \
466 mul64(l,h,(bl),(bh)); \
469 l+=(c); if ((l&BN_MASK2) < (c)) h++; \
475 #if defined(OPENSSL_DOING_MAKEDEPEND) && defined(OPENSSL_FIPS)
/*
 * Prototype: presumably the schoolbook squaring routine — r = a^2, where a is
 * n words long and tmp is caller-supplied scratch space (name-based reading;
 * exact contract lives in the implementing .c file — TODO confirm there,
 * including the required sizes of r and tmp).
 * NOTE(review): "482" is a fused doxygen line number; the prototype is split
 * across lines by the extraction.
 */
482 void bn_sqr_normal(BN_ULONG *r,
const BN_ULONG *
a,
int n, BN_ULONG *tmp);
489 int dna,
int dnb,BN_ULONG *
t);
491 int n,
int tna,
int tnb,BN_ULONG *
t);
496 void bn_mul_high(BN_ULONG *r,BN_ULONG *
a,BN_ULONG *
b,BN_ULONG *l,
int n2,
/*
 * Prototype: Montgomery multiplication kernel (often assembler-backed).
 * By convention rp = ap * bp mod np in Montgomery form, with n0 the
 * precomputed -np^-1 word(s) and num the operand length in words; the int
 * return presumably signals whether the operation was performed — both are
 * name/convention-based readings, TODO confirm against the implementation.
 * NOTE(review): "502" is a fused doxygen line number; the prototype is split
 * across lines by the extraction.
 */
502 int bn_mul_mont(BN_ULONG *rp,
const BN_ULONG *ap,
const BN_ULONG *
bp,
const BN_ULONG *np,
const BN_ULONG *n0,
int num);