cryptlib  3.4.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Macros
sha2.c
Go to the documentation of this file.
1 /*
2  ---------------------------------------------------------------------------
3  Copyright (c) 2002, Dr Brian Gladman, Worcester, UK. All rights reserved.
4 
5  LICENSE TERMS
6 
7  The free distribution and use of this software in both source and binary
8  form is allowed (with or without changes) provided that:
9 
10  1. distributions of this source code include the above copyright
11  notice, this list of conditions and the following disclaimer;
12 
13  2. distributions in binary form include the above copyright
14  notice, this list of conditions and the following disclaimer
15  in the documentation and/or other associated materials;
16 
17  3. the copyright holder's name is not used to endorse products
18  built using this software without specific written permission.
19 
20  ALTERNATIVELY, provided that this notice is retained in full, this product
21  may be distributed under the terms of the GNU General Public License (GPL),
22  in which case the provisions of the GPL apply INSTEAD OF those given above.
23 
24  DISCLAIMER
25 
26  This software is provided 'as is' with no explicit or implied warranties
27  in respect of its properties, including, but not limited to, correctness
28  and/or fitness for purpose.
29  ---------------------------------------------------------------------------
30  Issue Date: 01/08/2005
31 
32  This is a byte oriented version of SHA2 that operates on arrays of bytes
33  stored in memory. This code implements sha256, sha384 and sha512 but the
34  latter two functions rely on efficient 64-bit integer operations that
35  may not be very efficient on 32-bit machines
36 
37  The sha256 functions use a type 'sha256_ctx' to hold details of the
38  current hash state and uses the following three calls:
39 
40  void sha256_begin(sha256_ctx ctx[1])
41  void sha256_hash(const unsigned char data[],
42  unsigned long len, sha256_ctx ctx[1])
43  void sha_end1(unsigned char hval[], sha256_ctx ctx[1])
44 
45  The first subroutine initialises a hash computation by setting up the
46  context in the sha256_ctx context. The second subroutine hashes 8-bit
47  bytes from array data[] into the hash state within the sha256_ctx context,
48  the number of bytes to be hashed being given by the unsigned long
49  integer len. The third subroutine completes the hash calculation and
50  places the resulting digest value in the array of 8-bit bytes hval[].
51 
52  The sha384 and sha512 functions are similar and use the interfaces:
53 
54  void sha384_begin(sha384_ctx ctx[1]);
55  void sha384_hash(const unsigned char data[],
56  unsigned long len, sha384_ctx ctx[1]);
57  void sha384_end(unsigned char hval[], sha384_ctx ctx[1]);
58 
59  void sha512_begin(sha512_ctx ctx[1]);
60  void sha512_hash(const unsigned char data[],
61  unsigned long len, sha512_ctx ctx[1]);
62  void sha512_end(unsigned char hval[], sha512_ctx ctx[1]);
63 
64  In addition there is a function sha2 that can be used to call all these
65  functions using a call with a hash length parameter as follows:
66 
67  int sha2_begin(unsigned long len, sha2_ctx ctx[1]);
68  void sha2_hash(const unsigned char data[],
69  unsigned long len, sha2_ctx ctx[1]);
70  void sha2_end(unsigned char hval[], sha2_ctx ctx[1]);
71 
72  My thanks to Erik Andersen <[email protected]> for testing this code
73  on big-endian systems and for his assistance with corrections
74 */
75 
76 #if 0
77 #define UNROLL_SHA2 /* for SHA2 loop unroll */
78 #endif
79 
80 #include <string.h> /* for memcpy() etc. */
81 
82 #if defined( INC_ALL )
83  #include "sha2.h"
84  #include "brg_endian.h"
85 #else
86  #include "crypt/sha2.h"
87  #include "crypt/brg_endian.h"
88 #endif
89 
90 #if defined(__cplusplus)
91 extern "C"
92 {
93 #endif
94 
95 #if defined( _MSC_VER ) && ( _MSC_VER > 800 )
96 #pragma intrinsic(memcpy)
97 #endif
98 
#if 0 && defined(_MSC_VER)
#define rotl32  _lrotl
#define rotr32  _lrotr
#else
/* Rotate a 32-bit value left/right by n bits, 1 <= n <= 31.  The rotate
   count is fully parenthesised: the previous definition expanded
   rotr32(x, a + b) as `(x) << (32 - a + b)`, which mis-groups the count
   and can shift by >= 32 (undefined behaviour). */
#define rotl32(x,n) (((x) << (n)) | ((x) >> (32 - (n))))
#define rotr32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))
#endif
106 
107 #if !defined(bswap_32)
108 #define bswap_32(x) ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00))
109 #endif
110 
111 #if (PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN)
112 #define SWAP_BYTES
113 #else
114 #undef SWAP_BYTES
115 #endif
116 
117 #if 0
118 
119 #define ch(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
120 #define maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
121 
122 #else /* Thanks to Rich Schroeppel and Colin Plumb for the following */
123 
124 #define ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
125 #define maj(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
126 
127 #endif
128 
129 /* round transforms for SHA256 and SHA512 compression functions */
130 
131 #define vf(n,i) v[(n - i) & 7]
132 
133 #define hf(i) (p[i & 15] += \
134  g_1(p[(i + 14) & 15]) + p[(i + 9) & 15] + g_0(p[(i + 1) & 15]))
135 
136 #define v_cycle(i,j) \
137  vf(7,i) += (j ? hf(i) : p[i]) + k_0[i+j] \
138  + s_1(vf(4,i)) + ch(vf(4,i),vf(5,i),vf(6,i)); \
139  vf(3,i) += vf(7,i); \
140  vf(7,i) += s_0(vf(0,i))+ maj(vf(0,i),vf(1,i),vf(2,i))
141 
142 #if defined(SHA_224) || defined(SHA_256)
143 
144 #define SHA256_MASK (SHA256_BLOCK_SIZE - 1)
145 
146 #if defined(SWAP_BYTES)
147 #define bsw_32(p,n) \
148  { int _i = (n); while(_i--) ((uint_32t*)p)[_i] = bswap_32(((uint_32t*)p)[_i]); }
149 #else
150 #define bsw_32(p,n)
151 #endif
152 
153 #define s_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22))
154 #define s_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25))
155 #define g_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3))
156 #define g_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))
157 #define k_0 k256
158 
159 /* rotated SHA256 round definition. Rather than swapping variables as in */
160 /* FIPS-180, different variables are 'rotated' on each round, returning */
161 /* to their starting positions every eight rounds */
162 
163 #define q(n) v##n
164 
165 #define one_cycle(a,b,c,d,e,f,g,h,k,w) \
166  q(h) += s_1(q(e)) + ch(q(e), q(f), q(g)) + k + w; \
167  q(d) += q(h); q(h) += s_0(q(a)) + maj(q(a), q(b), q(c))
168 
169 /* SHA256 mixing data */
170 
171 const uint_32t k256[64] =
172 { 0x428a2f98ul, 0x71374491ul, 0xb5c0fbcful, 0xe9b5dba5ul,
173  0x3956c25bul, 0x59f111f1ul, 0x923f82a4ul, 0xab1c5ed5ul,
174  0xd807aa98ul, 0x12835b01ul, 0x243185beul, 0x550c7dc3ul,
175  0x72be5d74ul, 0x80deb1feul, 0x9bdc06a7ul, 0xc19bf174ul,
176  0xe49b69c1ul, 0xefbe4786ul, 0x0fc19dc6ul, 0x240ca1ccul,
177  0x2de92c6ful, 0x4a7484aaul, 0x5cb0a9dcul, 0x76f988daul,
178  0x983e5152ul, 0xa831c66dul, 0xb00327c8ul, 0xbf597fc7ul,
179  0xc6e00bf3ul, 0xd5a79147ul, 0x06ca6351ul, 0x14292967ul,
180  0x27b70a85ul, 0x2e1b2138ul, 0x4d2c6dfcul, 0x53380d13ul,
181  0x650a7354ul, 0x766a0abbul, 0x81c2c92eul, 0x92722c85ul,
182  0xa2bfe8a1ul, 0xa81a664bul, 0xc24b8b70ul, 0xc76c51a3ul,
183  0xd192e819ul, 0xd6990624ul, 0xf40e3585ul, 0x106aa070ul,
184  0x19a4c116ul, 0x1e376c08ul, 0x2748774cul, 0x34b0bcb5ul,
185  0x391c0cb3ul, 0x4ed8aa4aul, 0x5b9cca4ful, 0x682e6ff3ul,
186  0x748f82eeul, 0x78a5636ful, 0x84c87814ul, 0x8cc70208ul,
187  0x90befffaul, 0xa4506cebul, 0xbef9a3f7ul, 0xc67178f2ul,
188 };
189 
190 /* Compile 64 bytes of hash data into SHA256 digest value */
191 /* NOTE: this routine assumes that the byte order in the */
192 /* ctx->wbuf[] at this point is such that low address bytes */
193 /* in the ORIGINAL byte stream will go into the high end of */
194 /* words on BOTH big and little endian systems */
195 
197 {
198 #if !defined(UNROLL_SHA2)
199 
200  uint_32t j, *p = ctx->wbuf, v[8];
201 
202  memcpy(v, ctx->hash, 8 * sizeof(uint_32t));
203 
204  for(j = 0; j < 64; j += 16)
205  {
206  v_cycle( 0, j); v_cycle( 1, j);
207  v_cycle( 2, j); v_cycle( 3, j);
208  v_cycle( 4, j); v_cycle( 5, j);
209  v_cycle( 6, j); v_cycle( 7, j);
210  v_cycle( 8, j); v_cycle( 9, j);
211  v_cycle(10, j); v_cycle(11, j);
212  v_cycle(12, j); v_cycle(13, j);
213  v_cycle(14, j); v_cycle(15, j);
214  }
215 
216  ctx->hash[0] += v[0]; ctx->hash[1] += v[1];
217  ctx->hash[2] += v[2]; ctx->hash[3] += v[3];
218  ctx->hash[4] += v[4]; ctx->hash[5] += v[5];
219  ctx->hash[6] += v[6]; ctx->hash[7] += v[7];
220 
221 #else
222 
223  uint_32t *p = ctx->wbuf,v0,v1,v2,v3,v4,v5,v6,v7;
224 
225  v0 = ctx->hash[0]; v1 = ctx->hash[1];
226  v2 = ctx->hash[2]; v3 = ctx->hash[3];
227  v4 = ctx->hash[4]; v5 = ctx->hash[5];
228  v6 = ctx->hash[6]; v7 = ctx->hash[7];
229 
230  one_cycle(0,1,2,3,4,5,6,7,k256[ 0],p[ 0]);
231  one_cycle(7,0,1,2,3,4,5,6,k256[ 1],p[ 1]);
232  one_cycle(6,7,0,1,2,3,4,5,k256[ 2],p[ 2]);
233  one_cycle(5,6,7,0,1,2,3,4,k256[ 3],p[ 3]);
234  one_cycle(4,5,6,7,0,1,2,3,k256[ 4],p[ 4]);
235  one_cycle(3,4,5,6,7,0,1,2,k256[ 5],p[ 5]);
236  one_cycle(2,3,4,5,6,7,0,1,k256[ 6],p[ 6]);
237  one_cycle(1,2,3,4,5,6,7,0,k256[ 7],p[ 7]);
238  one_cycle(0,1,2,3,4,5,6,7,k256[ 8],p[ 8]);
239  one_cycle(7,0,1,2,3,4,5,6,k256[ 9],p[ 9]);
240  one_cycle(6,7,0,1,2,3,4,5,k256[10],p[10]);
241  one_cycle(5,6,7,0,1,2,3,4,k256[11],p[11]);
242  one_cycle(4,5,6,7,0,1,2,3,k256[12],p[12]);
243  one_cycle(3,4,5,6,7,0,1,2,k256[13],p[13]);
244  one_cycle(2,3,4,5,6,7,0,1,k256[14],p[14]);
245  one_cycle(1,2,3,4,5,6,7,0,k256[15],p[15]);
246 
247  one_cycle(0,1,2,3,4,5,6,7,k256[16],hf( 0));
248  one_cycle(7,0,1,2,3,4,5,6,k256[17],hf( 1));
249  one_cycle(6,7,0,1,2,3,4,5,k256[18],hf( 2));
250  one_cycle(5,6,7,0,1,2,3,4,k256[19],hf( 3));
251  one_cycle(4,5,6,7,0,1,2,3,k256[20],hf( 4));
252  one_cycle(3,4,5,6,7,0,1,2,k256[21],hf( 5));
253  one_cycle(2,3,4,5,6,7,0,1,k256[22],hf( 6));
254  one_cycle(1,2,3,4,5,6,7,0,k256[23],hf( 7));
255  one_cycle(0,1,2,3,4,5,6,7,k256[24],hf( 8));
256  one_cycle(7,0,1,2,3,4,5,6,k256[25],hf( 9));
257  one_cycle(6,7,0,1,2,3,4,5,k256[26],hf(10));
258  one_cycle(5,6,7,0,1,2,3,4,k256[27],hf(11));
259  one_cycle(4,5,6,7,0,1,2,3,k256[28],hf(12));
260  one_cycle(3,4,5,6,7,0,1,2,k256[29],hf(13));
261  one_cycle(2,3,4,5,6,7,0,1,k256[30],hf(14));
262  one_cycle(1,2,3,4,5,6,7,0,k256[31],hf(15));
263 
264  one_cycle(0,1,2,3,4,5,6,7,k256[32],hf( 0));
265  one_cycle(7,0,1,2,3,4,5,6,k256[33],hf( 1));
266  one_cycle(6,7,0,1,2,3,4,5,k256[34],hf( 2));
267  one_cycle(5,6,7,0,1,2,3,4,k256[35],hf( 3));
268  one_cycle(4,5,6,7,0,1,2,3,k256[36],hf( 4));
269  one_cycle(3,4,5,6,7,0,1,2,k256[37],hf( 5));
270  one_cycle(2,3,4,5,6,7,0,1,k256[38],hf( 6));
271  one_cycle(1,2,3,4,5,6,7,0,k256[39],hf( 7));
272  one_cycle(0,1,2,3,4,5,6,7,k256[40],hf( 8));
273  one_cycle(7,0,1,2,3,4,5,6,k256[41],hf( 9));
274  one_cycle(6,7,0,1,2,3,4,5,k256[42],hf(10));
275  one_cycle(5,6,7,0,1,2,3,4,k256[43],hf(11));
276  one_cycle(4,5,6,7,0,1,2,3,k256[44],hf(12));
277  one_cycle(3,4,5,6,7,0,1,2,k256[45],hf(13));
278  one_cycle(2,3,4,5,6,7,0,1,k256[46],hf(14));
279  one_cycle(1,2,3,4,5,6,7,0,k256[47],hf(15));
280 
281  one_cycle(0,1,2,3,4,5,6,7,k256[48],hf( 0));
282  one_cycle(7,0,1,2,3,4,5,6,k256[49],hf( 1));
283  one_cycle(6,7,0,1,2,3,4,5,k256[50],hf( 2));
284  one_cycle(5,6,7,0,1,2,3,4,k256[51],hf( 3));
285  one_cycle(4,5,6,7,0,1,2,3,k256[52],hf( 4));
286  one_cycle(3,4,5,6,7,0,1,2,k256[53],hf( 5));
287  one_cycle(2,3,4,5,6,7,0,1,k256[54],hf( 6));
288  one_cycle(1,2,3,4,5,6,7,0,k256[55],hf( 7));
289  one_cycle(0,1,2,3,4,5,6,7,k256[56],hf( 8));
290  one_cycle(7,0,1,2,3,4,5,6,k256[57],hf( 9));
291  one_cycle(6,7,0,1,2,3,4,5,k256[58],hf(10));
292  one_cycle(5,6,7,0,1,2,3,4,k256[59],hf(11));
293  one_cycle(4,5,6,7,0,1,2,3,k256[60],hf(12));
294  one_cycle(3,4,5,6,7,0,1,2,k256[61],hf(13));
295  one_cycle(2,3,4,5,6,7,0,1,k256[62],hf(14));
296  one_cycle(1,2,3,4,5,6,7,0,k256[63],hf(15));
297 
298  ctx->hash[0] += v0; ctx->hash[1] += v1;
299  ctx->hash[2] += v2; ctx->hash[3] += v3;
300  ctx->hash[4] += v4; ctx->hash[5] += v5;
301  ctx->hash[6] += v6; ctx->hash[7] += v7;
302 #endif
303 }
304 
305 /* SHA256 hash data in an array of bytes into hash buffer */
306 /* and call the hash_compile function as required. */
307 
308 VOID_RETURN sha256_hash(const unsigned char data[], unsigned long len, sha256_ctx ctx[1])
309 { uint_32t pos = (uint_32t)(ctx->count[0] & SHA256_MASK),
310  space = SHA256_BLOCK_SIZE - pos;
311  const unsigned char *sp = data;
312 
313  if((ctx->count[0] += len) < len)
314  ++(ctx->count[1]);
315 
316  while(len >= space) /* tranfer whole blocks while possible */
317  {
318  memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space);
319  sp += space; len -= space; space = SHA256_BLOCK_SIZE; pos = 0;
320  bsw_32(ctx->wbuf, SHA256_BLOCK_SIZE >> 2)
321  sha256_compile(ctx);
322  }
323 
324  memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len);
325 }
326 
327 /* SHA256 Final padding and digest calculation */
328 
329 static void sha_end1(unsigned char hval[], sha256_ctx ctx[1], const unsigned int hlen)
330 { uint_32t i = (uint_32t)(ctx->count[0] & SHA256_MASK);
331 
332  /* put bytes in the buffer in an order in which references to */
333  /* 32-bit words will put bytes with lower addresses into the */
334  /* top of 32 bit words on BOTH big and little endian machines */
335  bsw_32(ctx->wbuf, (i + 3) >> 2)
336 
337  /* we now need to mask valid bytes and add the padding which is */
338  /* a single 1 bit and as many zero bits as necessary. Note that */
339  /* we can always add the first padding byte here because the */
340  /* buffer always has at least one empty slot */
341  ctx->wbuf[i >> 2] &= 0xffffff80 << 8 * (~i & 3);
342  ctx->wbuf[i >> 2] |= 0x00000080 << 8 * (~i & 3);
343 
344  /* we need 9 or more empty positions, one for the padding byte */
345  /* (above) and eight for the length count. If there is not */
346  /* enough space pad and empty the buffer */
347  if(i > SHA256_BLOCK_SIZE - 9)
348  {
349  if(i < 60) ctx->wbuf[15] = 0;
350  sha256_compile(ctx);
351  i = 0;
352  }
353  else /* compute a word index for the empty buffer positions */
354  i = (i >> 2) + 1;
355 
356  while(i < 14) /* and zero pad all but last two positions */
357  ctx->wbuf[i++] = 0;
358 
359  /* the following 32-bit length fields are assembled in the */
360  /* wrong byte order on little endian machines but this is */
361  /* corrected later since they are only ever used as 32-bit */
362  /* word values. */
363  ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29);
364  ctx->wbuf[15] = ctx->count[0] << 3;
365  sha256_compile(ctx);
366 
367  /* extract the hash value as bytes in case the hash buffer is */
368  /* mislaigned for 32-bit words */
369  for(i = 0; i < hlen; ++i)
370  hval[i] = (uint_8t)((ctx->hash[i >> 2] >> (8 * (~i & 3))) & 0xff);
371 }
372 
373 #endif
374 
375 #if defined(SHA_224)
376 
377 const uint_32t i224[8] =
378 {
379  0xc1059ed8ul, 0x367cd507ul, 0x3070dd17ul, 0xf70e5939ul,
380  0xffc00b31ul, 0x68581511ul, 0x64f98fa7ul, 0xbefa4fa4ul
381 };
382 
384 {
385  ctx->count[0] = ctx->count[1] = 0;
386  memcpy(ctx->hash, i224, 8 * sizeof(uint_32t));
387 }
388 
389 VOID_RETURN sha224_end(unsigned char hval[], sha224_ctx ctx[1])
390 {
391  sha_end1(hval, ctx, SHA224_DIGEST_SIZE);
392 }
393 
394 VOID_RETURN sha224(unsigned char hval[], const unsigned char data[], unsigned long len)
395 { sha224_ctx cx[1];
396 
397  sha224_begin(cx);
398  sha224_hash(data, len, cx);
399  sha_end1(hval, cx, SHA224_DIGEST_SIZE);
400 }
401 
402 #endif
403 
404 #if defined(SHA_256)
405 
406 const uint_32t i256[8] =
407 {
408  0x6a09e667ul, 0xbb67ae85ul, 0x3c6ef372ul, 0xa54ff53aul,
409  0x510e527ful, 0x9b05688cul, 0x1f83d9abul, 0x5be0cd19ul
410 };
411 
413 {
414  ctx->count[0] = ctx->count[1] = 0;
415  memcpy(ctx->hash, i256, 8 * sizeof(uint_32t));
416 }
417 
418 VOID_RETURN sha256_end(unsigned char hval[], sha256_ctx ctx[1])
419 {
420  sha_end1(hval, ctx, SHA256_DIGEST_SIZE);
421 }
422 
423 VOID_RETURN sha256(unsigned char hval[], const unsigned char data[], unsigned long len)
424 { sha256_ctx cx[1];
425 
426  sha256_begin(cx);
427  sha256_hash(data, len, cx);
428  sha_end1(hval, cx, SHA256_DIGEST_SIZE);
429 }
430 
431 #endif
432 
433 #if defined(SHA_384) || defined(SHA_512)
434 
435 #define SHA512_MASK (SHA512_BLOCK_SIZE - 1)
436 
/* 64-bit right rotate, 1 <= n <= 63; the count is parenthesised so that
   compound arguments such as (a + b) group correctly (the previous form
   expanded `64 - a + b`, which can shift by >= 64: undefined behaviour) */
#define rotr64(x,n) (((x) >> (n)) | ((x) << (64 - (n))))
438 
439 #if !defined(bswap_64)
440 #define bswap_64(x) (((uint_64t)(bswap_32((uint_32t)(x)))) << 32 | bswap_32((uint_32t)((x) >> 32)))
441 #endif
442 
443 #if defined(SWAP_BYTES)
444 #define bsw_64(p,n) \
445  { int _i = (n); while(_i--) ((uint_64t*)p)[_i] = bswap_64(((uint_64t*)p)[_i]); }
446 #else
447 #define bsw_64(p,n)
448 #endif
449 
450 /* SHA512 mixing function definitions */
451 
452 #ifdef s_0
453 # undef s_0
454 # undef s_1
455 # undef g_0
456 # undef g_1
457 # undef k_0
458 #endif
459 
460 #define s_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39))
461 #define s_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41))
462 #define g_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7))
463 #define g_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6))
464 #define k_0 k512
465 
466 /* SHA384/SHA512 mixing data */
467 
468 const uint_64t k512[80] =
469 {
470  li_64(428a2f98d728ae22), li_64(7137449123ef65cd),
471  li_64(b5c0fbcfec4d3b2f), li_64(e9b5dba58189dbbc),
472  li_64(3956c25bf348b538), li_64(59f111f1b605d019),
473  li_64(923f82a4af194f9b), li_64(ab1c5ed5da6d8118),
474  li_64(d807aa98a3030242), li_64(12835b0145706fbe),
475  li_64(243185be4ee4b28c), li_64(550c7dc3d5ffb4e2),
476  li_64(72be5d74f27b896f), li_64(80deb1fe3b1696b1),
477  li_64(9bdc06a725c71235), li_64(c19bf174cf692694),
478  li_64(e49b69c19ef14ad2), li_64(efbe4786384f25e3),
479  li_64(0fc19dc68b8cd5b5), li_64(240ca1cc77ac9c65),
480  li_64(2de92c6f592b0275), li_64(4a7484aa6ea6e483),
481  li_64(5cb0a9dcbd41fbd4), li_64(76f988da831153b5),
482  li_64(983e5152ee66dfab), li_64(a831c66d2db43210),
483  li_64(b00327c898fb213f), li_64(bf597fc7beef0ee4),
484  li_64(c6e00bf33da88fc2), li_64(d5a79147930aa725),
485  li_64(06ca6351e003826f), li_64(142929670a0e6e70),
486  li_64(27b70a8546d22ffc), li_64(2e1b21385c26c926),
487  li_64(4d2c6dfc5ac42aed), li_64(53380d139d95b3df),
488  li_64(650a73548baf63de), li_64(766a0abb3c77b2a8),
489  li_64(81c2c92e47edaee6), li_64(92722c851482353b),
490  li_64(a2bfe8a14cf10364), li_64(a81a664bbc423001),
491  li_64(c24b8b70d0f89791), li_64(c76c51a30654be30),
492  li_64(d192e819d6ef5218), li_64(d69906245565a910),
493  li_64(f40e35855771202a), li_64(106aa07032bbd1b8),
494  li_64(19a4c116b8d2d0c8), li_64(1e376c085141ab53),
495  li_64(2748774cdf8eeb99), li_64(34b0bcb5e19b48a8),
496  li_64(391c0cb3c5c95a63), li_64(4ed8aa4ae3418acb),
497  li_64(5b9cca4f7763e373), li_64(682e6ff3d6b2b8a3),
498  li_64(748f82ee5defb2fc), li_64(78a5636f43172f60),
499  li_64(84c87814a1f0ab72), li_64(8cc702081a6439ec),
500  li_64(90befffa23631e28), li_64(a4506cebde82bde9),
501  li_64(bef9a3f7b2c67915), li_64(c67178f2e372532b),
502  li_64(ca273eceea26619c), li_64(d186b8c721c0c207),
503  li_64(eada7dd6cde0eb1e), li_64(f57d4f7fee6ed178),
504  li_64(06f067aa72176fba), li_64(0a637dc5a2c898a6),
505  li_64(113f9804bef90dae), li_64(1b710b35131c471b),
506  li_64(28db77f523047d84), li_64(32caab7b40c72493),
507  li_64(3c9ebe0a15c9bebc), li_64(431d67c49c100d4c),
508  li_64(4cc5d4becb3e42b6), li_64(597f299cfc657e2a),
509  li_64(5fcb6fab3ad6faec), li_64(6c44198c4a475817)
510 };
511 
512 /* Compile 128 bytes of hash data into SHA384/512 digest */
513 /* NOTE: this routine assumes that the byte order in the */
514 /* ctx->wbuf[] at this point is such that low address bytes */
515 /* in the ORIGINAL byte stream will go into the high end of */
516 /* words on BOTH big and little endian systems */
517 
518 VOID_RETURN sha512_compile(sha512_ctx ctx[1])
519 { uint_64t v[8], *p = ctx->wbuf;
520  uint_32t j;
521 
522  memcpy(v, ctx->hash, 8 * sizeof(uint_64t));
523 
524  for(j = 0; j < 80; j += 16)
525  {
526  v_cycle( 0, j); v_cycle( 1, j);
527  v_cycle( 2, j); v_cycle( 3, j);
528  v_cycle( 4, j); v_cycle( 5, j);
529  v_cycle( 6, j); v_cycle( 7, j);
530  v_cycle( 8, j); v_cycle( 9, j);
531  v_cycle(10, j); v_cycle(11, j);
532  v_cycle(12, j); v_cycle(13, j);
533  v_cycle(14, j); v_cycle(15, j);
534  }
535 
536  ctx->hash[0] += v[0]; ctx->hash[1] += v[1];
537  ctx->hash[2] += v[2]; ctx->hash[3] += v[3];
538  ctx->hash[4] += v[4]; ctx->hash[5] += v[5];
539  ctx->hash[6] += v[6]; ctx->hash[7] += v[7];
540 }
541 
542 /* Compile 128 bytes of hash data into SHA256 digest value */
543 /* NOTE: this routine assumes that the byte order in the */
544 /* ctx->wbuf[] at this point is in such an order that low */
545 /* address bytes in the ORIGINAL byte stream placed in this */
546 /* buffer will now go to the high end of words on BOTH big */
547 /* and little endian systems */
548 
549 VOID_RETURN sha512_hash(const unsigned char data[], unsigned long len, sha512_ctx ctx[1])
550 { uint_32t pos = (uint_32t)(ctx->count[0] & SHA512_MASK),
551  space = SHA512_BLOCK_SIZE - pos;
552  const unsigned char *sp = data;
553 
554  if((ctx->count[0] += len) < len)
555  ++(ctx->count[1]);
556 
557  while(len >= space) /* tranfer whole blocks while possible */
558  {
559  memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space);
560  sp += space; len -= space; space = SHA512_BLOCK_SIZE; pos = 0;
561  bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3);
562  sha512_compile(ctx);
563  }
564 
565  memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len);
566 }
567 
568 /* SHA384/512 Final padding and digest calculation */
569 
570 static void sha_end2(unsigned char hval[], sha512_ctx ctx[1], const unsigned int hlen)
571 { uint_32t i = (uint_32t)(ctx->count[0] & SHA512_MASK);
572 
573  /* put bytes in the buffer in an order in which references to */
574  /* 32-bit words will put bytes with lower addresses into the */
575  /* top of 32 bit words on BOTH big and little endian machines */
576  bsw_64(ctx->wbuf, (i + 7) >> 3);
577 
578  /* we now need to mask valid bytes and add the padding which is */
579  /* a single 1 bit and as many zero bits as necessary. Note that */
580  /* we can always add the first padding byte here because the */
581  /* buffer always has at least one empty slot */
582  ctx->wbuf[i >> 3] &= li_64(ffffffffffffff00) << 8 * (~i & 7);
583  ctx->wbuf[i >> 3] |= li_64(0000000000000080) << 8 * (~i & 7);
584 
585  /* we need 17 or more empty byte positions, one for the padding */
586  /* byte (above) and sixteen for the length count. If there is */
587  /* not enough space pad and empty the buffer */
588  if(i > SHA512_BLOCK_SIZE - 17)
589  {
590  if(i < 120) ctx->wbuf[15] = 0;
591  sha512_compile(ctx);
592  i = 0;
593  }
594  else
595  i = (i >> 3) + 1;
596 
597  while(i < 14)
598  ctx->wbuf[i++] = 0;
599 
600  /* the following 64-bit length fields are assembled in the */
601  /* wrong byte order on little endian machines but this is */
602  /* corrected later since they are only ever used as 64-bit */
603  /* word values. */
604  ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 61);
605  ctx->wbuf[15] = ctx->count[0] << 3;
606  sha512_compile(ctx);
607 
608  /* extract the hash value as bytes in case the hash buffer is */
609  /* misaligned for 32-bit words */
610  for(i = 0; i < hlen; ++i)
611  hval[i] = (unsigned char)(ctx->hash[i >> 3] >> (8 * (~i & 7)));
612 }
613 
614 #endif
615 
616 #if defined(SHA_384)
617 
618 /* SHA384 initialisation data */
619 
620 const uint_64t i384[80] =
621 {
622  li_64(cbbb9d5dc1059ed8), li_64(629a292a367cd507),
623  li_64(9159015a3070dd17), li_64(152fecd8f70e5939),
624  li_64(67332667ffc00b31), li_64(8eb44a8768581511),
625  li_64(db0c2e0d64f98fa7), li_64(47b5481dbefa4fa4)
626 };
627 
628 VOID_RETURN sha384_begin(sha384_ctx ctx[1])
629 {
630  ctx->count[0] = ctx->count[1] = 0;
631  memcpy(ctx->hash, i384, 8 * sizeof(uint_64t));
632 }
633 
634 VOID_RETURN sha384_end(unsigned char hval[], sha384_ctx ctx[1])
635 {
636  sha_end2(hval, ctx, SHA384_DIGEST_SIZE);
637 }
638 
639 VOID_RETURN sha384(unsigned char hval[], const unsigned char data[], unsigned long len)
640 { sha384_ctx cx[1];
641 
642  sha384_begin(cx);
643  sha384_hash(data, len, cx);
644  sha_end2(hval, cx, SHA384_DIGEST_SIZE);
645 }
646 
647 #endif
648 
649 #if defined(SHA_512)
650 
651 /* SHA512 initialisation data */
652 
653 const uint_64t i512[80] =
654 {
655  li_64(6a09e667f3bcc908), li_64(bb67ae8584caa73b),
656  li_64(3c6ef372fe94f82b), li_64(a54ff53a5f1d36f1),
657  li_64(510e527fade682d1), li_64(9b05688c2b3e6c1f),
658  li_64(1f83d9abfb41bd6b), li_64(5be0cd19137e2179)
659 };
660 
661 VOID_RETURN sha512_begin(sha512_ctx ctx[1])
662 {
663  ctx->count[0] = ctx->count[1] = 0;
664  memcpy(ctx->hash, i512, 8 * sizeof(uint_64t));
665 }
666 
667 VOID_RETURN sha512_end(unsigned char hval[], sha512_ctx ctx[1])
668 {
669  sha_end2(hval, ctx, SHA512_DIGEST_SIZE);
670 }
671 
672 VOID_RETURN sha512(unsigned char hval[], const unsigned char data[], unsigned long len)
673 { sha512_ctx cx[1];
674 
675  sha512_begin(cx);
676  sha512_hash(data, len, cx);
677  sha_end2(hval, cx, SHA512_DIGEST_SIZE);
678 }
679 
680 #endif
681 
682 #if defined(SHA_2)
683 
/* Accessors for the variant-specific context inside a sha2_ctx.
   NOTE(review): `(x)->uu->ctx256` presumably relies on `uu` being a
   one-element array of a union in sha2.h, so `->` indexes uu[0] —
   confirm against the header. */
#define CTX_224(x) ((x)->uu->ctx256)
#define CTX_256(x) ((x)->uu->ctx256)
#define CTX_384(x) ((x)->uu->ctx512)
#define CTX_512(x) ((x)->uu->ctx512)
688 
689 /* SHA2 initialisation */
690 
691 INT_RETURN sha2_begin(unsigned long len, sha2_ctx ctx[1])
692 {
693  switch(len)
694  {
695 #if defined(SHA_224)
696  case 224:
697  case 28: CTX_256(ctx)->count[0] = CTX_256(ctx)->count[1] = 0;
698  memcpy(CTX_256(ctx)->hash, i224, 32);
699  ctx->sha2_len = 28; return EXIT_SUCCESS;
700 #endif
701 #if defined(SHA_256)
702  case 256:
703  case 32: CTX_256(ctx)->count[0] = CTX_256(ctx)->count[1] = 0;
704  memcpy(CTX_256(ctx)->hash, i256, 32);
705  ctx->sha2_len = 32; return EXIT_SUCCESS;
706 #endif
707 #if defined(SHA_384)
708  case 384:
709  case 48: CTX_384(ctx)->count[0] = CTX_384(ctx)->count[1] = 0;
710  memcpy(CTX_384(ctx)->hash, i384, 64);
711  ctx->sha2_len = 48; return EXIT_SUCCESS;
712 #endif
713 #if defined(SHA_512)
714  case 512:
715  case 64: CTX_512(ctx)->count[0] = CTX_512(ctx)->count[1] = 0;
716  memcpy(CTX_512(ctx)->hash, i512, 64);
717  ctx->sha2_len = 64; return EXIT_SUCCESS;
718 #endif
719  default: return EXIT_FAILURE;
720  }
721 }
722 
723 VOID_RETURN sha2_hash(const unsigned char data[], unsigned long len, sha2_ctx ctx[1])
724 {
725  switch(ctx->sha2_len)
726  {
727 #if defined(SHA_224)
728  case 28: sha224_hash(data, len, CTX_224(ctx)); return;
729 #endif
730 #if defined(SHA_256)
731  case 32: sha256_hash(data, len, CTX_256(ctx)); return;
732 #endif
733 #if defined(SHA_384)
734  case 48: sha384_hash(data, len, CTX_384(ctx)); return;
735 #endif
736 #if defined(SHA_512)
737  case 64: sha512_hash(data, len, CTX_512(ctx)); return;
738 #endif
739  }
740 }
741 
742 VOID_RETURN sha2_end(unsigned char hval[], sha2_ctx ctx[1])
743 {
744  switch(ctx->sha2_len)
745  {
746 #if defined(SHA_224)
747  case 28: sha_end1(hval, CTX_224(ctx), SHA224_DIGEST_SIZE); return;
748 #endif
749 #if defined(SHA_256)
750  case 32: sha_end1(hval, CTX_256(ctx), SHA256_DIGEST_SIZE); return;
751 #endif
752 #if defined(SHA_384)
753  case 48: sha_end2(hval, CTX_384(ctx), SHA384_DIGEST_SIZE); return;
754 #endif
755 #if defined(SHA_512)
756  case 64: sha_end2(hval, CTX_512(ctx), SHA512_DIGEST_SIZE); return;
757 #endif
758  }
759 }
760 
761 #if 0 /* Can't be enabled because MVS doesn't allow a function name to have
762  the same name as a module name, this is safe to comment out since we
763  never use it - pcg */
764 
765 INT_RETURN sha2(unsigned char hval[], unsigned long size,
766  const unsigned char data[], unsigned long len)
767 { sha2_ctx cx[1];
768 
769  if(sha2_begin(size, cx) == EXIT_SUCCESS)
770  {
771  sha2_hash(data, len, cx); sha2_end(hval, cx); return EXIT_SUCCESS;
772  }
773  else
774  return EXIT_FAILURE;
775 }
776 #endif /* 0 */
777 
778 #endif
779 
780 #if defined(__cplusplus)
781 }
782 #endif
783