cryptlib  3.4.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Macros
gcm.c
Go to the documentation of this file.
1 /*
2 ---------------------------------------------------------------------------
3 Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved.
4 
5 The redistribution and use of this software (with or without changes)
6 is allowed without the payment of fees or royalties provided that:
7 
8  source code distributions include the above copyright notice, this
9  list of conditions and the following disclaimer;
10 
11  binary distributions include the above copyright notice, this list
12  of conditions and the following disclaimer in their documentation.
13 
14 This software is provided 'as is' with no explicit or implied warranties
15 in respect of its operation, including, but not limited to, correctness
16 and fitness for purpose.
17 ---------------------------------------------------------------------------
18 Issue Date: 30/03/2011
19 
20  My thanks to:
21 
22  Colin Sinclair for finding an error and suggesting a number of
23  improvements to this code.
24 
25  John Viega and David McGrew for their support in the development
26  of this code and to David for testing it on a big-endian system.
27 
28  Mark Rodenkirch and Jason Papadopoulos for their help in finding
29  a bug in the fast buffer operations on big endian systems.
30 */
31 
32 #if defined( INC_ALL ) /* pcg */
33  #include "gcm.h"
34  #include "mode_hdr.h"
35 #else
36  #include "crypt/gcm.h"
37  #include "crypt/mode_hdr.h"
38 #endif /* Compiler-specific includes */
39 
40 /* This GCM implementation needs a Galois Field multiplier for GF(2^128),
41  which operates on field elements using a polynomial field representation
42  x^127 + x^126 + ... + x^2 + x + 1 using the bits in a bit sequence that
43  will be numbered by the power of x that they represent. GCM uses the
44  polynomial x^128 + x^7 + x^2 + x + 1 as its basis for representation.
45 
46  The obvious way of representing this in a computer system is to map GF
47  'x' to the binary integer '2' - but this was way too obvious for any
48  cryptographer to adopt!
49 
50  Here bytes are numbered in memory order and bits within bytes according
51  to their integer numeric significance. The term 'little endian' is then
52  used to describe mappings in which numeric (power of 2) or field (power
53  of x) significance increase with increasing bit or byte numbers with
54  'big endian' being used to describe the inverse situation.
55 
56  GCM uses little endian byte ordering and big endian bit ordering, a
57  representation that will be described as LB. Hence the low end of the
58  field polynomial is in byte[0], which has the value 0xe1 rather than
59  0x87 in the more obvious mappings.
60 
61  The related field multiplier can use this mapping but if you want to
62  use an alternative (e.g. hardware) multiplier that uses a different
63  polynomial field representation, you can do so by changing the form
64  used for the field elements when this alternative multiplier is used.
65 
66  If GF_REPRESENTATION is defined as one of:
67 
68  REVERSE_BITS // change to LL
69  REVERSE_BYTES | REVERSE_BITS // change to BL
70  REVERSE_NONE // no change
71  REVERSE_BYTES // change to BB
72 
73  then an appropriate change of representation will occur before and
74  after calls to your revised field multiplier. To use this you need
75  to add gf_convert.c to your application.
76 */
77 
78 #if defined(__cplusplus)
79 extern "C"
80 {
81 #endif
82 
83 #if 1
84 # undef GF_REPRESENTATION
85 #elif 0
86 # define GF_REPRESENTATION REVERSE_BITS
87 #elif 0
88 # define GF_REPRESENTATION REVERSE_BYTES | REVERSE_BITS
89 #elif 0
90 # define GF_REPRESENTATION REVERSE_NONE
91 #elif 0
92 # define GF_REPRESENTATION REVERSE_BITS
93 #endif
94 
#define BLOCK_SIZE      GCM_BLOCK_SIZE      /* block length in bytes         */
#define BLK_ADR_MASK    (BLOCK_SIZE - 1)    /* mask for 'in block' address   */
#define CTR_POS         12                  /* byte offset of the 32-bit
                                               big-endian counter field      */

/* Increment the 32-bit big-endian counter held in the last four bytes of
   the counter block x, propagating the carry from byte 15 down to byte
   CTR_POS.  Wrapped in do { } while(0) so that the macro behaves as a
   single statement in every context (e.g. an unbraced if/else branch),
   which the original bare-brace form did not.                             */
#define inc_ctr(x)                                                  \
    do {                                                            \
        int i = BLOCK_SIZE;                                         \
        while(i-- > CTR_POS && !++(UI8_PTR(x)[i]))                  \
            ;                                                       \
    } while(0)
101 
/* Initialise the GCM context: schedule the AES key and derive the GHASH
   key H = E(K, 0^128), then build whichever GF(2^128) multiplication
   tables the compile-time TABLES_* setting selects.
   NOTE(review): the return value of aes_encrypt_key() is ignored, so an
   unsupported key_len is not reported here - confirm that callers
   validate the key length before calling this.                          */
ret_type gcm_init_and_key(                  /* initialise mode and set key */
            const unsigned char key[],      /* the key value               */
            unsigned long key_len,          /* and its length in bytes     */
            gcm_ctx ctx[1])                 /* the mode context            */
{
    memset(ctx->ghash_h, 0, sizeof(ctx->ghash_h));

    /* set the AES key */
    aes_encrypt_key(key, key_len, ctx->aes);

    /* compute E(0) (for the hash function) */
    aes_encrypt(UI8_PTR(ctx->ghash_h), UI8_PTR(ctx->ghash_h), ctx->aes);

#if defined( GF_REPRESENTATION )
    /* convert H into the external multiplier's representation */
    convert_representation(ctx->ghash_h, ctx->ghash_h, GF_REPRESENTATION);
#endif

    /* build the multiplication table selected at compile time (the
       larger the table the faster gf_mul_hh, at a memory cost)      */
#if defined( TABLES_64K )
    init_64k_table(ctx->ghash_h, ctx->gf_t64k);
#elif defined( TABLES_8K )
    init_8k_table(ctx->ghash_h, ctx->gf_t8k);
#elif defined( TABLES_4K )
    init_4k_table(ctx->ghash_h, ctx->gf_t4k);
#elif defined( TABLES_256 )
    init_256_table(ctx->ghash_h, ctx->gf_t256);
#endif
#if defined( GF_REPRESENTATION )
    /* restore H to GCM's native (LB) representation */
    convert_representation(ctx->ghash_h, ctx->ghash_h, GF_REPRESENTATION);
#endif
    return RETURN_GOOD;
}
133 
/* Multiply the field element 'a' in place by the hash key H
   (ctx->ghash_h) in GF(2^128), using whichever multiplier the build
   selected.  When GF_REPRESENTATION is defined, 'a' is converted to the
   external multiplier's representation on entry and back to GCM's LB
   form on exit (convert_representation is its own inverse).           */
void gf_mul_hh(gf_t a, gcm_ctx ctx[1])
{
#if defined( GF_REPRESENTATION ) || !defined( NO_TABLES )
    gf_t scr;   /* scratch block used by the table multipliers */
#endif
#if defined( GF_REPRESENTATION )
    convert_representation(a, a, GF_REPRESENTATION);
#endif

#if defined( TABLES_64K )
    gf_mul_64k(a, ctx->gf_t64k, scr);
#elif defined( TABLES_8K )
    gf_mul_8k(a, ctx->gf_t8k, scr);
#elif defined( TABLES_4K )
    gf_mul_4k(a, ctx->gf_t4k, scr);
#elif defined( TABLES_256 )
    gf_mul_256(a, ctx->gf_t256, scr);
#else
# if defined( GF_REPRESENTATION )
    /* no tables: convert a copy of H itself and multiply directly */
    convert_representation(scr, ctx->ghash_h, GF_REPRESENTATION);
    gf_mul(a, scr);
# else
    gf_mul(a, ctx->ghash_h);
# endif
#endif

#if defined( GF_REPRESENTATION )
    convert_representation(a, a, GF_REPRESENTATION);
#endif
}
164 
/* Start a new message: derive the initial counter block (J0 in the GCM
   specification) from the IV, then reset the header and text GHASH
   accumulators and the byte counters.                                  */
ret_type gcm_init_message(                  /* initialise a new message    */
            const unsigned char iv[],       /* the initialisation vector   */
            unsigned long iv_len,           /* and its length in bytes     */
            gcm_ctx ctx[1])                 /* the mode context            */
{   uint_32t i, n_pos = 0;
    uint_8t *p;

    memset(ctx->ctr_val, 0, BLOCK_SIZE);
    if(iv_len == CTR_POS)
    {   /* fast path for the recommended 96-bit IV: J0 = IV || 0^31 || 1 */
        memcpy(ctx->ctr_val, iv, CTR_POS); UI8_PTR(ctx->ctr_val)[15] = 0x01;
    }
    else
    {   /* any other IV length: J0 = GHASH over the zero-padded IV
           followed by a block holding the IV's bit length           */
        n_pos = iv_len;
        while(n_pos >= BLOCK_SIZE)
        {
            xor_block_aligned(ctx->ctr_val, ctx->ctr_val, iv);
            n_pos -= BLOCK_SIZE;
            iv += BLOCK_SIZE;
            gf_mul_hh((void*)ctx->ctr_val, ctx);
        }

        if(n_pos)   /* trailing partial block, zero padded by construction */
        {
            p = UI8_PTR(ctx->ctr_val);
            while(n_pos-- > 0)
                *p++ ^= *iv++;
            gf_mul_hh((void*)ctx->ctr_val, ctx);
        }
        /* xor in the IV length in bits, working upward from byte 15.
           NOTE(review): n_pos is 32 bits wide, so an IV of 2^29 bytes
           or more would have its bit count truncated - confirm that
           callers bound iv_len.                                      */
        n_pos = (iv_len << 3);
        for(i = BLOCK_SIZE - 1; n_pos; --i, n_pos >>= 8)
            UI8_PTR(ctx->ctr_val)[i] ^= (unsigned char)n_pos;
        gf_mul_hh((void*)ctx->ctr_val, ctx);
    }

    /* save the 32-bit counter field of J0 - gcm_compute_tag needs it
       again to encrypt the final hash - then clear the hash state    */
    ctx->y0_val = *UI32_PTR(UI8_PTR(ctx->ctr_val) + CTR_POS);
    memset(ctx->hdr_ghv, 0, BLOCK_SIZE);
    memset(ctx->txt_ghv, 0, BLOCK_SIZE);
    ctx->hdr_cnt = 0;
    ctx->txt_ccnt = ctx->txt_acnt = 0;
    return RETURN_GOOD;
}
207 
/* Fold additional authenticated data (the 'header') into the header
   GHASH accumulator ctx->hdr_ghv.  May be called repeatedly; the low
   bits of ctx->hdr_cnt track how full the current block is.  The GHASH
   multiply for a completed block is deferred until more data arrives
   (or until gcm_compute_tag), which is why the full-block loops below
   multiply the accumulator BEFORE xoring in the next block.           */
ret_type gcm_auth_header(                   /* authenticate the header     */
            const unsigned char hdr[],      /* the header buffer           */
            unsigned long hdr_len,          /* and its length in bytes     */
            gcm_ctx ctx[1])                 /* the mode context            */
{   uint_32t cnt = 0, b_pos = (uint_32t)ctx->hdr_cnt & BLK_ADR_MASK;

    if(!hdr_len)
        return RETURN_GOOD;

    /* a previous call left exactly one full block pending - hash it now */
    if(ctx->hdr_cnt && b_pos == 0)
        gf_mul_hh((void*)ctx->hdr_ghv, ctx);

    /* fast path: input and accumulator positions are mutually aligned,
       so whole machine words can be xored at a time                    */
    if(!((hdr - (UI8_PTR(ctx->hdr_ghv) + b_pos)) & BUF_ADRMASK))
    {
        /* xor single bytes until the position is word aligned */
        while(cnt < hdr_len && (b_pos & BUF_ADRMASK))
            UI8_PTR(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];

        /* then a word at a time up to the end of the current block */
        while(cnt + BUF_INC <= hdr_len && b_pos <= BLOCK_SIZE - BUF_INC)
        {
            *UNIT_PTR(UI8_PTR(ctx->hdr_ghv) + b_pos) ^= *UNIT_PTR(hdr + cnt);
            cnt += BUF_INC; b_pos += BUF_INC;
        }

        /* remaining whole blocks (multiply first - see note above) */
        while(cnt + BLOCK_SIZE <= hdr_len)
        {
            gf_mul_hh((void*)ctx->hdr_ghv, ctx);
            xor_block_aligned(ctx->hdr_ghv, ctx->hdr_ghv, hdr + cnt);
            cnt += BLOCK_SIZE;
        }
    }
    else    /* unaligned input: byte operations and unaligned block xor */
    {
        while(cnt < hdr_len && b_pos < BLOCK_SIZE)
            UI8_PTR(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];

        while(cnt + BLOCK_SIZE <= hdr_len)
        {
            gf_mul_hh((void*)ctx->hdr_ghv, ctx);
            xor_block(ctx->hdr_ghv, ctx->hdr_ghv, hdr + cnt);
            cnt += BLOCK_SIZE;
        }
    }

    /* trailing bytes stay buffered in the accumulator for the next
       call; hash the current block first if it has just become full  */
    while(cnt < hdr_len)
    {
        if(b_pos == BLOCK_SIZE)
        {
            gf_mul_hh((void*)ctx->hdr_ghv, ctx);
            b_pos = 0;
        }
        UI8_PTR(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];
    }

    ctx->hdr_cnt += cnt;
    return RETURN_GOOD;
}
264 
/* Fold message data into the text GHASH accumulator ctx->txt_ghv.  In
   GCM it is the CIPHERTEXT that is authenticated, so this is called
   after encryption (gcm_encrypt) or before decryption (gcm_decrypt).
   Structure mirrors gcm_auth_header: the GHASH multiply for a full
   block is deferred, and an aligned fast path xors whole words.      */
ret_type gcm_auth_data(                     /* authenticate ciphertext data */
            const unsigned char data[],     /* the data buffer              */
            unsigned long data_len,         /* and its length in bytes      */
            gcm_ctx ctx[1])                 /* the mode context             */
{   uint_32t cnt = 0, b_pos = (uint_32t)ctx->txt_acnt & BLK_ADR_MASK;

    if(!data_len)
        return RETURN_GOOD;

    /* a previous call left exactly one full block pending - hash it now */
    if(ctx->txt_acnt && b_pos == 0)
        gf_mul_hh((void*)ctx->txt_ghv, ctx);

    /* fast path: data and accumulator positions mutually word aligned */
    if(!((data - (UI8_PTR(ctx->txt_ghv) + b_pos)) & BUF_ADRMASK))
    {
        /* xor single bytes until the position is word aligned */
        while(cnt < data_len && (b_pos & BUF_ADRMASK))
            UI8_PTR(ctx->txt_ghv)[b_pos++] ^= data[cnt++];

        /* then a word at a time up to the end of the current block */
        while(cnt + BUF_INC <= data_len && b_pos <= BLOCK_SIZE - BUF_INC)
        {
            *UNIT_PTR(UI8_PTR(ctx->txt_ghv) + b_pos) ^= *UNIT_PTR(data + cnt);
            cnt += BUF_INC; b_pos += BUF_INC;
        }

        /* remaining whole blocks (multiply before xor - deferred GHASH) */
        while(cnt + BLOCK_SIZE <= data_len)
        {
            gf_mul_hh((void*)ctx->txt_ghv, ctx);
            xor_block_aligned(ctx->txt_ghv, ctx->txt_ghv, data + cnt);
            cnt += BLOCK_SIZE;
        }
    }
    else    /* unaligned input: byte operations and unaligned block xor */
    {
        while(cnt < data_len && b_pos < BLOCK_SIZE)
            UI8_PTR(ctx->txt_ghv)[b_pos++] ^= data[cnt++];

        while(cnt + BLOCK_SIZE <= data_len)
        {
            gf_mul_hh((void*)ctx->txt_ghv, ctx);
            xor_block(ctx->txt_ghv, ctx->txt_ghv, data + cnt);
            cnt += BLOCK_SIZE;
        }
    }

    /* trailing bytes stay buffered in the accumulator for the next
       call; hash the current block first if it has just become full  */
    while(cnt < data_len)
    {
        if(b_pos == BLOCK_SIZE)
        {
            gf_mul_hh((void*)ctx->txt_ghv, ctx);
            b_pos = 0;
        }
        UI8_PTR(ctx->txt_ghv)[b_pos++] ^= data[cnt++];
    }

    ctx->txt_acnt += cnt;
    return RETURN_GOOD;
}
321 
/* Encrypt or decrypt data_len bytes in place with AES in counter mode
   (the operation is its own inverse).  Keystream left over in
   ctx->enc_ctr from a previous call - tracked by the low bits of
   ctx->txt_ccnt - is consumed before any new counter block is
   encrypted.                                                         */
ret_type gcm_crypt_data(                    /* encrypt or decrypt data     */
            unsigned char data[],           /* the data buffer             */
            unsigned long data_len,         /* and its length in bytes     */
            gcm_ctx ctx[1])                 /* the mode context            */
{   uint_32t cnt = 0, b_pos = (uint_32t)ctx->txt_ccnt & BLK_ADR_MASK;

    if(!data_len)
        return RETURN_GOOD;

    /* fast path: data word aligned relative to the keystream buffer */
    if(!((data - (UI8_PTR(ctx->enc_ctr) + b_pos)) & BUF_ADRMASK))
    {
        if(b_pos)   /* use up buffered keystream first */
        {
            while(cnt < data_len && (b_pos & BUF_ADRMASK))
                data[cnt++] ^= UI8_PTR(ctx->enc_ctr)[b_pos++];

            while(cnt + BUF_INC <= data_len && b_pos <= BLOCK_SIZE - BUF_INC)
            {
                *UNIT_PTR(data + cnt) ^= *UNIT_PTR(UI8_PTR(ctx->enc_ctr) + b_pos);
                cnt += BUF_INC; b_pos += BUF_INC;
            }
        }

        /* whole blocks: bump the counter, encrypt it, xor the result */
        while(cnt + BLOCK_SIZE <= data_len)
        {
            inc_ctr(ctx->ctr_val);
            aes_encrypt(UI8_PTR(ctx->ctr_val), UI8_PTR(ctx->enc_ctr), ctx->aes);
            xor_block_aligned(data + cnt, data + cnt, ctx->enc_ctr);
            cnt += BLOCK_SIZE;
        }
    }
    else    /* unaligned data: byte catch-up and unaligned block xor */
    {
        if(b_pos)
            while(cnt < data_len && b_pos < BLOCK_SIZE)
                data[cnt++] ^= UI8_PTR(ctx->enc_ctr)[b_pos++];

        while(cnt + BLOCK_SIZE <= data_len)
        {
            inc_ctr(ctx->ctr_val);
            aes_encrypt(UI8_PTR(ctx->ctr_val), UI8_PTR(ctx->enc_ctr), ctx->aes);
            xor_block(data + cnt, data + cnt, ctx->enc_ctr);
            cnt += BLOCK_SIZE;
        }
    }

    /* trailing bytes: generate a fresh keystream block when none is
       buffered (b_pos of 0 or BLOCK_SIZE means the buffer is spent)  */
    while(cnt < data_len)
    {
        if(b_pos == BLOCK_SIZE || !b_pos)
        {
            inc_ctr(ctx->ctr_val);
            aes_encrypt(UI8_PTR(ctx->ctr_val), UI8_PTR(ctx->enc_ctr), ctx->aes);
            b_pos = 0;
        }
        data[cnt++] ^= UI8_PTR(ctx->enc_ctr)[b_pos++];
    }

    ctx->txt_ccnt += cnt;
    return RETURN_GOOD;
}
382 
/* Complete the GHASH computation and write tag_len bytes of the
   authentication tag.  The header and text hashes are accumulated
   separately, so before they are combined the header hash must be
   multiplied by H^m, where m is the number of text blocks hashed
   since - that realigns the two accumulators.  Returns RETURN_ERROR
   when data was authenticated and encrypted inconsistently,
   RETURN_WARN when the encrypted and authenticated byte counts still
   differ at the end, RETURN_GOOD otherwise.
   NOTE(review): tag_len is not range checked; the final loop reads
   tag_len bytes from one-block (16 byte) buffers, so callers must
   keep tag_len <= BLOCK_SIZE - confirm at the call sites.           */
ret_type gcm_compute_tag(                   /* compute authentication tag */
            unsigned char tag[],            /* the buffer for the tag     */
            unsigned long tag_len,          /* and its length in bytes    */
            gcm_ctx ctx[1])                 /* the mode context           */
{   uint_32t i, ln;
    gf_t tbuf;

    /* a count mismatch is only legal when nothing was encrypted
       (authenticate-only / GMAC style use)                          */
    if(ctx->txt_acnt != ctx->txt_ccnt && ctx->txt_ccnt > 0)
        return RETURN_ERROR;

    /* hash the data still buffered in the two accumulators */
    gf_mul_hh((void*)ctx->hdr_ghv, ctx);
    gf_mul_hh((void*)ctx->txt_ghv, ctx);

    if(ctx->hdr_cnt)
    {
        /* ln = number of text blocks hashed after the header; the
           header hash lags the text hash by that many multiplies    */
        ln = (uint_32t)((ctx->txt_acnt + BLOCK_SIZE - 1) / BLOCK_SIZE);
        if(ln)
        {
#if 1 /* alternative versions of the exponentiation operation */
            /* bottom-up square and multiply: hdr_ghv *= H^ln */
            memcpy(tbuf, ctx->ghash_h, BLOCK_SIZE);
# if defined( GF_REPRESENTATION )
            convert_representation(tbuf, tbuf, GF_REPRESENTATION);
            convert_representation(ctx->hdr_ghv, ctx->hdr_ghv, GF_REPRESENTATION);
# endif
            for( ; ; )
            {
                if(ln & 1)
                {
                    gf_mul((void*)ctx->hdr_ghv, tbuf);
                }
                if(!(ln >>= 1))
                    break;
                gf_mul(tbuf, tbuf);
            }
#else /* this one seems slower on x86 and x86_64 :-( */
            /* top-down variant: isolate the highest set bit of ln in
               i, then scan down computing tbuf = H^ln, finishing
               with a single multiply into the header hash           */
            i = ln | ln >> 1; i |= i >> 2; i |= i >> 4;
            i |= i >> 8; i |= i >> 16; i &= ~(i >> 1);
            memset(tbuf, 0, BLOCK_SIZE);
            UI8_PTR(tbuf)[0] = 0x80;    /* the field element 1 in LB form */
            while(i)
            {
# if defined( GF_REPRESENTATION )
                convert_representation(tbuf, tbuf, GF_REPRESENTATION);
# endif
                gf_mul(tbuf, tbuf);
# if defined( GF_REPRESENTATION )
                convert_representation(tbuf, tbuf, GF_REPRESENTATION);
# endif
                if(i & ln)
                    gf_mul_hh(tbuf, ctx);
                i >>= 1;
            }
# if defined( GF_REPRESENTATION )
            convert_representation(tbuf, tbuf, GF_REPRESENTATION);
            convert_representation(ctx->hdr_ghv, ctx->hdr_ghv, GF_REPRESENTATION);
# endif
            gf_mul((void*)ctx->hdr_ghv, tbuf);
#endif
#if defined( GF_REPRESENTATION )
            /* restore the header hash to GCM's LB representation */
            convert_representation(ctx->hdr_ghv, ctx->hdr_ghv, GF_REPRESENTATION);
# endif
        }
    }

    /* combine the two hashes and xor in the GCM length block: 64-bit
       header bit count then 64-bit text bit count, big endian, built
       byte by byte while the blocks are merged                       */
    i = BLOCK_SIZE;
#ifdef BRG_UI64
    {   uint_64t tm = ((uint_64t)ctx->txt_acnt) << 3;
        while(i-- > 0)
        {
            UI8_PTR(ctx->hdr_ghv)[i] ^= UI8_PTR(ctx->txt_ghv)[i] ^ (unsigned char)tm;
            /* switch from the text count to the header count at the
               boundary between the two 8-byte length fields          */
            tm = (i == 8 ? (((uint_64t)ctx->hdr_cnt) << 3) : tm >> 8);
        }
    }
#else
    /* no 64-bit type: emulate the two 64-bit bit counts with 32-bit
       halves ('>> 29' supplies the high word of 'count << 3')        */
    {   uint_32t tm = ctx->txt_acnt << 3;

        while(i-- > 0)
        {
            UI8_PTR(ctx->hdr_ghv)[i] ^= UI8_PTR(ctx->txt_ghv)[i] ^ (unsigned char)tm;
            if(i & 3)
                tm >>= 8;
            else if(i == 4)
                tm = ctx->txt_acnt >> 29;
            else if(i == 8)
                tm = ctx->hdr_cnt << 3;
            else
                tm = ctx->hdr_cnt >> 29;
        }
    }
#endif

    /* final GHASH multiply over the length block */
    gf_mul_hh((void*)ctx->hdr_ghv, ctx);

    /* encrypt J0 (counter block with the saved initial counter value)
       and xor it with the hash to produce the tag                    */
    memcpy(ctx->enc_ctr, ctx->ctr_val, BLOCK_SIZE);
    *UI32_PTR(UI8_PTR(ctx->enc_ctr) + CTR_POS) = ctx->y0_val;
    aes_encrypt(UI8_PTR(ctx->enc_ctr), UI8_PTR(ctx->enc_ctr), ctx->aes);
    for(i = 0; i < (unsigned int)tag_len; ++i)
        tag[i] = (unsigned char)(UI8_PTR(ctx->hdr_ghv)[i] ^ UI8_PTR(ctx->enc_ctr)[i]);

    return (ctx->txt_ccnt == ctx->txt_acnt ? RETURN_GOOD : RETURN_WARN);
}
484 
485 ret_type gcm_end( /* clean up and end operation */
486  gcm_ctx ctx[1]) /* the mode context */
487 {
488  memset(ctx, 0, sizeof(gcm_ctx));
489  return RETURN_GOOD;
490 }
491 
492 ret_type gcm_encrypt( /* encrypt & authenticate data */
493  unsigned char data[], /* the data buffer */
494  unsigned long data_len, /* and its length in bytes */
495  gcm_ctx ctx[1]) /* the mode context */
496 {
497 
498  gcm_crypt_data(data, data_len, ctx);
499  gcm_auth_data(data, data_len, ctx);
500  return RETURN_GOOD;
501 }
502 
503 ret_type gcm_decrypt( /* authenticate & decrypt data */
504  unsigned char data[], /* the data buffer */
505  unsigned long data_len, /* and its length in bytes */
506  gcm_ctx ctx[1]) /* the mode context */
507 {
508  gcm_auth_data(data, data_len, ctx);
509  gcm_crypt_data(data, data_len, ctx);
510  return RETURN_GOOD;
511 }
512 
513 ret_type gcm_encrypt_message( /* encrypt an entire message */
514  const unsigned char iv[], /* the initialisation vector */
515  unsigned long iv_len, /* and its length in bytes */
516  const unsigned char hdr[], /* the header buffer */
517  unsigned long hdr_len, /* and its length in bytes */
518  unsigned char msg[], /* the message buffer */
519  unsigned long msg_len, /* and its length in bytes */
520  unsigned char tag[], /* the buffer for the tag */
521  unsigned long tag_len, /* and its length in bytes */
522  gcm_ctx ctx[1]) /* the mode context */
523 {
524  gcm_init_message(iv, iv_len, ctx);
525  gcm_auth_header(hdr, hdr_len, ctx);
526  gcm_encrypt(msg, msg_len, ctx);
527  return gcm_compute_tag(tag, tag_len, ctx) ? RETURN_ERROR : RETURN_GOOD;
528 }
529 
530 ret_type gcm_decrypt_message( /* decrypt an entire message */
531  const unsigned char iv[], /* the initialisation vector */
532  unsigned long iv_len, /* and its length in bytes */
533  const unsigned char hdr[], /* the header buffer */
534  unsigned long hdr_len, /* and its length in bytes */
535  unsigned char msg[], /* the message buffer */
536  unsigned long msg_len, /* and its length in bytes */
537  const unsigned char tag[], /* the buffer for the tag */
538  unsigned long tag_len, /* and its length in bytes */
539  gcm_ctx ctx[1]) /* the mode context */
540 { uint_8t local_tag[BLOCK_SIZE];
541  ret_type rr;
542 
543  gcm_init_message(iv, iv_len, ctx);
544  gcm_auth_header(hdr, hdr_len, ctx);
545  gcm_decrypt(msg, msg_len, ctx);
546  rr = gcm_compute_tag(local_tag, tag_len, ctx);
547  return (rr != RETURN_GOOD || memcmp(tag, local_tag, tag_len)) ? RETURN_ERROR : RETURN_GOOD;
548 }
549 
550 #if defined(__cplusplus)
551 }
552 #endif