31 #include <linux/types.h>
34 #include <linux/random.h>
39 # define RPCDBG_FACILITY RPCDBG_AUTH
/*
 * gss_krb5_padding - number of pad bytes needed to extend @length out
 * to a whole multiple of @blocksize.
 *
 * Note: when @length is already block-aligned this returns @blocksize,
 * not 0 -- i.e. a full block of padding is always added, matching the
 * padding scheme used by gss_krb5_add_padding()/gss_krb5_remove_padding()
 * where every pad byte carries the pad count.
 *
 * NOTE(review): the return-type/storage-class line was lost in
 * extraction; reconstructed here as 'static inline int' (the result is
 * stored into an int in gss_krb5_add_padding) -- confirm against the
 * complete file.
 */
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}
/*
 * gss_krb5_add_padding - pad the data in @buf following @offset out to
 * a whole multiple of @blocksize so it can be block-encrypted.
 *
 * NOTE(review): this chunk is an incomplete extraction -- the branch
 * bodies choosing where the pad bytes land (head vs tail iovec), the
 * computation of 'p', and the iov_len/buf->len bookkeeping before the
 * memset are missing.  Confirm the flow against the complete file.
 */
49 gss_krb5_add_padding(
struct xdr_buf *
buf,
int offset,
int blocksize)
/* pad count needed to align the payload; a full block when already
 * aligned (see gss_krb5_padding()) */
51 int padding = gss_krb5_padding(blocksize, buf->len - offset);
/* if data extends past the head iovec (pages or tail in use), the
 * padding presumably goes into the tail; otherwise it is appended to
 * the head -- TODO confirm, branch bodies not visible here */
55 if (buf->page_len || buf->tail[0].iov_len)
/* each of the 'padding' pad bytes carries the pad count itself */
62 memset(p, padding, padding);
/*
 * gss_krb5_remove_padding - strip trailing block-cipher padding from
 * @buf after decryption.  The last byte of the logical buffer holds
 * the pad count; the trim is applied to whichever xdr_buf segment
 * (head, pages, or tail) the buffer currently ends in.
 *
 * NOTE(review): incomplete extraction -- the declaration of 'pad',
 * the error/return paths, the page-segment kmap arithmetic, and the
 * tail-segment adjustment are partly missing from this view.
 */
66 gss_krb5_remove_padding(
struct xdr_buf *buf,
int blocksize)
70 size_t len = buf->len;
/* buffer ends in the head iovec: pad count is its last byte */
73 pad = *(
u8 *)(buf->head[0].iov_base + len - 1);
/* a pad count larger than the head is malformed -- the handling
 * branch is not visible here (TODO confirm it rejects/overflows to
 * the next segment correctly) */
74 if (pad > buf->head[0].iov_len)
76 buf->head[0].iov_len -=
pad;
/* otherwise locate the last byte beyond the head ... */
79 len -= buf->head[0].iov_len;
/* ... in the page list: find the page and offset of the final byte */
80 if (len <= buf->page_len) {
81 unsigned int last = (buf->page_base + len - 1)
83 unsigned int offset = (buf->page_base + len - 1)
/* ... or in the tail iovec */
91 BUG_ON(len > buf->tail[0].iov_len);
92 pad = *(
u8 *)(buf->tail[0].iov_base + len - 1);
/*
 * gss_wrap_kerberos_v1 - wrap (integrity-protect and encrypt) the RPC
 * payload in @buf starting at @offset using the legacy RFC 1964 token
 * format.  Returns GSS_S_COMPLETE on success, GSS_S_CONTEXT_EXPIRED
 * when the context endtime has passed, GSS_S_FAILURE on any error.
 *
 * NOTE(review): this chunk is an incomplete extraction -- most local
 * declarations (cksumdata, headlen, cksumkey, cipher, now, the
 * sequence number) and several statements are missing.  The comments
 * below describe only what is visible; confirm against the full file.
 */
158 gss_wrap_kerberos_v1(
struct krb5_ctx *kctx,
int offset,
159 struct xdr_buf *buf,
struct page **
pages)
162 struct xdr_netobj md5cksum = {.len =
sizeof(cksumdata),
164 int blocksize = 0, plainlen;
165 unsigned char *
ptr, *msg_start;
168 struct page **tmp_pages;
/* confounder length for this enctype */
171 u32 conflen = kctx->
gk5e->conflen;
173 dprintk(
"RPC: %s\n", __func__);
/* pad the payload out to the cipher blocksize before encrypting */
177 blocksize = crypto_blkcipher_blocksize(kctx->
enc);
178 gss_krb5_add_padding(buf, offset, blocksize);
179 BUG_ON((buf->len - offset) % blocksize);
/* plaintext = confounder + padded payload */
180 plainlen = conflen + buf->len -
offset;
186 ptr = buf->head[0].iov_base +
offset;
191 BUG_ON((buf->len - offset - headlen) % blocksize);
/* token length covers checksum + plaintext; the header-emission call
 * this belongs to is truncated in this view */
195 kctx->
gk5e->cksumlength + plainlen, &ptr);
/* keyed checksum types use the context's dedicated checksum key */
210 if (kctx->
gk5e->keyed_cksum)
211 cksumkey = kctx->
cksum;
/* buf->pages is saved and restored around make_checksum() --
 * presumably temporarily repointed at @pages so the checksum covers
 * the caller-supplied page data; the intervening store is missing */
216 tmp_pages = buf->
pages;
218 if (
make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
220 return GSS_S_FAILURE;
221 buf->pages = tmp_pages;
/* sequence-number generation is serialized by krb5_seq_lock */
225 spin_lock(&krb5_seq_lock);
227 spin_unlock(&krb5_seq_lock);
233 return GSS_S_FAILURE;
/* one enctype branch (presumably ARCFOUR_HMAC) allocates a one-off
 * cipher by name, encrypts, then frees it */
238 cipher = crypto_alloc_blkcipher(kctx->
gk5e->encrypt_name, 0,
241 return GSS_S_FAILURE;
246 offset + headlen - conflen, pages);
247 crypto_free_blkcipher(cipher);
249 return GSS_S_FAILURE;
/* other enctypes encrypt in place with the context's cipher */
252 offset + headlen - conflen, pages))
253 return GSS_S_FAILURE;
/* expiry is reported only after the token has been produced */
256 return (kctx->
endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
/*
 * gss_unwrap_kerberos_v1 - verify and strip an RFC 1964 wrap token
 * from @buf at @offset: validate the token header fields, the
 * sequence-number direction, and the checksum; then slide the
 * decrypted plaintext down over the consumed header and remove the
 * block-cipher padding.
 *
 * NOTE(review): incomplete extraction -- the token-parsing calls,
 * several declarations (ptr, signalg, sealalg, direction, seqnum,
 * now, data_start/orig_start/data_len, blocksize) and some error
 * branches are missing from this view.
 */
260 gss_unwrap_kerberos_v1(
struct krb5_ctx *kctx,
int offset,
struct xdr_buf *buf)
265 struct xdr_netobj md5cksum = {.len =
sizeof(cksumdata),
275 u32 conflen = kctx->
gk5e->conflen;
279 dprintk(
"RPC: gss_unwrap_kerberos\n");
281 ptr = (
u8 *)buf->head[0].iov_base + offset;
284 return GSS_S_DEFECTIVE_TOKEN;
288 return GSS_S_DEFECTIVE_TOKEN;
/* signature algorithm: little-endian 16-bit field at header bytes 2..3 */
294 signalg = ptr[2] + (ptr[3] << 8);
295 if (signalg != kctx->
gk5e->signalg)
296 return GSS_S_DEFECTIVE_TOKEN;
/* seal algorithm: little-endian 16-bit field at header bytes 4..5 */
298 sealalg = ptr[4] + (ptr[5] << 8);
299 if (sealalg != kctx->
gk5e->sealalg)
300 return GSS_S_DEFECTIVE_TOKEN;
/* bytes 6..7 must be the 0xffff filler */
302 if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
303 return GSS_S_DEFECTIVE_TOKEN;
310 (
unsigned char *)buf->
head[0].iov_base;
316 ptr + 8, &direction, &seqnum))
/* direction byte must match our role: an initiator expects 0xff,
 * an acceptor expects 0 -- rejects our own reflected tokens */
319 if ((kctx->initiate && direction != 0xff) ||
320 (!kctx->initiate && direction != 0))
327 cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
330 return GSS_S_FAILURE;
335 crypto_free_blkcipher(cipher);
337 return GSS_S_DEFECTIVE_TOKEN;
340 return GSS_S_DEFECTIVE_TOKEN;
343 if (kctx->gk5e->keyed_cksum)
344 cksumkey = kctx->cksum;
350 return GSS_S_FAILURE;
/* checksum mismatch => bad signature (note: plain memcmp, not a
 * constant-time compare) */
352 if (
memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
353 kctx->gk5e->cksumlength))
354 return GSS_S_BAD_SIG;
/* reject tokens arriving after the context has expired */
360 if (now > kctx->endtime)
361 return GSS_S_CONTEXT_EXPIRED;
/* slide the plaintext down over the token header + checksum (plus,
 * per the truncated tail of line 369, presumably the confounder) so
 * the caller sees plaintext at @offset */
368 blocksize = crypto_blkcipher_blocksize(kctx->enc);
369 data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
371 orig_start = buf->head[0].iov_base +
offset;
372 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
373 memmove(orig_start, data_start, data_len);
374 buf->head[0].iov_len -= (data_start - orig_start);
375 buf->len -= (data_start - orig_start);
377 if (gss_krb5_remove_padding(buf, blocksize))
378 return GSS_S_DEFECTIVE_TOKEN;
380 return GSS_S_COMPLETE;
/* scratch-buffer size used by the rotation helper below -- TODO
 * confirm the value matches upstream */
393 #define LOCAL_BUF_LEN 32u
/*
 * rotate_buf_a_little - rotate the contents of @buf left by a small
 * @shift (presumably bounded by LOCAL_BUF_LEN).
 *
 * NOTE(review): the function body is almost entirely missing from
 * this extraction; only the signature and two declarations survive.
 */
395 static void rotate_buf_a_little(
struct xdr_buf *buf,
unsigned int shift)
399 unsigned int this_len,
i;
/*
 * _rotate_left - rotate @buf left by an arbitrary @shift, applied in
 * chunks via rotate_buf_a_little().
 *
 * NOTE(review): incomplete extraction -- the declarations of
 * 'shifted'/'this_shift' and the per-iteration computation of
 * 'this_shift' (presumably capped at LOCAL_BUF_LEN) are missing.
 */
412 static void _rotate_left(
struct xdr_buf *buf,
unsigned int shift)
/* loop until the full shift has been applied */
418 while (shifted < shift) {
420 rotate_buf_a_little(buf, this_shift);
421 shifted += this_shift;
/*
 * rotate_left - rotate the region of @buf starting at @base left by
 * @shift bytes.  A sub-buffer is presumably carved out at @base (the
 * xdr_buf_subsegment-style call is missing from this extraction) and
 * rotated in place by _rotate_left().
 */
425 static void rotate_left(
u32 base,
struct xdr_buf *buf,
unsigned int shift)
427 struct xdr_buf subbuf;
430 _rotate_left(&subbuf, shift);
/*
 * gss_wrap_kerberos_v2 - build an RFC 4121 (CFX) wrap token for the
 * payload in @buf at @offset: write the token header in place, stamp
 * a 64-bit sequence number (serialized by krb5_seq_lock), and hand
 * off to the enctype's encrypt_v2 operation.  Returns GSS_S_COMPLETE
 * or GSS_S_CONTEXT_EXPIRED on success, GSS_S_FAILURE otherwise.
 *
 * NOTE(review): incomplete extraction -- the early validity checks
 * guarding the two GSS_S_FAILURE returns, the header-field stores,
 * and most local declarations (ptr, plainhdr, be16ptr, be64ptr, ec,
 * err, now, blocksize) are missing from this view.
 */
434 gss_wrap_kerberos_v2(
struct krb5_ctx *kctx,
u32 offset,
435 struct xdr_buf *buf,
struct page **pages)
445 dprintk(
"RPC: %s\n", __func__);
448 return GSS_S_FAILURE;
452 return GSS_S_FAILURE;
/* the token header is built in place at @offset in the head iovec */
455 ptr = plainhdr = buf->head[0].iov_base +
offset;
470 blocksize = crypto_blkcipher_blocksize(kctx->
acceptor_enc);
/* the 64-bit sequence number follows the 16-bit header fields */
475 be64ptr = (
__be64 *)be16ptr;
476 spin_lock(&krb5_seq_lock);
478 spin_unlock(&krb5_seq_lock);
/* per-enctype v2 encryption of header + payload */
480 err = (*kctx->
gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
485 return (kctx->
endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
/*
 * gss_unwrap_kerberos_v2 - verify and strip an RFC 4121 (CFX) wrap
 * token from @buf at @offset: validate the header flags, undo the
 * sender's right-rotation (RRC), decrypt via the enctype's decrypt_v2
 * op, cross-check the token header against the copy embedded in the
 * ciphertext, and finally slide the plaintext down over the token
 * header.
 *
 * NOTE(review): incomplete extraction -- the header parse (flags,
 * ec, rrc), several declarations (ptr, flags, rrc, err, now,
 * decrypted_hdr, the read_bytes_from_xdr_buf call opening), and some
 * condition lines are missing from this view.
 */
489 gss_unwrap_kerberos_v2(
struct krb5_ctx *kctx,
int offset,
struct xdr_buf *buf)
497 u32 headskip, tailskip;
499 unsigned int movelen;
502 dprintk(
"RPC: %s\n", __func__);
505 return GSS_S_FAILURE;
507 ptr = buf->head[0].iov_base +
offset;
510 return GSS_S_DEFECTIVE_TOKEN;
/* an initiator must only accept tokens flagged as sent by the
 * acceptor (anti-reflection check) */
514 (kctx->
initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
515 return GSS_S_BAD_SIG;
/* wrap tokens must carry the SEALED flag */
518 dprintk(
"%s: token missing expected sealed flag\n", __func__);
519 return GSS_S_DEFECTIVE_TOKEN;
523 return GSS_S_DEFECTIVE_TOKEN;
/* undo the sender's rotation of the payload (rrc bytes, region
 * starting after the 16-byte token header) */
531 rotate_left(offset + 16, buf, rrc);
533 err = (*kctx->
gk5e->decrypt_v2)(kctx, offset, buf,
534 &headskip, &tailskip);
536 return GSS_S_FAILURE;
/* fetch the copy of the token header that was encrypted at the end
 * of the plaintext */
543 buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
544 decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
546 dprintk(
"%s: error %u getting decrypted_hdr\n", __func__, err);
547 return GSS_S_FAILURE;
/* token header must match the decrypted copy; bytes 6..7 are
 * skipped -- presumably the RRC field, which legitimately differs */
549 if (
memcmp(ptr, decrypted_hdr, 6)
550 ||
memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
551 dprintk(
"%s: token hdr, plaintext hdr mismatch!\n", __func__);
552 return GSS_S_FAILURE;
560 return GSS_S_CONTEXT_EXPIRED;
/* slide the plaintext in the head iovec down over the token header
 * and the decrypt's per-enctype header skip */
569 movelen =
min_t(
unsigned int, buf->head[0].iov_len, buf->len);
570 movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
571 BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
572 buf->head[0].iov_len);
573 memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
574 buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
575 buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
577 return GSS_S_COMPLETE;
/*
 * NOTE(review): the opening of this function (return type and name)
 * was lost in extraction; from the visible dispatch it is the
 * mechanism-level wrap entry point (presumably gss_wrap_kerberos),
 * switching on the context's enctype to the v1 or v2 implementation.
 */
582 struct xdr_buf *buf,
struct page **pages)
584 struct krb5_ctx *kctx = gctx->internal_ctx_id;
/* legacy enctypes (incl. ARCFOUR-HMAC) use the RFC 1964 token format */
591 case ENCTYPE_ARCFOUR_HMAC:
592 return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
/* remaining (newer) enctypes use the RFC 4121 token format */
595 return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
/*
 * NOTE(review): the opening of this function was lost in extraction;
 * from the visible dispatch it is the mechanism-level unwrap entry
 * point (presumably gss_unwrap_kerberos), mirroring the wrap dispatch
 * above: enctype selects the v1 or v2 implementation.
 */
602 struct krb5_ctx *kctx = gctx->internal_ctx_id;
/* legacy enctypes (incl. ARCFOUR-HMAC): RFC 1964 token format */
609 case ENCTYPE_ARCFOUR_HMAC:
610 return gss_unwrap_kerberos_v1(kctx, offset, buf);
/* newer enctypes: RFC 4121 token format */
613 return gss_unwrap_kerberos_v2(kctx, offset, buf);