27 #define pr_fmt(fmt) "IPv6: " fmt
32 #include <linux/module.h>
37 #include <linux/kernel.h>
39 #include <linux/random.h>
40 #include <linux/slab.h>
46 #include <linux/icmpv6.h>
/* Per-skb private scratch pointer for async ESP processing, stored at the
 * start of skb->cb.  NOTE(review): struct esp_skb_cb is declared outside
 * this fragment — the macro only reinterprets the control-buffer bytes.
 */
53 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
/* esp_alloc_tmp - size and allocate one combined scratch buffer for an ESP
 * operation (sequence-hi word, IV, AEAD request, scatterlists).
 * NOTE(review): this chunk is fragmented — the initial `len` seed, the
 * allocation call and the return are not visible here; comments below only
 * describe the visible sizing arithmetic.
 */
66 static void *esp_alloc_tmp(
struct crypto_aead *aead,
int nfrags,
int seqihlen)
/* Reserve room for the cipher IV. */
72 len += crypto_aead_ivsize(aead);
/* Extra slack so the IV can be aligned to the AEAD's alignmask without
 * exceeding the context alignment already provided by the allocator. */
75 len += crypto_aead_alignmask(aead) &
76 ~(crypto_tfm_ctx_alignment() - 1);
/* Round up so the request structure that follows is ctx-aligned. */
77 len =
ALIGN(len, crypto_tfm_ctx_alignment());
/* Return a pointer to the sequence-number-high word at the start of the
 * scratch buffer.  NOTE(review): body (the PTR_ALIGN/return line) is
 * missing from this fragment — presumably it aligns `tmp` for __be32.
 */
88 static inline __be32 *esp_tmp_seqhi(
void *
tmp)
/* Return the IV location inside the scratch buffer: just past the seqhi
 * area, aligned for the AEAD when it actually has an IV; otherwise the
 * unaligned position is fine.  NOTE(review): the PTR_ALIGN expression on
 * the true branch is partially missing from this fragment.
 */
93 static inline u8 *esp_tmp_iv(
struct crypto_aead *aead,
void *tmp,
int seqhilen)
95 return crypto_aead_ivsize(aead) ?
97 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
/* Fragment of the givcrypt-request helper (esp_tmp_givreq): place the
 * aead_givcrypt_request immediately after the IV, aligned to the tfm
 * context alignment, and bind it to the AEAD transform.
 * NOTE(review): signature and return are outside this fragment.
 */
105 req = (
void *)
PTR_ALIGN(iv + crypto_aead_ivsize(aead),
106 crypto_tfm_ctx_alignment());
107 aead_givcrypt_set_tfm(req, aead);
/* Fragment of the plain-request helper (esp_tmp_req): same layout as the
 * givcrypt variant but for a regular aead_request used on input/decrypt.
 * NOTE(review): signature and return are outside this fragment.
 */
115 req = (
void *)
PTR_ALIGN(iv + crypto_aead_ivsize(aead),
116 crypto_tfm_ctx_alignment());
117 aead_request_set_tfm(req, aead);
/* Fragment of esp_givreq_sg: the scatterlist array lives directly after
 * the givcrypt request plus its transform-specific reqsize.
 * NOTE(review): the trailing ALIGN argument (presumably
 * __alignof__(struct scatterlist)) is missing from this fragment.
 */
124 return (
void *)
ALIGN((
unsigned long)(req + 1) +
125 crypto_aead_reqsize(aead),
/* Fragment of esp_req_sg: scatterlist array after a plain aead_request,
 * mirroring esp_givreq_sg above.  NOTE(review): trailing ALIGN argument
 * is missing from this fragment.
 */
132 return (
void *)
ALIGN((
unsigned long)(req + 1) +
133 crypto_aead_reqsize(aead),
/* Fragment of esp6_output: ESP encapsulation for outgoing IPv6 packets.
 * NOTE(review): signature, declarations, error paths and several lines are
 * missing from this fragmented view; comments annotate only what is shown.
 */
/* alen = length of the ICV/authentication tag appended after the payload. */
171 alen = crypto_aead_authsize(aead);
/* TFC padding: pad the packet out to `padto` if it is shorter. */
179 if (skb->
len < padto)
180 tfclen = padto - skb->
len;
/* Cipher block size rounded up to 4 (ESP requires 4-byte alignment). */
182 blksize =
ALIGN(crypto_aead_blocksize(aead), 4);
/* clen = ciphertext length: payload + pad-length/next-header bytes (2)
 * + TFC padding, rounded to the block size.  plen = resulting padding. */
183 clen =
ALIGN(skb->
len + 2 + tfclen, blksize);
186 plen = clen - skb->
len - tfclen;
/* Make the trailer area writable/contiguous enough for pad + ICV. */
188 err =
skb_cow_data(skb, tfclen + plen + alen, &trailer);
/* Associated data covers the ESP header, plus seqhi when ESN is on. */
193 assoclen =
sizeof(*esph);
199 seqhilen +=
sizeof(
__be32);
200 assoclen += seqhilen;
/* One combined scratch buffer: seqhi, IV, givcrypt request, sg lists. */
203 tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
209 seqhi = esp_tmp_seqhi(tmp);
210 iv = esp_tmp_iv(aead, tmp, seqhilen);
211 req = esp_tmp_givreq(aead, iv);
212 asg = esp_givreq_sg(aead, req);
/* Fill the ESP trailer: monotonic pad bytes, pad length, next header
 * (taken from the saved mac header position). */
216 tail = skb_tail_pointer(trailer);
223 for (i = 0; i < plen - 2; i++)
226 tail[plen - 2] = plen - 2;
227 tail[plen - 1] = *skb_mac_header(skb);
/* Expose the ESP header by pushing back to the network header. */
230 skb_push(skb, -skb_network_offset(skb));
234 esph->
spi = x->
id.spi;
/* Associated-data scatterlist: SPI word, then seqhi when present. */
244 sg_set_buf(asg, &esph->
spi,
sizeof(
__be32));
246 sg_set_buf(asg + 1, seqhi, seqhilen);
/* Fire the (possibly async) IV-generating encrypt; esp_output_done
 * completes async requests. */
251 aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
252 aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
253 aead_givcrypt_set_assoc(req, asg, assoclen);
254 aead_givcrypt_set_giv(req, esph->
enc_data,
258 err = crypto_aead_givencrypt(req);
/* esp_input_done2 - post-decrypt fixup for received ESP packets: validate
 * the trailer padding, then strip ESP header/IV, padding and ICV.
 * NOTE(review): fragmented view — declarations of aead/padlen and the
 * error/return paths are not visible here.
 */
271 static int esp_input_done2(
struct sk_buff *skb,
int err)
276 int alen = crypto_aead_authsize(aead);
/* hlen = ESP header + IV; elen = what was actually decrypted. */
277 int hlen =
sizeof(
struct ip_esp_hdr) + crypto_aead_ivsize(aead);
278 int elen = skb->
len - hlen;
279 int hdr_len = skb_network_header_len(skb);
/* Reject impossible pad lengths (pad + pad-len/next-hdr bytes + ICV
 * cannot reach or exceed the decrypted payload). */
293 if (padlen + 2 + alen >= elen) {
295 "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
/* Drop ICV, padding and the 2 trailer bytes, then the header + IV. */
301 pskb_trim(skb, skb->
len - alen - padlen - 2);
302 __skb_pull(skb, hlen);
/* Transport header now sits hdr_len bytes before the new data start. */
303 skb_set_transport_header(skb, -hdr_len);
/* Fragment of esp6_input: decrypt and authenticate a received ESP packet.
 * NOTE(review): signature, declarations and error handling are missing
 * from this fragmented view; comments annotate only what is shown.
 */
/* elen = encrypted payload length after ESP header and IV. */
329 int elen = skb->
len -
sizeof(*esph) - crypto_aead_ivsize(aead);
/* Ensure the ESP header and IV are linear before touching them. */
341 if (!pskb_may_pull(skb,
sizeof(*esph) + crypto_aead_ivsize(aead))) {
/* Associated data: ESP header, plus seqhi word when ESN is enabled —
 * mirrors the output-path setup. */
358 assoclen =
sizeof(*esph);
364 seqhilen +=
sizeof(
__be32);
365 assoclen += seqhilen;
/* One combined scratch buffer: seqhi, IV, request, sg lists. */
368 tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
373 seqhi = esp_tmp_seqhi(tmp);
374 iv = esp_tmp_iv(aead, tmp, seqhilen);
375 req = esp_tmp_req(aead, iv);
376 asg = esp_req_sg(aead, req);
/* Map only the ciphertext (skip header + IV) into the sg list. */
387 skb_to_sgvec(skb, sg,
sizeof(*esph) + crypto_aead_ivsize(aead), elen);
/* Associated-data scatterlist: SPI word, then seqhi when present. */
391 sg_set_buf(asg, &esph->
spi,
sizeof(
__be32));
393 sg_set_buf(asg + 1, seqhi, seqhilen);
/* Async completion goes through esp_input_done; on synchronous return
 * finish immediately via esp_input_done2. */
398 aead_request_set_callback(req, 0, esp_input_done, skb);
399 aead_request_set_crypt(req, sg, sg, elen, iv);
400 aead_request_set_assoc(req, asg, assoclen);
402 ret = crypto_aead_decrypt(req);
406 ret = esp_input_done2(skb, ret);
/* Fragment of esp6_get_mtu: compute the largest payload that still fits in
 * `mtu` once ESP overhead (header, padding to block size, ICV) is added.
 * NOTE(review): signature, the `align` derivation and the mode switch are
 * missing from this fragmented view.
 */
415 u32 blksize =
ALIGN(crypto_aead_blocksize(esp->
aead), 4);
417 unsigned int net_adj;
/* In (presumably) transport/BEET mode the inner IPv6 header length is
 * regained — TODO confirm against the missing mode switch. */
420 net_adj =
sizeof(
struct ipv6hdr);
/* Usable payload = mtu minus header and ICV, rounded down to the pad
 * alignment, minus the 2 trailer bytes folded into net_adj - 2. */
424 return ((mtu - x->
props.header_len - crypto_aead_authsize(esp->
aead) -
425 net_adj) & ~(align - 1)) + (net_adj - 2);
/* esp6_destroy - xfrm_state destructor: release the AEAD transform.
 * NOTE(review): the `esp` local derivation is missing from this fragment.
 */
453 static void esp6_destroy(
struct xfrm_state *x)
460 crypto_free_aead(esp->
aead);
/* esp_init_aead - set up a combined-mode (native AEAD) transform from the
 * state's configured algorithm.  NOTE(review): allocation of `aead` and
 * the authsize setup are missing from this fragment.
 */
464 static int esp_init_aead(
struct xfrm_state *x)
/* Key length is stored in bits; round up to whole bytes. */
477 err = crypto_aead_setkey(aead, x->
aead->alg_key,
478 (x->
aead->alg_key_len + 7) / 8);
/* esp_init_authenc - build an authenc(hmac,cipher)-style AEAD from separate
 * auth + encryption algorithms, assemble the concatenated key, and verify
 * the configured ICV size against the algorithm's full digest size.
 * NOTE(review): heavily fragmented — name formatting, RTA key layout and
 * error paths are missing; comments annotate only the visible lines.
 */
490 static int esp_init_authenc(
struct xfrm_state *x)
/* Auth algorithm is optional; fall back to digest_null (both the ESN
 * and non-ESN template names appear to take the same fallback). */
511 x->
aalg ? x->
aalg->alg_name :
"digest_null",
517 x->
aalg ? x->
aalg->alg_name :
"digest_null",
/* Combined key length: auth key bytes (if any) + the rest (bits->bytes). */
529 keylen = (x->
aalg ? (x->
aalg->alg_key_len + 7) / 8 : 0) +
/* Advance past the copied auth key within the concatenated key blob. */
547 p += (x->
aalg->alg_key_len + 7) / 8;
/* Sanity check: the transform's authsize must equal the algorithm
 * descriptor's full ICV size, otherwise the configuration is bogus. */
553 if (aalg_desc->
uinfo.
auth.icv_fullbits/8 !=
554 crypto_aead_authsize(aead)) {
557 crypto_aead_authsize(aead),
/* Truncate the ICV to the user-configured length (bits -> bytes). */
563 aead, x->
aalg->alg_trunc_len / 8);
571 err = crypto_aead_setkey(aead, key, keylen);
/* esp6_init_state - xfrm_type init hook: pick the AEAD setup path, then
 * derive header/trailer overhead from the chosen transform.
 * NOTE(review): fragmented — allocation of `esp`, the full mode switch and
 * the error/cleanup paths are missing from this view.
 */
580 static int esp6_init_state(
struct xfrm_state *x)
/* Native AEAD config vs. separate auth+enc (authenc) config. */
597 err = esp_init_aead(x);
599 err = esp_init_authenc(x);
/* Header overhead includes the IV (preceding context not visible). */
609 crypto_aead_ivsize(aead);
610 switch (x->
props.mode) {
/* Worst-case trailer: padding up to the 4-byte-aligned block size,
 * +1 (presumably pad-length/next-header byte) + ICV. */
625 align =
ALIGN(crypto_aead_blocksize(aead), 4);
628 x->
props.trailer_len = align + 1 + crypto_aead_authsize(esp->
aead);
/* xfrm_type registration for ESP over IPv6: wires this file's state
 * lifecycle, MTU and output handlers into the xfrm framework.
 * NOTE(review): several initializer fields (proto, flags, input) are
 * missing from this fragmented view.
 */
634 static const struct xfrm_type esp6_type =
636 .description =
"ESP6",
640 .init_state = esp6_init_state,
641 .destructor = esp6_destroy,
642 .get_mtu = esp6_get_mtu,
644 .output = esp6_output,
/* inet6 protocol handler for the ESP protocol number.  NOTE(review): the
 * .handler field is missing from this fragment; only the ICMP error hook
 * and policy-bypass flag are visible.
 */
648 static const struct inet6_protocol esp6_protocol = {
650 .err_handler = esp6_err,
651 .flags = INET6_PROTO_NOPOLICY,
/* esp6_init - module init: register the xfrm type and the inet6 protocol
 * handler, logging on failure.  NOTE(review): the registration calls and
 * unwind path themselves are missing from this fragment.
 */
654 static int __init esp6_init(
void)
657 pr_info(
"%s: can't add xfrm type\n", __func__);
661 pr_info(
"%s: can't add protocol\n", __func__);
/* esp6_fini - module exit: unregister in reverse order of esp6_init,
 * logging failures.  NOTE(review): the unregistration calls themselves are
 * missing from this fragment.
 */
669 static void __exit esp6_fini(
void)
672 pr_info(
"%s: can't remove protocol\n", __func__);
674 pr_info(
"%s: can't remove xfrm type\n", __func__);