#define pr_fmt(fmt) "IPsec: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/in6.h>
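/* Per-packet ESP state is stashed in skb->cb and reached via this cast. */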
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
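/*
 * Allocate a one-shot scratch buffer for an AEAD request, with room for
 * the (ESN) sequence-number high bits, the IV, the request itself and
 * the scatterlist entries, in that order and with suitable alignment.
 */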
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)

	len += crypto_aead_ivsize(aead);

	len += crypto_aead_alignmask(aead) &
	       ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());
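/* The esp_tmp_* helpers below recover each region of that scratch
 * buffer, applying the same alignment rules as esp_alloc_tmp(). */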
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
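/*
 * esp_output(): grow the skb trailer for padding and the ICV, build the
 * scatterlists and hand the packet to the AEAD "givencrypt" operation,
 * which generates the IV and encrypts in a single (possibly async)
 * request.
 */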
	alen = crypto_aead_authsize(aead);
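	/* TFC padding, when configured, inflates short packets up to
	 * x->tfcpad (capped at the path MTU) to mask the true payload
	 * length. */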
	if (skb->len < padto)
		tfclen = padto - skb->len;
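	/* The encrypted payload is the packet plus two trailer bytes
	 * (pad length and next header), rounded up to the cipher block
	 * size (itself rounded to a 32-bit multiple, as ESP requires). */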
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);

	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
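	/* The associated data always covers the ESP header (SPI plus the
	 * low sequence number); with extended sequence numbers (ESN) the
	 * high 32 bits of the sequence counter are authenticated too. */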
	assoclen = sizeof(*esph);

	seqhilen += sizeof(__be32);
	assoclen += seqhilen;
	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	tail = skb_tail_pointer(trailer);
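	/* RFC 4303 self-describing padding: bytes 1, 2, 3, ... followed by
	 * the pad-length byte and the next-header byte saved in the mac
	 * header slot. */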
	for (i = 0; i < plen - 2; i++)
		tail[i] = i + 1;
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	skb_push(skb, -skb_network_offset(skb));
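	/* NAT traversal (RFC 3948): an outer UDP header goes in front of
	 * the ESP header; the encapsulation parameters are snapshotted
	 * under the state lock. */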
		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);
		uh = (struct udphdr *)esph;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		switch (encap_type) {
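		/* The legacy non-IKE variant additionally carries two zero
		 * 32-bit words between the UDP header and the ESP header. */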
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
	esph->spi = x->id.spi;
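	/* With ESN the associated data is scattered over separate SG
	 * entries: the SPI first, then the high sequence-number word. */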
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		sg_set_buf(asg + 1, seqhi, seqhilen);
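	/* Queue the combined IV-generation + encryption.  On asynchronous
	 * completion esp_output_done() finishes the packet. */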
	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	err = crypto_aead_givencrypt(req);
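/*
 * esp_input_done2(): decrypt-side completion.  Validate the padding,
 * then strip the ESP header, IV and trailer from the packet.
 */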
static int esp_input_done2(struct sk_buff *skb, int err)

	const struct iphdr *iph;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
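	/* A claimed pad length larger than the decrypted payload itself
	 * marks a malformed packet. */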
	if (padlen + 2 + alen >= elen)
		goto out;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
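	/* Drop the authenticated trailer (padding, pad length, next header
	 * and ICV), then pull the ESP header and IV off the front. */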
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -ihl);
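/*
 * esp_input(): validate the header length, map the ciphertext into
 * scatterlists and kick off the (possibly asynchronous) AEAD decrypt.
 */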
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;
	assoclen = sizeof(*esph);

	seqhilen += sizeof(__be32);
	assoclen += seqhilen;
	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
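	/* Map only the ciphertext, skipping the ESP header and IV at the
	 * front of the packet. */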
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		sg_set_buf(asg + 1, seqhi, seqhilen);
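	/* crypto_aead_decrypt() returns -EINPROGRESS when the request is
	 * handled asynchronously; esp_input_done() then runs the same
	 * completion path, esp_input_done2(). */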
	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	err = esp_input_done2(skb, err);
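/*
 * esp4_get_mtu(): largest payload that still fits in @mtu once the ESP
 * header, IV, padding, trailer bytes and ICV are accounted for.
 */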
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	unsigned int net_adj;

	switch (x->props.mode) {
		net_adj = sizeof(struct iphdr);
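	/* Illustrative example (assumed numbers, transport mode): with
	 * mtu = 1500, header_len = 24 (ESP header + 16-byte IV), a 12-byte
	 * ICV, net_adj = 20 and align = 16:
	 * ((1500 - 24 - 12 - 20) & ~15) + (20 - 2) = 1440 + 18 = 1458. */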
	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
		 net_adj) & ~(align - 1)) + (net_adj - 2);
	switch (icmp_hdr(skb)->type) {
	crypto_free_aead(esp->aead);
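/*
 * esp_init_aead(): the SA names a genuine AEAD algorithm, so the
 * transform is allocated and keyed directly from x->aead.
 */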
static int esp_init_aead(struct xfrm_state *x)

	/* alg_key_len counts bits; round up to whole bytes for setkey() */
	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
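/*
 * esp_init_authenc(): legacy cipher + auth pairs are glued together via
 * the crypto layer's authenc() template (authencesn() when extended
 * sequence numbers are in use) and keyed with both keys packed into one
 * blob.
 */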
static int esp_init_authenc(struct xfrm_state *x)

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));

	p += (x->aalg->alg_key_len + 7) / 8;
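	/* Cross-check the crypto layer's full digest size against the
	 * algorithm description, then program the truncated ICV length
	 * actually carried on the wire. */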
	if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
	    crypto_aead_authsize(aead)) {
		pr_info("ESP: %s digestsize %u != %hu\n",
			x->aalg->alg_name,
			crypto_aead_authsize(aead),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
		goto free_key;
	}

	err = crypto_aead_setauthsize(
		aead, x->aalg->alg_trunc_len / 8);
	err = crypto_aead_setkey(aead, key, keylen);
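/*
 * esp_init_state(): choose the AEAD or authenc setup path, then
 * precompute the per-packet header and worst-case trailer overhead.
 */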
static int esp_init_state(struct xfrm_state *x)

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
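	/* Worst case: align covers up to align-1 pad bytes plus the
	 * pad-length byte; +1 is the next-header byte, then the ICV. */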
	align = ALIGN(crypto_aead_blocksize(aead), 4);

	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
	.description	= "ESP4",
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,

	.err_handler	= esp4_err,
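/*
 * Module init/exit: the xfrm type must be registered before the ESP
 * protocol handler can deliver packets to it; teardown runs in reverse
 * order.
 */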
static int __init esp4_init(void)

		pr_info("%s: can't add xfrm type\n", __func__);

		pr_info("%s: can't add protocol\n", __func__);
static void __exit esp4_fini(void)

		pr_info("%s: can't remove protocol\n", __func__);

		pr_info("%s: can't remove xfrm type\n", __func__);