#define pr_fmt(fmt) "IPsec: " fmt
#include <linux/module.h>
#include <linux/slab.h>
#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
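/*
 * Allocate one scratch buffer large enough for the saved IP header
 * ("size" bytes), the ICV, the ahash request and a scatterlist with
 * nfrags entries, with room for the hash's alignment requirements.
 */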
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));
	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());
	ahash_request_set_tfm(req, ahash);
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
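/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */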
	unsigned char *optptr = (unsigned char *)(iph+1);
	int l = iph->ihl*4 - sizeof(struct iphdr);
	if (optlen < 2 || optlen > l)
	memcpy(daddr, optptr+optlen-4, 4);
	memset(optptr, 0, optlen);
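/*
 * ah_output_done(): asynchronous completion for ah_output().  Copy the
 * computed ICV into the AH header and restore the mutable IPv4 fields
 * (and, for ihl != 5, the options) saved in the scratch header.
 */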
	struct iphdr *top_iph = ip_hdr(skb);
	int ihl = ip_hdrlen(skb);
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	if (top_iph->ihl != 5) {
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
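/*
 * ah_output(): save the mutable IPv4 header fields into the scratch
 * header, zero them in the packet, build the AH header, then hash the
 * packet over a scatterlist and write back the truncated ICV.
 */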
	struct iphdr *iph, *top_iph;
	skb_push(skb, -skb_network_offset(skb));
	ihl = ip_hdrlen(skb);
	iph = ah_alloc_tmp(ahash, nfrags, ihl);
	icv = ah_tmp_icv(ahash, iph, ihl);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	top_iph = ip_hdr(skb);
	if (top_iph->ihl != 5) {
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
	ah->nexthdr = *skb_mac_header(skb);
	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);
	if (top_iph->ihl != 5) {
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
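/*
 * ah_input_done(): asynchronous completion for ah_input().  Compare the
 * computed ICV against the received one, then strip the AH header and
 * restore the saved IPv4 header.
 */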
	struct iphdr *work_iph;
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;
	auth_data = ah_tmp_auth(work_iph, ihl);
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	skb_set_transport_header(skb, -ihl);
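/*
 * ah_input(): verify the ICV on a received packet.  The IPv4 header and
 * received ICV are copied into the scratch buffer, the mutable fields
 * and the ICV in the packet are zeroed, then the hash is computed over
 * the whole packet and compared against the received value.
 */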
	struct iphdr *iph, *work_iph;
	if (!pskb_may_pull(skb, sizeof(*ah)))
	ah_hlen = (ah->hdrlen + 2) << 2;
	if (!pskb_may_pull(skb, ah_hlen))
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
	ihl = ip_hdrlen(skb);
	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
	auth_data = ah_tmp_auth(work_iph, ihl);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	memcpy(work_iph, iph, ihl);
	if (ihl > sizeof(*iph)) {
		err = ip_clear_mutable_options(iph, &dummy);
	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	skb_set_transport_header(skb, -ihl);
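/*
 * ICMP error handler: only certain ICMP types (such as fragmentation
 * needed) are acted upon for the matching AH security association.
 */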
	switch (icmp_hdr(skb)->type) {
static int ah_init_state(struct xfrm_state *x)
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("%s: %s digestsize %u != %hu\n",
			__func__, x->aalg->alg_name,
			crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
	crypto_free_ahash(ahp->ahash);
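/* ah_destroy(): release the ahash transform and the per-state ah_data. */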
	crypto_free_ahash(ahp->ahash);
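/* xfrm type and inet protocol handler registration for IPPROTO_AH. */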
	.description	= "AH4",
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.err_handler	= ah4_err,
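/* Module init/exit: register and unregister the AH xfrm type and protocol handler. */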
static int __init ah4_init(void)
		pr_info("%s: can't add xfrm type\n", __func__);
		pr_info("%s: can't add protocol\n", __func__);
static void __exit ah4_fini(void)
		pr_info("%s: can't remove protocol\n", __func__);
		pr_info("%s: can't remove xfrm type\n", __func__);