17 #include <linux/slab.h>
19 #include <linux/list.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
27 #include <linux/audit.h>
32 #ifdef CONFIG_XFRM_STATISTICS
42 static struct dst_entry *xfrm_policy_sk_bundles;
53 static int xfrm_bundle_ok(
struct xfrm_dst *xdst);
68 (fl4->flowi4_proto == sel->
proto || !sel->
proto) &&
81 (fl6->flowi6_proto == sel->
proto || !sel->
proto) &&
90 return __xfrm4_selector_match(sel, fl);
92 return __xfrm6_selector_match(sel, fl);
115 static inline struct dst_entry *__xfrm_dst_lookup(
struct net *
net,
int tos,
123 afinfo = xfrm_policy_get_afinfo(family);
127 dst = afinfo->
dst_lookup(net, tos, saddr, daddr);
129 xfrm_policy_put_afinfo(afinfo);
139 struct net *net = xs_net(x);
153 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
156 if (prev_saddr != saddr)
157 memcpy(prev_saddr, saddr,
sizeof(*prev_saddr));
158 if (prev_daddr != daddr)
159 memcpy(prev_daddr, daddr,
sizeof(*prev_daddr));
165 static inline unsigned long make_jiffies(
long secs)
173 static void xfrm_policy_timer(
unsigned long data)
186 dir = xfrm_policy_id2dir(xp->
index);
188 if (xp->
lft.hard_add_expires_seconds) {
189 long tmo = xp->
lft.hard_add_expires_seconds +
190 xp->
curlft.add_time - now;
196 if (xp->
lft.hard_use_expires_seconds) {
197 long tmo = xp->
lft.hard_use_expires_seconds +
204 if (xp->
lft.soft_add_expires_seconds) {
205 long tmo = xp->
lft.soft_add_expires_seconds +
206 xp->
curlft.add_time - now;
214 if (xp->
lft.soft_use_expires_seconds) {
215 long tmo = xp->
lft.soft_use_expires_seconds +
259 return !pol->
walk.dead;
268 .get = xfrm_policy_flo_get,
269 .check = xfrm_policy_flo_check,
270 .delete = xfrm_policy_flo_delete,
285 INIT_LIST_HEAD(&policy->
walk.all);
286 INIT_HLIST_NODE(&policy->
bydst);
287 INIT_HLIST_NODE(&policy->
byidx);
291 (
unsigned long)policy);
292 policy->flo.ops = &xfrm_policy_fc_ops;
307 security_xfrm_policy_free(policy->
security);
316 static void xfrm_policy_kill(
struct xfrm_policy *policy)
318 policy->
walk.dead = 1;
323 xfrm_pol_put(policy);
325 xfrm_pol_put(policy);
328 static unsigned int xfrm_policy_hashmax
__read_mostly = 1 * 1024 * 1024;
330 static inline unsigned int idx_hash(
struct net *net,
u32 index)
332 return __idx_hash(index, net->xfrm.policy_idx_hmask);
335 static struct hlist_head *policy_hash_bysel(
struct net *net,
337 unsigned short family,
int dir)
339 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
340 unsigned int hash = __sel_hash(sel, family, hmask);
342 return (hash == hmask + 1 ?
343 &net->xfrm.policy_inexact[dir] :
344 net->xfrm.policy_bydst[dir].table + hash);
347 static struct hlist_head *policy_hash_direct(
struct net *net,
350 unsigned short family,
int dir)
352 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
353 unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
355 return net->xfrm.policy_bydst[dir].table +
hash;
360 unsigned int nhashmask)
374 hlist_add_head(&pol->
bydst, ndsttable+h);
380 hlist_add_after(entry0, &pol->
bydst);
384 if (!hlist_empty(list)) {
390 static void xfrm_idx_hash_transfer(
struct hlist_head *list,
392 unsigned int nhashmask)
400 h = __idx_hash(pol->
index, nhashmask);
401 hlist_add_head(&pol->
byidx, nidxtable+h);
/*
 * Next hash mask after doubling the table size: for a mask of the
 * form 2^k - 1 this yields 2^(k+1) - 1.  Written as a shift-and-or,
 * which is arithmetically identical to ((old + 1) << 1) - 1 for any
 * unsigned value (both equal 2*old + 1 modulo the type width).
 */
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return (old_hmask << 1) | 1;
}
410 static void xfrm_bydst_resize(
struct net *net,
int dir)
412 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
413 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
415 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
424 for (i = hmask; i >= 0; i--)
425 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
427 net->xfrm.policy_bydst[dir].table = ndst;
428 net->xfrm.policy_bydst[dir].hmask = nhashmask;
435 static void xfrm_byidx_resize(
struct net *net,
int total)
437 unsigned int hmask = net->xfrm.policy_idx_hmask;
438 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
439 unsigned int nsize = (nhashmask + 1) *
sizeof(
struct hlist_head);
440 struct hlist_head *oidx = net->xfrm.policy_byidx;
449 for (i = hmask; i >= 0; i--)
450 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
452 net->xfrm.policy_byidx = nidx;
453 net->xfrm.policy_idx_hmask = nhashmask;
460 static inline int xfrm_bydst_should_resize(
struct net *net,
int dir,
int *total)
462 unsigned int cnt = net->xfrm.policy_count[dir];
463 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
468 if ((hmask + 1) < xfrm_policy_hashmax &&
475 static inline int xfrm_byidx_should_resize(
struct net *net,
int total)
477 unsigned int hmask = net->xfrm.policy_idx_hmask;
479 if ((hmask + 1) < xfrm_policy_hashmax &&
495 si->
spdhcnt = net->xfrm.policy_idx_hmask;
504 struct net *net =
container_of(work,
struct net, xfrm.policy_hash_work);
511 if (xfrm_bydst_should_resize(net, dir, &total))
512 xfrm_bydst_resize(net, dir);
514 if (xfrm_byidx_should_resize(net, total))
515 xfrm_byidx_resize(net, total);
522 static u32 xfrm_gen_index(
struct net *net,
int dir)
524 static u32 idx_generator;
533 idx = (idx_generator | dir);
537 list = net->xfrm.policy_byidx + idx_hash(net, idx);
540 if (p->
index == idx) {
557 for (i = 0; i < len; i++) {
567 struct net *net = xp_net(policy);
575 chain = policy_hash_bysel(net, &policy->
selector, policy->
family, dir);
581 (mark & pol->
mark.m) == pol->
mark.v &&
592 newpos = &pol->
bydst;
599 hlist_add_after(newpos, &policy->
bydst);
601 hlist_add_head(&policy->
bydst, chain);
602 xfrm_pol_hold(policy);
603 net->xfrm.policy_count[dir]++;
607 __xfrm_policy_unlink(delpol, dir);
608 policy->
index = delpol ? delpol->
index : xfrm_gen_index(net, dir);
609 hlist_add_head(&policy->
byidx, net->xfrm.policy_byidx+idx_hash(net, policy->
index));
611 policy->
curlft.use_time = 0;
613 xfrm_pol_hold(policy);
614 list_add(&policy->
walk.all, &net->xfrm.policy_all);
618 xfrm_policy_kill(delpol);
619 else if (xfrm_bydst_should_resize(net, dir,
NULL))
637 chain = policy_hash_bysel(net, sel, sel->
family, dir);
640 if (pol->
type == type &&
641 (mark & pol->
mark.m) == pol->
mark.v &&
642 !selector_cmp(sel, &pol->
selector) &&
643 xfrm_sec_ctx_match(ctx, pol->
security)) {
646 *err = security_xfrm_policy_delete(
652 __xfrm_policy_unlink(pol, dir);
661 xfrm_policy_kill(ret);
667 int dir,
u32 id,
int delete,
int *
err)
674 if (xfrm_policy_id2dir(
id) != dir)
679 chain = net->xfrm.policy_byidx + idx_hash(net,
id);
682 if (pol->
type == type && pol->
index ==
id &&
683 (mark & pol->
mark.m) == pol->
mark.v) {
686 *err = security_xfrm_policy_delete(
692 __xfrm_policy_unlink(pol, dir);
701 xfrm_policy_kill(ret);
706 #ifdef CONFIG_SECURITY_NETWORK_XFRM
708 xfrm_policy_flush_secctx_check(
struct net *net,
u8 type,
struct xfrm_audit *audit_info)
718 &net->xfrm.policy_inexact[dir], bydst) {
719 if (pol->
type != type)
721 err = security_xfrm_policy_delete(pol->
security);
723 xfrm_audit_policy_delete(pol, 0,
730 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
732 net->xfrm.policy_bydst[dir].table + i,
734 if (pol->
type != type)
736 err = security_xfrm_policy_delete(
739 xfrm_audit_policy_delete(pol, 0,
752 xfrm_policy_flush_secctx_check(
struct net *net,
u8 type,
struct xfrm_audit *audit_info)
760 int dir, err = 0, cnt = 0;
764 err = xfrm_policy_flush_secctx_check(net, type, audit_info);
775 &net->xfrm.policy_inexact[dir], bydst) {
776 if (pol->
type != type)
778 __xfrm_policy_unlink(pol, dir);
782 xfrm_audit_policy_delete(pol, 1, audit_info->
loginuid,
786 xfrm_policy_kill(pol);
792 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
795 net->xfrm.policy_bydst[dir].table + i,
797 if (pol->
type != type)
799 __xfrm_policy_unlink(pol, dir);
803 xfrm_audit_policy_delete(pol, 1,
807 xfrm_policy_kill(pol);
835 if (list_empty(&walk->
walk.all) && walk->
seq != 0)
839 if (list_empty(&walk->
walk.all))
850 error =
func(pol, xfrm_policy_id2dir(pol->
index),
853 list_move_tail(&walk->
walk.all, &x->
all);
858 if (walk->
seq == 0) {
862 list_del_init(&walk->
walk.all);
871 INIT_LIST_HEAD(&walk->
walk.all);
880 if (list_empty(&walk->
walk.all))
894 static int xfrm_policy_match(
const struct xfrm_policy *pol,
895 const struct flowi *fl,
896 u8 type,
u16 family,
int dir)
902 if (pol->
family != family ||
903 (fl->flowi_mark & pol->
mark.m) != pol->
mark.v ||
909 ret = security_xfrm_policy_lookup(pol->
security, fl->flowi_secid,
915 static struct xfrm_policy *xfrm_policy_lookup_bytype(
struct net *net,
u8 type,
916 const struct flowi *fl,
926 daddr = xfrm_flowi_daddr(fl, family);
927 saddr = xfrm_flowi_saddr(fl, family);
932 chain = policy_hash_direct(net, daddr, saddr, family, dir);
935 err = xfrm_policy_match(pol, fl, type, family, dir);
949 chain = &net->xfrm.policy_inexact[dir];
951 err = xfrm_policy_match(pol, fl, type, family, dir);
959 }
else if (pol->
priority < priority) {
973 __xfrm_policy_lookup(
struct net *net,
const struct flowi *fl,
u16 family,
u8 dir)
975 #ifdef CONFIG_XFRM_SUB_POLICY
986 xfrm_policy_lookup(
struct net *net,
const struct flowi *fl,
u16 family,
994 pol = __xfrm_policy_lookup(net, fl, family, dir);
995 if (IS_ERR_OR_NULL(pol))
996 return ERR_CAST(pol);
1005 static inline int policy_to_flow_dir(
int dir)
1023 const struct flowi *fl)
1028 if ((pol = sk->sk_policy[dir]) !=
NULL) {
1038 err = security_xfrm_policy_lookup(pol->
security,
1040 policy_to_flow_dir(dir));
1043 else if (err == -
ESRCH)
1055 static void __xfrm_policy_link(
struct xfrm_policy *pol,
int dir)
1057 struct net *net = xp_net(pol);
1061 list_add(&pol->
walk.all, &net->xfrm.policy_all);
1062 hlist_add_head(&pol->
bydst, chain);
1063 hlist_add_head(&pol->
byidx, net->xfrm.policy_byidx+idx_hash(net, pol->
index));
1064 net->xfrm.policy_count[dir]++;
1067 if (xfrm_bydst_should_resize(net, dir,
NULL))
1074 struct net *net = xp_net(pol);
1076 if (hlist_unhashed(&pol->
bydst))
1079 hlist_del(&pol->
bydst);
1080 hlist_del(&pol->
byidx);
1082 net->xfrm.policy_count[dir]--;
1090 pol = __xfrm_policy_unlink(pol, dir);
1093 xfrm_policy_kill(pol);
1102 struct net *net = xp_net(pol);
1105 #ifdef CONFIG_XFRM_SUB_POLICY
1111 old_pol = sk->sk_policy[dir];
1112 sk->sk_policy[dir] =
pol;
1115 pol->
index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1116 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1122 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1126 xfrm_policy_kill(old_pol);
1137 if (security_xfrm_policy_clone(old->
security,
1153 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1163 *p1 = sk->sk_policy[1];
1165 sk->sk_policy[0] = sk->sk_policy[1] =
NULL;
1166 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) ==
NULL)
1168 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) ==
NULL)
1175 unsigned short family)
1182 err = afinfo->
get_saddr(net, local, remote);
1183 xfrm_policy_put_afinfo(afinfo);
1191 struct xfrm_state **xfrm,
unsigned short family)
1193 struct net *net = xp_net(policy);
1200 for (nx=0, i = 0; i < policy->
xfrm_nr; i++) {
1208 remote = &tmpl->
id.daddr;
1209 local = &tmpl->
saddr;
1211 error = xfrm_get_saddr(net, &tmp, remote, tmpl->
encap_family);
1231 else if (error == -
ESRCH)
1240 for (nx--; nx>=0; nx--)
1241 xfrm_state_put(xfrm[nx]);
1246 xfrm_tmpl_resolve(
struct xfrm_policy **pols,
int npols,
const struct flowi *fl,
1247 struct xfrm_state **xfrm,
unsigned short family)
1250 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1256 for (i = 0; i < npols; i++) {
1262 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1272 xfrm_state_sort(xfrm, tpp, cnx, family);
1277 for (cnx--; cnx>=0; cnx--)
1278 xfrm_state_put(tpp[cnx]);
1287 static inline int xfrm_get_tos(
const struct flowi *fl,
int family)
1297 xfrm_policy_put_afinfo(afinfo);
1315 if (stale_bundle(dst))
1330 if (stale_bundle(dst))
1345 .get = xfrm_bundle_flo_get,
1346 .check = xfrm_bundle_flo_check,
1347 .delete = xfrm_bundle_flo_delete,
1350 static inline struct xfrm_dst *xfrm_alloc_dst(
struct net *net,
int family)
1361 dst_ops = &net->xfrm.xfrm4_dst_ops;
1363 #if IS_ENABLED(CONFIG_IPV6)
1365 dst_ops = &net->xfrm.xfrm6_dst_ops;
1376 memset(dst + 1, 0,
sizeof(*xdst) -
sizeof(*dst));
1377 xdst->
flo.ops = &xfrm_bundle_fc_ops;
1383 xfrm_policy_put_afinfo(afinfo);
1392 xfrm_policy_get_afinfo(dst->
ops->family);
1398 err = afinfo->
init_path(path, dst, nfheader_len);
1400 xfrm_policy_put_afinfo(afinfo);
1406 const struct flowi *fl)
1409 xfrm_policy_get_afinfo(xdst->
u.
dst.ops->family);
1415 err = afinfo->
fill_dst(xdst, dev, fl);
1417 xfrm_policy_put_afinfo(afinfo);
1429 const struct flowi *fl,
1432 struct net *net = xp_net(policy);
1441 int nfheader_len = 0;
1444 int family = policy->
selector.family;
1447 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1449 tos = xfrm_get_tos(fl, family);
1456 for (; i < nx; i++) {
1457 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1460 err = PTR_ERR(xdst);
1467 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1468 xfrm_af2proto(family));
1480 dst_prev->
child = dst_clone(dst1);
1485 dst_copy_metrics(dst1, dst);
1488 family = xfrm[
i]->
props.family;
1489 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1497 dst1->xfrm = xfrm[
i];
1507 dst1->
next = dst_prev;
1510 header_len += xfrm[
i]->
props.header_len;
1512 nfheader_len += xfrm[
i]->
props.header_len;
1513 trailer_len += xfrm[
i]->
props.trailer_len;
1524 xfrm_init_path((
struct xfrm_dst *)dst0, dst, nfheader_len);
1525 xfrm_init_pmtu(dst_prev);
1527 for (dst_prev = dst0; dst_prev !=
dst; dst_prev = dst_prev->
child) {
1530 err = xfrm_fill_dst(xdst, dev, fl);
1536 header_len -= xdst->
u.
dst.xfrm->props.header_len;
1537 trailer_len -= xdst->
u.
dst.xfrm->props.trailer_len;
1545 xfrm_state_put(xfrm[i]);
1549 dst0 = ERR_PTR(err);
1554 xfrm_dst_alloc_copy(
void **
target,
const void *
src,
int size)
1561 memcpy(*target, src, size);
1568 #ifdef CONFIG_XFRM_SUB_POLICY
1570 return xfrm_dst_alloc_copy((
void **)&(xdst->partner),
1578 xfrm_dst_update_origin(
struct dst_entry *dst,
const struct flowi *fl)
1580 #ifdef CONFIG_XFRM_SUB_POLICY
1582 return xfrm_dst_alloc_copy((
void **)&(xdst->origin), fl,
sizeof(*fl));
1588 static int xfrm_expand_policies(
const struct flowi *fl,
u16 family,
1594 if (*num_pols == 0 || !pols[0]) {
1599 if (IS_ERR(pols[0]))
1600 return PTR_ERR(pols[0]);
1602 *num_xfrms = pols[0]->
xfrm_nr;
1604 #ifdef CONFIG_XFRM_SUB_POLICY
1607 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1612 if (IS_ERR(pols[1])) {
1613 xfrm_pols_put(pols, *num_pols);
1614 return PTR_ERR(pols[1]);
1617 (*num_xfrms) += pols[1]->
xfrm_nr;
1633 xfrm_resolve_and_create_bundle(
struct xfrm_policy **pols,
int num_pols,
1634 const struct flowi *fl,
u16 family,
1637 struct net *net = xp_net(pols[0]);
1644 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1646 if (err != 0 && err != -
EAGAIN)
1648 return ERR_PTR(err);
1651 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1654 return ERR_CAST(dst);
1660 err = xfrm_dst_update_parent(dst, &pols[1]->
selector);
1662 err = xfrm_dst_update_origin(dst, fl);
1666 return ERR_PTR(err);
1677 xfrm_bundle_lookup(
struct net *net,
const struct flowi *fl,
u16 family,
u8 dir,
1683 int num_pols = 0, num_xfrms = 0,
i,
err, pol_dead;
1694 pol_dead |= pols[
i]->
walk.dead;
1697 dst_free(&xdst->
u.
dst);
1709 pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
1710 err = xfrm_expand_policies(fl, family, pols,
1711 &num_pols, &num_xfrms);
1717 goto make_dummy_bundle;
1720 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1721 if (IS_ERR(new_xdst)) {
1722 err = PTR_ERR(new_xdst);
1726 goto make_dummy_bundle;
1727 dst_hold(&xdst->
u.
dst);
1729 }
else if (new_xdst ==
NULL) {
1732 goto make_dummy_bundle;
1734 dst_hold(&xdst->
u.
dst);
1742 dst_free(&xdst->
u.
dst);
1747 dst_hold(&new_xdst->
u.
dst);
1748 return &new_xdst->
flo;
1754 xdst = xfrm_alloc_dst(net, family);
1756 xfrm_pols_put(pols, num_pols);
1757 return ERR_CAST(xdst);
1763 dst_hold(&xdst->
u.
dst);
1770 dst_free(&xdst->
u.
dst);
1772 xfrm_pols_put(pols, num_pols);
1773 return ERR_PTR(err);
1776 static struct dst_entry *make_blackhole(
struct net *net,
u16 family,
1788 xfrm_policy_put_afinfo(afinfo);
1799 const struct flowi *fl,
1806 u16 family = dst_orig->
ops->family;
1808 int i,
err, num_pols, num_xfrms = 0, drop_pols = 0;
1818 err = xfrm_expand_policies(fl, family, pols,
1819 &num_pols, &num_xfrms);
1824 if (num_xfrms <= 0) {
1825 drop_pols = num_pols;
1829 xdst = xfrm_resolve_and_create_bundle(
1833 xfrm_pols_put(pols, num_pols);
1834 err = PTR_ERR(xdst);
1836 }
else if (xdst ==
NULL) {
1838 drop_pols = num_pols;
1842 dst_hold(&xdst->
u.
dst);
1844 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
1845 xdst->
u.
dst.next = xfrm_policy_sk_bundles;
1846 xfrm_policy_sk_bundles = &xdst->
u.
dst;
1847 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
1849 route = xdst->
route;
1860 xfrm_bundle_lookup, dst_orig);
1872 route = xdst->
route;
1876 if (route ==
NULL && num_xfrms > 0) {
1883 if (net->xfrm.sysctl_larval_drop) {
1887 xfrm_pols_put(pols, drop_pols);
1890 return make_blackhole(net, family, dst_orig);
1901 if (!signal_pending(
current)) {
1924 for (i = 0; i < num_pols; i++)
1927 if (num_xfrms < 0) {
1932 }
else if (num_xfrms > 0) {
1941 xfrm_pols_put(pols, drop_pols);
1942 if (dst && dst->xfrm &&
1948 if (!(flags & XFRM_LOOKUP_ICMP)) {
1957 xfrm_pols_put(pols, drop_pols);
1958 return ERR_PTR(err);
1963 xfrm_secpath_reject(
int idx,
struct sk_buff *
skb,
const struct flowi *fl)
1967 if (!skb->sp || idx < 0 || idx >= skb->sp->
len)
1969 x = skb->sp->xvec[
idx];
1970 if (!x->
type->reject)
1972 return x->
type->reject(x, skb, fl);
1983 unsigned short family)
1985 if (xfrm_state_kern(x))
1987 return x->
id.proto == tmpl->
id.proto &&
1988 (x->
id.spi == tmpl->
id.spi || !tmpl->
id.spi) &&
1994 xfrm_state_addr_cmp(tmpl, x, family));
2006 unsigned short family)
2015 for (; idx < sp->
len; idx++) {
2016 if (xfrm_state_ok(tmpl, sp->
xvec[idx], family))
2028 unsigned int family,
int reverse)
2037 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2038 xfrm_policy_put_afinfo(afinfo);
2043 static inline int secpath_has_nontransport(
const struct sec_path *sp,
int k,
int *idxp)
2045 for (; k < sp->
len; k++) {
2056 unsigned short family)
2058 struct net *net = dev_net(skb->
dev);
2071 fl_dir = policy_to_flow_dir(dir);
2078 nf_nat_decode_session(skb, &fl, family);
2084 for (i=skb->sp->
len-1; i>=0; i--) {
2094 if (sk && sk->sk_policy[dir]) {
2095 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2106 xfrm_policy_lookup,
NULL);
2107 if (IS_ERR_OR_NULL(flo))
2108 pol = ERR_CAST(flo);
2119 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2120 xfrm_secpath_reject(xerr_idx, skb, &fl);
2131 #ifdef CONFIG_XFRM_SUB_POLICY
2137 if (IS_ERR(pols[1])) {
2156 if ((sp = skb->sp) ==
NULL)
2159 for (pi = 0; pi < npols; pi++) {
2160 if (pols[pi] != pol &&
2169 for (i = 0; i < pols[pi]->
xfrm_nr; i++)
2170 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2174 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
2184 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2185 k = xfrm_policy_ok(tpp[i], sp, k, family);
2195 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2200 xfrm_pols_put(pols, npols);
2206 xfrm_secpath_reject(xerr_idx, skb, &fl);
2208 xfrm_pols_put(pols, npols);
2215 struct net *net = dev_net(skb->
dev);
2220 if (xfrm_decode_session(skb, &fl, family) < 0) {
2232 skb_dst_set(skb, dst);
2262 if (dst->
obsolete < 0 && !stale_bundle(dst))
/*
 * A bundle is stale once xfrm_bundle_ok() no longer validates it.
 * Returns nonzero when the bundle should be discarded.
 */
static int stale_bundle(struct dst_entry *dst)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

	return xfrm_bundle_ok(xdst) ? 0 : 1;
}
2275 while ((dst = dst->
child) && dst->xfrm && dst->
dev == dev) {
2276 dst->
dev = dev_net(dev)->loopback_dev;
2283 static void xfrm_link_failure(
struct sk_buff *skb)
2299 static void __xfrm_garbage_collect(
struct net *net)
2303 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2304 head = xfrm_policy_sk_bundles;
2305 xfrm_policy_sk_bundles =
NULL;
2306 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2315 static void xfrm_garbage_collect(
struct net *net)
2318 __xfrm_garbage_collect(net);
2321 static void xfrm_garbage_collect_deferred(
struct net *net)
2324 __xfrm_garbage_collect(net);
2327 static void xfrm_init_pmtu(
struct dst_entry *dst)
2333 pmtu = dst_mtu(dst->
child);
2338 route_mtu_cached = dst_mtu(xdst->
route);
2341 if (pmtu > route_mtu_cached)
2344 dst_metric_set(dst,
RTAX_MTU, pmtu);
2345 }
while ((dst = dst->
next));
2358 if (!dst_check(dst->
path, ((
struct xfrm_dst *)dst)->path_cookie) ||
2359 (dst->
dev && !netif_running(dst->
dev)))
2375 mtu = dst_mtu(dst->
child);
2383 mtu = dst_mtu(xdst->
route);
2390 }
while (dst->xfrm);
2402 dst_metric_set(dst,
RTAX_MTU, mtu);
2414 static unsigned int xfrm_default_advmss(
const struct dst_entry *dst)
2416 return dst_metric_advmss(dst->
path);
2419 static unsigned int xfrm_mtu(
const struct dst_entry *dst)
2421 unsigned int mtu = dst_metric_raw(dst,
RTAX_MTU);
2423 return mtu ? : dst_mtu(dst->
path);
2430 return dst->
path->ops->neigh_lookup(dst, skb, daddr);
2441 spin_lock(&xfrm_policy_afinfo_lock);
2445 struct dst_ops *dst_ops = afinfo->
dst_ops;
2449 dst_ops->
check = xfrm_dst_check;
2453 dst_ops->
mtu = xfrm_mtu;
2464 spin_unlock(&xfrm_policy_afinfo_lock);
2468 struct dst_ops *xfrm_dst_ops;
2470 switch (afinfo->
family) {
2472 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2474 #if IS_ENABLED(CONFIG_IPV6)
2476 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2482 *xfrm_dst_ops = *afinfo->
dst_ops;
2497 spin_lock(&xfrm_policy_afinfo_lock);
2505 spin_unlock(&xfrm_policy_afinfo_lock);
2507 struct dst_ops *dst_ops = afinfo->
dst_ops;
2521 static void __net_init xfrm_dst_ops_init(
struct net *net)
2528 net->xfrm.xfrm4_dst_ops = *afinfo->
dst_ops;
2529 #if IS_ENABLED(CONFIG_IPV6)
2532 net->xfrm.xfrm6_dst_ops = *afinfo->
dst_ops;
2543 xfrm_garbage_collect(dev_net(dev));
2549 .notifier_call = xfrm_dev_event,
2552 #ifdef CONFIG_XFRM_STATISTICS
2553 static int __net_init xfrm_statistics_init(
struct net *net)
2567 static void xfrm_statistics_fini(
struct net *net)
2573 static int __net_init xfrm_statistics_init(
struct net *net)
2578 static void xfrm_statistics_fini(
struct net *net)
2583 static int __net_init xfrm_policy_init(
struct net *net)
2585 unsigned int hmask, sz;
2598 if (!net->xfrm.policy_byidx)
2600 net->xfrm.policy_idx_hmask = hmask;
2602 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2605 net->xfrm.policy_count[dir] = 0;
2608 htab = &net->xfrm.policy_bydst[dir];
2615 INIT_LIST_HEAD(&net->xfrm.policy_all);
2616 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2622 for (dir--; dir >= 0; dir--) {
2625 htab = &net->xfrm.policy_bydst[dir];
2633 static void xfrm_policy_fini(
struct net *net)
2640 #ifdef CONFIG_XFRM_SUB_POLICY
2643 audit_info.
secid = 0;
2648 audit_info.
secid = 0;
2651 WARN_ON(!list_empty(&net->xfrm.policy_all));
2653 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2656 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2658 htab = &net->xfrm.policy_bydst[dir];
2659 sz = (htab->
hmask + 1);
2664 sz = (net->xfrm.policy_idx_hmask + 1) *
sizeof(
struct hlist_head);
2665 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2669 static int __net_init xfrm_net_init(
struct net *net)
2673 rv = xfrm_statistics_init(net);
2675 goto out_statistics;
2679 rv = xfrm_policy_init(net);
2682 xfrm_dst_ops_init(net);
2689 xfrm_policy_fini(net);
2693 xfrm_statistics_fini(net);
2698 static void __net_exit xfrm_net_exit(
struct net *net)
2700 xfrm_sysctl_fini(net);
2701 xfrm_policy_fini(net);
2703 xfrm_statistics_fini(net);
2707 .init = xfrm_net_init,
2708 .exit = xfrm_net_exit,
2717 #ifdef CONFIG_AUDITSYSCALL
2718 static void xfrm_audit_common_policyinfo(
struct xfrm_policy *xp,
2757 audit_buf = xfrm_audit_start(
"SPD-add");
2758 if (audit_buf ==
NULL)
2760 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2762 xfrm_audit_common_policyinfo(xp, audit_buf);
2767 void xfrm_audit_policy_delete(
struct xfrm_policy *xp,
int result,
2772 audit_buf = xfrm_audit_start(
"SPD-delete");
2773 if (audit_buf ==
NULL)
2775 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2777 xfrm_audit_common_policyinfo(xp, audit_buf);
2783 #ifdef CONFIG_XFRM_MIGRATE
2784 static bool xfrm_migrate_selector_match(
const struct xfrm_selector *sel_cmp,
2789 xfrm_addr_cmp(&sel_tgt->
daddr, &sel_cmp->
daddr,
2791 xfrm_addr_cmp(&sel_tgt->
saddr, &sel_cmp->
saddr,
2798 if (
memcmp(sel_tgt, sel_cmp,
sizeof(*sel_tgt)) == 0) {
2816 if (xfrm_migrate_selector_match(sel, &pol->
selector) &&
2817 pol->
type == type) {
2823 chain = &
init_net.xfrm.policy_inexact[dir];
2825 if (xfrm_migrate_selector_match(sel, &pol->
selector) &&
2826 pol->
type == type &&
2871 static int xfrm_policy_migrate(
struct xfrm_policy *pol,
2884 for (i = 0; i < pol->
xfrm_nr; i++) {
2885 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2886 if (!migrate_tmpl_match(mp, &pol->
xfrm_vec[i]))
2894 sizeof(pol->
xfrm_vec[i].id.daddr));
2911 static int xfrm_migrate_check(
const struct xfrm_migrate *m,
int num_migrate)
2918 for (i = 0; i < num_migrate; i++) {
2922 m[i].old_family) == 0))
2924 if (xfrm_addr_any(&m[i].new_daddr, m[i].
new_family) ||
2925 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
2929 for (j = i + 1; j < num_migrate; j++) {
2930 if (!
memcmp(&m[i].old_daddr, &m[j].old_daddr,
2931 sizeof(m[i].old_daddr)) &&
2932 !
memcmp(&m[i].old_saddr, &m[j].old_saddr,
2933 sizeof(m[i].old_saddr)) &&
2937 m[i].old_family == m[j].old_family)
2949 int i,
err, nx_cur = 0, nx_new = 0;
2956 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
2960 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) ==
NULL) {
2966 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2967 if ((x = xfrm_migrate_state_find(mp))) {
2970 if ((xc = xfrm_state_migrate(x, mp))) {
2981 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
2986 xfrm_states_put(x_cur, nx_cur);
2987 xfrm_states_delete(x_cur, nx_cur);
2991 km_migrate(sel, dir, type, m, num_migrate, k);
3003 xfrm_states_put(x_cur, nx_cur);
3005 xfrm_states_delete(x_new, nx_new);