14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
30 #include <asm/uaccess.h>
31 #if IS_ENABLED(CONFIG_IPV6)
32 #include <linux/in6.h>
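/*
 * Excerpt of the kernel's XFRM netlink interface (net/xfrm/xfrm_user.c);
 * the numeric prefixes are the original source line numbers.  The
 * verify_*() helpers below check that each algorithm attribute is at
 * least as long as the xfrm_algo header plus its declared key/ICV length
 * (xfrm_alg_len(), xfrm_alg_auth_len(), aead_len()), so a short
 * attribute cannot cause an out-of-bounds read of the key material.
 */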
49 if (nla_len(rt) < xfrm_alg_len(algp))
66 static int verify_auth_trunc(struct nlattr **attrs)
75 if (nla_len(rt) < xfrm_alg_auth_len(algp))
82 static int verify_aead(struct nlattr **attrs)
91 if (nla_len(rt) < aead_len(algp))
104 *addrp = nla_data(rt);
107 static inline int verify_sec_ctx_len(struct nlattr **attrs)
137 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
138 nla_len(rt) != sizeof(*rs))
165 #if IS_ENABLED(CONFIG_IPV6)
177 switch (p->id.proto) {
192 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
197 attrs[XFRMA_ALG_AUTH_TRUNC] ||
199 attrs[XFRMA_ALG_AEAD])
201 if (attrs[XFRMA_TFCPAD] &&
207 if (!attrs[XFRMA_ALG_COMP] ||
208 attrs[XFRMA_ALG_AEAD] ||
210 attrs[XFRMA_ALG_AUTH_TRUNC] ||
211 attrs[XFRMA_ALG_CRYPT] ||
216 #if IS_ENABLED(CONFIG_IPV6)
219 if (attrs[XFRMA_ALG_COMP] ||
221 attrs[XFRMA_ALG_AUTH_TRUNC] ||
222 attrs[XFRMA_ALG_AEAD] ||
223 attrs[XFRMA_ALG_CRYPT] ||
226 attrs[XFRMA_TFCPAD] ||
236 if ((err = verify_aead(attrs)))
238 if ((err = verify_auth_trunc(attrs)))
242 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
244 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
246 if ((err = verify_sec_ctx_len(attrs)))
248 if ((err = verify_replay(p, attrs)))
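/*
 * verify_newsa_info() (fragments above) rejects SA requests whose
 * algorithm attributes do not match the protocol: ESP may carry AEAD or
 * auth+crypt, AH only an auth algorithm, IPcomp only a compression
 * algorithm.  The attach_*() helpers below copy the validated
 * attributes into the new xfrm_state and record the algorithm id in
 * x->props.
 */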
269 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
279 ualg = nla_data(rta);
281 algo = get_byname(ualg->alg_name, 1);
284 *props = algo->desc.sadb_alg_id;
305 ualg = nla_data(rta);
310 *props = algo->desc.sadb_alg_id;
334 ualg = nla_data(rta);
342 *props = algo->desc.sadb_alg_id;
365 ualg = nla_data(rta);
370 *props = algo->desc.sadb_alg_id;
387 if (!replay_esn || !rp)
391 ulen = xfrm_replay_state_esn_len(up);
393 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
410 klen = xfrm_replay_state_esn_len(up);
411 ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
432 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
464 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
475 replay_esn = nla_data(re);
477 xfrm_replay_state_esn_len(replay_esn));
479 xfrm_replay_state_esn_len(replay_esn));
484 replay = nla_data(rp);
491 ltime = nla_data(lt);
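/*
 * xfrm_update_ae_params() above copies replay counters, replay windows
 * and lifetime data from the optional XFRMA_REPLAY_* / XFRMA_LTIME_VAL
 * attributes.  xfrm_state_construct() below builds a new xfrm_state for
 * XFRM_MSG_NEWSA/UPDSA: it attaches the algorithms, TFC padding, mark
 * and security context before the state is inserted.
 */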
516 copy_from_user_state(x, p);
518 if ((err = attach_aead(&x->aead, &x->props.ealgo,
519 attrs[XFRMA_ALG_AEAD])))
521 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
522 attrs[XFRMA_ALG_AUTH_TRUNC])))
524 if (!x->props.aalgo) {
525 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
529 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
531 attrs[XFRMA_ALG_CRYPT])))
533 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
535 attrs[XFRMA_ALG_COMP])))
545 if (attrs[XFRMA_TFCPAD])
546 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
555 xfrm_mark_get(attrs, &x->mark);
562 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
578 xfrm_update_ae_params(x, attrs, 0);
593 struct net *net = sock_net(skb->sk);
602 err = verify_newsa_info(p, attrs);
606 x = xfrm_state_construct(net, p, attrs, &err);
617 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
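/*
 * xfrm_add_sa() (above) and xfrm_del_sa() (below) are the doit handlers
 * for XFRM_MSG_NEWSA/UPDSA and XFRM_MSG_DELSA; both emit an audit
 * record with the caller's loginuid/sessionid/secid.
 * xfrm_user_state_lookup() resolves the xfrm_usersa_id plus optional
 * mark to an existing state.
 */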
635 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
643 u32 mark = xfrm_mark_get(attrs, &m);
672 struct net *net = sock_net(skb->sk);
681 x = xfrm_user_state_lookup(net, p, attrs, &err);
685 if ((err = security_xfrm_state_delete(x)) != 0)
688 if (xfrm_state_kern(x)) {
705 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
738 int ctx_size = sizeof(*uctx) + s->ctx_len;
744 uctx = nla_data(attr);
746 uctx->len = ctx_size;
765 algo = nla_data(nla);
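/*
 * Dump-side helpers: copy_sec_ctx(), copy_to_user_auth() and
 * copy_to_user_state_extra() serialise an xfrm_state back into netlink
 * attributes for GETSA and dump replies.
 */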
774 static int copy_to_user_state_extra(struct xfrm_state *x,
780 copy_to_user_state(x, p);
798 ret = copy_to_user_auth(x->aalg, skb);
800 ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
801 xfrm_alg_auth_len(x->aalg), x->aalg);
821 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
825 ret = xfrm_mark_put(skb, &x->mark);
829 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
836 ret = copy_sec_ctx(x->security, skb);
857 err = copy_to_user_state_extra(x, p, skb);
859 nlmsg_cancel(skb, nlh);
875 struct net *net = sock_net(skb->sk);
880 sizeof(cb->args) - sizeof(cb->args[0]));
884 info.nlmsg_seq = cb->nlh->nlmsg_seq;
908 info.in_skb = in_skb;
910 info.nlmsg_seq = seq;
911 info.nlmsg_flags = 0;
913 err = dump_one_state(x, 0, &info);
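/*
 * xfrm_dump_sa()/dump_one_state() walk the SAD for NLM_F_DUMP requests;
 * xfrm_state_netlink() builds a single-state reply.  The *_msgsize()
 * and build_*() pairs below answer XFRM_MSG_GETSPDINFO and
 * XFRM_MSG_GETSADINFO with policy/state hash statistics.
 */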
922 static inline size_t xfrm_spdinfo_msgsize(void)
929 static int build_spdinfo(struct sk_buff *skb, struct net *net,
946 spc.incnt = si.incnt;
947 spc.outcnt = si.outcnt;
948 spc.fwdcnt = si.fwdcnt;
949 spc.inscnt = si.inscnt;
950 spc.outscnt = si.outscnt;
951 spc.fwdscnt = si.fwdscnt;
952 sph.spdhcnt = si.spdhcnt;
953 sph.spdhmcnt = si.spdhmcnt;
959 nlmsg_cancel(skb, nlh);
963 return nlmsg_end(skb, nlh);
969 struct net *net = sock_net(skb->sk);
971 u32 *flags = nlmsg_data(nlh);
975 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
979 if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
982 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
985 static inline size_t xfrm_sadinfo_msgsize(void)
992 static int build_sadinfo(struct sk_buff *skb, struct net *net,
1005 f = nlmsg_data(nlh);
1009 sh.sadhmcnt = si.sadhmcnt;
1010 sh.sadhcnt = si.sadhcnt;
1016 nlmsg_cancel(skb, nlh);
1020 return nlmsg_end(skb, nlh);
1023 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1026 struct net *net = sock_net(skb->sk);
1028 u32 *flags = nlmsg_data(nlh);
1032 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1036 if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
1039 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1045 struct net *net = sock_net(skb->sk);
1051 x = xfrm_user_state_lookup(net, p, attrs, &err);
1055 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1056 if (IS_ERR(resp_skb)) {
1057 err = PTR_ERR(resp_skb);
1059 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
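/*
 * verify_userspi_info() bounds the requested SPI range (IPcomp CPIs are
 * 16 bit, hence the 0x10000 check) and xfrm_alloc_userspi() reserves an
 * SPI for an acquired state in response to XFRM_MSG_ALLOCSPI.
 */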
1068 switch (p->info.id.proto) {
1075 if (p->max >= 0x10000)
1089 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1092 struct net *net = sock_net(skb->sk);
1102 p = nlmsg_data(nlh);
1103 err = verify_userspi_info(p);
1107 family = p->info.family;
1108 daddr = &p->info.id.daddr;
1112 mark = xfrm_mark_get(attrs, &m);
1115 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
1123 p->info.id.proto, daddr,
1134 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1135 if (IS_ERR(resp_skb)) {
1136 err = PTR_ERR(resp_skb);
1140 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
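/*
 * Policy side: verify_policy_dir()/verify_policy_type() and
 * verify_newpolicy_info() validate the xfrm_userpolicy_info header,
 * copy_from_user_tmpl()/validate_tmpl() check the XFRMA_TMPL array, and
 * xfrm_policy_construct() assembles the xfrm_policy for
 * XFRM_MSG_NEWPOLICY/UPDPOLICY.
 */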
1148 static int verify_policy_dir(u8 dir)
1163 static int verify_policy_type(u8 type)
1167 #ifdef CONFIG_XFRM_SUB_POLICY
1201 switch (p->sel.family) {
1206 #if IS_ENABLED(CONFIG_IPV6)
1216 return verify_policy_dir(p->dir);
1227 uctx = nla_data(rt);
1228 return security_xfrm_policy_alloc(&pol->security, uctx);
1237 for (i = 0; i < nr; i++, ut++) {
1263 for (i = 0; i < nr; i++) {
1274 switch (ut[i].family) {
1277 #if IS_ENABLED(CONFIG_IPV6)
1297 int nr = nla_len(rt) / sizeof(*utmpl);
1300 err = validate_tmpl(nr, utmpl, pol->family);
1304 copy_templates(pol, utmpl, nr);
1309 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1321 err = verify_policy_type(type);
1343 memset(p, 0, sizeof(*p));
1366 copy_from_user_policy(xp, p);
1368 err = copy_from_user_policy_type(&xp->type, attrs);
1372 if (!(err = copy_from_user_tmpl(xp, attrs)))
1373 err = copy_from_user_sec_ctx(xp, attrs);
1377 xfrm_mark_get(attrs, &xp->mark);
1390 struct net *net = sock_net(skb->sk);
1397 u32 sessionid = audit_get_sessionid(current);
1400 err = verify_newpolicy_info(p);
1403 err = verify_sec_ctx_len(attrs);
1407 xp = xfrm_policy_construct(net, p, attrs, &err);
1418 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1421 security_xfrm_policy_free(xp->security);
1444 for (i = 0; i < xp->xfrm_nr; i++) {
1448 memset(up, 0, sizeof(*up));
1465 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1468 return copy_sec_ctx(x->security, skb);
1476 return copy_sec_ctx(xp->security, skb);
1479 static inline size_t userpolicy_type_attrsize(void)
1481 #ifdef CONFIG_XFRM_SUB_POLICY
1488 #ifdef CONFIG_XFRM_SUB_POLICY
1489 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1499 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1505 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1519 p = nlmsg_data(nlh);
1520 copy_to_user_policy(xp, p, dir);
1521 err = copy_to_user_tmpl(xp, skb);
1523 err = copy_to_user_sec_ctx(xp, skb);
1525 err = copy_to_user_policy_type(xp->type, skb);
1527 err = xfrm_mark_put(skb, &xp->mark);
1529 nlmsg_cancel(skb, nlh);
1532 nlmsg_end(skb, nlh);
1546 struct net *net = sock_net(skb->sk);
1551 sizeof(cb->args) - sizeof(cb->args[0]));
1555 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1568 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1580 info.in_skb = in_skb;
1582 info.nlmsg_seq = seq;
1583 info.nlmsg_flags = 0;
1585 err = dump_one_policy(xp, dir, 0, &info);
1588 return ERR_PTR(err);
1597 struct net *net = sock_net(skb->sk);
1605 u32 mark = xfrm_mark_get(attrs, &m);
1607 p = nlmsg_data(nlh);
1610 err = copy_from_user_policy_type(&type, attrs);
1614 err = verify_policy_dir(p->dir);
1624 err = verify_sec_ctx_len(attrs);
1632 err = security_xfrm_policy_alloc(&ctx, uctx);
1638 security_xfrm_policy_free(ctx);
1646 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1647 if (IS_ERR(resp_skb)) {
1648 err = PTR_ERR(resp_skb);
1650 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1655 u32 sessionid = audit_get_sessionid(current);
1659 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1680 struct net *net = sock_net(skb->sk);
1686 audit_info.loginuid = audit_get_loginuid(current);
1687 audit_info.sessionid = audit_get_sessionid(current);
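/*
 * xfrm_get_ae()/xfrm_new_ae() below exchange asynchronous replay and
 * lifetime state (XFRM_MSG_GETAE/NEWAE); build_aevent() packs the
 * current replay counters, lifetime, replay threshold and timer into a
 * reply, all under x->lock.
 */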
1705 static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
1712 + nla_total_size(replay_size)
1714 + nla_total_size(sizeof(struct xfrm_mark))
1716 + nla_total_size(4);
1729 id = nlmsg_data(nlh);
1731 id->sa_id.spi = x->id.spi;
1732 id->sa_id.family = x->props.family;
1733 id->sa_id.proto = x->id.proto;
1735 id->reqid = x->props.reqid;
1739 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1763 err = xfrm_mark_put(skb, &x->mark);
1767 return nlmsg_end(skb, nlh);
1770 nlmsg_cancel(skb, nlh);
1777 struct net *net = sock_net(skb->sk);
1787 mark = xfrm_mark_get(attrs, &m);
1793 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
1794 if (r_skb == NULL) {
1804 spin_lock_bh(&x->lock);
1809 if (build_aevent(r_skb, x, &c) < 0)
1811 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
1812 spin_unlock_bh(&x->lock);
1820 struct net *net = sock_net(skb->sk);
1831 if (!lt && !rp && !re)
1838 mark = xfrm_mark_get(attrs, &m);
1847 err = xfrm_replay_verify_len(x->replay_esn, rp);
1851 spin_lock_bh(&x->lock);
1852 xfrm_update_ae_params(x, attrs, 1);
1853 spin_unlock_bh(&x->lock);
1866 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1869 struct net *net = sock_net(skb->sk);
1875 err = copy_from_user_policy_type(&type, attrs);
1879 audit_info.loginuid = audit_get_loginuid(current);
1880 audit_info.sessionid = audit_get_sessionid(current);
1898 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1901 struct net *net = sock_net(skb->sk);
1908 u32 mark = xfrm_mark_get(attrs, &m);
1910 err = copy_from_user_policy_type(&type, attrs);
1914 err = verify_policy_dir(p->dir);
1924 err = verify_sec_ctx_len(attrs);
1932 err = security_xfrm_policy_alloc(&ctx, uctx);
1937 &p->sel, ctx, 0, &err);
1938 security_xfrm_policy_free(ctx);
1949 u32 sessionid = audit_get_sessionid(current);
1954 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1958 WARN(1, "Dont know what to do with soft policy expire\n");
1967 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1970 struct net *net = sock_net(skb->sk);
1976 u32 mark = xfrm_mark_get(attrs, &m);
1984 spin_lock_bh(&x->lock);
1992 u32 sessionid = audit_get_sessionid(current);
1997 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
2001 spin_unlock_bh(&x->lock);
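/*
 * xfrm_add_acquire() lets userspace inject an XFRM_MSG_ACQUIRE: the
 * embedded policy is validated and constructed like a normal policy,
 * and a larval state is set up for its templates.
 */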
2006 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2009 struct net *net = sock_net(skb->sk);
2023 xfrm_mark_get(attrs, &mark);
2025 err = verify_newpolicy_info(&ua->policy);
2030 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
2041 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
2060 WARN(1, "BAD policy passed\n");
2067 #ifdef CONFIG_XFRM_MIGRATE
2068 static int copy_from_user_migrate(struct xfrm_migrate *ma,
2070 struct nlattr **attrs, int *num)
2087 num_migrate = nla_len(rt) / sizeof(*um);
2092 for (i = 0; i < num_migrate; i++, um++, ma++) {
2098 ma->proto = um->proto;
2099 ma->mode = um->mode;
2100 ma->reqid = um->reqid;
2125 err = copy_from_user_policy_type(&type, attrs);
2129 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
2148 #ifdef CONFIG_XFRM_MIGRATE
2153 memset(&um, 0, sizeof(um));
2171 memset(&uk, 0, sizeof(uk));
2180 static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
2185 + userpolicy_type_attrsize();
2201 pol_id = nlmsg_data(nlh);
2203 memset(pol_id, 0, sizeof(*pol_id));
2208 err = copy_to_user_kmaddress(k, skb);
2212 err = copy_to_user_policy_type(type, skb);
2215 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2216 err = copy_to_user_migrate(mp, skb);
2221 return nlmsg_end(skb, nlh);
2224 nlmsg_cancel(skb, nlh);
2235 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
2240 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
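/*
 * Message plumbing: xfrm_msg_min[] and xfrma_policy[] describe the
 * fixed header and attribute lengths for each XFRM message type,
 * xfrm_dispatch[] maps message types to doit/dump handlers, and
 * xfrm_user_rcv_msg() validates and dispatches each request
 * (netlink_dump_start() for NLM_F_DUMP, nlmsg_parse() + doit otherwise).
 */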
2254 #define XMSGSIZE(type) sizeof(struct type)
2288 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
2289 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
2292 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
2303 [XFRMA_TFCPAD] = { .type = NLA_U32 },
2307 static struct xfrm_link {
2315 .dump = xfrm_dump_sa,
2316 .done = xfrm_dump_sa_done },
2320 .dump = xfrm_dump_policy,
2321 .done = xfrm_dump_policy_done },
2337 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2339 struct net *net = sock_net(skb->sk);
2341 struct xfrm_link *link;
2349 link = &xfrm_dispatch[type];
2358 if (link->dump == NULL)
2366 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
2370 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
2375 if (link->doit == NULL)
2378 return link->doit(skb, nlh, attrs);
2381 static void xfrm_netlink_rcv(struct sk_buff *skb)
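/*
 * Below are the km notification builders sent from the kernel to
 * listeners: expire, aevent, SA add/update/delete and flush, acquire,
 * policy expire/change/flush, MIPv6 reports and NAT-T port mappings.
 * Each has a *_msgsize() helper so nlmsg_new() can size the skb exactly.
 */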
2388 static inline size_t xfrm_expire_msgsize(void)
2391 + nla_total_size(sizeof(struct xfrm_mark));
2404 ue = nlmsg_data(nlh);
2405 copy_to_user_state(x, &ue->state);
2408 err = xfrm_mark_put(skb, &x->mark);
2412 return nlmsg_end(skb, nlh);
2417 struct net *net = xs_net(x);
2420 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2424 if (build_expire(skb, x, c) < 0) {
2434 struct net *net = xs_net(x);
2437 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
2441 if (build_aevent(skb, x, c) < 0)
2447 static int xfrm_notify_sa_flush(const struct km_event *c)
2449 struct net *net = c->net;
2465 p = nlmsg_data(nlh);
2468 nlmsg_end(skb, nlh);
2473 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2477 l += nla_total_size(aead_len(x->aead));
2479 l += nla_total_size(sizeof(struct xfrm_algo) +
2480 (x->aalg->alg_key_len + 7) / 8);
2481 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
2484 l += nla_total_size(xfrm_alg_len(x->ealg));
2486 l += nla_total_size(sizeof(*x->calg));
2488 l += nla_total_size(sizeof(*x->encap));
2490 l += nla_total_size(sizeof(x->tfcpad));
2492 l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
2497 l += nla_total_size(sizeof(*x->coaddr));
2500 l += nla_total_size(sizeof(u64));
2507 struct net *net = xs_net(x);
2512 int len = xfrm_sa_len(x);
2515 headlen = sizeof(*p);
2517 len += nla_total_size(headlen);
2518 headlen = sizeof(*id);
2519 len += nla_total_size(sizeof(struct xfrm_mark));
2532 p = nlmsg_data(nlh);
2536 id = nlmsg_data(nlh);
2538 id->spi = x->id.spi;
2539 id->family = x->props.family;
2540 id->proto = x->id.proto;
2549 err = copy_to_user_state_extra(x, p, skb);
2553 nlmsg_end(skb, nlh);
2567 return xfrm_exp_state_notify(x, c);
2569 return xfrm_aevent_state_notify(x, c);
2573 return xfrm_notify_sa(x, c);
2575 return xfrm_notify_sa_flush(c);
2586 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2591 + nla_total_size(sizeof(struct xfrm_mark))
2592 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2593 + userpolicy_type_attrsize();
2608 ua = nlmsg_data(nlh);
2616 ua->seq = x->km.seq = seq;
2618 err = copy_to_user_tmpl(xp, skb);
2620 err = copy_to_user_state_sec_ctx(x, skb);
2622 err = copy_to_user_policy_type(xp->type, skb);
2624 err = xfrm_mark_put(skb, &xp->mark);
2626 nlmsg_cancel(skb, nlh);
2630 return nlmsg_end(skb, nlh);
2636 struct net *net = xs_net(x);
2639 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2643 if (build_acquire(skb, x, xt, xp) < 0)
2653 u8 *data, int len, int *dir)
2655 struct net *net = sock_net(sk);
2661 switch (sk->sk_family) {
2668 #if IS_ENABLED(CONFIG_IPV6)
2683 if (len < sizeof(*p) ||
2684 verify_newpolicy_info(p))
2687 nr = ((len - sizeof(*p)) / sizeof(*ut));
2688 if (validate_tmpl(nr, ut, p->sel.family))
2700 copy_from_user_policy(xp, p);
2702 copy_templates(xp, ut, nr);
2709 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2713 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2714 + nla_total_size(sizeof(struct xfrm_mark))
2715 + userpolicy_type_attrsize();
2730 upe = nlmsg_data(nlh);
2731 copy_to_user_policy(xp, &upe->pol, dir);
2732 err = copy_to_user_tmpl(xp, skb);
2734 err = copy_to_user_sec_ctx(xp, skb);
2736 err = copy_to_user_policy_type(xp->type, skb);
2738 err = xfrm_mark_put(skb, &xp->mark);
2740 nlmsg_cancel(skb, nlh);
2745 return nlmsg_end(skb, nlh);
2748 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2750 struct net *net = xp_net(xp);
2753 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2757 if (build_polexpire(skb, xp, dir, c) < 0)
2766 struct net *net = xp_net(xp);
2773 headlen = sizeof(*p);
2775 len += nla_total_size(headlen);
2776 headlen = sizeof(*id);
2778 len += userpolicy_type_attrsize();
2779 len += nla_total_size(sizeof(struct xfrm_mark));
2791 p = nlmsg_data(nlh);
2795 id = nlmsg_data(nlh);
2796 memset(id, 0, sizeof(*id));
2799 id->index = xp->index;
2811 copy_to_user_policy(xp, p, dir);
2812 err = copy_to_user_tmpl(xp, skb);
2814 err = copy_to_user_policy_type(xp->type, skb);
2816 err = xfrm_mark_put(skb, &xp->mark);
2820 nlmsg_end(skb, nlh);
2829 static int xfrm_notify_policy_flush(const struct km_event *c)
2831 struct net *net = c->net;
2836 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2844 err = copy_to_user_policy_type(c->data.type, skb);
2848 nlmsg_end(skb, nlh);
2857 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2864 return xfrm_notify_policy(xp, dir, c);
2866 return xfrm_notify_policy_flush(c);
2868 return xfrm_exp_policy_notify(xp, dir, c);
2878 static inline size_t xfrm_report_msgsize(void)
2893 ur = nlmsg_data(nlh);
2898 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
2900 nlmsg_cancel(skb, nlh);
2904 return nlmsg_end(skb, nlh);
2907 static int xfrm_send_report(struct net *net, u8 proto,
2912 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2916 if (build_report(skb, proto, sel, addr) < 0)
2922 static inline size_t xfrm_mapping_msgsize(void)
2937 um = nlmsg_data(nlh);
2939 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
2940 um->id.spi = x->id.spi;
2941 um->id.family = x->props.family;
2942 um->id.proto = x->id.proto;
2949 return nlmsg_end(skb, nlh);
2955 struct net *net = xs_net(x);
2964 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
2968 if (build_mapping(skb, x, ipaddr, sport) < 0)
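/*
 * netlink_mgr registers these callbacks with the xfrm core as the
 * netlink key manager; xfrm_user_net_init() creates the per-namespace
 * NETLINK_XFRM socket, and the module init/exit hooks register and
 * unregister the whole interface.
 */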
2974 static struct xfrm_mgr netlink_mgr = {
2976 .notify = xfrm_send_state_notify,
2977 .acquire = xfrm_send_acquire,
2978 .compile_policy = xfrm_compile_policy,
2979 .notify_policy = xfrm_send_policy_notify,
2980 .report = xfrm_send_report,
2981 .migrate = xfrm_send_migrate,
2982 .new_mapping = xfrm_send_mapping,
2985 static int __net_init xfrm_user_net_init(struct net *net)
2990 .input = xfrm_netlink_rcv,
2996 net->xfrm.nlsk_stash = nlsk;
3012 .init = xfrm_user_net_init,
3013 .exit_batch = xfrm_user_net_exit,
3016 static int __init xfrm_user_init(void)
3031 static void __exit xfrm_user_exit(void)