#include "flow.h"
#include "datapath.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
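/*
 * Flow-key extraction and flow-table code for the Open vSwitch datapath
 * (flow.c).  The helpers below first validate that each protocol header is
 * actually present in the skb before the extraction code dereferences it.
 */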
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;

	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	return 0;
}
static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}
static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}
static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
	/* Tail of ovs_flow_used_time(): the flow's last-use time, in ms, is
	 * the current time minus how long the flow has been idle. */
	return cur_ms - idle_ms;
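/*
 * SW_FLOW_KEY_OFFSET(field) evaluates to the offset just past @field inside
 * struct sw_flow_key.  The extraction and netlink-parsing code uses it as
 * the running "key_len", i.e. how many leading bytes of the key have been
 * populated and therefore need to be hashed and compared.
 */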
#define SW_FLOW_KEY_OFFSET(field)				\
	(offsetof(struct sw_flow_key, field) +			\
	 FIELD_SIZEOF(struct sw_flow_key, field))
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int payload_ofs;
	u8 nexthdr;
	__be16 frag_off;
	int err;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.tos = ipv6_get_dsfield(nh);
	/* ... the addresses, flow label and hop limit are copied into the
	 * key, then ipv6_skip_exthdr() advances payload_ofs past any
	 * extension headers, updating nexthdr and frag_off ... */

	if (frag_off & htons(~0x7))
		key->ip.frag = OVS_FRAG_TYPE_LATER;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
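/*
 * ovs_flow_used() updates a flow's statistics for one received packet: byte
 * 13 of the TCP header holds the flag bits, of which the low six
 * (FIN/SYN/RST/PSH/ACK/URG) are OR-ed into the flow's accumulated tcp_flags,
 * while the packet/byte counters and last-used timestamp are updated under
 * flow->lock.
 */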
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if (flow->key.eth.type == htons(ETH_P_IP) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}
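/*
 * The flow table proper: an RCU-protected hash table whose bucket array is
 * kept in a flex_array.  Every sw_flow carries two hash_node links,
 * hash_node[0] and hash_node[1]; table->node_ver selects which link the
 * active table uses, so a rehash can thread the same flows into a new bucket
 * array under the other version without disturbing concurrent readers.
 */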
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets,
			      (hash & (table->n_buckets - 1)));
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i;

	/* ... flex_array_alloc() + flex_array_prealloc() the array, then
	 * initialize each bucket's list head: */
	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)flex_array_get(buckets, i));

	return buckets;
}
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
	/* In ovs_flow_tbl_alloc(new_size): set up the bucket array. */
	table->buckets = alloc_buckets(new_size);
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	ovs_flow_tbl_destroy(table);
}
	/* In ovs_flow_tbl_next(): walk the buckets of the current table
	 * version, returning one flow at a time (used for flow dumps). */
	while (*bucket < table->n_buckets) {
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
	/* In flow_table_copy_flows(): the new table uses the other hash_node
	 * version, so the old table's links stay valid for readers. */
	new->node_ver = !old_ver;

	/* In __flow_tbl_rehash(): populate the freshly allocated table. */
	flow_table_copy_flows(table, new_table);
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	kmem_cache_free(flow_cache, flow);
}
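/*
 * parse_vlan() handles a frame whose outer ethertype is ETH_P_8021Q: it
 * records the TCI (with VLAN_TAG_PRESENT set) in the flow key and pulls the
 * 4-byte tag so that parse_ethertype() below sees the encapsulated type.
 */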
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type;	/* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
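/*
 * parse_ethertype() reads the 2-byte type/length field.  Values of 1536
 * (0x600) and above are genuine ethertypes (Ethernet II framing); smaller
 * values are an 802.3 length, in which case the frame only gets a real
 * ethertype if it carries an 802.2 LLC/SNAP header with a zero OUI, and is
 * otherwise classified as ETH_P_802_2.
 */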
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)
		return proto;

	/* ... verify that a full 802.2 LLC/SNAP header follows ... */
	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP || llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}
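/*
 * parse_icmpv6() stores the ICMPv6 type and code in the key's transport-port
 * fields.  Neighbour solicitation and advertisement messages are parsed
 * further: the target address and any source/target link-layer address
 * options (whose lengths are multiples of 8 bytes) also go into the key.
 */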
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code go into the key's transport-port fields
	 * (elided); neighbour discovery messages get special handling. */
	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset = 0;

		/* Parsing the ND options requires the whole message. */
		if (unlikely(icmp_len < sizeof(*nd)))
			goto out;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				goto invalid;

			/* ... copy a source/target link-layer address option
			 * into the key, then advance ... */
			icmp_len -= opt_len;
			offset += opt_len;
		}
	}
	/* ... ('invalid' clears the partly filled ND fields; 'out' stores the
	 * key length and returns) ... */
}
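/*
 * ovs_flow_extract() builds a struct sw_flow_key from a received packet:
 * metadata (input port) first, then the Ethernet header and any VLAN tag,
 * then the IPv4/IPv6 network header, and finally the TCP/UDP/ICMP(v6)
 * transport fields, relying on the *_ok()/check_*() helpers above to make
 * sure each header really is present before it is read.
 */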
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		     int *key_lenp)
{
	int error = 0;
	int key_len = SW_FLOW_KEY_OFFSET(eth);

	memset(key, 0, sizeof(*key));
	key->phy.in_port = in_port;

	skb_reset_mac_header(skb);

	/* Link layer: the Ethernet addresses and any 802.1Q tag are recorded
	 * in the key (elided), then the ethertype is determined. */
	key->eth.type = parse_ethertype(skb);

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;

		error = check_iphdr(skb);
		if (unlikely(error))
			goto out;

		nh = ip_hdr(skb);
		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				/* ... source/dest ports into key->ipv4.tp ... */
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				/* ... source/dest ports into key->ipv4.tp ... */
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* ... ICMP type/code into the port fields ... */
			}
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			error = nh_len;
			goto out;
		}

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				/* ... source/dest ports into key->ipv6.tp ... */
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				/* ... source/dest ports into key->ipv6.tp ... */
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);
				if (error < 0)
					goto out;
			}
		}
	}

out:
	*key_lenp = key_len;
	return error;
}
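/*
 * Flow table lookup and insertion: the first key_len bytes of the key are
 * hashed, find_bucket() picks the bucket, and candidates are accepted only
 * if both the hash and the key bytes match.  Insertion links the flow into
 * the bucket list under the table's current node_ver.
 */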
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
				    struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash = ovs_flow_hash(key, key_len);

	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
		if (flow->hash == hash &&
		    !memcmp(&flow->key, key, key_len))
			return flow;
	}
	return NULL;
}

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head = find_bucket(table, flow->hash);

	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
	table->count++;
}
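/*
 * The rest of the file converts flow keys to and from the OVS_KEY_ATTR_*
 * netlink attributes exchanged with userspace.  ovs_key_lens[] gives the
 * expected payload length of each attribute; -1 marks variable-length
 * (nested) attributes.
 */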
static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	/* ... */
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	/* ... */
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
};
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u32 *attrs)
{
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;
	const struct ovs_key_icmp *icmp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		/* ... tcp_src/tcp_dst into swkey->ipv4.tp ... */
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		/* ... udp_src/udp_dst into swkey->ipv4.tp ... */
		break;

	case IPPROTO_ICMP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
			return -EINVAL;
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		/* ... icmp_type/icmp_code into the port fields ... */
		break;
	}

	return 0;
}
static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u32 *attrs)
{
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;
	const struct ovs_key_icmpv6 *icmpv6_key;
	const struct ovs_key_nd *nd_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		/* ... tcp_src/tcp_dst into swkey->ipv6.tp ... */
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		/* ... udp_src/udp_dst into swkey->ipv6.tp ... */
		break;

	case IPPROTO_ICMPV6:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
			return -EINVAL;
		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		/* ... icmpv6_type/icmpv6_code into the port fields ... */

		/* Neighbour discovery messages also carry an ND attribute. */
		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
				return -EINVAL;
			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			/* ... nd_sll/nd_tll as well ... */
		}
		break;
	}

	return 0;
}
static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u32 *attrsp)
{
	const struct nlattr *nla;
	u32 attrs = 0;
	int rem;

	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
			return -EINVAL;

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1)
			return -EINVAL;

		attrs |= 1 << type;
		a[type] = nla;
	}
	if (rem)
		return -EINVAL;

	*attrsp = attrs;
	return 0;
}
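/*
 * ovs_flow_from_nlattrs() builds a sw_flow_key from the nested
 * OVS_FLOW_ATTR_KEY attributes supplied by userspace: metadata first, then
 * the Ethernet attributes, an optional 802.1Q encapsulation (a nested
 * OVS_KEY_ATTR_ENCAP preceded by the VLAN TCI), and finally the ethertype-
 * and protocol-specific attributes handled by the helpers above.
 */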
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
			  const struct nlattr *attr)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	int key_len = SW_FLOW_KEY_OFFSET(eth);
	u32 attrs;
	int err;

	memset(swkey, 0, sizeof(struct sw_flow_key));

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return err;

	/* Metadata: priority and input port (validation elided). */
	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		swkey->phy.in_port = in_port;
	}

	/* An 802.1Q frame is described by its VLAN TCI plus a nested
	 * OVS_KEY_ATTR_ENCAP attribute holding the encapsulated key. */
	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE) &&
	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
		const struct nlattr *encap;
		__be16 tci;

		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
			      (1 << OVS_KEY_ATTR_ENCAP)))
			return -EINVAL;

		encap = a[OVS_KEY_ATTR_ENCAP];
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (tci & htons(VLAN_TAG_PRESENT)) {
			swkey->eth.tci = tci;

			err = parse_flow_nlattrs(encap, a, &attrs);
			if (err)
				return err;
		}
	}

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE))
		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

	if (swkey->eth.type == htons(ETH_P_IP)) {
		/* ... OVS_KEY_ATTR_IPV4 addresses, proto, tos, ttl ... */
		err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
			return -EINVAL;
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(swkey->ipv6.addr.dst));
		err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		const struct ovs_key_arp *arp_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
			return -EINVAL;
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		/* ... ARP op and protocol/hardware addresses into the key ... */
	}

	if (err)
		return err;
	*key_lenp = key_len;
	return 0;
}
/* ovs_flow_metadata_from_nlattrs(): pick the skb priority and input port out
 * of a flow-key attribute list (additional validation elided). */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
				   const struct nlattr *attr)
{
	const struct nlattr *nla;
	int rem;

	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type > OVS_KEY_ATTR_MAX ||
		    nla_len(nla) != ovs_key_lens[type])
			return -EINVAL;

		switch (type) {
		case OVS_KEY_ATTR_PRIORITY:
			*priority = nla_get_u32(nla);
			break;
		case OVS_KEY_ATTR_IN_PORT:
			*in_port = nla_get_u32(nla);
			break;
		}
	}
	return 0;
}
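/*
 * ovs_flow_to_nlattrs() is the inverse of the parsing above: it serializes a
 * flow key into netlink attributes.  The usual pattern is nla_reserve() for a
 * fixed-size attribute, a jump to nla_put_failure on overflow, and then
 * filling in the payload returned by nla_data().  VLAN-tagged keys wrap the
 * inner attributes in a nested OVS_KEY_ATTR_ENCAP.
 */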
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (swkey->phy.priority &&
	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
		goto nla_put_failure;

	if (swkey->phy.in_port != USHRT_MAX &&
	    nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else
		encap = NULL;

	if (swkey->eth.type == htons(ETH_P_802_2))
		goto unencap;

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
		goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		/* ... addresses, proto, tos, ttl, frag ... */
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		/* ... addresses, flow label, proto, tclass, hlimit, frag ... */
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		/* ... ARP SPA/TPA, op, SHA/THA ... */
	}

	if (swkey->eth.type == htons(ETH_P_IP) ||
	    swkey->eth.type == htons(ETH_P_IPV6)) {
		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			/* ... source and destination ports ... */
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			/* ... source and destination ports ... */
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			/* ... ICMP type and code from the port fields ... */
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			/* ... ICMPv6 type and code from the port fields ... */

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				/* ... ND target address and sll/tll options ... */
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
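/*
 * flow_cache is the kmem_cache from which struct sw_flow entries are
 * allocated; ovs_flow_init() creates it when the module loads and
 * ovs_flow_exit() destroys it on unload.
 */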
int ovs_flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}