12 #include <linux/kernel.h>
14 #include <linux/list.h>
16 #include <linux/random.h>
21 #include <linux/ipv6.h>
22 #include <linux/if_vlan.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
31 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
64 static inline u32 addr_fold(
void *
addr)
66 unsigned long a = (
unsigned long)addr;
75 return addr_fold(skb->
sk);
95 return addr_fold(skb->
sk);
106 static u32 flow_get_iif(
const struct sk_buff *skb)
111 static u32 flow_get_priority(
const struct sk_buff *skb)
116 static u32 flow_get_mark(
const struct sk_buff *skb)
121 static u32 flow_get_nfct(
const struct sk_buff *skb)
123 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
124 return addr_fold(skb->nfct);
130 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
131 #define CTTUPLE(skb, member) \
133 enum ip_conntrack_info ctinfo; \
134 const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
137 ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \
140 #define CTTUPLE(skb, member) \
156 return flow_get_src(skb, flow);
168 return flow_get_dst(skb, flow);
171 static u32 flow_get_nfct_proto_src(
const struct sk_buff *skb,
const struct flow_keys *flow)
175 return flow_get_proto_src(skb, flow);
178 static u32 flow_get_nfct_proto_dst(
const struct sk_buff *skb,
const struct flow_keys *flow)
182 return flow_get_proto_dst(skb, flow);
185 static u32 flow_get_rtclassid(
const struct sk_buff *skb)
187 #ifdef CONFIG_IP_ROUTE_CLASSID
189 return skb_dst(skb)->tclassid;
194 static u32 flow_get_skuid(
const struct sk_buff *skb)
196 if (skb->
sk && skb->
sk->sk_socket && skb->
sk->sk_socket->file) {
197 kuid_t skuid = skb->
sk->sk_socket->file->f_cred->fsuid;
203 static u32 flow_get_skgid(
const struct sk_buff *skb)
205 if (skb->
sk && skb->
sk->sk_socket && skb->
sk->sk_socket->file) {
206 kgid_t skgid = skb->
sk->sk_socket->file->f_cred->fsgid;
212 static u32 flow_get_vlan_tag(
const struct sk_buff *skb)
216 if (vlan_get_tag(skb, &
tag) < 0)
221 static u32 flow_get_rxhash(
struct sk_buff *skb)
223 return skb_get_rxhash(skb);
230 return flow_get_src(skb, flow);
232 return flow_get_dst(skb, flow);
234 return flow_get_proto(skb, flow);
236 return flow_get_proto_src(skb, flow);
238 return flow_get_proto_dst(skb, flow);
240 return flow_get_iif(skb);
242 return flow_get_priority(skb);
244 return flow_get_mark(skb);
246 return flow_get_nfct(skb);
248 return flow_get_nfct_src(skb, flow);
250 return flow_get_nfct_dst(skb, flow);
252 return flow_get_nfct_proto_src(skb, flow);
254 return flow_get_nfct_proto_dst(skb, flow);
256 return flow_get_rtclassid(skb);
258 return flow_get_skuid(skb);
260 return flow_get_skgid(skb);
262 return flow_get_vlan_tag(skb);
264 return flow_get_rxhash(skb);
/*
 * Bitmask of the keys whose getters consume a struct flow_keys
 * argument (the src/dst/proto families, including the conntrack
 * variants that fall back to them).  NOTE(review): the consumer of
 * this mask is outside this chunk — presumably it gates whether the
 * flow dissector must run before key extraction; confirm at the
 * flow_key_get() call site.
 */
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | \
			  (1 << FLOW_KEY_DST) | \
			  (1 << FLOW_KEY_PROTO) | \
			  (1 << FLOW_KEY_PROTO_SRC) | \
			  (1 << FLOW_KEY_PROTO_DST) | \
			  (1 << FLOW_KEY_NFCT_SRC) | \
			  (1 << FLOW_KEY_NFCT_DST) | \
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) | \
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
302 for (n = 0; n < f->
nkeys; n++) {
303 key =
ffs(keymask) - 1;
304 keymask &= ~(1 <<
key);
305 keys[
n] = flow_key_get(skb, key, &flow_keys);
312 classid = (classid & f->
mask) ^ f->
xor;
322 r = tcf_exts_exec(skb, &f->
exts, res);
330 static void flow_perturbation(
unsigned long arg)
354 static int flow_change(
struct sk_buff *in_skb,
355 struct tcf_proto *tp,
unsigned long base,
365 unsigned int nkeys = 0;
366 unsigned int perturb_period = 0;
375 err = nla_parse_nested(tb,
TCA_FLOW_MAX, opt, flow_policy);
380 baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
386 keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);
411 if (f->
handle != handle && handle)
416 mode = nla_get_u32(tb[TCA_FLOW_MODE]);
425 perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) *
HZ;
431 if (!tb[TCA_FLOW_KEYS])
435 if (tb[TCA_FLOW_MODE])
436 mode = nla_get_u32(tb[TCA_FLOW_MODE]);
440 if (tb[TCA_FLOW_PERTURB]) {
443 perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) *
HZ;
447 baseclass =
TC_H_MAKE(tp->
q->handle, baseclass);
470 if (tb[TCA_FLOW_KEYS]) {
478 f->
mask = nla_get_u32(tb[TCA_FLOW_MASK]);
480 f->
xor = nla_get_u32(tb[TCA_FLOW_XOR]);
482 f->
rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
484 f->
addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);
487 f->
divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
501 *arg = (
unsigned long)f;
519 static int flow_delete(
struct tcf_proto *tp,
unsigned long arg)
526 flow_destroy_filter(tp, f);
530 static int flow_init(
struct tcf_proto *tp)
537 INIT_LIST_HEAD(&head->
filters);
542 static void flow_destroy(
struct tcf_proto *tp)
549 flow_destroy_filter(tp, f);
554 static unsigned long flow_get(
struct tcf_proto *tp,
u32 handle)
560 if (f->handle == handle)
569 static int flow_dump(
struct tcf_proto *tp,
unsigned long fh,
582 goto nla_put_failure;
584 if (nla_put_u32(skb, TCA_FLOW_KEYS, f->
keymask) ||
585 nla_put_u32(skb, TCA_FLOW_MODE, f->
mode))
586 goto nla_put_failure;
588 if (f->
mask != ~0 || f->
xor != 0) {
589 if (nla_put_u32(skb, TCA_FLOW_MASK, f->
mask) ||
590 nla_put_u32(skb, TCA_FLOW_XOR, f->
xor))
591 goto nla_put_failure;
594 nla_put_u32(skb, TCA_FLOW_RSHIFT, f->
rshift))
595 goto nla_put_failure;
597 nla_put_u32(skb, TCA_FLOW_ADDEND, f->
addend))
598 goto nla_put_failure;
601 nla_put_u32(skb, TCA_FLOW_DIVISOR, f->
divisor))
602 goto nla_put_failure;
604 nla_put_u32(skb, TCA_FLOW_BASECLASS, f->
baseclass))
605 goto nla_put_failure;
609 goto nla_put_failure;
612 goto nla_put_failure;
613 #ifdef CONFIG_NET_EMATCH
616 goto nla_put_failure;
618 nla_nest_end(skb, nest);
621 goto nla_put_failure;
626 nlmsg_trim(skb, nest);
638 if (arg->
fn(tp, (
unsigned long)f, arg) < 0) {
649 .classify = flow_classify,
651 .destroy = flow_destroy,
652 .change = flow_change,
653 .delete = flow_delete,
661 static int __init cls_flow_init(
void)
666 static void __exit cls_flow_exit(
void)