61 #include <linux/slab.h>
62 #include <linux/module.h>
63 #include <linux/types.h>
64 #include <linux/kernel.h>
65 #include <linux/sched.h>
66 #include <linux/string.h>
68 #include <linux/random.h>
69 #include <linux/if_vlan.h>
/*
 * META_COLLECTOR - declare the signature of a metadata collector.
 *
 * Each collector extracts a single piece of metadata from the
 * skb / packet-info pair into *dst; failure is reported by setting
 * *err (collectors are wired into the ops tables via META_FUNC).
 * The excerpt had the file's original line numbers fused into the
 * macro text; this restores the syntactically valid definition.
 */
#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
	struct tcf_pkt_info *info, struct meta_value *v, \
	struct meta_obj *dst, int *err)
115 static inline unsigned long fixed_loadavg(
int load)
117 int rnd_load = load + (
FIXED_1/200);
120 return ((rnd_load >>
FSHIFT) * 100) + rnd_frac;
163 *
err = int_dev(
skb->dev, dst);
168 *
err = var_dev(
skb->dev, dst);
180 if (!tag && __vlan_get_tag(
skb, &tag))
255 #ifdef CONFIG_IP_ROUTE_CLASSID
256 dst->
value = skb_dst(
skb)->tclassid;
274 #define SKIP_NONLOCAL(skb) \
275 if (unlikely(skb->sk == NULL)) { \
302 dst->
value =
skb->sk->sk_bound_dev_if;
309 if (
skb->sk->sk_bound_dev_if == 0) {
317 skb->sk->sk_bound_dev_if);
318 *
err = var_dev(dev, dst);
356 dst->
value = sk_rmem_alloc_get(
skb->sk);
362 dst->
value = sk_wmem_alloc_get(
skb->sk);
374 dst->
value =
skb->sk->sk_receive_queue.qlen;
380 dst->
value =
skb->sk->sk_write_queue.qlen;
386 dst->
value =
skb->sk->sk_wmem_queued;
392 dst->
value =
skb->sk->sk_forward_alloc;
422 dst->
value =
skb->sk->sk_error_queue.qlen;
428 dst->
value =
skb->sk->sk_ack_backlog;
434 dst->
value =
skb->sk->sk_max_ack_backlog;
464 dst->
value =
skb->sk->sk_frag.offset;
470 dst->
value =
skb->sk->sk_write_pending;
/* META_ID - shorthand for the TCF_META_ID_* enum constant of @name. */
#define META_ID(name) TCF_META_ID_##name
/* META_FUNC - ops-table initializer binding the meta_##name collector. */
#define META_FUNC(name) { .get = meta_##name }
544 return &__meta_ops[meta_type(val)][meta_id(val)];
563 int len = nla_len(nla);
577 static void meta_var_apply_extras(
struct meta_value *v,
580 int shift = v->
hdr.shift;
582 if (shift && shift < dst->len)
590 goto nla_put_failure;
616 if (nla_len(nla) >=
sizeof(
unsigned long)) {
617 dst->
val = *(
unsigned long *) nla_data(nla);
618 dst->
len =
sizeof(
unsigned long);
619 }
else if (nla_len(nla) ==
sizeof(
u32)) {
620 dst->
val = nla_get_u32(nla);
628 static void meta_int_apply_extras(
struct meta_value *v,
640 if (v->
len ==
sizeof(
unsigned long)) {
641 if (
nla_put(skb, tlv,
sizeof(
unsigned long), &v->
val))
642 goto nla_put_failure;
643 }
else if (v->
len ==
sizeof(
u32)) {
644 if (nla_put_u32(skb, tlv, v->
val))
645 goto nla_put_failure;
668 .destroy = meta_var_destroy,
669 .compare = meta_var_compare,
670 .change = meta_var_change,
671 .apply_extras = meta_var_apply_extras,
672 .dump = meta_var_dump
675 .compare = meta_int_compare,
676 .change = meta_int_change,
677 .apply_extras = meta_int_apply_extras,
678 .dump = meta_int_dump
684 return &__meta_type_ops[meta_type(v)];
712 static int em_meta_match(
struct sk_buff *skb,
struct tcf_ematch *
m,
719 if (meta_get(skb, info, &meta->
lvalue, &l_value) < 0 ||
720 meta_get(skb, info, &meta->
rvalue, &r_value) < 0)
725 switch (meta->
lvalue.hdr.op) {
737 static void meta_delete(
struct meta_match *meta)
751 static inline int meta_change_data(
struct meta_value *dst,
struct nlattr *nla)
754 if (nla_len(nla) == 0)
772 static int em_meta_change(
struct tcf_proto *tp,
void *
data,
int len,
773 struct tcf_ematch *m)
802 if (!meta_is_supported(&meta->
lvalue) ||
803 !meta_is_supported(&meta->
rvalue)) {
812 m->datalen =
sizeof(*meta);
813 m->data = (
unsigned long) meta;
822 static void em_meta_destroy(
struct tcf_proto *tp,
struct tcf_ematch *m)
828 static int em_meta_dump(
struct sk_buff *skb,
struct tcf_ematch *
em)
834 memset(&hdr, 0,
sizeof(hdr));
839 goto nla_put_failure;
842 if (ops->
dump(skb, &meta->
lvalue, TCA_EM_META_LVALUE) < 0 ||
843 ops->
dump(skb, &meta->
rvalue, TCA_EM_META_RVALUE) < 0)
844 goto nla_put_failure;
852 static struct tcf_ematch_ops em_meta_ops = {
854 .change = em_meta_change,
855 .match = em_meta_match,
856 .destroy = em_meta_destroy,
857 .dump = em_meta_dump,
862 static int __init init_em_meta(
void)
867 static void __exit exit_em_meta(
void)