Linux Kernel 3.7.1
cls_u32.c
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * The filters are packed to hash tables of key nodes
 * with a set of 32bit key/mask pairs at every node.
 * Nodes reference next level hash tables etc.
 *
 * This scheme is the best universal classifier I managed to
 * invent; it is not super-fast, but it is not slow (provided you
 * program it correctly), and general enough.  And its relative
 * speed grows as the number of rules becomes larger.
 *
 * It seems that it represents the best middle point between
 * speed and manageability both by human and by machine.
 *
 * It is especially useful for link sharing combined with QoS;
 * pure RSVP doesn't need such a general approach and can use
 * much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 * JHS: We should remove the CONFIG_NET_CLS_IND from here
 * eventually when the meta match extension is made available
 *
 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
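
/*
 * A concrete sketch of the key/mask idea above (illustration only, and
 * assuming a plain IPv4 header sitting at the network offset): a rule
 * such as "destination address in 10.0.0.0/8" reduces to a single
 * 32bit key taken 16 bytes into that header,
 *
 *	struct tc_u32_key k = {
 *		.off	= 16,			- offset of daddr in the IPv4 header
 *		.val	= htonl(0x0a000000),	- 10.0.0.0
 *		.mask	= htonl(0xff000000),	- a /8 netmask
 *	};
 *
 * and u32_classify() below accepts it when
 *
 *	((*(__be32 *)data ^ k.val) & k.mask) == 0
 *
 * holds for the 32bit word fetched from the packet at that offset.
 */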

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

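/*
 * One tc_u_common instance is shared by every u32 classifier attached
 * to the same qdisc; it anchors a list of hash tables (tc_u_hnode),
 * each of which owns 'divisor + 1' buckets of key nodes (tc_u_knode).
 * A key node can in turn link down to another hash table, which is
 * what gives the classifier its multi-level structure.
 */
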
struct tc_u_knode {
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char			indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static const struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};

static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

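/*
 * u32_hash_fold() picks the hash bucket for a packet: the fetched 32bit
 * word is masked with the selector's hmask and shifted down by fshift,
 * the position of the lowest set mask bit (computed in u32_change()).
 * With hmask = htonl(0x0000ff00), for instance, fshift is 8 and a word
 * whose host-order value is 0x12345678 folds to 0x56; the caller then
 * masks the result with the table's divisor.
 *
 * u32_classify() walks the filter as follows: starting from the root
 * hash table it scans the selected bucket's key nodes; a node whose
 * keys all match is either terminal (its result is returned, subject
 * to the optional indev check and the attached extensions/actions) or
 * it references a lower hash table through ht_down, in which case the
 * current node and offset are pushed on a small stack (at most
 * TC_U32_MAXDEPTH levels) and the walk descends.  When a bucket is
 * exhausted, the stack is popped and the parent node is re-examined as
 * a terminal match.
 */
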
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}


static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

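/*
 * Filter handles are 32bit values split into three fields (see the
 * TC_U32_* handle macros): the hash table id lives in bits 31..20, the
 * bucket within that table in bits 19..12, and the key node id in bits
 * 11..0.  gen_new_htid() below allocates fresh table ids (with 0x800
 * ORed in before the shift into the htid field), while gen_new_kid()
 * further down picks a node id one past the highest already in use in
 * the target bucket.
 */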
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	WARN_ON(1);
	return -ENOENT;
}

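/*
 * Teardown is reference counted: u32_destroy() drops the root hash
 * table's reference and, once the shared tc_u_common block is no longer
 * referenced by any tcf_proto instance, detaches it from the qdisc,
 * empties every remaining hash table and frees the lot.
 */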
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = tp->root;

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			WARN_ON(ht->refcnt != 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode *)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned int i = 0x7FF;

	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle | (i > 0xFFF ? 0xFFF : i);
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};

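/*
 * These attributes map onto the tc(8) u32 syntax.  As a rough sketch,
 * a command along the lines of
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		u32 match ip dst 10.0.0.0/8 flowid 1:10
 *
 * arrives as a TCA_U32_SEL selector carrying a single tc_u32_key (as in
 * the example near the top of this file) together with a TCA_U32_CLASSID
 * of 1:10, while TCA_U32_DIVISOR, TCA_U32_HASH and TCA_U32_LINK come
 * into play only when hash tables are built and linked by hand.
 */
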
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_old = n->ht_down;
		n->ht_down = ht_down;
		tcf_tree_unlock(tp);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

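/*
 * u32_change() serves three distinct requests: modifying an existing key
 * node (when *arg already points at one), creating a new hash table (when
 * TCA_U32_DIVISOR is present), or creating a new key node, in which case
 * the selector is copied in, the handle is derived from the target table
 * and the node is inserted into its bucket in ascending node-id order.
 */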
static int u32_change(struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	n = (struct tc_u_knode *)*arg;
	if (n) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE]);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		tcf_tree_lock(tp);
		*ins = n;
		tcf_tree_unlock(tp);

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;
		if (n->ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->mark.val || n->mark.mask) &&
		    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
			goto nla_put_failure;
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev) &&
		    nla_put_string(skb, TCA_U32_INDEV, n->indev))
			goto nla_put_failure;
#endif
#ifdef CONFIG_CLS_U32_PERF
		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    n->pf))
			goto nla_put_failure;
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.put		= u32_put,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.dump		= u32_dump,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");