/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */


#define	PRIV(tp)	((struct tcindex_data *) (tp)->root)


struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter *next;
};

struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
				      NULL if unused */
	u16 mask;		/* AND key with mask */
	int shift;		/* shift ANDed key to the right */
	int hash;		/* hash table size; 0 if undefined */
	int alloc_hash;		/* allocated size */
	int fall_through;	/* 0: only classify if explicit match */
};
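
/*
 * Illustrative sketch, not part of the original file: the lookup key is
 * derived from skb->tc_index using the mask and shift fields above. A
 * minimal example with made-up values (example_key() is a hypothetical
 * helper, shown only to make the bit manipulation concrete):
 */
#if 0
static u16 example_key(void)
{
	u16 tc_index = 0x002c;	/* set earlier, e.g. by a qdisc */
	u16 mask = 0x00f0;	/* keep bits 4..7 */
	int shift = 4;		/* move them down to bits 0..3 */

	return (tc_index & mask) >> shift;	/* yields 0x2 */
}
#endif
/*
 * With this mask/shift pair the key space is 0..15, small enough for a
 * "perfect" hash table holding one slot per possible key.
 */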

static const struct tcf_ext_map tcindex_ext_map = {
	.police = TCA_TCINDEX_POLICE,
	.action = TCA_TCINDEX_ACT
};

static inline int
tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *
tcindex_lookup(struct tcindex_data *p, u16 key)
{
	struct tcindex_filter *f;

	if (p->perfect)
		return tcindex_filter_is_set(p->perfect + key) ?
			p->perfect + key : NULL;
	else if (p->h) {
		for (f = p->h[key % p->hash]; f; f = f->next)
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}
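
/*
 * Worked example, not part of the original file: with the imperfect hash,
 * distinct keys can share a bucket and are distinguished by the f->key
 * comparison above. With p->hash == 64, keys 5 and 69 both land in bucket 5
 * (5 % 64 == 69 % 64 == 5); the chain walk then returns only the entry
 * whose key matches exactly.
 */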


static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}
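
/*
 * Illustrative sketch, not part of the original file: how the fall-through
 * class ID above is assembled. TC_H_MAJ() keeps the upper 16 bits of a
 * handle, and TC_H_MAKE() splices a major and a minor number into one
 * 32-bit class ID (both from <linux/pkt_sched.h>). example_classid() is a
 * hypothetical helper using made-up values:
 */
#if 0
static u32 example_classid(void)
{
	u32 qdisc_handle = 0x00010000;	/* qdisc 1:0 */
	u32 key = 0x2;

	/* yields 0x00010002, i.e. class 1:2 */
	return TC_H_MAKE(TC_H_MAJ(qdisc_handle), key);
}
#endif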


static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return 0;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}


static void tcindex_put(struct tcf_proto *tp, unsigned long f)
{
	pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f);
}


static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	tp->root = p;
	return 0;
}


static int
__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;
		struct tcindex_filter **walk = NULL;

		for (i = 0; i < p->hash; i++)
			for (walk = p->h+i; *walk; walk = &(*walk)->next)
				if (&(*walk)->result == r)
					goto found;
		return -ENOENT;

found:
		f = *walk;
		if (lock)
			tcf_tree_lock(tp);
		*walk = f->next;
		if (lock)
			tcf_tree_unlock(tp);
	}
	tcf_unbind_filter(tp, &r->res);
	tcf_exts_destroy(tp, &r->exts);
	kfree(f);
	return 0;
}

static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
	return __tcindex_delete(tp, arg, 1);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}
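
/*
 * Worked example, not part of the original file: valid_perfect_hash()
 * requires one table slot for every key the mask/shift pair can produce.
 * With mask == 0x3c and shift == 2, the largest possible key is
 * 0x3c >> 2 == 0xf (15), so a perfect hash table is valid only when
 * hash > 15, i.e. at least 16 entries.
 */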

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int
tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
		  struct tcindex_data *p, struct tcindex_filter_result *r,
		  struct nlattr **tb, struct nlattr *est)
{
	int err, balloc = 0;
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data cp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map);
	if (err < 0)
		return err;

	memcpy(&cp, p, sizeof(cp));
	memset(&new_filter_result, 0, sizeof(new_filter_result));

	if (old_r)
		memcpy(&cr, r, sizeof(cr));
	else
		memset(&cr, 0, sizeof(cr));

	if (tb[TCA_TCINDEX_HASH])
		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;
	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp.perfect) {
		if (!valid_perfect_hash(&cp) ||
		    cp.hash > cp.alloc_hash)
			goto errout;
	} else if (cp.h && cp.hash != cp.alloc_hash)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp.hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
			cp.hash = (cp.mask >> cp.shift) + 1;
		else
			cp.hash = DEFAULT_HASH_SIZE;
	}

	if (!cp.perfect && !cp.h)
		cp.alloc_hash = cp.hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp.perfect || valid_perfect_hash(&cp))
		if (handle >= cp.alloc_hash)
			goto errout;


	err = -ENOMEM;
	if (!cp.perfect && !cp.h) {
		if (valid_perfect_hash(&cp)) {
			cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
			if (!cp.perfect)
				goto errout;
			balloc = 1;
		} else {
			cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
			if (!cp.h)
				goto errout;
			balloc = 2;
		}
	}

	if (cp.perfect)
		r = cp.perfect + handle;
	else
		r = tcindex_lookup(&cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	tcf_exts_change(tp, &cr.exts, &e);

	tcf_tree_lock(tp);
	if (old_r && old_r != r)
		memset(old_r, 0, sizeof(*old_r));

	memcpy(p, &cp, sizeof(cp));
	memcpy(r, &cr, sizeof(cr));

	if (r == &new_filter_result) {
		struct tcindex_filter **fp;

		f->key = handle;
		f->result = new_filter_result;
		f->next = NULL;
		for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next)
			/* nothing */;
		*fp = f;
	}
	tcf_tree_unlock(tp);

	return 0;

errout_alloc:
	if (balloc == 1)
		kfree(cp.perfect);
	else if (balloc == 2)
		kfree(cp.h);
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
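
/*
 * Illustrative sketch, not part of the original file: the hash-sizing rule
 * used above when the user supplies no TCA_TCINDEX_HASH attribute, pulled
 * out into a standalone hypothetical helper. E.g. mask == 0xf, shift == 0
 * gives 16 perfect-hash slots, while mask == 0xffff, shift == 0 falls back
 * to 64 chained buckets.
 */
#if 0
static int example_pick_hash_size(u16 mask, int shift)
{
	int max_key = mask >> shift;	/* upper limit of the key space */

	if (max_key < PERFECT_HASH_THRESHOLD)
		return max_key + 1;	/* perfect hash: a slot per key */
	return DEFAULT_HASH_SIZE;	/* chained buckets instead */
}
#endif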

static int
tcindex_change(struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, unsigned long *arg)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
	    "p %p,r %p,*arg 0x%lx\n",
	    tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
	if (err < 0)
		return err;

	return tcindex_set_parms(tp, base, handle, p, r, tb, tca[TCA_RATE]);
}


static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp,
				    (unsigned long) (p->perfect+i), walker)
				     < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = p->h[i]; f; f = next) {
			next = f->next;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, (unsigned long) &f->result,
				    walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}


static int tcindex_destroy_element(struct tcf_proto *tp,
    unsigned long arg, struct tcf_walker *walker)
{
	return __tcindex_delete(tp, arg, 0);
}


static void tcindex_destroy(struct tcf_proto *tp)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcf_walker walker;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = &tcindex_destroy_element;
	tcindex_walk(tp, &walker);
	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
	tp->root = NULL;
}


static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
    struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
		 tp, fh, skb, t, p, r, b);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r-p->perfect;
		} else {
			struct tcindex_filter *f;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				for (f = p->h[i]; !t->tcm_handle && f;
				     f = f->next) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.put		= tcindex_put,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");