Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ematch.c
Go to the documentation of this file.
1 /*
2  * net/sched/ematch.c Extended Match API
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version
7  * 2 of the License, or (at your option) any later version.
8  *
9  * Authors: Thomas Graf <[email protected]>
10  *
11  * ==========================================================================
12  *
13  * An extended match (ematch) is a small classification tool not worth
14  * writing a full classifier for. Ematches can be interconnected to form
15  * a logic expression and get attached to classifiers to extend their
16  * functionality.
17  *
18  * The userspace part transforms the logic expressions into an array
19  * consisting of multiple sequences of interconnected ematches separated
20  * by markers. Precedence is implemented by a special ematch kind
21  * referencing a sequence beyond the marker of the current sequence
22  * causing the current position in the sequence to be pushed onto a stack
23  * to allow the current position to be overwritten by the position referenced
24  * in the special ematch. Matching continues in the new sequence until a
25  * marker is reached causing the position to be restored from the stack.
26  *
27  * Example:
28  * A AND (B1 OR B2) AND C AND D
29  *
30  * ------->-PUSH-------
31  * -->-- / -->-- \ -->--
32  * / \ / / \ \ / \
33  * +-------+-------+-------+-------+-------+--------+
34  * | A AND | B AND | C AND | D END | B1 OR | B2 END |
35  * +-------+-------+-------+-------+-------+--------+
36  * \ /
37  * --------<-POP---------
38  *
39  * where B is a virtual ematch referencing the sequence starting with B1.
40  *
41  * ==========================================================================
42  *
43  * How to write an ematch in 60 seconds
44  * ------------------------------------
45  *
46  * 1) Provide a matcher function:
47  * static int my_match(struct sk_buff *skb, struct tcf_ematch *m,
48  * struct tcf_pkt_info *info)
49  * {
50  * struct mydata *d = (struct mydata *) m->data;
51  *
52  * if (...matching goes here...)
53  * return 1;
54  * else
55  * return 0;
56  * }
57  *
58  * 2) Fill out a struct tcf_ematch_ops:
59  * static struct tcf_ematch_ops my_ops = {
60  * .kind = unique id,
61  * .datalen = sizeof(struct mydata),
62  * .match = my_match,
63  * .owner = THIS_MODULE,
64  * };
65  *
66  * 3) Register/Unregister your ematch:
67  * static int __init init_my_ematch(void)
68  * {
69  * return tcf_em_register(&my_ops);
70  * }
71  *
72  * static void __exit exit_my_ematch(void)
73  * {
74  * tcf_em_unregister(&my_ops);
75  * }
76  *
77  * module_init(init_my_ematch);
78  * module_exit(exit_my_ematch);
79  *
80  * 4) By now you should have two more seconds left, barely enough to
81  * open up a beer to watch the compilation going.
82  */
83 
84 #include <linux/module.h>
85 #include <linux/slab.h>
86 #include <linux/types.h>
87 #include <linux/kernel.h>
88 #include <linux/errno.h>
89 #include <linux/rtnetlink.h>
90 #include <linux/skbuff.h>
91 #include <net/pkt_cls.h>
92 
93 static LIST_HEAD(ematch_ops);
94 static DEFINE_RWLOCK(ematch_mod_lock);
95 
96 static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
97 {
98  struct tcf_ematch_ops *e = NULL;
99 
100  read_lock(&ematch_mod_lock);
101  list_for_each_entry(e, &ematch_ops, link) {
102  if (kind == e->kind) {
103  if (!try_module_get(e->owner))
104  e = NULL;
105  read_unlock(&ematch_mod_lock);
106  return e;
107  }
108  }
109  read_unlock(&ematch_mod_lock);
110 
111  return NULL;
112 }
113 
126 int tcf_em_register(struct tcf_ematch_ops *ops)
127 {
128  int err = -EEXIST;
129  struct tcf_ematch_ops *e;
130 
131  if (ops->match == NULL)
132  return -EINVAL;
133 
134  write_lock(&ematch_mod_lock);
135  list_for_each_entry(e, &ematch_ops, link)
136  if (ops->kind == e->kind)
137  goto errout;
138 
139  list_add_tail(&ops->link, &ematch_ops);
140  err = 0;
141 errout:
142  write_unlock(&ematch_mod_lock);
143  return err;
144 }
146 
158 void tcf_em_unregister(struct tcf_ematch_ops *ops)
159 {
160  write_lock(&ematch_mod_lock);
161  list_del(&ops->link);
162  write_unlock(&ematch_mod_lock);
163 }
165 
166 static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
167  int index)
168 {
169  return &tree->matches[index];
170 }
171 
172 
173 static int tcf_em_validate(struct tcf_proto *tp,
174  struct tcf_ematch_tree_hdr *tree_hdr,
175  struct tcf_ematch *em, struct nlattr *nla, int idx)
176 {
177  int err = -EINVAL;
178  struct tcf_ematch_hdr *em_hdr = nla_data(nla);
179  int data_len = nla_len(nla) - sizeof(*em_hdr);
180  void *data = (void *) em_hdr + sizeof(*em_hdr);
181 
182  if (!TCF_EM_REL_VALID(em_hdr->flags))
183  goto errout;
184 
185  if (em_hdr->kind == TCF_EM_CONTAINER) {
186  /* Special ematch called "container", carries an index
187  * referencing an external ematch sequence.
188  */
189  u32 ref;
190 
191  if (data_len < sizeof(ref))
192  goto errout;
193  ref = *(u32 *) data;
194 
195  if (ref >= tree_hdr->nmatches)
196  goto errout;
197 
198  /* We do not allow backward jumps to avoid loops and jumps
199  * to our own position are of course illegal.
200  */
201  if (ref <= idx)
202  goto errout;
203 
204 
205  em->data = ref;
206  } else {
207  /* Note: This lookup will increase the module refcnt
208  * of the ematch module referenced. In case of a failure,
209  * a destroy function is called by the underlying layer
210  * which automatically releases the reference again, therefore
211  * the module MUST not be given back under any circumstances
212  * here. Be aware, the destroy function assumes that the
213  * module is held if the ops field is non zero.
214  */
215  em->ops = tcf_em_lookup(em_hdr->kind);
216 
217  if (em->ops == NULL) {
218  err = -ENOENT;
219 #ifdef CONFIG_MODULES
220  __rtnl_unlock();
221  request_module("ematch-kind-%u", em_hdr->kind);
222  rtnl_lock();
223  em->ops = tcf_em_lookup(em_hdr->kind);
224  if (em->ops) {
225  /* We dropped the RTNL mutex in order to
226  * perform the module load. Tell the caller
227  * to replay the request.
228  */
229  module_put(em->ops->owner);
230  err = -EAGAIN;
231  }
232 #endif
233  goto errout;
234  }
235 
236  /* ematch module provides expected length of data, so we
237  * can do a basic sanity check.
238  */
239  if (em->ops->datalen && data_len < em->ops->datalen)
240  goto errout;
241 
242  if (em->ops->change) {
243  err = em->ops->change(tp, data, data_len, em);
244  if (err < 0)
245  goto errout;
246  } else if (data_len > 0) {
247  /* ematch module doesn't provide an own change
248  * procedure and expects us to allocate and copy
249  * the ematch data.
250  *
251  * TCF_EM_SIMPLE may be specified stating that the
252  * data only consists of a u32 integer and the module
253  * does not expected a memory reference but rather
254  * the value carried.
255  */
256  if (em_hdr->flags & TCF_EM_SIMPLE) {
257  if (data_len < sizeof(u32))
258  goto errout;
259  em->data = *(u32 *) data;
260  } else {
261  void *v = kmemdup(data, data_len, GFP_KERNEL);
262  if (v == NULL) {
263  err = -ENOBUFS;
264  goto errout;
265  }
266  em->data = (unsigned long) v;
267  }
268  }
269  }
270 
271  em->matchid = em_hdr->matchid;
272  em->flags = em_hdr->flags;
273  em->datalen = data_len;
274 
275  err = 0;
276 errout:
277  return err;
278 }
279 
280 static const struct nla_policy em_policy[TCA_EMATCH_TREE_MAX + 1] = {
281  [TCA_EMATCH_TREE_HDR] = { .len = sizeof(struct tcf_ematch_tree_hdr) },
282  [TCA_EMATCH_TREE_LIST] = { .type = NLA_NESTED },
283 };
284 
301 int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
302  struct tcf_ematch_tree *tree)
303 {
304  int idx, list_len, matches_len, err;
305  struct nlattr *tb[TCA_EMATCH_TREE_MAX + 1];
306  struct nlattr *rt_match, *rt_hdr, *rt_list;
307  struct tcf_ematch_tree_hdr *tree_hdr;
308  struct tcf_ematch *em;
309 
310  memset(tree, 0, sizeof(*tree));
311  if (!nla)
312  return 0;
313 
314  err = nla_parse_nested(tb, TCA_EMATCH_TREE_MAX, nla, em_policy);
315  if (err < 0)
316  goto errout;
317 
318  err = -EINVAL;
319  rt_hdr = tb[TCA_EMATCH_TREE_HDR];
320  rt_list = tb[TCA_EMATCH_TREE_LIST];
321 
322  if (rt_hdr == NULL || rt_list == NULL)
323  goto errout;
324 
325  tree_hdr = nla_data(rt_hdr);
326  memcpy(&tree->hdr, tree_hdr, sizeof(*tree_hdr));
327 
328  rt_match = nla_data(rt_list);
329  list_len = nla_len(rt_list);
330  matches_len = tree_hdr->nmatches * sizeof(*em);
331 
332  tree->matches = kzalloc(matches_len, GFP_KERNEL);
333  if (tree->matches == NULL)
334  goto errout;
335 
336  /* We do not use nla_parse_nested here because the maximum
337  * number of attributes is unknown. This saves us the allocation
338  * for a tb buffer which would serve no purpose at all.
339  *
340  * The array of rt attributes is parsed in the order as they are
341  * provided, their type must be incremental from 1 to n. Even
342  * if it does not serve any real purpose, a failure of sticking
343  * to this policy will result in parsing failure.
344  */
345  for (idx = 0; nla_ok(rt_match, list_len); idx++) {
346  err = -EINVAL;
347 
348  if (rt_match->nla_type != (idx + 1))
349  goto errout_abort;
350 
351  if (idx >= tree_hdr->nmatches)
352  goto errout_abort;
353 
354  if (nla_len(rt_match) < sizeof(struct tcf_ematch_hdr))
355  goto errout_abort;
356 
357  em = tcf_em_get_match(tree, idx);
358 
359  err = tcf_em_validate(tp, tree_hdr, em, rt_match, idx);
360  if (err < 0)
361  goto errout_abort;
362 
363  rt_match = nla_next(rt_match, &list_len);
364  }
365 
366  /* Check if the number of matches provided by userspace actually
367  * complies with the array of matches. The number was used for
368  * the validation of references and a mismatch could lead to
369  * undefined references during the matching process.
370  */
371  if (idx != tree_hdr->nmatches) {
372  err = -EINVAL;
373  goto errout_abort;
374  }
375 
376  err = 0;
377 errout:
378  return err;
379 
380 errout_abort:
381  tcf_em_tree_destroy(tp, tree);
382  return err;
383 }
385 
397 {
398  int i;
399 
400  if (tree->matches == NULL)
401  return;
402 
403  for (i = 0; i < tree->hdr.nmatches; i++) {
404  struct tcf_ematch *em = tcf_em_get_match(tree, i);
405 
406  if (em->ops) {
407  if (em->ops->destroy)
408  em->ops->destroy(tp, em);
409  else if (!tcf_em_is_simple(em))
410  kfree((void *) em->data);
411  module_put(em->ops->owner);
412  }
413  }
414 
415  tree->hdr.nmatches = 0;
416  kfree(tree->matches);
417  tree->matches = NULL;
418 }
420 
434 {
435  int i;
436  u8 *tail;
437  struct nlattr *top_start;
438  struct nlattr *list_start;
439 
440  top_start = nla_nest_start(skb, tlv);
441  if (top_start == NULL)
442  goto nla_put_failure;
443 
444  if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
445  goto nla_put_failure;
446 
447  list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST);
448  if (list_start == NULL)
449  goto nla_put_failure;
450 
451  tail = skb_tail_pointer(skb);
452  for (i = 0; i < tree->hdr.nmatches; i++) {
453  struct nlattr *match_start = (struct nlattr *)tail;
454  struct tcf_ematch *em = tcf_em_get_match(tree, i);
455  struct tcf_ematch_hdr em_hdr = {
456  .kind = em->ops ? em->ops->kind : TCF_EM_CONTAINER,
457  .matchid = em->matchid,
458  .flags = em->flags
459  };
460 
461  if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
462  goto nla_put_failure;
463 
464  if (em->ops && em->ops->dump) {
465  if (em->ops->dump(skb, em) < 0)
466  goto nla_put_failure;
467  } else if (tcf_em_is_container(em) || tcf_em_is_simple(em)) {
468  u32 u = em->data;
469  nla_put_nohdr(skb, sizeof(u), &u);
470  } else if (em->datalen > 0)
471  nla_put_nohdr(skb, em->datalen, (void *) em->data);
472 
473  tail = skb_tail_pointer(skb);
474  match_start->nla_len = tail - (u8 *)match_start;
475  }
476 
477  nla_nest_end(skb, list_start);
478  nla_nest_end(skb, top_start);
479 
480  return 0;
481 
482 nla_put_failure:
483  return -1;
484 }
486 
487 static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
488  struct tcf_pkt_info *info)
489 {
490  int r = em->ops->match(skb, em, info);
491 
492  return tcf_em_is_inverted(em) ? !r : r;
493 }
494 
495 /* Do not use this function directly, use tcf_em_tree_match instead */
497  struct tcf_pkt_info *info)
498 {
499  int stackp = 0, match_idx = 0, res = 0;
500  struct tcf_ematch *cur_match;
501  int stack[CONFIG_NET_EMATCH_STACK];
502 
503 proceed:
504  while (match_idx < tree->hdr.nmatches) {
505  cur_match = tcf_em_get_match(tree, match_idx);
506 
507  if (tcf_em_is_container(cur_match)) {
508  if (unlikely(stackp >= CONFIG_NET_EMATCH_STACK))
509  goto stack_overflow;
510 
511  stack[stackp++] = match_idx;
512  match_idx = cur_match->data;
513  goto proceed;
514  }
515 
516  res = tcf_em_match(skb, cur_match, info);
517 
518  if (tcf_em_early_end(cur_match, res))
519  break;
520 
521  match_idx++;
522  }
523 
524 pop_stack:
525  if (stackp > 0) {
526  match_idx = stack[--stackp];
527  cur_match = tcf_em_get_match(tree, match_idx);
528 
529  if (tcf_em_early_end(cur_match, res))
530  goto pop_stack;
531  else {
532  match_idx++;
533  goto proceed;
534  }
535  }
536 
537  return res;
538 
539 stack_overflow:
540  net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n");
541  return -1;
542 }