Linux Kernel 3.7.1
caif_dev.c
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/[email protected]
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 * and Sakari Ailus <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

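/*
 * Each CAIF-capable net_device is tracked by one caif_device_entry: its
 * cflayer plugs the device into the CAIF stack, pcpu_refcnt is a per-CPU
 * reference count summed up by caifd_refcnt_read(), and xoff, xoff_skb and
 * xoff_skb_dtor hold the flow-control state guarded by flow_lock.  Entries
 * live on the per-namespace caif_device_entry_list, which is walked under
 * RCU and modified under its mutex.
 */
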
struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

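/*
 * The reference count is deliberately per-CPU: caifd_hold() and caifd_put()
 * only touch the local CPU's counter, which keeps them cheap on the receive
 * and transmit paths.  caifd_refcnt_read() sums the counters over all
 * possible CPUs and is used by the NETDEV_UNREGISTER handling and by
 * caif_exit_net() below to decide whether the entry can be freed.
 */
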
/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

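/*
 * caif_device_alloc() takes a reference on the net_device with dev_hold();
 * the matching dev_put() happens when the entry is torn down in the
 * NETDEV_UNREGISTER handler or in caif_exit_net().
 */
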
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

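/*
 * caif_get() walks the device list under RCU, so callers must hold
 * rcu_read_lock() (or caifdevs->lock) across the call and must take
 * caifd_hold() before dropping the read lock if the entry is used
 * afterwards, as receive() and dev_flowctrl() do.
 */
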
void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (caifd == NULL) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = 0;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}

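/*
 * caif_flow_cb() is not called directly from this file: transmit() below
 * installs it as skb->destructor when the TX queue backs up.  When the
 * driver eventually frees that skb, the callback restores and runs the
 * original destructor and signals FLOW_ON back up the stack, so traffic
 * halted by the earlier FLOW_OFF indication can resume.
 */
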
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->tx_queue_len == 0))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));

		if (likely(qlen == 0))
			goto noxoff;

		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (likely(qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
		 netif_queue_stopped(caifd->netdev),
		 qlen, high);
	caifd->xoff = 1;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

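/*
 * The xoff threshold used by transmit() is a percentage of the device's TX
 * queue length.  As an illustrative example, assuming a driver that sets
 * tx_queue_len = 1000: with the default q_high = 50,
 * high = (1000 * 50) / 100 = 500, so FLOW_OFF is signalled once the qdisc
 * backlog reaches 500 packets, or immediately if the driver has already
 * stopped the queue.
 */
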
/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
	    !netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

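/*
 * caif_packet_type is registered with dev_add_pack() in caif_device_init()
 * below; the networking core then passes every incoming skb carrying
 * protocol ETH_P_CAIF to receive(), regardless of interface, and receive()
 * filters against the enrolled devices via caif_get().
 */
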
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer, int (**rcv_func)(
				struct sk_buff *, struct net_device *,
				struct packet_type *, struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strncpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name) - 1);
	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
			     dev,
			     &caifd->layer,
			     pref,
			     link_support,
			     caifdev->use_fcs,
			     head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);

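/*
 * A CAIF link-layer driver is expected to place struct caif_dev_common at
 * the start of its netdev private area and fill it in before registering
 * the device, so that the NETDEV_REGISTER handler below can enroll it.
 * A minimal sketch (illustrative only; my_caif_setup() is a hypothetical
 * helper, the fields come from <net/caif/caif_device.h>):
 *
 *	static void my_caif_setup(struct net_device *dev)
 *	{
 *		struct caif_dev_common *common = netdev_priv(dev);
 *
 *		dev->type = ARPHRD_CAIF;
 *		dev->tx_queue_len = 1000;
 *		common->link_select = CAIF_LINK_LOW_LATENCY;
 *		common->use_frag = true;
 *		common->use_stx = true;
 *		common->use_fcs = true;
 *	}
 *
 * After register_netdev(), caif_device_notify() stores dev_flowctrl() in
 * common->flowctrl so the driver can raise and lower flow control itself.
 */
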
/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = 0;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock so manipulating the skb->destructor here
		 * should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = 0;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
		    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Enroll device if CAIF Stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

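/*
 * caif_device_notify() is hooked into the netdevice notifier chain by
 * caif_device_init() below.  Only ARPHRD_CAIF devices (or devices already
 * enrolled) are handled: NETDEV_REGISTER enrolls the device and optionally
 * stacks a cfserl fragmentation layer, NETDEV_UP and NETDEV_DOWN toggle the
 * PHY state in the CAIF configuration, and NETDEV_UNREGISTER frees the
 * entry once no references and no in-flight packets remain.
 */
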
static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id = &caif_net_id,
	.size = sizeof(struct caif_net),
};

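/*
 * Because caif_net_ops sets both .id and .size, register_pernet_subsys()
 * allocates one struct caif_net per network namespace; get_cfcnfg() and
 * caif_device_list() above look it up with net_generic(net, caif_net_id).
 */
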
/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);
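
/*
 * Teardown runs in the reverse order of caif_device_init(): the netdevice
 * notifier and the ETH_P_CAIF packet handler are removed first, then
 * unregister_pernet_subsys() invokes caif_exit_net() for every namespace,
 * which unhooks and frees any CAIF devices still enrolled.
 */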