Linux Kernel 3.7.1
bcm.c
1 /*
2  * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3  *
4  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  * notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  * notice, this list of conditions and the following disclaimer in the
14  * documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of Volkswagen nor the names of its contributors
16  * may be used to endorse or promote products derived from this software
17  * without specific prior written permission.
18  *
19  * Alternatively, provided that this notice is retained in full, this
20  * software may be distributed under the terms of the GNU General
21  * Public License ("GPL") version 2, in which case the provisions of the
22  * GPL apply INSTEAD OF those given above.
23  *
24  * The provided data structures and external interfaces from this code
25  * are not restricted to be used by modules with a GPL compatible license.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38  * DAMAGE.
39  *
40  */
41 
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/interrupt.h>
45 #include <linux/hrtimer.h>
46 #include <linux/list.h>
47 #include <linux/proc_fs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uio.h>
50 #include <linux/net.h>
51 #include <linux/netdevice.h>
52 #include <linux/socket.h>
53 #include <linux/if_arp.h>
54 #include <linux/skbuff.h>
55 #include <linux/can.h>
56 #include <linux/can/core.h>
57 #include <linux/can/bcm.h>
58 #include <linux/slab.h>
59 #include <net/sock.h>
60 #include <net/net_namespace.h>
61 
62 /*
63  * To send multiple CAN frame content within TX_SETUP or to filter
64  * CAN messages with multiplex index within RX_SETUP, the number of
65  * different filters is limited to 256 due to the one byte index value.
66  */
67 #define MAX_NFRAMES 256
68 
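/*
 * A minimal userspace sketch of the message layout implied above (the
 * struct names come from <linux/can/bcm.h>; the fixed array size of 4
 * frames is only an illustrative assumption):
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame[4];	// up to MAX_NFRAMES entries
 *	} msg;
 *
 * The bcm_msg_head is always followed by msg_head.nframes consecutive
 * struct can_frame elements - for TX_SETUP the frames to send, for
 * RX_SETUP the multiplex mask in frame[0] and content filters in
 * frame[1..].
 */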
69 /* use of last_frames[index].can_dlc */
70 #define RX_RECV 0x40 /* received data for this element */
71 #define RX_THR 0x80 /* element has not been sent due to the throttle feature */
72 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
73 
74 /* get best masking value for can_rx_register() for a given single can_id */
75 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
76  (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
77  (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
78 
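/*
 * Two worked examples of the mask above (using the CAN_*_MASK/_FLAG
 * values from <linux/can.h>):
 *
 *	REGMASK(0x123)                = 0xC00007FF  (SFF id -> SFF mask)
 *	REGMASK(0x123 | CAN_EFF_FLAG) = 0xDFFFFFFF  (EFF id -> EFF mask)
 *
 * In both cases the EFF and RTR flag bits stay set in the mask, so the
 * filter registered with can_rx_register() only delivers frames carrying
 * exactly the given id with matching EFF/RTR flags.
 */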
79 #define CAN_BCM_VERSION CAN_VERSION
80 static __initconst const char banner[] = KERN_INFO
81  "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
82 
83 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
84 MODULE_LICENSE("Dual BSD/GPL");
85 MODULE_AUTHOR("Oliver Hartkopp <[email protected]>");
86 MODULE_ALIAS("can-proto-2");
87 
88 /* easy access to can_frame payload */
89 static inline u64 GET_U64(const struct can_frame *cp)
90 {
91  return *(u64 *)cp->data;
92 }
93 
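/*
 * GET_U64() lets the receive path compare all 8 data bytes in one bitwise
 * operation. A small worked example of the "relevant change" test used in
 * bcm_rx_cmp_to_index() below (the byte values are illustrative):
 *
 *	mask (op->frames[i].data) : 00 FF 00 00 00 00 00 00
 *	last received data        : 00 12 00 00 00 00 00 00
 *	newly received data       : 47 13 00 00 00 00 00 00
 *
 *	(mask & new) != (mask & last)  ->  byte 1 changed within the mask,
 *	so an RX_CHANGED notification is generated; the change in byte 0
 *	lies outside the mask and would be ignored on its own.
 */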
94 struct bcm_op {
95  struct list_head list;
96  int ifindex;
97  canid_t can_id;
98  u32 flags;
99  unsigned long frames_abs, frames_filtered;
100  struct timeval ival1, ival2;
101  struct hrtimer timer, thrtimer;
102  struct tasklet_struct tsklet, thrtsklet;
103  ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
104  int rx_ifindex;
105  u32 count;
106  u32 nframes;
107  u32 currframe;
108  struct can_frame *frames;
109  struct can_frame *last_frames;
110  struct can_frame sframe;
111  struct can_frame last_sframe;
112  struct sock *sk;
113  struct net_device *rx_reg_dev;
114 };
115 
116 static struct proc_dir_entry *proc_dir;
117 
118 struct bcm_sock {
119  struct sock sk;
120  int bound;
121  int ifindex;
122  struct notifier_block notifier;
123  struct list_head rx_ops;
124  struct list_head tx_ops;
125  unsigned long dropped_usr_msgs;
126  struct proc_dir_entry *bcm_proc_read;
127  char procname [32]; /* inode number in decimal with \0 */
128 };
129 
130 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
131 {
132  return (struct bcm_sock *)sk;
133 }
134 
135 #define CFSIZ sizeof(struct can_frame)
136 #define OPSIZ sizeof(struct bcm_op)
137 #define MHSIZ sizeof(struct bcm_msg_head)
138 
139 /*
140  * procfs functions
141  */
142 static char *bcm_proc_getifname(char *result, int ifindex)
143 {
144  struct net_device *dev;
145 
146  if (!ifindex)
147  return "any";
148 
149  rcu_read_lock();
150  dev = dev_get_by_index_rcu(&init_net, ifindex);
151  if (dev)
152  strcpy(result, dev->name);
153  else
154  strcpy(result, "???");
155  rcu_read_unlock();
156 
157  return result;
158 }
159 
160 static int bcm_proc_show(struct seq_file *m, void *v)
161 {
162  char ifname[IFNAMSIZ];
163  struct sock *sk = (struct sock *)m->private;
164  struct bcm_sock *bo = bcm_sk(sk);
165  struct bcm_op *op;
166 
167  seq_printf(m, ">>> socket %pK", sk->sk_socket);
168  seq_printf(m, " / sk %pK", sk);
169  seq_printf(m, " / bo %pK", bo);
170  seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
171  seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
172  seq_printf(m, " <<<\n");
173 
174  list_for_each_entry(op, &bo->rx_ops, list) {
175 
176  unsigned long reduction;
177 
178  /* print only active entries & prevent division by zero */
179  if (!op->frames_abs)
180  continue;
181 
182  seq_printf(m, "rx_op: %03X %-5s ",
183  op->can_id, bcm_proc_getifname(ifname, op->ifindex));
184  seq_printf(m, "[%u]%c ", op->nframes,
185  (op->flags & RX_CHECK_DLC)?'d':' ');
186  if (op->kt_ival1.tv64)
187  seq_printf(m, "timeo=%lld ",
188  (long long)
189  ktime_to_us(op->kt_ival1));
190 
191  if (op->kt_ival2.tv64)
192  seq_printf(m, "thr=%lld ",
193  (long long)
194  ktime_to_us(op->kt_ival2));
195 
196  seq_printf(m, "# recv %ld (%ld) => reduction: ",
197  op->frames_filtered, op->frames_abs);
198 
199  reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
200 
201  seq_printf(m, "%s%ld%%\n",
202  (reduction == 100)?"near ":"", reduction);
203  }
204 
205  list_for_each_entry(op, &bo->tx_ops, list) {
206 
207  seq_printf(m, "tx_op: %03X %s [%u] ",
208  op->can_id,
209  bcm_proc_getifname(ifname, op->ifindex),
210  op->nframes);
211 
212  if (op->kt_ival1.tv64)
213  seq_printf(m, "t1=%lld ",
214  (long long) ktime_to_us(op->kt_ival1));
215 
216  if (op->kt_ival2.tv64)
217  seq_printf(m, "t2=%lld ",
218  (long long) ktime_to_us(op->kt_ival2));
219 
220  seq_printf(m, "# sent %ld\n", op->frames_abs);
221  }
222  seq_putc(m, '\n');
223  return 0;
224 }
225 
226 static int bcm_proc_open(struct inode *inode, struct file *file)
227 {
228  return single_open(file, bcm_proc_show, PDE(inode)->data);
229 }
230 
231 static const struct file_operations bcm_proc_fops = {
232  .owner = THIS_MODULE,
233  .open = bcm_proc_open,
234  .read = seq_read,
235  .llseek = seq_lseek,
236  .release = single_release,
237 };
238 
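/*
 * The resulting procfs output (one file per BCM socket under
 * /proc/net/can-bcm/, named after the socket inode number) looks roughly
 * like this sample, reconstructed from the format strings above; the
 * pointer and counter values are placeholders:
 *
 *	>>> socket f6c6f800 / sk f6c4e000 / bo f6c4e1c0 / dropped 0 / bound can0 <<<
 *	rx_op: 123 can0  [2]  timeo=5000000 thr=100000 # recv 7 (120) => reduction: 95%
 *	tx_op: 456 can0 [1] t2=100000 # sent 42
 */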
239 /*
240  * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
241  * of the given bcm tx op
242  */
243 static void bcm_can_tx(struct bcm_op *op)
244 {
245  struct sk_buff *skb;
246  struct net_device *dev;
247  struct can_frame *cf = &op->frames[op->currframe];
248 
249  /* no target device? => exit */
250  if (!op->ifindex)
251  return;
252 
253  dev = dev_get_by_index(&init_net, op->ifindex);
254  if (!dev) {
255  /* RFC: should this bcm_op remove itself here? */
256  return;
257  }
258 
259  skb = alloc_skb(CFSIZ, gfp_any());
260  if (!skb)
261  goto out;
262 
263  memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
264 
265  /* send with loopback */
266  skb->dev = dev;
267  skb->sk = op->sk;
268  can_send(skb, 1);
269 
270  /* update statistics */
271  op->currframe++;
272  op->frames_abs++;
273 
274  /* reached last frame? */
275  if (op->currframe >= op->nframes)
276  op->currframe = 0;
277  out:
278  dev_put(dev);
279 }
280 
281 /*
282  * bcm_send_to_user - send a BCM message to the userspace
283  * (consisting of bcm_msg_head + x CAN frames)
284  */
285 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
286  struct can_frame *frames, int has_timestamp)
287 {
288  struct sk_buff *skb;
289  struct can_frame *firstframe;
290  struct sockaddr_can *addr;
291  struct sock *sk = op->sk;
292  unsigned int datalen = head->nframes * CFSIZ;
293  int err;
294 
295  skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
296  if (!skb)
297  return;
298 
299  memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
300 
301  if (head->nframes) {
302  /* can_frames starting here */
303  firstframe = (struct can_frame *)skb_tail_pointer(skb);
304 
305  memcpy(skb_put(skb, datalen), frames, datalen);
306 
307  /*
308  * the BCM uses the can_dlc-element of the can_frame
309  * structure for internal purposes. This is only
310  * relevant for updates that are generated by the
311  * BCM, where nframes is 1
312  */
313  if (head->nframes == 1)
314  firstframe->can_dlc &= BCM_CAN_DLC_MASK;
315  }
316 
317  if (has_timestamp) {
318  /* restore rx timestamp */
319  skb->tstamp = op->rx_stamp;
320  }
321 
322  /*
323  * Put the datagram to the queue so that bcm_recvmsg() can
324  * get it from there. We need to pass the interface index to
325  * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
326  * containing the interface index.
327  */
328 
329  BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
330  addr = (struct sockaddr_can *)skb->cb;
331  memset(addr, 0, sizeof(*addr));
332  addr->can_family = AF_CAN;
333  addr->can_ifindex = op->rx_ifindex;
334 
335  err = sock_queue_rcv_skb(sk, skb);
336  if (err < 0) {
337  struct bcm_sock *bo = bcm_sk(sk);
338 
339  kfree_skb(skb);
340  /* don't care about overflows in this statistic */
341  bo->dropped_usr_msgs++;
342  }
343 }
344 
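/*
 * On the userspace side such a datagram is consumed with an ordinary
 * read()/recvfrom() on the BCM socket; a minimal sketch, assuming a
 * connected socket descriptor 's':
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} rxmsg;
 *
 *	if (read(s, &rxmsg, sizeof(rxmsg)) >= (int)sizeof(rxmsg.msg_head)) {
 *		if (rxmsg.msg_head.opcode == RX_CHANGED)
 *			;	// rxmsg.frame holds the changed CAN frame
 *		else if (rxmsg.msg_head.opcode == RX_TIMEOUT)
 *			;	// cyclic reception timed out, no frame attached
 *	}
 *
 * recvfrom() additionally yields the originating interface index via the
 * sockaddr_can placed in skb->cb above.
 */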
345 static void bcm_tx_start_timer(struct bcm_op *op)
346 {
347  if (op->kt_ival1.tv64 && op->count)
348  hrtimer_start(&op->timer,
349  ktime_add(ktime_get(), op->kt_ival1),
350  HRTIMER_MODE_ABS);
351  else if (op->kt_ival2.tv64)
352  hrtimer_start(&op->timer,
353  ktime_add(ktime_get(), op->kt_ival2),
354  HRTIMER_MODE_ABS);
355 }
356 
357 static void bcm_tx_timeout_tsklet(unsigned long data)
358 {
359  struct bcm_op *op = (struct bcm_op *)data;
360  struct bcm_msg_head msg_head;
361 
362  if (op->kt_ival1.tv64 && (op->count > 0)) {
363 
364  op->count--;
365  if (!op->count && (op->flags & TX_COUNTEVT)) {
366 
367  /* create notification to user */
368  msg_head.opcode = TX_EXPIRED;
369  msg_head.flags = op->flags;
370  msg_head.count = op->count;
371  msg_head.ival1 = op->ival1;
372  msg_head.ival2 = op->ival2;
373  msg_head.can_id = op->can_id;
374  msg_head.nframes = 0;
375 
376  bcm_send_to_user(op, &msg_head, NULL, 0);
377  }
378  bcm_can_tx(op);
379 
380  } else if (op->kt_ival2.tv64)
381  bcm_can_tx(op);
382 
383  bcm_tx_start_timer(op);
384 }
385 
386 /*
387  * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
388  */
389 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
390 {
391  struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
392 
393  tasklet_schedule(&op->tsklet);
394 
395  return HRTIMER_NORESTART;
396 }
397 
398 /*
399  * bcm_rx_changed - create a RX_CHANGED notification due to changed content
400  */
401 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
402 {
403  struct bcm_msg_head head;
404 
405  /* update statistics */
406  op->frames_filtered++;
407 
408  /* prevent statistics overflow */
409  if (op->frames_filtered > ULONG_MAX/100)
410  op->frames_filtered = op->frames_abs = 0;
411 
412  /* this element is not throttled anymore */
413  data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
414 
415  head.opcode = RX_CHANGED;
416  head.flags = op->flags;
417  head.count = op->count;
418  head.ival1 = op->ival1;
419  head.ival2 = op->ival2;
420  head.can_id = op->can_id;
421  head.nframes = 1;
422 
423  bcm_send_to_user(op, &head, data, 1);
424 }
425 
426 /*
427  * bcm_rx_update_and_send - process a detected relevant receive content change
428  * 1. update the last received data
429  * 2. send a notification to the user (if possible)
430  */
431 static void bcm_rx_update_and_send(struct bcm_op *op,
432  struct can_frame *lastdata,
433  const struct can_frame *rxdata)
434 {
435  memcpy(lastdata, rxdata, CFSIZ);
436 
437  /* mark as used and throttled by default */
438  lastdata->can_dlc |= (RX_RECV|RX_THR);
439 
440  /* throttling mode inactive? */
441  if (!op->kt_ival2.tv64) {
442  /* send RX_CHANGED to the user immediately */
443  bcm_rx_changed(op, lastdata);
444  return;
445  }
446 
447  /* with active throttling timer we are just done here */
448  if (hrtimer_active(&op->thrtimer))
449  return;
450 
451  /* first reception with throttling mode enabled */
452  if (!op->kt_lastmsg.tv64)
453  goto rx_changed_settime;
454 
455  /* got a second frame inside a potential throttle period? */
456  if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
457  ktime_to_us(op->kt_ival2)) {
458  /* do not send the saved data - only start throttle timer */
459  hrtimer_start(&op->thrtimer,
460  ktime_add(op->kt_lastmsg, op->kt_ival2),
461  HRTIMER_MODE_ABS);
462  return;
463  }
464 
465  /* the gap was big enough that throttling was not needed here */
466 rx_changed_settime:
467  bcm_rx_changed(op, lastdata);
468  op->kt_lastmsg = ktime_get();
469 }
470 
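/*
 * A worked timing example for the throttling above (the values are only
 * illustrative): with ival2 = 100 ms and changing frames arriving every
 * 10 ms, the first frame is delivered immediately (kt_lastmsg was still
 * unset), the following frames only mark the data with RX_THR and arm
 * thrtimer, so at most one RX_CHANGED per 100 ms reaches the socket
 * until the traffic stops and bcm_rx_thr_handler() resets kt_lastmsg.
 */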
471 /*
472  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
473  * received data stored in op->last_frames[]
474  */
475 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
476  const struct can_frame *rxdata)
477 {
478  /*
479  * no one uses the MSBs of can_dlc for comparison,
480  * so we use it here to detect the first time of reception
481  */
482 
483  if (!(op->last_frames[index].can_dlc & RX_RECV)) {
484  /* received data for the first time => send update to user */
485  bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
486  return;
487  }
488 
489  /* do a real check in can_frame data section */
490 
491  if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
492  (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
493  bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
494  return;
495  }
496 
497  if (op->flags & RX_CHECK_DLC) {
498  /* do a real check in can_frame dlc */
499  if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
500  BCM_CAN_DLC_MASK)) {
501  bcm_rx_update_and_send(op, &op->last_frames[index],
502  rxdata);
503  return;
504  }
505  }
506 }
507 
508 /*
509  * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
510  */
511 static void bcm_rx_starttimer(struct bcm_op *op)
512 {
513  if (op->flags & RX_NO_AUTOTIMER)
514  return;
515 
516  if (op->kt_ival1.tv64)
517  hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
518 }
519 
520 static void bcm_rx_timeout_tsklet(unsigned long data)
521 {
522  struct bcm_op *op = (struct bcm_op *)data;
523  struct bcm_msg_head msg_head;
524 
525  /* create notification to user */
526  msg_head.opcode = RX_TIMEOUT;
527  msg_head.flags = op->flags;
528  msg_head.count = op->count;
529  msg_head.ival1 = op->ival1;
530  msg_head.ival2 = op->ival2;
531  msg_head.can_id = op->can_id;
532  msg_head.nframes = 0;
533 
534  bcm_send_to_user(op, &msg_head, NULL, 0);
535 }
536 
537 /*
538  * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
539  */
540 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
541 {
542  struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
543 
544  /* schedule before NET_RX_SOFTIRQ */
545  tasklet_hi_schedule(&op->tsklet);
546 
547  /* no restart of the timer is done here! */
548 
549  /* if the user wants to be informed when cyclic CAN messages come back */
550  if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
551  /* clear received can_frames to indicate 'nothing received' */
552  memset(op->last_frames, 0, op->nframes * CFSIZ);
553  }
554 
555  return HRTIMER_NORESTART;
556 }
557 
558 /*
559  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
560  */
561 static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
562  unsigned int index)
563 {
564  if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
565  if (update)
566  bcm_rx_changed(op, &op->last_frames[index]);
567  return 1;
568  }
569  return 0;
570 }
571 
572 /*
573  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
574  *
575  * update == 0 : just check if throttled data is available (any irq context)
576  * update == 1 : check and send throttled data to userspace (soft_irq context)
577  */
578 static int bcm_rx_thr_flush(struct bcm_op *op, int update)
579 {
580  int updated = 0;
581 
582  if (op->nframes > 1) {
583  unsigned int i;
584 
585  /* for MUX filter we start at index 1 */
586  for (i = 1; i < op->nframes; i++)
587  updated += bcm_rx_do_flush(op, update, i);
588 
589  } else {
590  /* for RX_FILTER_ID and simple filter */
591  updated += bcm_rx_do_flush(op, update, 0);
592  }
593 
594  return updated;
595 }
596 
597 static void bcm_rx_thr_tsklet(unsigned long data)
598 {
599  struct bcm_op *op = (struct bcm_op *)data;
600 
601  /* push the changed data to the userspace */
602  bcm_rx_thr_flush(op, 1);
603 }
604 
605 /*
606  * bcm_rx_thr_handler - the time for blocked content updates is over now:
607  * Check for throttled data and send it to the userspace
608  */
609 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
610 {
611  struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
612 
613  tasklet_schedule(&op->thrtsklet);
614 
615  if (bcm_rx_thr_flush(op, 0)) {
616  hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
617  return HRTIMER_RESTART;
618  } else {
619  /* rearm throttle handling */
620  op->kt_lastmsg = ktime_set(0, 0);
621  return HRTIMER_NORESTART;
622  }
623 }
624 
625 /*
626  * bcm_rx_handler - handle a CAN frame reception
627  */
628 static void bcm_rx_handler(struct sk_buff *skb, void *data)
629 {
630  struct bcm_op *op = (struct bcm_op *)data;
631  const struct can_frame *rxframe = (struct can_frame *)skb->data;
632  unsigned int i;
633 
634  /* disable timeout */
635  hrtimer_cancel(&op->timer);
636 
637  if (op->can_id != rxframe->can_id)
638  return;
639 
640  /* save rx timestamp */
641  op->rx_stamp = skb->tstamp;
642  /* save originator for recvfrom() */
643  op->rx_ifindex = skb->dev->ifindex;
644  /* update statistics */
645  op->frames_abs++;
646 
647  if (op->flags & RX_RTR_FRAME) {
648  /* send reply for RTR-request (placed in op->frames[0]) */
649  bcm_can_tx(op);
650  return;
651  }
652 
653  if (op->flags & RX_FILTER_ID) {
654  /* the easiest case */
655  bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
656  goto rx_starttimer;
657  }
658 
659  if (op->nframes == 1) {
660  /* simple compare with index 0 */
661  bcm_rx_cmp_to_index(op, 0, rxframe);
662  goto rx_starttimer;
663  }
664 
665  if (op->nframes > 1) {
666  /*
667  * multiplex compare
668  *
669  * find the first multiplex mask that fits.
670  * Remark: The MUX-mask is stored in index 0
671  */
672 
673  for (i = 1; i < op->nframes; i++) {
674  if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
675  (GET_U64(&op->frames[0]) &
676  GET_U64(&op->frames[i]))) {
677  bcm_rx_cmp_to_index(op, i, rxframe);
678  break;
679  }
680  }
681  }
682 
683 rx_starttimer:
684  bcm_rx_starttimer(op);
685 }
686 
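/*
 * A minimal userspace sketch of an RX_SETUP that uses the multiplex
 * compare above (socket descriptor 's' and the chosen mask bytes are
 * illustrative assumptions):
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame[3];
 *	} rxmsg;
 *
 *	memset(&rxmsg, 0, sizeof(rxmsg));
 *	rxmsg.msg_head.opcode  = RX_SETUP;
 *	rxmsg.msg_head.can_id  = 0x123;
 *	rxmsg.msg_head.nframes = 3;
 *	rxmsg.frame[0].data[0] = 0xFF;	// MUX mask: data[0] selects the index
 *	rxmsg.frame[1].data[0] = 0x01;	// matches frames with MUX value 0x01
 *	rxmsg.frame[1].data[1] = 0xFF;	// ... and monitors data[1] for changes
 *	rxmsg.frame[2].data[0] = 0x02;	// matches frames with MUX value 0x02
 *	rxmsg.frame[2].data[1] = 0xFF;	// ... and monitors data[1] for changes
 *	write(s, &rxmsg, sizeof(rxmsg));
 */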
687 /*
688  * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
689  */
690 static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
691  int ifindex)
692 {
693  struct bcm_op *op;
694 
695  list_for_each_entry(op, ops, list) {
696  if ((op->can_id == can_id) && (op->ifindex == ifindex))
697  return op;
698  }
699 
700  return NULL;
701 }
702 
703 static void bcm_remove_op(struct bcm_op *op)
704 {
705  hrtimer_cancel(&op->timer);
706  hrtimer_cancel(&op->thrtimer);
707 
708  if (op->tsklet.func)
709  tasklet_kill(&op->tsklet);
710 
711  if (op->thrtsklet.func)
712  tasklet_kill(&op->thrtsklet);
713 
714  if ((op->frames) && (op->frames != &op->sframe))
715  kfree(op->frames);
716 
717  if ((op->last_frames) && (op->last_frames != &op->last_sframe))
718  kfree(op->last_frames);
719 
720  kfree(op);
721 }
722 
723 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
724 {
725  if (op->rx_reg_dev == dev) {
726  can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
727  bcm_rx_handler, op);
728 
729  /* mark as removed subscription */
730  op->rx_reg_dev = NULL;
731  } else
732  printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
733  "mismatch %p %p\n", op->rx_reg_dev, dev);
734 }
735 
736 /*
737  * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
738  */
739 static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
740 {
741  struct bcm_op *op, *n;
742 
743  list_for_each_entry_safe(op, n, ops, list) {
744  if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
745 
746  /*
747  * Don't care if we're bound or not (due to netdev
748  * problems), can_rx_unregister() is always a safe
749  * thing to do here.
750  */
751  if (op->ifindex) {
752  /*
753  * Only remove subscriptions that had not
754  * been removed due to NETDEV_UNREGISTER
755  * in bcm_notifier()
756  */
757  if (op->rx_reg_dev) {
758  struct net_device *dev;
759 
760  dev = dev_get_by_index(&init_net,
761  op->ifindex);
762  if (dev) {
763  bcm_rx_unreg(dev, op);
764  dev_put(dev);
765  }
766  }
767  } else
768  can_rx_unregister(NULL, op->can_id,
769  REGMASK(op->can_id),
770  bcm_rx_handler, op);
771 
772  list_del(&op->list);
773  bcm_remove_op(op);
774  return 1; /* done */
775  }
776  }
777 
778  return 0; /* not found */
779 }
780 
781 /*
782  * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
783  */
784 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
785 {
786  struct bcm_op *op, *n;
787 
788  list_for_each_entry_safe(op, n, ops, list) {
789  if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
790  list_del(&op->list);
791  bcm_remove_op(op);
792  return 1; /* done */
793  }
794  }
795 
796  return 0; /* not found */
797 }
798 
799 /*
800  * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
801  */
802 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
803  int ifindex)
804 {
805  struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
806 
807  if (!op)
808  return -EINVAL;
809 
810  /* put current values into msg_head */
811  msg_head->flags = op->flags;
812  msg_head->count = op->count;
813  msg_head->ival1 = op->ival1;
814  msg_head->ival2 = op->ival2;
815  msg_head->nframes = op->nframes;
816 
817  bcm_send_to_user(op, msg_head, op->frames, 0);
818 
819  return MHSIZ;
820 }
821 
822 /*
823  * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
824  */
825 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
826  int ifindex, struct sock *sk)
827 {
828  struct bcm_sock *bo = bcm_sk(sk);
829  struct bcm_op *op;
830  unsigned int i;
831  int err;
832 
833  /* we need a real device to send frames */
834  if (!ifindex)
835  return -ENODEV;
836 
837  /* check nframes boundaries - we need at least one can_frame */
838  if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
839  return -EINVAL;
840 
841  /* check the given can_id */
842  op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
843 
844  if (op) {
845  /* update existing BCM operation */
846 
847  /*
848  * Do we need more space for the can_frames than currently
849  * allocated? -> This is a _really_ unusual use-case and
850  * therefore (complexity / locking) it is not supported.
851  */
852  if (msg_head->nframes > op->nframes)
853  return -E2BIG;
854 
855  /* update can_frames content */
856  for (i = 0; i < msg_head->nframes; i++) {
857  err = memcpy_fromiovec((u8 *)&op->frames[i],
858  msg->msg_iov, CFSIZ);
859 
860  if (op->frames[i].can_dlc > 8)
861  err = -EINVAL;
862 
863  if (err < 0)
864  return err;
865 
866  if (msg_head->flags & TX_CP_CAN_ID) {
867  /* copy can_id into frame */
868  op->frames[i].can_id = msg_head->can_id;
869  }
870  }
871 
872  } else {
873  /* insert new BCM operation for the given can_id */
874 
875  op = kzalloc(OPSIZ, GFP_KERNEL);
876  if (!op)
877  return -ENOMEM;
878 
879  op->can_id = msg_head->can_id;
880 
881  /* create array for can_frames and copy the data */
882  if (msg_head->nframes > 1) {
883  op->frames = kmalloc(msg_head->nframes * CFSIZ,
884  GFP_KERNEL);
885  if (!op->frames) {
886  kfree(op);
887  return -ENOMEM;
888  }
889  } else
890  op->frames = &op->sframe;
891 
892  for (i = 0; i < msg_head->nframes; i++) {
893  err = memcpy_fromiovec((u8 *)&op->frames[i],
894  msg->msg_iov, CFSIZ);
895 
896  if (op->frames[i].can_dlc > 8)
897  err = -EINVAL;
898 
899  if (err < 0) {
900  if (op->frames != &op->sframe)
901  kfree(op->frames);
902  kfree(op);
903  return err;
904  }
905 
906  if (msg_head->flags & TX_CP_CAN_ID) {
907  /* copy can_id into frame */
908  op->frames[i].can_id = msg_head->can_id;
909  }
910  }
911 
912  /* tx_ops never compare with previous received messages */
913  op->last_frames = NULL;
914 
915  /* bcm_can_tx / bcm_tx_timeout_handler needs this */
916  op->sk = sk;
917  op->ifindex = ifindex;
918 
919  /* initialize uninitialized (kzalloc) structure */
920  hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
921  op->timer.function = bcm_tx_timeout_handler;
922 
923  /* initialize tasklet for tx countevent notification */
924  tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
925  (unsigned long) op);
926 
927  /* currently unused in tx_ops */
928  hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
929 
930  /* add this bcm_op to the list of the tx_ops */
931  list_add(&op->list, &bo->tx_ops);
932 
933  } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
934 
935  if (op->nframes != msg_head->nframes) {
936  op->nframes = msg_head->nframes;
937  /* start multiple frame transmission with index 0 */
938  op->currframe = 0;
939  }
940 
941  /* check flags */
942 
943  op->flags = msg_head->flags;
944 
945  if (op->flags & TX_RESET_MULTI_IDX) {
946  /* start multiple frame transmission with index 0 */
947  op->currframe = 0;
948  }
949 
950  if (op->flags & SETTIMER) {
951  /* set timer values */
952  op->count = msg_head->count;
953  op->ival1 = msg_head->ival1;
954  op->ival2 = msg_head->ival2;
955  op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
956  op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
957 
958  /* disable an active timer due to zero values? */
959  if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
960  hrtimer_cancel(&op->timer);
961  }
962 
963  if (op->flags & STARTTIMER) {
964  hrtimer_cancel(&op->timer);
965  /* spec: send can_frame when starting timer */
966  op->flags |= TX_ANNOUNCE;
967  }
968 
969  if (op->flags & TX_ANNOUNCE) {
970  bcm_can_tx(op);
971  if (op->count)
972  op->count--;
973  }
974 
975  if (op->flags & STARTTIMER)
976  bcm_tx_start_timer(op);
977 
978  return msg_head->nframes * CFSIZ + MHSIZ;
979 }
980 
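/*
 * A minimal, self-contained userspace sketch of the TX_SETUP path handled
 * above (the interface name "can0", can_id 0x123 and the 100 ms cycle are
 * illustrative assumptions):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/can.h>
 *	#include <linux/can/bcm.h>
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} txmsg;
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct ifreq ifr;
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *
 *	strcpy(ifr.ifr_name, "can0");
 *	ioctl(s, SIOCGIFINDEX, &ifr);
 *	addr.can_ifindex = ifr.ifr_ifindex;
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	memset(&txmsg, 0, sizeof(txmsg));
 *	txmsg.msg_head.opcode  = TX_SETUP;
 *	txmsg.msg_head.can_id  = 0x123;
 *	txmsg.msg_head.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	txmsg.msg_head.nframes = 1;
 *	txmsg.msg_head.ival2.tv_usec = 100000;	// send every 100 ms, forever
 *	txmsg.frame.can_dlc = 2;
 *	txmsg.frame.data[0] = 0xde;
 *	txmsg.frame.data[1] = 0xad;
 *	write(s, &txmsg, sizeof(txmsg));	// kernel now cycles the frame
 */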
981 /*
982  * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
983  */
984 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
985  int ifindex, struct sock *sk)
986 {
987  struct bcm_sock *bo = bcm_sk(sk);
988  struct bcm_op *op;
989  int do_rx_register;
990  int err = 0;
991 
992  if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
993  /* be robust against wrong usage ... */
994  msg_head->flags |= RX_FILTER_ID;
995  /* ignore trailing garbage */
996  msg_head->nframes = 0;
997  }
998 
999  /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1000  if (msg_head->nframes > MAX_NFRAMES + 1)
1001  return -EINVAL;
1002 
1003  if ((msg_head->flags & RX_RTR_FRAME) &&
1004  ((msg_head->nframes != 1) ||
1005  (!(msg_head->can_id & CAN_RTR_FLAG))))
1006  return -EINVAL;
1007 
1008  /* check the given can_id */
1009  op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
1010  if (op) {
1011  /* update existing BCM operation */
1012 
1013  /*
1014  * Do we need more space for the can_frames than currently
1015  * allocated? -> This is a _really_ unusual use-case and
1016  * therefore (complexity / locking) it is not supported.
1017  */
1018  if (msg_head->nframes > op->nframes)
1019  return -E2BIG;
1020 
1021  if (msg_head->nframes) {
1022  /* update can_frames content */
1023  err = memcpy_fromiovec((u8 *)op->frames,
1024  msg->msg_iov,
1025  msg_head->nframes * CFSIZ);
1026  if (err < 0)
1027  return err;
1028 
1029  /* clear last_frames to indicate 'nothing received' */
1030  memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
1031  }
1032 
1033  op->nframes = msg_head->nframes;
1034 
1035  /* Only an update -> do not call can_rx_register() */
1036  do_rx_register = 0;
1037 
1038  } else {
1039  /* insert new BCM operation for the given can_id */
1040  op = kzalloc(OPSIZ, GFP_KERNEL);
1041  if (!op)
1042  return -ENOMEM;
1043 
1044  op->can_id = msg_head->can_id;
1045  op->nframes = msg_head->nframes;
1046 
1047  if (msg_head->nframes > 1) {
1048  /* create array for can_frames and copy the data */
1049  op->frames = kmalloc(msg_head->nframes * CFSIZ,
1050  GFP_KERNEL);
1051  if (!op->frames) {
1052  kfree(op);
1053  return -ENOMEM;
1054  }
1055 
1056  /* create and init array for received can_frames */
1057  op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1058  GFP_KERNEL);
1059  if (!op->last_frames) {
1060  kfree(op->frames);
1061  kfree(op);
1062  return -ENOMEM;
1063  }
1064 
1065  } else {
1066  op->frames = &op->sframe;
1067  op->last_frames = &op->last_sframe;
1068  }
1069 
1070  if (msg_head->nframes) {
1071  err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
1072  msg_head->nframes * CFSIZ);
1073  if (err < 0) {
1074  if (op->frames != &op->sframe)
1075  kfree(op->frames);
1076  if (op->last_frames != &op->last_sframe)
1077  kfree(op->last_frames);
1078  kfree(op);
1079  return err;
1080  }
1081  }
1082 
1083  /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1084  op->sk = sk;
1085  op->ifindex = ifindex;
1086 
1087  /* ifindex for timeout events w/o previous frame reception */
1088  op->rx_ifindex = ifindex;
1089 
1090  /* initialize uninitialized (kzalloc) structure */
1091  hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1092  op->timer.function = bcm_rx_timeout_handler;
1093 
1094  /* initialize tasklet for rx timeout notification */
1095  tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
1096  (unsigned long) op);
1097 
1099  op->thrtimer.function = bcm_rx_thr_handler;
1100 
1101  /* initialize tasklet for rx throttle handling */
1102  tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
1103  (unsigned long) op);
1104 
1105  /* add this bcm_op to the list of the rx_ops */
1106  list_add(&op->list, &bo->rx_ops);
1107 
1108  /* call can_rx_register() */
1109  do_rx_register = 1;
1110 
1111  } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1112 
1113  /* check flags */
1114  op->flags = msg_head->flags;
1115 
1116  if (op->flags & RX_RTR_FRAME) {
1117 
1118  /* no timers in RTR-mode */
1119  hrtimer_cancel(&op->thrtimer);
1120  hrtimer_cancel(&op->timer);
1121 
1122  /*
1123  * funny feature in RX(!)_SETUP only for RTR-mode:
1124  * copy can_id into frame BUT without RTR-flag to
1125  * prevent a full-load-loopback-test ... ;-]
1126  */
1127  if ((op->flags & TX_CP_CAN_ID) ||
1128  (op->frames[0].can_id == op->can_id))
1129  op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1130 
1131  } else {
1132  if (op->flags & SETTIMER) {
1133 
1134  /* set timer value */
1135  op->ival1 = msg_head->ival1;
1136  op->ival2 = msg_head->ival2;
1137  op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1138  op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1139 
1140  /* disable an active timer due to zero value? */
1141  if (!op->kt_ival1.tv64)
1142  hrtimer_cancel(&op->timer);
1143 
1144  /*
1145  * In any case cancel the throttle timer, flush
1146  * potentially blocked msgs and reset throttle handling
1147  */
1148  op->kt_lastmsg = ktime_set(0, 0);
1149  hrtimer_cancel(&op->thrtimer);
1150  bcm_rx_thr_flush(op, 1);
1151  }
1152 
1153  if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1154  hrtimer_start(&op->timer, op->kt_ival1,
1155  HRTIMER_MODE_REL);
1156  }
1157 
1158  /* now we can register for can_ids, if we added a new bcm_op */
1159  if (do_rx_register) {
1160  if (ifindex) {
1161  struct net_device *dev;
1162 
1163  dev = dev_get_by_index(&init_net, ifindex);
1164  if (dev) {
1165  err = can_rx_register(dev, op->can_id,
1166  REGMASK(op->can_id),
1167  bcm_rx_handler, op,
1168  "bcm");
1169 
1170  op->rx_reg_dev = dev;
1171  dev_put(dev);
1172  }
1173 
1174  } else
1175  err = can_rx_register(NULL, op->can_id,
1176  REGMASK(op->can_id),
1177  bcm_rx_handler, op, "bcm");
1178  if (err) {
1179  /* this bcm rx op is broken -> remove it */
1180  list_del(&op->list);
1181  bcm_remove_op(op);
1182  return err;
1183  }
1184  }
1185 
1186  return msg_head->nframes * CFSIZ + MHSIZ;
1187 }
1188 
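/*
 * The matching RX_SETUP sketch for a single-id content filter with
 * timeout monitoring (socket setup as in the TX_SETUP example above;
 * the 5 s timeout is an illustrative assumption):
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} rxmsg;
 *
 *	memset(&rxmsg, 0, sizeof(rxmsg));
 *	rxmsg.msg_head.opcode  = RX_SETUP;
 *	rxmsg.msg_head.can_id  = 0x123;
 *	rxmsg.msg_head.flags   = SETTIMER | STARTTIMER;
 *	rxmsg.msg_head.nframes = 1;
 *	rxmsg.msg_head.ival1.tv_sec = 5;	// RX_TIMEOUT after 5 s of silence
 *	memset(rxmsg.frame.data, 0xFF, 8);	// all data bits are relevant
 *	write(s, &rxmsg, sizeof(rxmsg));
 *
 * RX_CHANGED and RX_TIMEOUT notifications are then read back as shown
 * after bcm_send_to_user() above.
 */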
1189 /*
1190  * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1191  */
1192 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1193 {
1194  struct sk_buff *skb;
1195  struct net_device *dev;
1196  int err;
1197 
1198  /* we need a real device to send frames */
1199  if (!ifindex)
1200  return -ENODEV;
1201 
1202  skb = alloc_skb(CFSIZ, GFP_KERNEL);
1203 
1204  if (!skb)
1205  return -ENOMEM;
1206 
1207  err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
1208  if (err < 0) {
1209  kfree_skb(skb);
1210  return err;
1211  }
1212 
1213  dev = dev_get_by_index(&init_net, ifindex);
1214  if (!dev) {
1215  kfree_skb(skb);
1216  return -ENODEV;
1217  }
1218 
1219  skb->dev = dev;
1220  skb->sk = sk;
1221  err = can_send(skb, 1); /* send with loopback */
1222  dev_put(dev);
1223 
1224  if (err)
1225  return err;
1226 
1227  return CFSIZ + MHSIZ;
1228 }
1229 
1230 /*
1231  * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1232  */
1233 static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
1234  struct msghdr *msg, size_t size)
1235 {
1236  struct sock *sk = sock->sk;
1237  struct bcm_sock *bo = bcm_sk(sk);
1238  int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1239  struct bcm_msg_head msg_head;
1240  int ret; /* read bytes or error codes as return value */
1241 
1242  if (!bo->bound)
1243  return -ENOTCONN;
1244 
1245  /* check for valid message length from userspace */
1246  if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
1247  return -EINVAL;
1248 
1249  /* check for alternative ifindex for this bcm_op */
1250 
1251  if (!ifindex && msg->msg_name) {
1252  /* no bound device as default => check msg_name */
1253  struct sockaddr_can *addr =
1254  (struct sockaddr_can *)msg->msg_name;
1255 
1256  if (msg->msg_namelen < sizeof(*addr))
1257  return -EINVAL;
1258 
1259  if (addr->can_family != AF_CAN)
1260  return -EINVAL;
1261 
1262  /* ifindex from sendto() */
1263  ifindex = addr->can_ifindex;
1264 
1265  if (ifindex) {
1266  struct net_device *dev;
1267 
1268  dev = dev_get_by_index(&init_net, ifindex);
1269  if (!dev)
1270  return -ENODEV;
1271 
1272  if (dev->type != ARPHRD_CAN) {
1273  dev_put(dev);
1274  return -ENODEV;
1275  }
1276 
1277  dev_put(dev);
1278  }
1279  }
1280 
1281  /* read message head information */
1282 
1283  ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
1284  if (ret < 0)
1285  return ret;
1286 
1287  lock_sock(sk);
1288 
1289  switch (msg_head.opcode) {
1290 
1291  case TX_SETUP:
1292  ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1293  break;
1294 
1295  case RX_SETUP:
1296  ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1297  break;
1298 
1299  case TX_DELETE:
1300  if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1301  ret = MHSIZ;
1302  else
1303  ret = -EINVAL;
1304  break;
1305 
1306  case RX_DELETE:
1307  if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1308  ret = MHSIZ;
1309  else
1310  ret = -EINVAL;
1311  break;
1312 
1313  case TX_READ:
1314  /* reuse msg_head for the reply to TX_READ */
1315  msg_head.opcode = TX_STATUS;
1316  ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1317  break;
1318 
1319  case RX_READ:
1320  /* reuse msg_head for the reply to RX_READ */
1321  msg_head.opcode = RX_STATUS;
1322  ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1323  break;
1324 
1325  case TX_SEND:
1326  /* we need exactly one can_frame behind the msg head */
1327  if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
1328  ret = -EINVAL;
1329  else
1330  ret = bcm_tx_send(msg, ifindex, sk);
1331  break;
1332 
1333  default:
1334  ret = -EINVAL;
1335  break;
1336  }
1337 
1338  release_sock(sk);
1339 
1340  return ret;
1341 }
1342 
1343 /*
1344  * notification handler for netdevice status changes
1345  */
1346 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1347  void *data)
1348 {
1349  struct net_device *dev = (struct net_device *)data;
1350  struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1351  struct sock *sk = &bo->sk;
1352  struct bcm_op *op;
1353  int notify_enodev = 0;
1354 
1355  if (!net_eq(dev_net(dev), &init_net))
1356  return NOTIFY_DONE;
1357 
1358  if (dev->type != ARPHRD_CAN)
1359  return NOTIFY_DONE;
1360 
1361  switch (msg) {
1362 
1363  case NETDEV_UNREGISTER:
1364  lock_sock(sk);
1365 
1366  /* remove device specific receive entries */
1367  list_for_each_entry(op, &bo->rx_ops, list)
1368  if (op->rx_reg_dev == dev)
1369  bcm_rx_unreg(dev, op);
1370 
1371  /* remove device reference, if this is our bound device */
1372  if (bo->bound && bo->ifindex == dev->ifindex) {
1373  bo->bound = 0;
1374  bo->ifindex = 0;
1375  notify_enodev = 1;
1376  }
1377 
1378  release_sock(sk);
1379 
1380  if (notify_enodev) {
1381  sk->sk_err = ENODEV;
1382  if (!sock_flag(sk, SOCK_DEAD))
1383  sk->sk_error_report(sk);
1384  }
1385  break;
1386 
1387  case NETDEV_DOWN:
1388  if (bo->bound && bo->ifindex == dev->ifindex) {
1389  sk->sk_err = ENETDOWN;
1390  if (!sock_flag(sk, SOCK_DEAD))
1391  sk->sk_error_report(sk);
1392  }
1393  }
1394 
1395  return NOTIFY_DONE;
1396 }
1397 
1398 /*
1399  * initial settings for all BCM sockets to be set at socket creation time
1400  */
1401 static int bcm_init(struct sock *sk)
1402 {
1403  struct bcm_sock *bo = bcm_sk(sk);
1404 
1405  bo->bound = 0;
1406  bo->ifindex = 0;
1407  bo->dropped_usr_msgs = 0;
1408  bo->bcm_proc_read = NULL;
1409 
1410  INIT_LIST_HEAD(&bo->tx_ops);
1411  INIT_LIST_HEAD(&bo->rx_ops);
1412 
1413  /* set notifier */
1414  bo->notifier.notifier_call = bcm_notifier;
1415 
1416  register_netdevice_notifier(&bo->notifier);
1417 
1418  return 0;
1419 }
1420 
1421 /*
1422  * standard socket functions
1423  */
1424 static int bcm_release(struct socket *sock)
1425 {
1426  struct sock *sk = sock->sk;
1427  struct bcm_sock *bo;
1428  struct bcm_op *op, *next;
1429 
1430  if (sk == NULL)
1431  return 0;
1432 
1433  bo = bcm_sk(sk);
1434 
1435  /* remove bcm_ops, timer, rx_unregister(), etc. */
1436 
1437  unregister_netdevice_notifier(&bo->notifier);
1438 
1439  lock_sock(sk);
1440 
1441  list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1442  bcm_remove_op(op);
1443 
1444  list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1445  /*
1446  * Don't care if we're bound or not (due to netdev problems),
1447  * can_rx_unregister() is always a safe thing to do here.
1448  */
1449  if (op->ifindex) {
1450  /*
1451  * Only remove subscriptions that had not
1452  * been removed due to NETDEV_UNREGISTER
1453  * in bcm_notifier()
1454  */
1455  if (op->rx_reg_dev) {
1456  struct net_device *dev;
1457 
1458  dev = dev_get_by_index(&init_net, op->ifindex);
1459  if (dev) {
1460  bcm_rx_unreg(dev, op);
1461  dev_put(dev);
1462  }
1463  }
1464  } else
1465  can_rx_unregister(NULL, op->can_id,
1466  REGMASK(op->can_id),
1467  bcm_rx_handler, op);
1468 
1469  bcm_remove_op(op);
1470  }
1471 
1472  /* remove procfs entry */
1473  if (proc_dir && bo->bcm_proc_read)
1474  remove_proc_entry(bo->procname, proc_dir);
1475 
1476  /* remove device reference */
1477  if (bo->bound) {
1478  bo->bound = 0;
1479  bo->ifindex = 0;
1480  }
1481 
1482  sock_orphan(sk);
1483  sock->sk = NULL;
1484 
1485  release_sock(sk);
1486  sock_put(sk);
1487 
1488  return 0;
1489 }
1490 
1491 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1492  int flags)
1493 {
1494  struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1495  struct sock *sk = sock->sk;
1496  struct bcm_sock *bo = bcm_sk(sk);
1497 
1498  if (len < sizeof(*addr))
1499  return -EINVAL;
1500 
1501  if (bo->bound)
1502  return -EISCONN;
1503 
1504  /* bind a device to this socket */
1505  if (addr->can_ifindex) {
1506  struct net_device *dev;
1507 
1508  dev = dev_get_by_index(&init_net, addr->can_ifindex);
1509  if (!dev)
1510  return -ENODEV;
1511 
1512  if (dev->type != ARPHRD_CAN) {
1513  dev_put(dev);
1514  return -ENODEV;
1515  }
1516 
1517  bo->ifindex = dev->ifindex;
1518  dev_put(dev);
1519 
1520  } else {
1521  /* no interface reference for ifindex = 0 ('any' CAN device) */
1522  bo->ifindex = 0;
1523  }
1524 
1525  bo->bound = 1;
1526 
1527  if (proc_dir) {
1528  /* unique socket address as filename */
1529  sprintf(bo->procname, "%lu", sock_i_ino(sk));
1530  bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1531  proc_dir,
1532  &bcm_proc_fops, sk);
1533  }
1534 
1535  return 0;
1536 }
1537 
1538 static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
1539  struct msghdr *msg, size_t size, int flags)
1540 {
1541  struct sock *sk = sock->sk;
1542  struct sk_buff *skb;
1543  int error = 0;
1544  int noblock;
1545  int err;
1546 
1547  noblock = flags & MSG_DONTWAIT;
1548  flags &= ~MSG_DONTWAIT;
1549  skb = skb_recv_datagram(sk, flags, noblock, &error);
1550  if (!skb)
1551  return error;
1552 
1553  if (skb->len < size)
1554  size = skb->len;
1555 
1556  err = memcpy_toiovec(msg->msg_iov, skb->data, size);
1557  if (err < 0) {
1558  skb_free_datagram(sk, skb);
1559  return err;
1560  }
1561 
1562  sock_recv_ts_and_drops(msg, sk, skb);
1563 
1564  if (msg->msg_name) {
1565  msg->msg_namelen = sizeof(struct sockaddr_can);
1566  memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1567  }
1568 
1569  skb_free_datagram(sk, skb);
1570 
1571  return size;
1572 }
1573 
1574 static const struct proto_ops bcm_ops = {
1575  .family = PF_CAN,
1576  .release = bcm_release,
1577  .bind = sock_no_bind,
1578  .connect = bcm_connect,
1579  .socketpair = sock_no_socketpair,
1580  .accept = sock_no_accept,
1581  .getname = sock_no_getname,
1582  .poll = datagram_poll,
1583  .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
1584  .listen = sock_no_listen,
1585  .shutdown = sock_no_shutdown,
1586  .setsockopt = sock_no_setsockopt,
1587  .getsockopt = sock_no_getsockopt,
1588  .sendmsg = bcm_sendmsg,
1589  .recvmsg = bcm_recvmsg,
1590  .mmap = sock_no_mmap,
1591  .sendpage = sock_no_sendpage,
1592 };
1593 
1594 static struct proto bcm_proto __read_mostly = {
1595  .name = "CAN_BCM",
1596  .owner = THIS_MODULE,
1597  .obj_size = sizeof(struct bcm_sock),
1598  .init = bcm_init,
1599 };
1600 
1601 static const struct can_proto bcm_can_proto = {
1602  .type = SOCK_DGRAM,
1603  .protocol = CAN_BCM,
1604  .ops = &bcm_ops,
1605  .prot = &bcm_proto,
1606 };
1607 
1608 static int __init bcm_module_init(void)
1609 {
1610  int err;
1611 
1612  printk(banner);
1613 
1614  err = can_proto_register(&bcm_can_proto);
1615  if (err < 0) {
1616  printk(KERN_ERR "can: registration of bcm protocol failed\n");
1617  return err;
1618  }
1619 
1620  /* create /proc/net/can-bcm directory */
1621  proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1622  return 0;
1623 }
1624 
1625 static void __exit bcm_module_exit(void)
1626 {
1627  can_proto_unregister(&bcm_can_proto);
1628 
1629  if (proc_dir)
1630  proc_net_remove(&init_net, "can-bcm");
1631 }
1632 
1633 module_init(bcm_module_init);
1634 module_exit(bcm_module_exit);