Linux Kernel  3.7.1
caif_serial.c
1 /*
2  * Copyright (C) ST-Ericsson AB 2010
3  * Author: Sjur Brendeland / [email protected]
4  * License terms: GNU General Public License (GPL) version 2
5  */
6 
7 #include <linux/hardirq.h>
8 #include <linux/init.h>
9 #include <linux/module.h>
10 #include <linux/device.h>
11 #include <linux/types.h>
12 #include <linux/skbuff.h>
13 #include <linux/netdevice.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/tty.h>
16 #include <linux/file.h>
17 #include <linux/if_arp.h>
18 #include <net/caif/caif_device.h>
19 #include <net/caif/cfcnfg.h>
20 #include <linux/err.h>
21 #include <linux/debugfs.h>
22 
23 MODULE_LICENSE("GPL");
24 MODULE_AUTHOR("Sjur Brendeland<[email protected]>");
25 MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
26 MODULE_LICENSE("GPL");
27 MODULE_ALIAS_LDISC(N_CAIF);
28 
29 #define SEND_QUEUE_LOW 10
30 #define SEND_QUEUE_HIGH 100
31 #define CAIF_SENDING 1 /* Bit 1 = 0x02*/
32 #define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
33 #define MAX_WRITE_CHUNK 4096
34 #define ON 1
35 #define OFF 0
36 #define CAIF_MAX_MTU 4096
37 
38 /*This list is protected by the rtnl lock. */
39 static LIST_HEAD(ser_list);
40 
41 static bool ser_loop;
42 module_param(ser_loop, bool, S_IRUGO);
43 MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
44 
45 static bool ser_use_stx = true;
46 module_param(ser_use_stx, bool, S_IRUGO);
47 MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
48 
49 static bool ser_use_fcs = true;
50 
51 module_param(ser_use_fcs, bool, S_IRUGO);
52 MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
53 
54 static int ser_write_chunk = MAX_WRITE_CHUNK;
55 module_param(ser_write_chunk, int, S_IRUGO);
56 
57 MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
58 
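The four module parameters above (ser_loop, ser_use_stx, ser_use_fcs, ser_write_chunk) are fixed at module load time and, since they are registered with S_IRUGO, are exported read-only once the module is loaded. As a rough user-space sketch (not part of this driver, assuming the conventional sysfs location /sys/module/caif_serial/parameters/), the current values can be inspected like this:

/* Sketch: read back a caif_serial module parameter from sysfs.
 * The path is the conventional location for loadable-module parameters;
 * adjust if the module is built in or renamed. */
#include <stdio.h>

int main(void)
{
	char val[16];
	FILE *f = fopen("/sys/module/caif_serial/parameters/ser_loop", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(val, sizeof(val), f))
		printf("ser_loop=%s", val);	/* bool params read back as "Y" or "N" */
	fclose(f);
	return 0;
}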
59 static struct dentry *debugfsdir;
60 
61 static int caif_net_open(struct net_device *dev);
62 static int caif_net_close(struct net_device *dev);
63 
64 struct ser_device {
65  struct caif_dev_common common;
66  struct list_head node;
67  struct net_device *dev;
68  struct sk_buff_head head;
69  struct tty_struct *tty;
70  bool tx_started;
71  unsigned long state;
72  char *tty_name;
73 #ifdef CONFIG_DEBUG_FS
74  struct dentry *debugfs_tty_dir;
75  struct debugfs_blob_wrapper tx_blob;
76  struct debugfs_blob_wrapper rx_blob;
77  u8 rx_data[128];
78  u8 tx_data[128];
79  u8 tty_status;
80 
81 #endif
82 };
83 
84 static void caifdev_setup(struct net_device *dev);
85 static void ldisc_tx_wakeup(struct tty_struct *tty);
86 #ifdef CONFIG_DEBUG_FS
87 static inline void update_tty_status(struct ser_device *ser)
88 {
89  ser->tty_status =
90  ser->tty->stopped << 5 |
91  ser->tty->hw_stopped << 4 |
92  ser->tty->flow_stopped << 3 |
93  ser->tty->packet << 2 |
94  ser->tty->low_latency << 1 |
95  ser->tty->warned;
96 }
97 static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
98 {
99  ser->debugfs_tty_dir =
100  debugfs_create_dir(tty->name, debugfsdir);
101  if (!IS_ERR(ser->debugfs_tty_dir)) {
102  debugfs_create_blob("last_tx_msg", S_IRUSR,
103  ser->debugfs_tty_dir,
104  &ser->tx_blob);
105 
106  debugfs_create_blob("last_rx_msg", S_IRUSR,
107  ser->debugfs_tty_dir,
108  &ser->rx_blob);
109 
110  debugfs_create_x32("ser_state", S_IRUSR,
111  ser->debugfs_tty_dir,
112  (u32 *)&ser->state);
113 
114  debugfs_create_x8("tty_status", S_IRUSR,
115  ser->debugfs_tty_dir,
116  &ser->tty_status);
117 
118  }
119  ser->tx_blob.data = ser->tx_data;
120  ser->tx_blob.size = 0;
121  ser->rx_blob.data = ser->rx_data;
122  ser->rx_blob.size = 0;
123 }
124 
125 static inline void debugfs_deinit(struct ser_device *ser)
126 {
127  debugfs_remove_recursive(ser->debugfs_tty_dir);
128 }
129 
130 static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
131 {
132  if (size > sizeof(ser->rx_data))
133  size = sizeof(ser->rx_data);
134  memcpy(ser->rx_data, data, size);
135  ser->rx_blob.data = ser->rx_data;
136  ser->rx_blob.size = size;
137 }
138 
139 static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
140 {
141  if (size > sizeof(ser->tx_data))
142  size = sizeof(ser->tx_data);
143  memcpy(ser->tx_data, data, size);
144  ser->tx_blob.data = ser->tx_data;
145  ser->tx_blob.size = size;
146 }
147 #else
148 static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
149 {
150 }
151 
152 static inline void debugfs_deinit(struct ser_device *ser)
153 {
154 }
155 
156 static inline void update_tty_status(struct ser_device *ser)
157 {
158 }
159 
160 static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
161 {
162 }
163 
164 static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
165 {
166 }
167 
168 #endif
169 
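When CONFIG_DEBUG_FS is enabled, debugfs_init() above creates a per-tty directory under the caif_serial debugfs directory (created in caif_ser_init() below) containing last_tx_msg, last_rx_msg, ser_state and tty_status. As a rough user-space sketch (not part of this driver; the tty name ttyS0 and the mount point /sys/kernel/debug are assumptions), the last frame captured by debugfs_rx() can be dumped like this:

/* Sketch: hex-dump the last received CAIF frame recorded in debugfs.
 * Assumes debugfs is mounted at /sys/kernel/debug and that the line
 * discipline is attached to ttyS0. */
#include <stdio.h>

int main(void)
{
	unsigned char buf[128];
	size_t n, i;
	FILE *f = fopen("/sys/kernel/debug/caif_serial/ttyS0/last_rx_msg", "rb");

	if (!f) {
		perror("fopen");
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	fclose(f);
	return 0;
}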
170 static void ldisc_receive(struct tty_struct *tty, const u8 *data,
171  char *flags, int count)
172 {
173  struct sk_buff *skb = NULL;
174  struct ser_device *ser;
175  int ret;
176  u8 *p;
177 
178  ser = tty->disc_data;
179 
180  /*
181  * NOTE: flags may contain information about break or overrun.
182  * This is not yet handled.
183  */
184 
185 
186  /*
187  * Workaround for garbage at start of transmission,
188  * only enable if STX handling is not enabled.
189  */
190  if (!ser->common.use_stx && !ser->tx_started) {
191  dev_info(&ser->dev->dev,
192  "Bytes received before initial transmission - "
193  "bytes discarded.\n");
194  return;
195  }
196 
197  BUG_ON(ser->dev == NULL);
198 
199  /* Get a suitable caif packet and copy in data. */
200  skb = netdev_alloc_skb(ser->dev, count+1);
201  if (skb == NULL)
202  return;
203  p = skb_put(skb, count);
204  memcpy(p, data, count);
205 
206  skb->protocol = htons(ETH_P_CAIF);
207  skb_reset_mac_header(skb);
208  skb->dev = ser->dev;
209  debugfs_rx(ser, data, count);
210  /* Push received packet up the stack. */
211  ret = netif_rx_ni(skb);
212  if (!ret) {
213  ser->dev->stats.rx_packets++;
214  ser->dev->stats.rx_bytes += count;
215  } else
216  ++ser->dev->stats.rx_dropped;
217  update_tty_status(ser);
218 }
219 
220 static int handle_tx(struct ser_device *ser)
221 {
222  struct tty_struct *tty;
223  struct sk_buff *skb;
224  int tty_wr, len, room;
225 
226  tty = ser->tty;
227  ser->tx_started = true;
228 
229  /* Enter critical section */
230  if (test_and_set_bit(CAIF_SENDING, &ser->state))
231  return 0;
232 
233  /* skb_peek is safe because handle_tx is called after skb_queue_tail */
234  while ((skb = skb_peek(&ser->head)) != NULL) {
235 
236  /* Make sure you don't write too much */
237  len = skb->len;
238  room = tty_write_room(tty);
239  if (!room)
240  break;
241  if (room > ser_write_chunk)
242  room = ser_write_chunk;
243  if (len > room)
244  len = room;
245 
246  /* Write to tty or loopback */
247  if (!ser_loop) {
248  tty_wr = tty->ops->write(tty, skb->data, len);
249  update_tty_status(ser);
250  } else {
251  tty_wr = len;
252  ldisc_receive(tty, skb->data, NULL, len);
253  }
254  ser->dev->stats.tx_packets++;
255  ser->dev->stats.tx_bytes += tty_wr;
256 
257  /* Error on TTY ?! */
258  if (tty_wr < 0)
259  goto error;
260  /* Reduce buffer written, and discard if empty */
261  skb_pull(skb, tty_wr);
262  if (skb->len == 0) {
263  struct sk_buff *tmp = skb_dequeue(&ser->head);
264  WARN_ON(tmp != skb);
265  if (in_interrupt())
266  dev_kfree_skb_irq(skb);
267  else
268  kfree_skb(skb);
269  }
270  }
271  /* Send flow on if flow off was previously sent and the queue has drained */
272  if (ser->head.qlen <= SEND_QUEUE_LOW &&
273      test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
274      ser->common.flowctrl != NULL)
275      ser->common.flowctrl(ser->dev, ON);
276  clear_bit(CAIF_SENDING, &ser->state);
277  return 0;
278 error:
279  clear_bit(CAIF_SENDING, &ser->state);
280  return tty_wr;
281 }
282 
283 static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
284 {
285  struct ser_device *ser;
286 
287  BUG_ON(dev == NULL);
288  ser = netdev_priv(dev);
289 
290  /* Send flow off once, on high water mark */
291  if (ser->head.qlen > SEND_QUEUE_HIGH &&
292      !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
293      ser->common.flowctrl != NULL)
294 
295  ser->common.flowctrl(ser->dev, OFF);
296 
297  skb_queue_tail(&ser->head, skb);
298  return handle_tx(ser);
299 }
300 
301 
302 static void ldisc_tx_wakeup(struct tty_struct *tty)
303 {
304  struct ser_device *ser;
305 
306  ser = tty->disc_data;
307  BUG_ON(ser == NULL);
308  WARN_ON(ser->tty != tty);
309  handle_tx(ser);
310 }
311 
312 
313 static int ldisc_open(struct tty_struct *tty)
314 {
315  struct ser_device *ser;
316  struct net_device *dev;
317  char name[64];
318  int result;
319 
320  /* No write no play */
321  if (tty->ops->write == NULL)
322  return -EOPNOTSUPP;
323  if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
324  return -EPERM;
325 
326  sprintf(name, "cf%s", tty->name);
327  dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
328  if (!dev)
329  return -ENOMEM;
330 
331  ser = netdev_priv(dev);
332  ser->tty = tty_kref_get(tty);
333  ser->dev = dev;
334  debugfs_init(ser, tty);
335  tty->receive_room = N_TTY_BUF_SIZE;
336  tty->disc_data = ser;
337  set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
338  rtnl_lock();
339  result = register_netdevice(dev);
340  if (result) {
341  rtnl_unlock();
342  free_netdev(dev);
343  return -ENODEV;
344  }
345 
346  list_add(&ser->node, &ser_list);
347  rtnl_unlock();
348  netif_stop_queue(dev);
349  update_tty_status(ser);
350  return 0;
351 }
352 
353 static void ldisc_close(struct tty_struct *tty)
354 {
355  struct ser_device *ser = tty->disc_data;
356  /* Remove may be called inside or outside of rtnl_lock */
357  int islocked = rtnl_is_locked();
358 
359  if (!islocked)
360  rtnl_lock();
361  /* device is freed automagically by net-sysfs */
362  dev_close(ser->dev);
363  unregister_netdevice(ser->dev);
364  list_del(&ser->node);
365  debugfs_deinit(ser);
366  tty_kref_put(ser->tty);
367  if (!islocked)
368  rtnl_unlock();
369 }
370 
371 /* The line discipline structure. */
372 static struct tty_ldisc_ops caif_ldisc = {
373  .owner = THIS_MODULE,
374  .magic = TTY_LDISC_MAGIC,
375  .name = "n_caif",
376  .open = ldisc_open,
377  .close = ldisc_close,
378  .receive_buf = ldisc_receive,
379  .write_wakeup = ldisc_tx_wakeup
380 };
381 
382 static int register_ldisc(void)
383 {
384  int result;
385 
386  result = tty_register_ldisc(N_CAIF, &caif_ldisc);
387  if (result < 0) {
388  pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
389  result);
390  return result;
391  }
392  return result;
393 }
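register_ldisc() above makes the discipline available under the N_CAIF line discipline number; ldisc_open() then runs when user space attaches it to a tty (which, per the capable() check, requires CAP_SYS_ADMIN or CAP_SYS_TTY_CONFIG) and registers a cf<ttyname> network device. As a rough user-space sketch (not part of this driver; /dev/ttyS0 is only illustrative), the attachment is done with the TIOCSETD ioctl:

/* Sketch: attach the CAIF line discipline to a serial port.
 * Needs CAP_SYS_ADMIN or CAP_SYS_TTY_CONFIG; the device path is
 * illustrative. While the fd stays open, a cf<tty> netdev exists. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tty.h>		/* N_CAIF */

int main(void)
{
	int ldisc = N_CAIF;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("TIOCSETD");
		close(fd);
		return 1;
	}
	pause();	/* closing the tty detaches the discipline again */
	return 0;
}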
394 static const struct net_device_ops netdev_ops = {
395  .ndo_open = caif_net_open,
396  .ndo_stop = caif_net_close,
397  .ndo_start_xmit = caif_xmit
398 };
399 
400 static void caifdev_setup(struct net_device *dev)
401 {
402  struct ser_device *serdev = netdev_priv(dev);
403 
404  dev->features = 0;
405  dev->netdev_ops = &netdev_ops;
406  dev->type = ARPHRD_CAIF;
407  dev->flags = IFF_POINTOPOINT | IFF_NOARP;
408  dev->mtu = CAIF_MAX_MTU;
409  dev->tx_queue_len = 0;
410  dev->destructor = free_netdev;
411  skb_queue_head_init(&serdev->head);
412  serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
413  serdev->common.use_frag = true;
414  serdev->common.use_stx = ser_use_stx;
415  serdev->common.use_fcs = ser_use_fcs;
416  serdev->dev = dev;
417 }
418 
419 
420 static int caif_net_open(struct net_device *dev)
421 {
422  netif_wake_queue(dev);
423  return 0;
424 }
425 
426 static int caif_net_close(struct net_device *dev)
427 {
428  netif_stop_queue(dev);
429  return 0;
430 }
431 
432 static int __init caif_ser_init(void)
433 {
434  int ret;
435 
436  ret = register_ldisc();
437  debugfsdir = debugfs_create_dir("caif_serial", NULL);
438  return ret;
439 }
440 
441 static void __exit caif_ser_exit(void)
442 {
443  struct ser_device *ser = NULL;
444  struct list_head *node;
445  struct list_head *_tmp;
446 
447  list_for_each_safe(node, _tmp, &ser_list) {
448  ser = list_entry(node, struct ser_device, node);
449  dev_close(ser->dev);
450  unregister_netdevice(ser->dev);
451  list_del(node);
452  }
453  tty_unregister_ldisc(N_CAIF);
454  debugfs_remove_recursive(debugfsdir);
455 }
456 
457 module_init(caif_ser_init);
458 module_exit(caif_ser_exit);