Linux Kernel 3.7.1
slip.c
1 /*
2  * slip.c This module implements the SLIP protocol for kernel-based
3  * devices like TTY. It interfaces between a raw TTY, and the
4  * kernel's INET protocol layers.
5  *
6  * Version: @(#)slip.c 0.8.3 12/24/94
7  *
8  * Authors: Laurence Culhane, <[email protected]>
9  * Fred N. van Kempen, <[email protected]>
10  *
11  * Fixes:
12  * Alan Cox : Sanity checks and avoid tx overruns.
13  * Has a new sl->mtu field.
14  * Alan Cox : Found cause of overrun. ifconfig sl0
15  * mtu upwards. Driver now spots this
16  * and grows/shrinks its buffers(hack!).
17  * Memory leak if you run out of memory
18  * setting up a slip driver fixed.
19  * Matt Dillon : Printable slip (borrowed from NET2E)
20  * Pauline Middelink : Slip driver fixes.
21  * Alan Cox : Honours the old SL_COMPRESSED flag
22  * Alan Cox : KISS AX.25 and AXUI IP support
23  * Michael Riepe : Automatic CSLIP recognition added
24  * Charles Hedrick : CSLIP header length problem fix.
25  * Alan Cox : Corrected non-IP cases of the above.
26  * Alan Cox : Now uses hardware type as per FvK.
27  * Alan Cox : Default to 192.168.0.0 (RFC 1597)
28  * A.N.Kuznetsov : dev_tint() recursion fix.
29  * Dmitry Gorodchanin : SLIP memory leaks
30  * Dmitry Gorodchanin : Code cleanup. Reduce tty driver
31  * buffering from 4096 to 256 bytes.
32  * Improving SLIP response time.
33  * CONFIG_SLIP_MODE_SLIP6.
34  * ifconfig sl? up & down now works
35  * correctly.
36  * Modularization.
37  * Alan Cox : Oops - fix AX.25 buffer lengths
38  * Dmitry Gorodchanin : Even more cleanups. Preserve CSLIP
39  * statistics. Include CSLIP code only
40  * if it is really needed.
41  * Alan Cox : Free slhc buffers in the right place.
42  * Alan Cox : Allow for digipeated IP over AX.25
43  * Matti Aarnio : Dynamic SLIP devices, with ideas taken
44  * from Jim Freeman's <[email protected]>
45  * dynamic PPP devices. We do NOT kfree()
46  * device entries, just reg./unreg. them
47  * as they are needed. We kfree() them
48  * at module cleanup.
49  * With MODULE-loading ``insmod'', user
50  * can issue parameter: slip_maxdev=1024
51  * (Or how much he/she wants.. Default
52  * is 256)
53  * Stanislav Voronyi : Slip line checking, with ideas taken
54  * from multislip BSDI driver which was
55  * written by Igor Chechik, RELCOM Corp.
56  * Only algorithms have been ported to
57  * Linux SLIP driver.
58  * Vitaly E. Lavrov : Sane behaviour on tty hangup.
59  * Alexey Kuznetsov : Cleanup interfaces to tty & netdevice
60  * modules.
61  */
62 
63 #define SL_CHECK_TRANSMIT
64 #include <linux/module.h>
65 #include <linux/moduleparam.h>
66 
67 #include <asm/uaccess.h>
68 #include <linux/bitops.h>
69 #include <linux/sched.h>
70 #include <linux/string.h>
71 #include <linux/mm.h>
72 #include <linux/interrupt.h>
73 #include <linux/in.h>
74 #include <linux/tty.h>
75 #include <linux/errno.h>
76 #include <linux/netdevice.h>
77 #include <linux/etherdevice.h>
78 #include <linux/skbuff.h>
79 #include <linux/rtnetlink.h>
80 #include <linux/if_arp.h>
81 #include <linux/if_slip.h>
82 #include <linux/compat.h>
83 #include <linux/delay.h>
84 #include <linux/init.h>
85 #include <linux/slab.h>
86 #include "slip.h"
87 #ifdef CONFIG_INET
88 #include <linux/ip.h>
89 #include <linux/tcp.h>
90 #include <net/slhc_vj.h>
91 #endif
92 
93 #define SLIP_VERSION "0.8.4-NET3.019-NEWTTY"
94 
95 static struct net_device **slip_devs;
96 
97 static int slip_maxdev = SL_NRUNIT;
98 module_param(slip_maxdev, int, 0);
99 MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices");
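/*
 * slip_devs[] is sized to slip_maxdev once, in slip_init().  A sketch of
 * raising the limit when loading the module (the header comment above gives
 * the same example for insmod):
 *
 *	modprobe slip slip_maxdev=1024
 */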
100 
101 static int slip_esc(unsigned char *p, unsigned char *d, int len);
102 static void slip_unesc(struct slip *sl, unsigned char c);
103 #ifdef CONFIG_SLIP_MODE_SLIP6
104 static int slip_esc6(unsigned char *p, unsigned char *d, int len);
105 static void slip_unesc6(struct slip *sl, unsigned char c);
106 #endif
107 #ifdef CONFIG_SLIP_SMART
108 static void sl_keepalive(unsigned long sls);
109 static void sl_outfill(unsigned long sls);
110 static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
111 #endif
112 
113 /********************************
114 * Buffer administration routines:
115 * sl_alloc_bufs()
116 * sl_free_bufs()
117 * sl_realloc_bufs()
118 *
119 * NOTE: sl_realloc_bufs != sl_free_bufs + sl_alloc_bufs, because
120 * sl_realloc_bufs provides strong atomicity and reallocation
121  * on an actively running device.
122 *********************************/
123 
124 /*
125  Allocate channel buffers.
126  */
127 
128 static int sl_alloc_bufs(struct slip *sl, int mtu)
129 {
130  int err = -ENOBUFS;
131  unsigned long len;
132  char *rbuff = NULL;
133  char *xbuff = NULL;
134 #ifdef SL_INCLUDE_CSLIP
135  char *cbuff = NULL;
136  struct slcompress *slcomp = NULL;
137 #endif
138 
139  /*
140  * Allocate the SLIP frame buffers:
141  *
142  * rbuff Receive buffer.
143  * xbuff Transmit buffer.
144  * cbuff Temporary compression buffer.
145  */
146  len = mtu * 2;
147 
148  /*
149  * allow for arrival of larger UDP packets, even if we say not to
150  * also fixes a bug in which SunOS sends 512-byte packets even with
151  * an MSS of 128
152  */
153  if (len < 576 * 2)
154  len = 576 * 2;
155  rbuff = kmalloc(len + 4, GFP_KERNEL);
156  if (rbuff == NULL)
157  goto err_exit;
158  xbuff = kmalloc(len + 4, GFP_KERNEL);
159  if (xbuff == NULL)
160  goto err_exit;
161 #ifdef SL_INCLUDE_CSLIP
162  cbuff = kmalloc(len + 4, GFP_KERNEL);
163  if (cbuff == NULL)
164  goto err_exit;
165  slcomp = slhc_init(16, 16);
166  if (slcomp == NULL)
167  goto err_exit;
168 #endif
169  spin_lock_bh(&sl->lock);
170  if (sl->tty == NULL) {
171  spin_unlock_bh(&sl->lock);
172  err = -ENODEV;
173  goto err_exit;
174  }
175  sl->mtu = mtu;
176  sl->buffsize = len;
177  sl->rcount = 0;
178  sl->xleft = 0;
179  rbuff = xchg(&sl->rbuff, rbuff);
180  xbuff = xchg(&sl->xbuff, xbuff);
181 #ifdef SL_INCLUDE_CSLIP
182  cbuff = xchg(&sl->cbuff, cbuff);
183  slcomp = xchg(&sl->slcomp, slcomp);
184 #endif
185 #ifdef CONFIG_SLIP_MODE_SLIP6
186  sl->xdata = 0;
187  sl->xbits = 0;
188 #endif
189  spin_unlock_bh(&sl->lock);
190  err = 0;
191 
192  /* Cleanup */
193 err_exit:
194 #ifdef SL_INCLUDE_CSLIP
195  kfree(cbuff);
196  slhc_free(slcomp);
197 #endif
198  kfree(xbuff);
199  kfree(rbuff);
200  return err;
201 }
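/*
 * Note that err_exit above is reached on success as well: by then the xchg()
 * calls have swapped the new buffers into the slip struct, so the local
 * pointers refer to the previous buffers (NULL on first open) and the
 * kfree()/slhc_free() tail releases those rather than the new ones.
 */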
202 
203 /* Free a SLIP channel's buffers. */
204 static void sl_free_bufs(struct slip *sl)
205 {
206  /* Free all SLIP frame buffers. */
207  kfree(xchg(&sl->rbuff, NULL));
208  kfree(xchg(&sl->xbuff, NULL));
209 #ifdef SL_INCLUDE_CSLIP
210  kfree(xchg(&sl->cbuff, NULL));
211  slhc_free(xchg(&sl->slcomp, NULL));
212 #endif
213 }
214 
215 /*
216  Reallocate slip channel buffers.
217  */
218 
219 static int sl_realloc_bufs(struct slip *sl, int mtu)
220 {
221  int err = 0;
222  struct net_device *dev = sl->dev;
223  unsigned char *xbuff, *rbuff;
224 #ifdef SL_INCLUDE_CSLIP
225  unsigned char *cbuff;
226 #endif
227  int len = mtu * 2;
228 
229 /*
230  * allow for arrival of larger UDP packets, even if we say not to
231  * also fixes a bug in which SunOS sends 512-byte packets even with
232  * an MSS of 128
233  */
234  if (len < 576 * 2)
235  len = 576 * 2;
236 
237  xbuff = kmalloc(len + 4, GFP_ATOMIC);
238  rbuff = kmalloc(len + 4, GFP_ATOMIC);
239 #ifdef SL_INCLUDE_CSLIP
240  cbuff = kmalloc(len + 4, GFP_ATOMIC);
241 #endif
242 
243 
244 #ifdef SL_INCLUDE_CSLIP
245  if (xbuff == NULL || rbuff == NULL || cbuff == NULL) {
246 #else
247  if (xbuff == NULL || rbuff == NULL) {
248 #endif
249  if (mtu > sl->mtu) {
250  printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n",
251  dev->name);
252  err = -ENOBUFS;
253  }
254  goto done;
255  }
256  spin_lock_bh(&sl->lock);
257 
258  err = -ENODEV;
259  if (sl->tty == NULL)
260  goto done_on_bh;
261 
262  xbuff = xchg(&sl->xbuff, xbuff);
263  rbuff = xchg(&sl->rbuff, rbuff);
264 #ifdef SL_INCLUDE_CSLIP
265  cbuff = xchg(&sl->cbuff, cbuff);
266 #endif
267  if (sl->xleft) {
268  if (sl->xleft <= len) {
269  memcpy(sl->xbuff, sl->xhead, sl->xleft);
270  } else {
271  sl->xleft = 0;
272  dev->stats.tx_dropped++;
273  }
274  }
275  sl->xhead = sl->xbuff;
276 
277  if (sl->rcount) {
278  if (sl->rcount <= len) {
279  memcpy(sl->rbuff, rbuff, sl->rcount);
280  } else {
281  sl->rcount = 0;
282  dev->stats.rx_over_errors++;
283  set_bit(SLF_ERROR, &sl->flags);
284  }
285  }
286  sl->mtu = mtu;
287  dev->mtu = mtu;
288  sl->buffsize = len;
289  err = 0;
290 
291 done_on_bh:
292  spin_unlock_bh(&sl->lock);
293 
294 done:
295  kfree(xbuff);
296  kfree(rbuff);
297 #ifdef SL_INCLUDE_CSLIP
298  kfree(cbuff);
299 #endif
300  return err;
301 }
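/*
 * sl_realloc_bufs() can run while the device is passing traffic, hence the
 * GFP_ATOMIC allocations and the copying of any partially received (rcount)
 * or still queued (xleft) data into the new buffers under sl->lock before
 * the old buffers are freed.
 */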
302 
303 
304 /* Set the "sending" flag. This must be atomic; netif_stop_queue() already is. */
305 static inline void sl_lock(struct slip *sl)
306 {
307  netif_stop_queue(sl->dev);
308 }
309 
310 
311 /* Clear the "sending" flag. This must be atomic; netif_wake_queue() already is. */
312 static inline void sl_unlock(struct slip *sl)
313 {
314  netif_wake_queue(sl->dev);
315 }
316 
317 /* Send one completely decapsulated IP datagram to the IP layer. */
318 static void sl_bump(struct slip *sl)
319 {
320  struct net_device *dev = sl->dev;
321  struct sk_buff *skb;
322  int count;
323 
324  count = sl->rcount;
325 #ifdef SL_INCLUDE_CSLIP
326  if (sl->mode & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) {
327  unsigned char c = sl->rbuff[0];
328  if (c & SL_TYPE_COMPRESSED_TCP) {
329  /* ignore compressed packets when CSLIP is off */
330  if (!(sl->mode & SL_MODE_CSLIP)) {
331  printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
332  return;
333  }
334  /* make sure we've reserved enough space for uncompress
335  to use */
336  if (count + 80 > sl->buffsize) {
337  dev->stats.rx_over_errors++;
338  return;
339  }
340  count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
341  if (count <= 0)
342  return;
343  } else if (c >= SL_TYPE_UNCOMPRESSED_TCP) {
344  if (!(sl->mode & SL_MODE_CSLIP)) {
345  /* turn on header compression */
346  sl->mode |= SL_MODE_CSLIP;
347  sl->mode &= ~SL_MODE_ADAPTIVE;
348  printk(KERN_INFO "%s: header compression turned on\n", dev->name);
349  }
350  sl->rbuff[0] &= 0x4f;
351  if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
352  return;
353  }
354  }
355 #endif /* SL_INCLUDE_CSLIP */
356 
357  dev->stats.rx_bytes += count;
358 
359  skb = dev_alloc_skb(count);
360  if (skb == NULL) {
361  printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
362  dev->stats.rx_dropped++;
363  return;
364  }
365  skb->dev = dev;
366  memcpy(skb_put(skb, count), sl->rbuff, count);
367  skb_reset_mac_header(skb);
368  skb->protocol = htons(ETH_P_IP);
369  netif_rx_ni(skb);
370  dev->stats.rx_packets++;
371 }
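/*
 * The type check above follows the VJ header-compression encoding (see
 * <net/slhc_vj.h> and RFC 1144): a first byte with bit 0x80 set marks a
 * COMPRESSED_TCP frame, a value of 0x70 or above marks UNCOMPRESSED_TCP,
 * and anything below that is an ordinary IP datagram (0x45 for a plain
 * 20-byte header).  Masking with 0x4f restores the normal version/IHL byte
 * before the packet is passed to netif_rx_ni().
 */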
372 
373 /* Encapsulate one IP datagram and stuff into a TTY queue. */
374 static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
375 {
376  unsigned char *p;
377  int actual, count;
378 
379  if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */
380  printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
381  sl->dev->stats.tx_dropped++;
382  sl_unlock(sl);
383  return;
384  }
385 
386  p = icp;
387 #ifdef SL_INCLUDE_CSLIP
388  if (sl->mode & SL_MODE_CSLIP)
389  len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1);
390 #endif
391 #ifdef CONFIG_SLIP_MODE_SLIP6
392  if (sl->mode & SL_MODE_SLIP6)
393  count = slip_esc6(p, sl->xbuff, len);
394  else
395 #endif
396  count = slip_esc(p, sl->xbuff, len);
397 
398  /* Order of next two lines is *very* important.
399  * When we are sending a small amount of data,
400  * the transfer may be completed inside the ops->write()
401  * routine, because it's running with interrupts enabled.
402  * In this case we would *never* get a WRITE_WAKEUP event
403  * if we did not request it before the write operation.
404  * 14 Oct 1994 Dmitry Gorodchanin.
405  */
406  set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
407  actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
408 #ifdef SL_CHECK_TRANSMIT
409  sl->dev->trans_start = jiffies;
410 #endif
411  sl->xleft = count - actual;
412  sl->xhead = sl->xbuff + actual;
413 #ifdef CONFIG_SLIP_SMART
414  /* VSV */
415  clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */
416 #endif
417 }
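/*
 * tty->ops->write() may take fewer bytes than requested.  The remainder is
 * tracked in sl->xleft, with sl->xhead pointing at the untransmitted tail,
 * and slip_write_wakeup() below drains it once the tty driver reports room
 * for more data.
 */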
418 
419 /*
420  * Called by the driver when there's room for more data. If we have
421  * more packets to send, we send them here.
422  */
423 static void slip_write_wakeup(struct tty_struct *tty)
424 {
425  int actual;
426  struct slip *sl = tty->disc_data;
427 
428  /* First make sure we're connected. */
429  if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
430  return;
431 
432  if (sl->xleft <= 0) {
433  /* Now serial buffer is almost free & we can start
434  * transmission of another packet */
435  sl->dev->stats.tx_packets++;
436  clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
437  sl_unlock(sl);
438  return;
439  }
440 
441  actual = tty->ops->write(tty, sl->xhead, sl->xleft);
442  sl->xleft -= actual;
443  sl->xhead += actual;
444 }
445 
446 static void sl_tx_timeout(struct net_device *dev)
447 {
448  struct slip *sl = netdev_priv(dev);
449 
450  spin_lock(&sl->lock);
451 
452  if (netif_queue_stopped(dev)) {
453  if (!netif_running(dev))
454  goto out;
455 
456  /* Maybe we should check the transmitter timeout here?
457  * 14 Oct 1994 Dmitry Gorodchanin.
458  */
459 #ifdef SL_CHECK_TRANSMIT
460  if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
461  /* 20 sec timeout not reached */
462  goto out;
463  }
464  printk(KERN_WARNING "%s: transmit timed out, %s?\n",
465  dev->name,
466  (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
467  "bad line quality" : "driver error");
468  sl->xleft = 0;
469  clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
470  sl_unlock(sl);
471 #endif
472  }
473 out:
474  spin_unlock(&sl->lock);
475 }
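/*
 * sl_tx_timeout() is invoked by the netdev watchdog when the queue has been
 * stopped for longer than dev->watchdog_timeo, which sl_init() below sets to
 * 20*HZ when SL_CHECK_TRANSMIT is defined.
 */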
476 
477 
478 /* Encapsulate an IP datagram and kick it into a TTY queue. */
479 static netdev_tx_t
480 sl_xmit(struct sk_buff *skb, struct net_device *dev)
481 {
482  struct slip *sl = netdev_priv(dev);
483 
484  spin_lock(&sl->lock);
485  if (!netif_running(dev)) {
486  spin_unlock(&sl->lock);
487  printk(KERN_WARNING "%s: xmit call when iface is down\n", dev->name);
488  dev_kfree_skb(skb);
489  return NETDEV_TX_OK;
490  }
491  if (sl->tty == NULL) {
492  spin_unlock(&sl->lock);
493  dev_kfree_skb(skb);
494  return NETDEV_TX_OK;
495  }
496 
497  sl_lock(sl);
498  dev->stats.tx_bytes += skb->len;
499  sl_encaps(sl, skb->data, skb->len);
500  spin_unlock(&sl->lock);
501 
502  dev_kfree_skb(skb);
503  return NETDEV_TX_OK;
504 }
505 
506 
507 /******************************************
508  * Routines looking at netdevice side.
509  ******************************************/
510 
511 /* Netdevice UP -> DOWN routine */
512 
513 static int
514 sl_close(struct net_device *dev)
515 {
516  struct slip *sl = netdev_priv(dev);
517 
518  spin_lock_bh(&sl->lock);
519  if (sl->tty)
520  /* TTY discipline is running. */
521  clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
522  netif_stop_queue(dev);
523  sl->rcount = 0;
524  sl->xleft = 0;
525  spin_unlock_bh(&sl->lock);
526 
527  return 0;
528 }
529 
530 /* Netdevice DOWN -> UP routine */
531 
532 static int sl_open(struct net_device *dev)
533 {
534  struct slip *sl = netdev_priv(dev);
535 
536  if (sl->tty == NULL)
537  return -ENODEV;
538 
539  sl->flags &= (1 << SLF_INUSE);
540  netif_start_queue(dev);
541  return 0;
542 }
543 
544 /* Netdevice change MTU request */
545 
546 static int sl_change_mtu(struct net_device *dev, int new_mtu)
547 {
548  struct slip *sl = netdev_priv(dev);
549 
550  if (new_mtu < 68 || new_mtu > 65534)
551  return -EINVAL;
552 
553  if (new_mtu != dev->mtu)
554  return sl_realloc_bufs(sl, new_mtu);
555  return 0;
556 }
557 
558 /* Netdevice get statistics request */
559 
560 static struct rtnl_link_stats64 *
561 sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
562 {
563  struct net_device_stats *devstats = &dev->stats;
564 #ifdef SL_INCLUDE_CSLIP
565  struct slip *sl = netdev_priv(dev);
566  struct slcompress *comp = sl->slcomp;
567 #endif
568  stats->rx_packets = devstats->rx_packets;
569  stats->tx_packets = devstats->tx_packets;
570  stats->rx_bytes = devstats->rx_bytes;
571  stats->tx_bytes = devstats->tx_bytes;
572  stats->rx_dropped = devstats->rx_dropped;
573  stats->tx_dropped = devstats->tx_dropped;
574  stats->tx_errors = devstats->tx_errors;
575  stats->rx_errors = devstats->rx_errors;
576  stats->rx_over_errors = devstats->rx_over_errors;
577 
578 #ifdef SL_INCLUDE_CSLIP
579  if (comp) {
580  /* Generic compressed statistics */
581  stats->rx_compressed = comp->sls_i_compressed;
582  stats->tx_compressed = comp->sls_o_compressed;
583 
584  /* Do we really still need this? */
585  stats->rx_fifo_errors += comp->sls_i_compressed;
586  stats->rx_dropped += comp->sls_i_tossed;
587  stats->tx_fifo_errors += comp->sls_o_compressed;
588  stats->collisions += comp->sls_o_misses;
589  }
590 #endif
591  return stats;
592 }
593 
594 /* Netdevice register callback */
595 
596 static int sl_init(struct net_device *dev)
597 {
598  struct slip *sl = netdev_priv(dev);
599 
600  /*
601  * Finish setting up the DEVICE info.
602  */
603 
604  dev->mtu = sl->mtu;
605  dev->type = ARPHRD_SLIP + sl->mode;
606 #ifdef SL_CHECK_TRANSMIT
607  dev->watchdog_timeo = 20*HZ;
608 #endif
609  return 0;
610 }
611 
612 
613 static void sl_uninit(struct net_device *dev)
614 {
615  struct slip *sl = netdev_priv(dev);
616 
617  sl_free_bufs(sl);
618 }
619 
620 /* Hook the destructor so we can free slip devices at the right point in time */
621 static void sl_free_netdev(struct net_device *dev)
622 {
623  int i = dev->base_addr;
624  free_netdev(dev);
625  slip_devs[i] = NULL;
626 }
627 
628 static const struct net_device_ops sl_netdev_ops = {
629  .ndo_init = sl_init,
630  .ndo_uninit = sl_uninit,
631  .ndo_open = sl_open,
632  .ndo_stop = sl_close,
633  .ndo_start_xmit = sl_xmit,
634  .ndo_get_stats64 = sl_get_stats64,
635  .ndo_change_mtu = sl_change_mtu,
636  .ndo_tx_timeout = sl_tx_timeout,
637 #ifdef CONFIG_SLIP_SMART
638  .ndo_do_ioctl = sl_ioctl,
639 #endif
640 };
641 
642 
643 static void sl_setup(struct net_device *dev)
644 {
645  dev->netdev_ops = &sl_netdev_ops;
646  dev->destructor = sl_free_netdev;
647 
648  dev->hard_header_len = 0;
649  dev->addr_len = 0;
650  dev->tx_queue_len = 10;
651 
652  /* New-style flags. */
653  dev->flags = IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST;
654 }
655 
656 /******************************************
657  Routines looking at TTY side.
658  ******************************************/
659 
660 
661 /*
662  * Handle the 'receiver data ready' interrupt.
663  * This function is called by the 'tty_io' module in the kernel when
664  * a block of SLIP data has been received, which can now be decapsulated
665  * and sent on to some IP layer for further processing. This will not
666  * be re-entered while running but other ldisc functions may be called
667  * in parallel
668  */
669 
670 static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
671  char *fp, int count)
672 {
673  struct slip *sl = tty->disc_data;
674 
675  if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
676  return;
677 
678  /* Read the characters out of the buffer */
679  while (count--) {
680  if (fp && *fp++) {
681  if (!test_and_set_bit(SLF_ERROR, &sl->flags))
682  sl->dev->stats.rx_errors++;
683  cp++;
684  continue;
685  }
686 #ifdef CONFIG_SLIP_MODE_SLIP6
687  if (sl->mode & SL_MODE_SLIP6)
688  slip_unesc6(sl, *cp++);
689  else
690 #endif
691  slip_unesc(sl, *cp++);
692  }
693 }
694 
695 /************************************
696  * slip_open helper routines.
697  ************************************/
698 
699 /* Collect hung-up channels */
700 static void sl_sync(void)
701 {
702  int i;
703  struct net_device *dev;
704  struct slip *sl;
705 
706  for (i = 0; i < slip_maxdev; i++) {
707  dev = slip_devs[i];
708  if (dev == NULL)
709  break;
710 
711  sl = netdev_priv(dev);
712  if (sl->tty || sl->leased)
713  continue;
714  if (dev->flags & IFF_UP)
715  dev_close(dev);
716  }
717 }
718 
719 
720 /* Find a free SLIP channel, and link in this `tty' line. */
721 static struct slip *sl_alloc(dev_t line)
722 {
723  int i;
724  char name[IFNAMSIZ];
725  struct net_device *dev = NULL;
726  struct slip *sl;
727 
728  for (i = 0; i < slip_maxdev; i++) {
729  dev = slip_devs[i];
730  if (dev == NULL)
731  break;
732  }
733  /* Sorry, too many, all slots in use */
734  if (i >= slip_maxdev)
735  return NULL;
736 
737  sprintf(name, "sl%d", i);
738  dev = alloc_netdev(sizeof(*sl), name, sl_setup);
739  if (!dev)
740  return NULL;
741 
742  dev->base_addr = i;
743  sl = netdev_priv(dev);
744 
745  /* Initialize channel control data */
746  sl->magic = SLIP_MAGIC;
747  sl->dev = dev;
748  spin_lock_init(&sl->lock);
749  sl->mode = SL_MODE_DEFAULT;
750 #ifdef CONFIG_SLIP_SMART
751  /* initialize timer_list struct */
752  init_timer(&sl->keepalive_timer);
753  sl->keepalive_timer.data = (unsigned long)sl;
754  sl->keepalive_timer.function = sl_keepalive;
755  init_timer(&sl->outfill_timer);
756  sl->outfill_timer.data = (unsigned long)sl;
757  sl->outfill_timer.function = sl_outfill;
758 #endif
759  slip_devs[i] = dev;
760  return sl;
761 }
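/*
 * dev->base_addr is reused here to remember this channel's index in
 * slip_devs[], so that sl_free_netdev() can clear the matching slot when the
 * device is finally destroyed.
 */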
762 
763 /*
764  * Open the high-level part of the SLIP channel.
765  * This function is called by the TTY module when the
766  * SLIP line discipline is called for. Because we are
767  * sure the tty line exists, we only have to link it to
768  * a free SLIP channel...
769  *
770  * Called in process context serialized from other ldisc calls.
771  */
772 
773 static int slip_open(struct tty_struct *tty)
774 {
775  struct slip *sl;
776  int err;
777 
778  if (!capable(CAP_NET_ADMIN))
779  return -EPERM;
780 
781  if (tty->ops->write == NULL)
782  return -EOPNOTSUPP;
783 
784  /* RTnetlink lock is misused here to serialize concurrent
785  opens of slip channels. There are better ways, but it is
786  the simplest one.
787  */
788  rtnl_lock();
789 
790  /* Collect hung-up channels. */
791  sl_sync();
792 
793  sl = tty->disc_data;
794 
795  err = -EEXIST;
796  /* First make sure we're not already connected. */
797  if (sl && sl->magic == SLIP_MAGIC)
798  goto err_exit;
799 
800  /* OK. Find a free SLIP channel to use. */
801  err = -ENFILE;
802  sl = sl_alloc(tty_devnum(tty));
803  if (sl == NULL)
804  goto err_exit;
805 
806  sl->tty = tty;
807  tty->disc_data = sl;
808  sl->pid = current->pid;
809 
810  if (!test_bit(SLF_INUSE, &sl->flags)) {
811  /* Perform the low-level SLIP initialization. */
812  err = sl_alloc_bufs(sl, SL_MTU);
813  if (err)
814  goto err_free_chan;
815 
816  set_bit(SLF_INUSE, &sl->flags);
817 
818  err = register_netdevice(sl->dev);
819  if (err)
820  goto err_free_bufs;
821  }
822 
823 #ifdef CONFIG_SLIP_SMART
824  if (sl->keepalive) {
825  sl->keepalive_timer.expires = jiffies + sl->keepalive * HZ;
826  add_timer(&sl->keepalive_timer);
827  }
828  if (sl->outfill) {
829  sl->outfill_timer.expires = jiffies + sl->outfill * HZ;
830  add_timer(&sl->outfill_timer);
831  }
832 #endif
833 
834  /* Done. We have linked the TTY line to a channel. */
835  rtnl_unlock();
836  tty->receive_room = 65536; /* We don't flow control */
837 
838  /* TTY layer expects 0 on success */
839  return 0;
840 
841 err_free_bufs:
842  sl_free_bufs(sl);
843 
844 err_free_chan:
845  sl->tty = NULL;
846  tty->disc_data = NULL;
847  clear_bit(SLF_INUSE, &sl->flags);
848 
849 err_exit:
850  rtnl_unlock();
851 
852  /* Count references from TTY module */
853  return err;
854 }
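/*
 * slip_open() runs when user space pushes the SLIP line discipline onto an
 * already opened serial device.  A minimal sketch of what a helper such as
 * slattach does, assuming /dev/ttyS0 (error handling omitted):
 *
 *	int disc = N_SLIP;
 *	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
 *	ioctl(fd, TIOCSETD, &disc);
 *
 * The TIOCSETD call ends up here and creates interface sl<n>; the channel is
 * torn down again when the descriptor is closed or the line hangs up
 * (slip_close()/slip_hangup() below).
 */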
855 
856 /*
857  * Close down a SLIP channel.
858  * This means flushing out any pending queues, and then returning. This
859  * call is serialized against other ldisc functions.
860  *
861  * We also use this method for a hangup event.
862  */
863 
864 static void slip_close(struct tty_struct *tty)
865 {
866  struct slip *sl = tty->disc_data;
867 
868  /* First make sure we're connected. */
869  if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
870  return;
871 
872  tty->disc_data = NULL;
873  sl->tty = NULL;
874 
875  /* VSV = very important to remove timers */
876 #ifdef CONFIG_SLIP_SMART
877  del_timer_sync(&sl->keepalive_timer);
878  del_timer_sync(&sl->outfill_timer);
879 #endif
880  /* Flush network side */
881  unregister_netdev(sl->dev);
882  /* This will complete via sl_free_netdev */
883 }
884 
885 static int slip_hangup(struct tty_struct *tty)
886 {
887  slip_close(tty);
888  return 0;
889 }
890  /************************************************************************
891  * STANDARD SLIP ENCAPSULATION *
892  ************************************************************************/
893 
894 static int slip_esc(unsigned char *s, unsigned char *d, int len)
895 {
896  unsigned char *ptr = d;
897  unsigned char c;
898 
899  /*
900  * Send an initial END character to flush out any
901  * data that may have accumulated in the receiver
902  * due to line noise.
903  */
904 
905  *ptr++ = END;
906 
907  /*
908  * For each byte in the packet, send the appropriate
909  * character sequence, according to the SLIP protocol.
910  */
911 
912  while (len-- > 0) {
913  switch (c = *s++) {
914  case END:
915  *ptr++ = ESC;
916  *ptr++ = ESC_END;
917  break;
918  case ESC:
919  *ptr++ = ESC;
920  *ptr++ = ESC_ESC;
921  break;
922  default:
923  *ptr++ = c;
924  break;
925  }
926  }
927  *ptr++ = END;
928  return ptr - d;
929 }
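/*
 * Worked example of the byte stuffing above, with the special byte values
 * from slip.h (RFC 1055: END = 0xC0, ESC = 0xDB, ESC_END = 0xDC,
 * ESC_ESC = 0xDD).  The three payload bytes
 *
 *	0xC0 0x01 0xDB
 *
 * go out on the wire as
 *
 *	END  ESC ESC_END  0x01  ESC ESC_ESC  END
 *
 * so slip_esc() returns len + 2 plus one extra byte for every END or ESC in
 * the input.
 */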
930 
931 static void slip_unesc(struct slip *sl, unsigned char s)
932 {
933 
934  switch (s) {
935  case END:
936 #ifdef CONFIG_SLIP_SMART
937  /* drop keeptest bit = VSV */
938  if (test_bit(SLF_KEEPTEST, &sl->flags))
939  clear_bit(SLF_KEEPTEST, &sl->flags);
940 #endif
941 
942  if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
943  (sl->rcount > 2))
944  sl_bump(sl);
945  clear_bit(SLF_ESCAPE, &sl->flags);
946  sl->rcount = 0;
947  return;
948 
949  case ESC:
950  set_bit(SLF_ESCAPE, &sl->flags);
951  return;
952  case ESC_ESC:
953  if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
954  s = ESC;
955  break;
956  case ESC_END:
957  if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
958  s = END;
959  break;
960  }
961  if (!test_bit(SLF_ERROR, &sl->flags)) {
962  if (sl->rcount < sl->buffsize) {
963  sl->rbuff[sl->rcount++] = s;
964  return;
965  }
966  sl->dev->stats.rx_over_errors++;
967  set_bit(SLF_ERROR, &sl->flags);
968  }
969 }
970 
971 
972 #ifdef CONFIG_SLIP_MODE_SLIP6
973 /************************************************************************
974  * 6 BIT SLIP ENCAPSULATION *
975  ************************************************************************/
976 
977 static int slip_esc6(unsigned char *s, unsigned char *d, int len)
978 {
979  unsigned char *ptr = d;
980  unsigned char c;
981  int i;
982  unsigned short v = 0;
983  short bits = 0;
984 
985  /*
986  * Send an initial END character to flush out any
987  * data that may have accumulated in the receiver
988  * due to line noise.
989  */
990 
991  *ptr++ = 0x70;
992 
993  /*
994  * Encode the packet into printable ascii characters
995  */
996 
997  for (i = 0; i < len; ++i) {
998  v = (v << 8) | s[i];
999  bits += 8;
1000  while (bits >= 6) {
1001  bits -= 6;
1002  c = 0x30 + ((v >> bits) & 0x3F);
1003  *ptr++ = c;
1004  }
1005  }
1006  if (bits) {
1007  c = 0x30 + ((v << (6 - bits)) & 0x3F);
1008  *ptr++ = c;
1009  }
1010  *ptr++ = 0x70;
1011  return ptr - d;
1012 }
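/*
 * In SLIP6 mode every byte on the wire stays printable: frames are delimited
 * by 0x70 instead of END, and the payload is regrouped into 6-bit chunks
 * emitted as 0x30 + chunk (0x30..0x6F).  A frame therefore grows to roughly
 * len * 8 / 6 bytes, rounded up, plus the two delimiters; slip_unesc6()
 * below reverses the transformation.
 */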
1013 
1014 static void slip_unesc6(struct slip *sl, unsigned char s)
1015 {
1016  unsigned char c;
1017 
1018  if (s == 0x70) {
1019 #ifdef CONFIG_SLIP_SMART
1020  /* drop keeptest bit = VSV */
1021  if (test_bit(SLF_KEEPTEST, &sl->flags))
1022  clear_bit(SLF_KEEPTEST, &sl->flags);
1023 #endif
1024 
1025  if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
1026  (sl->rcount > 2))
1027  sl_bump(sl);
1028  sl->rcount = 0;
1029  sl->xbits = 0;
1030  sl->xdata = 0;
1031  } else if (s >= 0x30 && s < 0x70) {
1032  sl->xdata = (sl->xdata << 6) | ((s - 0x30) & 0x3F);
1033  sl->xbits += 6;
1034  if (sl->xbits >= 8) {
1035  sl->xbits -= 8;
1036  c = (unsigned char)(sl->xdata >> sl->xbits);
1037  if (!test_bit(SLF_ERROR, &sl->flags)) {
1038  if (sl->rcount < sl->buffsize) {
1039  sl->rbuff[sl->rcount++] = c;
1040  return;
1041  }
1042  sl->dev->stats.rx_over_errors++;
1043  set_bit(SLF_ERROR, &sl->flags);
1044  }
1045  }
1046  }
1047 }
1048 #endif /* CONFIG_SLIP_MODE_SLIP6 */
1049 
1050 /* Perform I/O control on an active SLIP channel. */
1051 static int slip_ioctl(struct tty_struct *tty, struct file *file,
1052  unsigned int cmd, unsigned long arg)
1053 {
1054  struct slip *sl = tty->disc_data;
1055  unsigned int tmp;
1056  int __user *p = (int __user *)arg;
1057 
1058  /* First make sure we're connected. */
1059  if (!sl || sl->magic != SLIP_MAGIC)
1060  return -EINVAL;
1061 
1062  switch (cmd) {
1063  case SIOCGIFNAME:
1064  tmp = strlen(sl->dev->name) + 1;
1065  if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
1066  return -EFAULT;
1067  return 0;
1068 
1069  case SIOCGIFENCAP:
1070  if (put_user(sl->mode, p))
1071  return -EFAULT;
1072  return 0;
1073 
1074  case SIOCSIFENCAP:
1075  if (get_user(tmp, p))
1076  return -EFAULT;
1077 #ifndef SL_INCLUDE_CSLIP
1078  if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE))
1079  return -EINVAL;
1080 #else
1081  if ((tmp & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) ==
1082  (SL_MODE_ADAPTIVE | SL_MODE_CSLIP))
1083  /* return -EINVAL; */
1084  tmp &= ~SL_MODE_ADAPTIVE;
1085 #endif
1086 #ifndef CONFIG_SLIP_MODE_SLIP6
1087  if (tmp & SL_MODE_SLIP6)
1088  return -EINVAL;
1089 #endif
1090  sl->mode = tmp;
1091  sl->dev->type = ARPHRD_SLIP + sl->mode;
1092  return 0;
1093 
1094  case SIOCSIFHWADDR:
1095  return -EINVAL;
1096 
1097 #ifdef CONFIG_SLIP_SMART
1098  /* VSV changes start here */
1099  case SIOCSKEEPALIVE:
1100  if (get_user(tmp, p))
1101  return -EFAULT;
1102  if (tmp > 255) /* max for unchar */
1103  return -EINVAL;
1104 
1105  spin_lock_bh(&sl->lock);
1106  if (!sl->tty) {
1107  spin_unlock_bh(&sl->lock);
1108  return -ENODEV;
1109  }
1110  sl->keepalive = (u8)tmp;
1111  if (sl->keepalive != 0) {
1112  mod_timer(&sl->keepalive_timer,
1113  jiffies + sl->keepalive * HZ);
1114  set_bit(SLF_KEEPTEST, &sl->flags);
1115  } else
1116  del_timer(&sl->keepalive_timer);
1117  spin_unlock_bh(&sl->lock);
1118  return 0;
1119 
1120  case SIOCGKEEPALIVE:
1121  if (put_user(sl->keepalive, p))
1122  return -EFAULT;
1123  return 0;
1124 
1125  case SIOCSOUTFILL:
1126  if (get_user(tmp, p))
1127  return -EFAULT;
1128  if (tmp > 255) /* max for unchar */
1129  return -EINVAL;
1130  spin_lock_bh(&sl->lock);
1131  if (!sl->tty) {
1132  spin_unlock_bh(&sl->lock);
1133  return -ENODEV;
1134  }
1135  sl->outfill = (u8)tmp;
1136  if (sl->outfill != 0) {
1137  mod_timer(&sl->outfill_timer,
1138  jiffies + sl->outfill * HZ);
1139  set_bit(SLF_OUTWAIT, &sl->flags);
1140  } else
1141  del_timer(&sl->outfill_timer);
1142  spin_unlock_bh(&sl->lock);
1143  return 0;
1144 
1145  case SIOCGOUTFILL:
1146  if (put_user(sl->outfill, p))
1147  return -EFAULT;
1148  return 0;
1149  /* VSV changes end */
1150 #endif
1151  default:
1152  return tty_mode_ioctl(tty, file, cmd, arg);
1153  }
1154 }
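/*
 * SIOCGIFENCAP/SIOCSIFENCAP above exchange the SL_MODE_* bits from
 * <linux/if_slip.h>; setting them also updates dev->type, so ARPHRD_SLIP,
 * ARPHRD_CSLIP, ARPHRD_SLIP6 and friends stay in step with the selected
 * framing.
 */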
1155 
1156 #ifdef CONFIG_COMPAT
1157 static long slip_compat_ioctl(struct tty_struct *tty, struct file *file,
1158  unsigned int cmd, unsigned long arg)
1159 {
1160  switch (cmd) {
1161  case SIOCGIFNAME:
1162  case SIOCGIFENCAP:
1163  case SIOCSIFENCAP:
1164  case SIOCSIFHWADDR:
1165  case SIOCSKEEPALIVE:
1166  case SIOCGKEEPALIVE:
1167  case SIOCSOUTFILL:
1168  case SIOCGOUTFILL:
1169  return slip_ioctl(tty, file, cmd,
1170  (unsigned long)compat_ptr(arg));
1171  }
1172 
1173  return -ENOIOCTLCMD;
1174 }
1175 #endif
1176 
1177 /* VSV changes start here */
1178 #ifdef CONFIG_SLIP_SMART
1179 /* sl_ioctl is called from net/core/dev.c and
1180  allows the outfill/keepalive parameters to be read
1181  and set by ifconfig */
1182 
1183 static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1184 {
1185  struct slip *sl = netdev_priv(dev);
1186  unsigned long *p = (unsigned long *)&rq->ifr_ifru;
1187 
1188  if (sl == NULL) /* Allocation failed ?? */
1189  return -ENODEV;
1190 
1191  spin_lock_bh(&sl->lock);
1192 
1193  if (!sl->tty) {
1194  spin_unlock_bh(&sl->lock);
1195  return -ENODEV;
1196  }
1197 
1198  switch (cmd) {
1199  case SIOCSKEEPALIVE:
1200  /* max for unchar */
1201  if ((unsigned)*p > 255) {
1202  spin_unlock_bh(&sl->lock);
1203  return -EINVAL;
1204  }
1205  sl->keepalive = (u8)*p;
1206  if (sl->keepalive != 0) {
1207  sl->keepalive_timer.expires =
1208  jiffies + sl->keepalive * HZ;
1209  mod_timer(&sl->keepalive_timer,
1210  jiffies + sl->keepalive * HZ);
1211  set_bit(SLF_KEEPTEST, &sl->flags);
1212  } else
1213  del_timer(&sl->keepalive_timer);
1214  break;
1215 
1216  case SIOCGKEEPALIVE:
1217  *p = sl->keepalive;
1218  break;
1219 
1220  case SIOCSOUTFILL:
1221  if ((unsigned)*p > 255) { /* max for unchar */
1222  spin_unlock_bh(&sl->lock);
1223  return -EINVAL;
1224  }
1225  sl->outfill = (u8)*p;
1226  if (sl->outfill != 0) {
1227  mod_timer(&sl->outfill_timer,
1228  jiffies + sl->outfill * HZ);
1229  set_bit(SLF_OUTWAIT, &sl->flags);
1230  } else
1231  del_timer(&sl->outfill_timer);
1232  break;
1233 
1234  case SIOCGOUTFILL:
1235  *p = sl->outfill;
1236  break;
1237 
1238  case SIOCSLEASE:
1239  /* Resolve a race condition: the device may have been hung up
1240  and reopened by another process while we are ioctl'ing it.
1241  */
1242  if (sl->tty != current->signal->tty &&
1243  sl->pid != current->pid) {
1244  spin_unlock_bh(&sl->lock);
1245  return -EPERM;
1246  }
1247  sl->leased = 0;
1248  if (*p)
1249  sl->leased = 1;
1250  break;
1251 
1252  case SIOCGLEASE:
1253  *p = sl->leased;
1254  }
1255  spin_unlock_bh(&sl->lock);
1256  return 0;
1257 }
1258 #endif
1259 /* VSV changes end */
1260 
1261 static struct tty_ldisc_ops sl_ldisc = {
1262  .owner = THIS_MODULE,
1263  .magic = TTY_LDISC_MAGIC,
1264  .name = "slip",
1265  .open = slip_open,
1266  .close = slip_close,
1267  .hangup = slip_hangup,
1268  .ioctl = slip_ioctl,
1269 #ifdef CONFIG_COMPAT
1270  .compat_ioctl = slip_compat_ioctl,
1271 #endif
1272  .receive_buf = slip_receive_buf,
1273  .write_wakeup = slip_write_wakeup,
1274 };
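/*
 * These line-discipline ops are registered under the fixed N_SLIP number in
 * slip_init() below; the tty core then routes received data, write-room
 * wakeups and the ioctls of an attached line to this driver.
 */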
1275 
1276 static int __init slip_init(void)
1277 {
1278  int status;
1279 
1280  if (slip_maxdev < 4)
1281  slip_maxdev = 4; /* Sanity */
1282 
1283  printk(KERN_INFO "SLIP: version %s (dynamic channels, max=%d)"
1284 #ifdef CONFIG_SLIP_MODE_SLIP6
1285  " (6 bit encapsulation enabled)"
1286 #endif
1287  ".\n",
1288  SLIP_VERSION, slip_maxdev);
1289 #if defined(SL_INCLUDE_CSLIP)
1290  printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California.\n");
1291 #endif
1292 #ifdef CONFIG_SLIP_SMART
1293  printk(KERN_INFO "SLIP linefill/keepalive option.\n");
1294 #endif
1295 
1296  slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev,
1297  GFP_KERNEL);
1298  if (!slip_devs)
1299  return -ENOMEM;
1300 
1301  /* Fill in our line protocol discipline, and register it */
1302  status = tty_register_ldisc(N_SLIP, &sl_ldisc);
1303  if (status != 0) {
1304  printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
1305  kfree(slip_devs);
1306  }
1307  return status;
1308 }
1309 
1310 static void __exit slip_exit(void)
1311 {
1312  int i;
1313  struct net_device *dev;
1314  struct slip *sl;
1315  unsigned long timeout = jiffies + HZ;
1316  int busy = 0;
1317 
1318  if (slip_devs == NULL)
1319  return;
1320 
1321  /* First of all: check for active disciplines and hangup them.
1322  */
1323  do {
1324  if (busy)
1325  msleep_interruptible(100);
1326 
1327  busy = 0;
1328  for (i = 0; i < slip_maxdev; i++) {
1329  dev = slip_devs[i];
1330  if (!dev)
1331  continue;
1332  sl = netdev_priv(dev);
1333  spin_lock_bh(&sl->lock);
1334  if (sl->tty) {
1335  busy++;
1336  tty_hangup(sl->tty);
1337  }
1338  spin_unlock_bh(&sl->lock);
1339  }
1340  } while (busy && time_before(jiffies, timeout));
1341 
1342  /* FIXME: hangup is async so we should wait when doing this second
1343  phase */
1344 
1345  for (i = 0; i < slip_maxdev; i++) {
1346  dev = slip_devs[i];
1347  if (!dev)
1348  continue;
1349  slip_devs[i] = NULL;
1350 
1351  sl = netdev_priv(dev);
1352  if (sl->tty) {
1353  printk(KERN_ERR "%s: tty discipline still running\n",
1354  dev->name);
1355  /* Intentionally leak the control block. */
1356  dev->destructor = NULL;
1357  }
1358 
1359  unregister_netdev(dev);
1360  }
1361 
1362  kfree(slip_devs);
1363  slip_devs = NULL;
1364 
1365  i = tty_unregister_ldisc(N_SLIP);
1366  if (i != 0)
1367  printk(KERN_ERR "SLIP: can't unregister line discipline (err = %d)\n", i);
1368 }
1369 
1370 module_init(slip_init);
1371 module_exit(slip_exit);
1372 
1373 #ifdef CONFIG_SLIP_SMART
1374 /*
1375  * This is start of the code for multislip style line checking
1376  * added by Stanislav Voronyi. All changes before marked VSV
1377  */
1378 
1379 static void sl_outfill(unsigned long sls)
1380 {
1381  struct slip *sl = (struct slip *)sls;
1382 
1383  spin_lock(&sl->lock);
1384 
1385  if (sl->tty == NULL)
1386  goto out;
1387 
1388  if (sl->outfill) {
1389  if (test_bit(SLF_OUTWAIT, &sl->flags)) {
1390  /* no packets were transmitted, do outfill */
1391 #ifdef CONFIG_SLIP_MODE_SLIP6
1392  unsigned char s = (sl->mode & SL_MODE_SLIP6)?0x70:END;
1393 #else
1394  unsigned char s = END;
1395 #endif
1396  /* put END into tty queue. Is it right ??? */
1397  if (!netif_queue_stopped(sl->dev)) {
1398  /* if device busy no outfill */
1399  sl->tty->ops->write(sl->tty, &s, 1);
1400  }
1401  } else
1402  set_bit(SLF_OUTWAIT, &sl->flags);
1403 
1404  mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ);
1405  }
1406 out:
1407  spin_unlock(&sl->lock);
1408 }
1409 
1410 static void sl_keepalive(unsigned long sls)
1411 {
1412  struct slip *sl = (struct slip *)sls;
1413 
1414  spin_lock(&sl->lock);
1415 
1416  if (sl->tty == NULL)
1417  goto out;
1418 
1419  if (sl->keepalive) {
1420  if (test_bit(SLF_KEEPTEST, &sl->flags)) {
1421  /* keepalive still high :(, we must hangup */
1422  if (sl->outfill)
1423  /* outfill timer must be deleted too */
1424  (void)del_timer(&sl->outfill_timer);
1425  printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name);
1426  /* this must hangup tty & close slip */
1427  tty_hangup(sl->tty);
1428  /* I don't think we need anything else */
1429  goto out;
1430  } else
1431  set_bit(SLF_KEEPTEST, &sl->flags);
1432 
1433  mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ);
1434  }
1435 out:
1436  spin_unlock(&sl->lock);
1437 }
1438 
1439 #endif
1440 MODULE_LICENSE("GPL");