/*
 * ppp_synctty.c — from the Linux kernel 3.7.1 source tree
 * (Doxygen page navigation chrome removed).
 */
1 /*
2  * PPP synchronous tty channel driver for Linux.
3  *
4  * This is a ppp channel driver that can be used with tty device drivers
5  * that are frame oriented, such as synchronous HDLC devices.
6  *
7  * Complete PPP frames without encoding/decoding are exchanged between
8  * the channel driver and the device driver.
9  *
10  * The async map IOCTL codes are implemented to keep the user mode
11  * applications happy if they call them. Synchronous PPP does not use
12  * the async maps.
13  *
14  * Copyright 1999 Paul Mackerras.
15  *
16  * Also touched by the grubby hands of Paul Fulghum [email protected]
17  *
18  * This program is free software; you can redistribute it and/or
19  * modify it under the terms of the GNU General Public License
20  * as published by the Free Software Foundation; either version
21  * 2 of the License, or (at your option) any later version.
22  *
23  * This driver provides the encapsulation and framing for sending
24  * and receiving PPP frames over sync serial lines. It relies on
25  * the generic PPP layer to give it frames to send and to process
26  * received frames. It implements the PPP line discipline.
27  *
28  * Part of the code in this driver was inspired by the old async-only
29  * PPP driver, written by Michael Callahan and Al Longyear, and
30  * subsequently hacked by Paul Mackerras.
31  *
32  * ==FILEVERSION 20040616==
33  */
34 
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/skbuff.h>
38 #include <linux/tty.h>
39 #include <linux/netdevice.h>
40 #include <linux/poll.h>
41 #include <linux/ppp_defs.h>
42 #include <linux/ppp-ioctl.h>
43 #include <linux/ppp_channel.h>
44 #include <linux/spinlock.h>
45 #include <linux/completion.h>
46 #include <linux/init.h>
47 #include <linux/interrupt.h>
48 #include <linux/slab.h>
49 #include <asm/unaligned.h>
50 #include <asm/uaccess.h>
51 
52 #define PPP_VERSION "2.4.2"
53 
54 /* Structure for storing local state. */
55 struct syncppp {
56  struct tty_struct *tty;
57  unsigned int flags;
58  unsigned int rbits;
59  int mru;
62  unsigned long xmit_flags;
63  u32 xaccm[8];
65  unsigned int bytes_sent;
66  unsigned int bytes_rcvd;
67 
68  struct sk_buff *tpkt;
69  unsigned long last_xmit;
70 
72 
74 
77  struct ppp_channel chan; /* interface to generic ppp layer */
78 };
79 
80 /* Bit numbers in xmit_flags */
81 #define XMIT_WAKEUP 0
82 #define XMIT_FULL 1
83 
84 /* Bits in rbits */
85 #define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
86 
87 #define PPPSYNC_MAX_RQLEN 32 /* arbitrary */
88 
89 /*
90  * Prototypes.
91  */
92 static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
93 static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
94 static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
95  unsigned long arg);
96 static void ppp_sync_process(unsigned long arg);
97 static int ppp_sync_push(struct syncppp *ap);
98 static void ppp_sync_flush_output(struct syncppp *ap);
99 static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
100  char *flags, int count);
101 
102 static const struct ppp_channel_ops sync_ops = {
103  .start_xmit = ppp_sync_send,
104  .ioctl = ppp_sync_ioctl,
105 };
106 
107 /*
108  * Utility procedures to print a buffer in hex/ascii
109  */
/*
 * Render 'count' bytes from 'in' as hex into 'out': two hex digits
 * per byte, skipping one (caller-pre-filled) separator byte after
 * each pair.  No NUL terminator is added here.
 */
static void
ppp_print_hex (register __u8 * out, const __u8 * in, int count)
{
	register __u8 next_ch;
	static const char hex[] = "0123456789ABCDEF";

	while (count-- > 0) {
		next_ch = *in++;
		*out++ = hex[(next_ch >> 4) & 0x0F];
		*out++ = hex[next_ch & 0x0F];
		++out;		/* leave the separator byte (space) untouched */
	}
}
123 
/*
 * Render 'count' bytes from 'in' as printable ASCII into 'out'.
 * Non-printable bytes become '.'; '%' is doubled so the text is
 * safe to feed through printk/syslogd.  NUL-terminates the output.
 */
static void
ppp_print_char (register __u8 * out, const __u8 * in, int count)
{
	register __u8 next_ch;

	while (count-- > 0) {
		next_ch = *in++;

		if (next_ch < 0x20 || next_ch > 0x7e)
			*out++ = '.';
		else {
			*out++ = next_ch;
			if (next_ch == '%')	/* printk/syslogd has a bug !! */
				*out++ = '%';
		}
	}
	*out = '\0';
}
142 
/*
 * Dump a buffer to the kernel log as hex + ASCII, 8 bytes per line.
 * Used by the SC_LOG_INPKT / SC_LOG_OUTPKT debug options.
 */
static void
ppp_print_buffer (const char *name, const __u8 *buf, int count)
{
	__u8 line[44];	/* 8*3 hex columns + ASCII dump + NUL */

	if (name != NULL)
		printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);

	while (count > 8) {
		memset (line, 32, 44);	/* pre-fill with spaces */
		ppp_print_hex (line, buf, 8);
		ppp_print_char (&line[8 * 3], buf, 8);
		printk(KERN_DEBUG "%s\n", line);
		count -= 8;
		buf += 8;
	}

	/* final partial line, if any */
	if (count > 0) {
		memset (line, 32, 44);
		ppp_print_hex (line, buf, count);
		ppp_print_char (&line[8 * 3], buf, count);
		printk(KERN_DEBUG "%s\n", line);
	}
}
167 
168 
169 /*
170  * Routines implementing the synchronous PPP line discipline.
171  */
172 
173 /*
174  * We have a potential race on dereferencing tty->disc_data,
175  * because the tty layer provides no locking at all - thus one
176  * cpu could be running ppp_synctty_receive while another
177  * calls ppp_synctty_close, which zeroes tty->disc_data and
178  * frees the memory that ppp_synctty_receive is using. The best
179  * way to fix this is to use a rwlock in the tty struct, but for now
180  * we use a single global rwlock for all ttys in ppp line discipline.
181  *
182  * FIXME: Fixed in tty_io nowadays.
183  */
184 static DEFINE_RWLOCK(disc_data_lock);
185 
/*
 * Look up the syncppp state attached to a tty and take a reference
 * on it.  Returns NULL if the tty is not (or no longer) in sync-PPP
 * line discipline.  Callers must balance with sp_put().
 */
static struct syncppp *sp_get(struct tty_struct *tty)
{
	struct syncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		atomic_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}
197 
/*
 * Drop a reference taken by sp_get().  The last dropper signals
 * dead_cmp so that a closer waiting for all users can proceed to
 * free the structure.
 */
static void sp_put(struct syncppp *ap)
{
	if (atomic_dec_and_test(&ap->refcnt))
		complete(&ap->dead_cmp);
}
203 
204 /*
205  * Called when a tty is put into sync-PPP line discipline.
206  */
207 static int
208 ppp_sync_open(struct tty_struct *tty)
209 {
210  struct syncppp *ap;
211  int err;
212  int speed;
213 
214  if (tty->ops->write == NULL)
215  return -EOPNOTSUPP;
216 
217  ap = kzalloc(sizeof(*ap), GFP_KERNEL);
218  err = -ENOMEM;
219  if (!ap)
220  goto out;
221 
222  /* initialize the syncppp structure */
223  ap->tty = tty;
224  ap->mru = PPP_MRU;
227  ap->xaccm[0] = ~0U;
228  ap->xaccm[3] = 0x60000000U;
229  ap->raccm = ~0U;
230 
231  skb_queue_head_init(&ap->rqueue);
232  tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
233 
234  atomic_set(&ap->refcnt, 1);
235  init_completion(&ap->dead_cmp);
236 
237  ap->chan.private = ap;
238  ap->chan.ops = &sync_ops;
239  ap->chan.mtu = PPP_MRU;
240  ap->chan.hdrlen = 2; /* for A/C bytes */
241  speed = tty_get_baud_rate(tty);
242  ap->chan.speed = speed;
243  err = ppp_register_channel(&ap->chan);
244  if (err)
245  goto out_free;
246 
247  tty->disc_data = ap;
248  tty->receive_room = 65536;
249  return 0;
250 
251  out_free:
252  kfree(ap);
253  out:
254  return err;
255 }
256 
257 /*
258  * Called when the tty is put into another line discipline
259  * or it hangs up. We have to wait for any cpu currently
260  * executing in any of the other ppp_synctty_* routines to
261  * finish before we can call ppp_unregister_channel and free
262  * the syncppp struct. This routine must be called from
263  * process context, not interrupt or softirq context.
264  */
265 static void
266 ppp_sync_close(struct tty_struct *tty)
267 {
268  struct syncppp *ap;
269 
270  write_lock_irq(&disc_data_lock);
271  ap = tty->disc_data;
272  tty->disc_data = NULL;
273  write_unlock_irq(&disc_data_lock);
274  if (!ap)
275  return;
276 
277  /*
278  * We have now ensured that nobody can start using ap from now
279  * on, but we have to wait for all existing users to finish.
280  * Note that ppp_unregister_channel ensures that no calls to
281  * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
282  * by the time it returns.
283  */
284  if (!atomic_dec_and_test(&ap->refcnt))
286  tasklet_kill(&ap->tsk);
287 
289  skb_queue_purge(&ap->rqueue);
290  kfree_skb(ap->tpkt);
291  kfree(ap);
292 }
293 
294 /*
295  * Called on tty hangup in process context.
296  *
297  * Wait for I/O to driver to complete and unregister PPP channel.
298  * This is already done by the close routine, so just call that.
299  */
static int ppp_sync_hangup(struct tty_struct *tty)
{
	/* hangup tears everything down exactly like a close */
	ppp_sync_close(tty);
	return 0;
}
305 
306 /*
307  * Read does nothing - no data is ever available this way.
308  * Pppd reads and writes packets via /dev/ppp instead.
309  */
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file,
	      unsigned char __user *buf, size_t count)
{
	/* no data ever flows through the tty itself; pppd uses /dev/ppp */
	return -EAGAIN;
}
316 
317 /*
318  * Write on the tty does nothing, the packets all come in
319  * from the ppp generic stuff.
320  */
static ssize_t
ppp_sync_write(struct tty_struct *tty, struct file *file,
	       const unsigned char *buf, size_t count)
{
	/* writes go through the generic PPP layer, never the tty */
	return -EAGAIN;
}
327 
/*
 * tty-level ioctl handler.  Serves the channel/unit queries, TCFLSH
 * flushing and FIONREAD; everything else falls through to
 * tty_mode_ioctl().  Returns -ENXIO once the discipline is detached.
 */
static int
ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
		  unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = sp_get(tty);
	int __user *p = (int __user *)arg;
	int err, val;

	if (!ap)
		return -ENXIO;
	err = -EFAULT;		/* default for failed user-space copies */
	switch (cmd) {
	case PPPIOCGCHAN:
		err = -EFAULT;
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		err = -EFAULT;
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_sync_flush_output(ap);
		err = tty_perform_flush(tty, arg);
		break;

	case FIONREAD:
		/* nothing is ever readable here, so always report zero */
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		err = tty_mode_ioctl(tty, file, cmd, arg);
		break;
	}

	sp_put(ap);
	return err;
}
376 
377 /* No kernel lock - fine */
static unsigned int
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	/* nothing to wait for on the tty; traffic flows via /dev/ppp */
	return 0;
}
383 
384 /* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
		 char *cflags, int count)
{
	struct syncppp *ap = sp_get(tty);
	unsigned long flags;

	if (!ap)
		return;
	/* parse the frame under recv_lock, then hand any queued
	   packets to the tasklet for delivery to ppp_generic */
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_sync_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	sp_put(ap);
	tty_unthrottle(tty);
}
402 
403 static void
404 ppp_sync_wakeup(struct tty_struct *tty)
405 {
406  struct syncppp *ap = sp_get(tty);
407 
409  if (!ap)
410  return;
412  tasklet_schedule(&ap->tsk);
413  sp_put(ap);
414 }
415 
416 
/* tty line-discipline entry points for N_SYNC_PPP */
static struct tty_ldisc_ops ppp_sync_ldisc = {
	.owner	= THIS_MODULE,
	.magic	= TTY_LDISC_MAGIC,
	.name	= "pppsync",
	.open	= ppp_sync_open,
	.close	= ppp_sync_close,
	.hangup	= ppp_sync_hangup,
	.read	= ppp_sync_read,
	.write	= ppp_sync_write,
	.ioctl	= ppp_synctty_ioctl,
	.poll	= ppp_sync_poll,
	.receive_buf = ppp_sync_receive,
	.write_wakeup = ppp_sync_wakeup,
};
431 
/*
 * Module init: register the sync-PPP tty line discipline.
 * Returns the tty_register_ldisc() result (0 on success).
 */
static int __init
ppp_sync_init(void)
{
	int err;

	err = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);
	if (err != 0)
		printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
		       err);
	return err;
}
443 
444 /*
445  * The following routines provide the PPP channel interface.
446  */
/*
 * Channel-level ioctl handler, called via sync_ops by the generic
 * PPP layer.  Implements the flag, async-map and MRU get/set
 * operations.  The async maps are stored but otherwise unused:
 * synchronous PPP does no byte escaping (they exist to keep
 * user-space pppd happy, per the file header).
 */
static int
ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = chan->private;
	int err, val;
	u32 accm[8];
	void __user *argp = (void __user *)arg;
	u32 __user *p = argp;

	err = -EFAULT;		/* default for failed user-space copies */
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, (int __user *) argp))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		/* receive-side bits are read under recv_lock */
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, p))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, p))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, (int __user *) argp))
			break;
		/* never accept an MRU below the PPP default */
		if (val < PPP_MRU)
			val = PPP_MRU;
		ap->mru = val;
		err = 0;
		break;

	default:
		err = -ENOTTY;
	}
	return err;
}
529 
530 /*
531  * This is called at softirq level to deliver received packets
532  * to the ppp_generic code, and to tell the ppp_generic code
533  * if we can accept more output now.
534  */
/* Tasklet body: arg is the syncppp pointer cast to unsigned long. */
static void ppp_sync_process(unsigned long arg)
{
	struct syncppp *ap = (struct syncppp *) arg;
	struct sk_buff *skb;

	/* process received packets */
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
		if (skb->len == 0) {
			/* zero length buffers indicate error */
			ppp_input_error(&ap->chan, 0);
			kfree_skb(skb);
		}
		else
			ppp_input(&ap->chan, skb);
	}

	/* try to push more stuff out */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
		ppp_output_wakeup(&ap->chan);
}
555 
556 /*
557  * Procedures for encapsulation and framing.
558  */
559 
/*
 * Prepare an outgoing frame: optionally drop the leading zero of
 * the protocol field (SC_COMP_PROT), and prepend the address/control
 * bytes unless A/C compression is on.  LCP packets with codes 1..7
 * are always sent uncompressed.  Consumes the passed skb; returns
 * the (possibly reallocated) skb, or NULL on allocation failure.
 */
static struct sk_buff*
ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
{
	int proto;
	unsigned char *data;
	int islcp;

	data = skb->data;
	proto = get_unaligned_be16(data);

	/* LCP packets with codes between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * have been negotiated.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field if option enabled */
	if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
		skb_pull(skb, 1);

	/* prepend address/control fields if necessary */
	if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
		if (skb_headroom(skb) < 2) {
			/* not enough headroom: copy into a fresh skb */
			struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
			if (npkt == NULL) {
				kfree_skb(skb);
				return NULL;
			}
			skb_reserve(npkt, 2);
			skb_copy_from_linear_data(skb,
				skb_put(npkt, skb->len), skb->len);
			consume_skb(skb);
			skb = npkt;
		}
		skb_push(skb, 2);
		skb->data[0] = PPP_ALLSTATIONS;
		skb->data[1] = PPP_UI;
	}

	ap->last_xmit = jiffies;

	if (skb && ap->flags & SC_LOG_OUTPKT)
		ppp_print_buffer ("send buffer", skb->data, skb->len);

	return skb;
}
606 
607 /*
608  * Transmit-side routines.
609  */
610 
611 /*
612  * Send a packet to the peer over an sync tty line.
613  * Returns 1 iff the packet was accepted.
614  * If the packet was not accepted, we will call ppp_output_wakeup
615  * at some later time.
616  */
617 static int
618 ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
619 {
620  struct syncppp *ap = chan->private;
621 
622  ppp_sync_push(ap);
623 
625  return 0; /* already full */
626  skb = ppp_sync_txmunge(ap, skb);
627  if (skb != NULL)
628  ap->tpkt = skb;
629  else
631 
632  ppp_sync_push(ap);
633  return 1;
634 }
635 
636 /*
637  * Push as much data as possible out to the tty.
638  */
639 static int
640 ppp_sync_push(struct syncppp *ap)
641 {
642  int sent, done = 0;
643  struct tty_struct *tty = ap->tty;
644  int tty_stuffed = 0;
645 
646  if (!spin_trylock_bh(&ap->xmit_lock))
647  return 0;
648  for (;;) {
650  tty_stuffed = 0;
651  if (!tty_stuffed && ap->tpkt) {
653  sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
654  if (sent < 0)
655  goto flush; /* error, e.g. loss of CD */
656  if (sent < ap->tpkt->len) {
657  tty_stuffed = 1;
658  } else {
659  consume_skb(ap->tpkt);
660  ap->tpkt = NULL;
662  done = 1;
663  }
664  continue;
665  }
666  /* haven't made any progress */
667  spin_unlock_bh(&ap->xmit_lock);
668  if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
669  (!tty_stuffed && ap->tpkt)))
670  break;
671  if (!spin_trylock_bh(&ap->xmit_lock))
672  break;
673  }
674  return done;
675 
676 flush:
677  if (ap->tpkt) {
678  kfree_skb(ap->tpkt);
679  ap->tpkt = NULL;
681  done = 1;
682  }
683  spin_unlock_bh(&ap->xmit_lock);
684  return done;
685 }
686 
687 /*
688  * Flush output from our internal buffers.
689  * Called for the TCFLSH ioctl.
690  */
691 static void
692 ppp_sync_flush_output(struct syncppp *ap)
693 {
694  int done = 0;
695 
696  spin_lock_bh(&ap->xmit_lock);
697  if (ap->tpkt != NULL) {
698  kfree_skb(ap->tpkt);
699  ap->tpkt = NULL;
701  done = 1;
702  }
703  spin_unlock_bh(&ap->xmit_lock);
704  if (done)
705  ppp_output_wakeup(&ap->chan);
706 }
707 
708 /*
709  * Receive-side routines.
710  */
711 
712 /* called when the tty driver has data for us.
713  *
714  * Data is frame oriented: each call to ppp_sync_input is considered
715  * a whole frame. If the 1st flag byte is non-zero then the whole
716  * frame is considered to be in error and is tossed.
717  */
static void
ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
	       char *flags, int count)
{
	struct sk_buff *skb;
	unsigned char *p;

	if (count == 0)
		return;

	if (ap->flags & SC_LOG_INPKT)
		ppp_print_buffer ("receive buffer", buf, count);

	/* stuff the chars in the skb */
	skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
	if (!skb) {
		printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
		goto err;
	}
	/* Try to get the payload 4-byte aligned */
	if (buf[0] != PPP_ALLSTATIONS)
		skb_reserve(skb, 2 + (buf[0] & 1));

	if (flags && *flags) {
		/* error flag set, ignore frame */
		goto err;
	} else if (count > skb_tailroom(skb)) {
		/* packet overflowed MRU */
		goto err;
	}

	p = skb_put(skb, count);
	memcpy(p, buf, count);

	/* strip address/control field if present */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
		/* chop off address/control */
		if (skb->len < 3)
			goto err;	/* nothing left after A/C */
		p = skb_pull(skb, 2);
	}

	/* decompress protocol field if compressed */
	if (p[0] & 1) {
		/* protocol is compressed; re-insert the leading zero */
		skb_push(skb, 1)[0] = 0;
	} else if (skb->len < 2)
		goto err;	/* too short for a full protocol field */

	/* queue the frame to be processed */
	skb_queue_tail(&ap->rqueue, skb);
	return;

err:
	/* queue zero length packet as error indication */
	if (skb || (skb = dev_alloc_skb(0))) {
		skb_trim(skb, 0);
		skb_queue_tail(&ap->rqueue, skb);
	}
}
779 
780 static void __exit
781 ppp_sync_cleanup(void)
782 {
784  printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
785 }
786 
787 module_init(ppp_sync_init);
788 module_exit(ppp_sync_cleanup);
789 MODULE_LICENSE("GPL");