Linux Kernel 3.7.1
u_serial.c
1 /*
2  * u_serial.c - utilities for USB gadget "serial port"/TTY support
3  *
4  * Copyright (C) 2003 Al Borchers ([email protected])
5  * Copyright (C) 2008 David Brownell
6  * Copyright (C) 2008 by Nokia Corporation
7  *
8  * This code also borrows from usbserial.c, which is
9  * Copyright (C) 1999 - 2002 Greg Kroah-Hartman ([email protected])
10  * Copyright (C) 2000 Peter Berger ([email protected])
11  * Copyright (C) 2000 Al Borchers ([email protected])
12  *
13  * This software is distributed under the terms of the GNU General
14  * Public License ("GPL") as published by the Free Software Foundation,
15  * either version 2 of that License or (at your option) any later version.
16  */
17 
18 /* #define VERBOSE_DEBUG */
19 
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/delay.h>
25 #include <linux/tty.h>
26 #include <linux/tty_flip.h>
27 #include <linux/slab.h>
28 #include <linux/export.h>
29 
30 #include "u_serial.h"
31 
32 
33 /*
34  * This component encapsulates the TTY layer glue needed to provide basic
35  * "serial port" functionality through the USB gadget stack. Each such
36  * port is exposed through a /dev/ttyGS* node.
37  *
38  * After initialization (gserial_setup), these TTY port devices stay
39  * available until they are removed (gserial_cleanup). Each one may be
40  * connected to a USB function (gserial_connect), or disconnected (with
41  * gserial_disconnect) when the USB host issues a config change event.
42  * Data can only flow when the port is connected to the host.
43  *
44  * A given TTY port can be made available in multiple configurations.
45  * For example, each one might expose a ttyGS0 node which provides a
46  * login application. In one case that might use CDC ACM interface 0,
47  * while another configuration might use interface 3 for that. The
48  * work to handle that (including descriptor management) is not part
49  * of this component.
50  *
51  * Configurations may expose more than one TTY port. For example, if
52  * ttyGS0 provides login service, then ttyGS1 might provide dialer access
53  * for a telephone or fax link. And ttyGS2 might be something that just
54  * needs a simple byte stream interface for some messaging protocol that
55  * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
56  */
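/* A rough sketch of the lifecycle described above, as a gadget driver
 * might drive it; my_bind()/my_unbind() are illustrative names, and only
 * the gserial_setup()/gserial_cleanup() calls and signatures come from
 * this file (gserial_connect()/gserial_disconnect() appear near the end).
 */
static int my_bind(struct usb_gadget *gadget)
{
	/* create one /dev/ttyGS0 node, available until my_unbind() */
	return gserial_setup(gadget, 1);
}

static void my_unbind(struct usb_gadget *gadget)
{
	/* any port still connected must be gserial_disconnect()ed first */
	gserial_cleanup();
}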
57 
58 #define PREFIX "ttyGS"
59 
60 /*
61  * gserial is the lifecycle interface, used by USB functions
62  * gs_port is the I/O nexus, used by the tty driver
63  * tty_struct links to the tty/filesystem framework
64  *
65  * gserial <---> gs_port ... links will be null when the USB link is
66  * inactive; managed by gserial_{connect,disconnect}(). each gserial
67  * instance can wrap its own USB control protocol.
68  * gserial->ioport == usb_ep->driver_data ... gs_port
69  * gs_port->port_usb ... gserial
70  *
71  * gs_port <---> tty_struct ... links will be null when the TTY file
72  * isn't opened; managed by gs_open()/gs_close()
73  * gs_port->port.tty ... tty_struct
74  * tty_struct->driver_data ... gs_port
75  */
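/* For reference, the gserial end of those links looks roughly like this
 * (abridged to the fields this file dereferences; u_serial.h has the
 * authoritative definition, including the embedded struct usb_function):
 *
 *	struct gserial {
 *		...
 *		struct gs_port *ioport;		// set/cleared by gserial_{connect,disconnect}()
 *		struct usb_ep *in, *out;	// bulk endpoints owned by the USB function
 *		struct usb_cdc_line_coding port_line_coding;
 *		// optional notifications into the function driver (CDC ACM etc)
 *		void (*connect)(struct gserial *p);
 *		void (*disconnect)(struct gserial *p);
 *		int (*send_break)(struct gserial *p, int duration);
 *	};
 */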
76 
77 /* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
78  * next layer of buffering. For TX that's a circular buffer; for RX
79  * consider it a NOP. A third layer is provided by the TTY code.
80  */
81 #define QUEUE_SIZE 16
82 #define WRITE_BUF_SIZE 8192 /* TX only */
83 
84 /* circular buffer */
85 struct gs_buf {
86  unsigned buf_size;
87  char *buf_buf;
88  char *buf_get;
89  char *buf_put;
90 };
91 
92 /*
93  * The port structure holds info for each port, one for each minor number
94  * (and thus for each /dev/ node).
95  */
96 struct gs_port {
97  struct tty_port port;
98  spinlock_t port_lock; /* guard port_* access */
99 
100  struct gserial *port_usb;
101 
102  bool openclose; /* open/close in progress */
103  u8 port_num;
104 
105  struct list_head read_pool;
106  int read_started;
107  int read_allocated;
108  struct list_head read_queue;
109  unsigned n_read;
110  struct tasklet_struct push;
111 
112  struct list_head write_pool;
113  int write_started;
114  int write_allocated;
115  struct gs_buf port_write_buf;
116  wait_queue_head_t drain_wait; /* wait while writes drain */
117 
118  /* REVISIT this state ... */
119  struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
120 };
121 
122 /* increase N_PORTS if you need more */
123 #define N_PORTS 4
124 static struct portmaster {
125  struct mutex lock; /* protect open/close */
126  struct gs_port *port;
127 } ports[N_PORTS];
128 static unsigned n_ports;
129 
130 #define GS_CLOSE_TIMEOUT 15 /* seconds */
131 
132 
133 
134 #ifdef VERBOSE_DEBUG
135 #ifndef pr_vdebug
136 #define pr_vdebug(fmt, arg...) \
137  pr_debug(fmt, ##arg)
138 #endif /* pr_vdebug */
139 #else
140 #ifndef pr_vdebug
141 #define pr_vdebug(fmt, arg...) \
142  ({ if (0) pr_debug(fmt, ##arg); })
143 #endif /* pr_vdebug */
144 #endif
145 
146 /*-------------------------------------------------------------------------*/
147 
148 /* Circular Buffer */
149 
150 /*
151  * gs_buf_alloc
152  *
153  * Allocate a circular buffer and all associated memory.
154  */
155 static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
156 {
157  gb->buf_buf = kmalloc(size, GFP_KERNEL);
158  if (gb->buf_buf == NULL)
159  return -ENOMEM;
160 
161  gb->buf_size = size;
162  gb->buf_put = gb->buf_buf;
163  gb->buf_get = gb->buf_buf;
164 
165  return 0;
166 }
167 
168 /*
169  * gs_buf_free
170  *
171  * Free the buffer and all associated memory.
172  */
173 static void gs_buf_free(struct gs_buf *gb)
174 {
175  kfree(gb->buf_buf);
176  gb->buf_buf = NULL;
177 }
178 
179 /*
180  * gs_buf_clear
181  *
182  * Clear out all data in the circular buffer.
183  */
184 static void gs_buf_clear(struct gs_buf *gb)
185 {
186  gb->buf_get = gb->buf_put;
187  /* equivalent to a get of all data available */
188 }
189 
190 /*
191  * gs_buf_data_avail
192  *
193  * Return the number of bytes of data written into the circular
194  * buffer.
195  */
196 static unsigned gs_buf_data_avail(struct gs_buf *gb)
197 {
198  return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
199 }
200 
201 /*
202  * gs_buf_space_avail
203  *
204  * Return the number of bytes of space available in the circular
205  * buffer.
206  */
207 static unsigned gs_buf_space_avail(struct gs_buf *gb)
208 {
209  return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
210 }
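/* Worked example of the two formulas above: with buf_size == 8, buf_put
 * 5 bytes past buf_buf and buf_get 2 bytes past it,
 *	data  = (8 + 5 - 2) % 8 = 3 bytes waiting to be read,
 *	space = (8 + 2 - 5 - 1) % 8 = 4 bytes still writable;
 * data + space == buf_size - 1 because one slot always stays empty, so
 * buf_put == buf_get can only ever mean "empty", never "full".
 */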
211 
212 /*
213  * gs_buf_put
214  *
215  * Copy data from the given buffer and put it into the circular buffer.
216  * Restrict to the amount of space available.
217  *
218  * Return the number of bytes copied.
219  */
220 static unsigned
221 gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
222 {
223  unsigned len;
224 
225  len = gs_buf_space_avail(gb);
226  if (count > len)
227  count = len;
228 
229  if (count == 0)
230  return 0;
231 
232  len = gb->buf_buf + gb->buf_size - gb->buf_put;
233  if (count > len) {
234  memcpy(gb->buf_put, buf, len);
235  memcpy(gb->buf_buf, buf+len, count - len);
236  gb->buf_put = gb->buf_buf + count - len;
237  } else {
238  memcpy(gb->buf_put, buf, count);
239  if (count < len)
240  gb->buf_put += count;
241  else /* count == len */
242  gb->buf_put = gb->buf_buf;
243  }
244 
245  return count;
246 }
247 
248 /*
249  * gs_buf_get
250  *
251  * Get data from the circular buffer and copy to the given buffer.
252  * Restrict to the amount of data available.
253  *
254  * Return the number of bytes copied.
255  */
256 static unsigned
257 gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
258 {
259  unsigned len;
260 
261  len = gs_buf_data_avail(gb);
262  if (count > len)
263  count = len;
264 
265  if (count == 0)
266  return 0;
267 
268  len = gb->buf_buf + gb->buf_size - gb->buf_get;
269  if (count > len) {
270  memcpy(buf, gb->buf_get, len);
271  memcpy(buf+len, gb->buf_buf, count - len);
272  gb->buf_get = gb->buf_buf + count - len;
273  } else {
274  memcpy(buf, gb->buf_get, count);
275  if (count < len)
276  gb->buf_get += count;
277  else /* count == len */
278  gb->buf_get = gb->buf_buf;
279  }
280 
281  return count;
282 }
283 
284 /*-------------------------------------------------------------------------*/
285 
286 /* I/O glue between TTY (upper) and USB function (lower) driver layers */
287 
288 /*
289  * gs_alloc_req
290  *
291  * Allocate a usb_request and its buffer. Returns a pointer to the
292  * usb_request or NULL if there is an error.
293  */
294 struct usb_request *
295 gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
296 {
297  struct usb_request *req;
298 
299  req = usb_ep_alloc_request(ep, kmalloc_flags);
300 
301  if (req != NULL) {
302  req->length = len;
303  req->buf = kmalloc(len, kmalloc_flags);
304  if (req->buf == NULL) {
305  usb_ep_free_request(ep, req);
306  return NULL;
307  }
308  }
309 
310  return req;
311 }
312 
313 /*
314  * gs_free_req
315  *
316  * Free a usb_request and its buffer.
317  */
318 void gs_free_req(struct usb_ep *ep, struct usb_request *req)
319 {
320  kfree(req->buf);
321  usb_ep_free_request(ep, req);
322 }
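/* A minimal sketch of how a USB function might use the two helpers above
 * for an endpoint of its own (e.g. a CDC ACM notification endpoint);
 * my_queue_notification(), my_notify_complete() and the 64-byte length
 * are illustrative, not part of this API:
 */
static void my_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* ... handle the completed transfer, requeue or gs_free_req() ... */
}

static int my_queue_notification(struct usb_ep *ep)
{
	struct usb_request *req;
	int status;

	req = gs_alloc_req(ep, 64, GFP_ATOMIC);	/* request plus 64-byte buffer */
	if (!req)
		return -ENOMEM;
	req->complete = my_notify_complete;
	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status)
		gs_free_req(ep, req);		/* undo both allocations on failure */
	return status;
}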
323 
324 /*
325  * gs_send_packet
326  *
327  * If there is data to send, a packet is built in the given
328  * buffer and the size is returned. If there is no data to
329  * send, 0 is returned.
330  *
331  * Called with port_lock held.
332  */
333 static unsigned
334 gs_send_packet(struct gs_port *port, char *packet, unsigned size)
335 {
336  unsigned len;
337 
338  len = gs_buf_data_avail(&port->port_write_buf);
339  if (len < size)
340  size = len;
341  if (size != 0)
342  size = gs_buf_get(&port->port_write_buf, packet, size);
343  return size;
344 }
345 
346 /*
347  * gs_start_tx
348  *
349  * This function finds available write requests, calls
350  * gs_send_packet to fill these packets with data, and
351  * continues until either there are no more write requests
352  * available or no more data to send. This function is
353  * run whenever data arrives or write requests are available.
354  *
355  * Context: caller owns port_lock; port_usb is non-null.
356  */
357 static int gs_start_tx(struct gs_port *port)
358 /*
359 __releases(&port->port_lock)
360 __acquires(&port->port_lock)
361 */
362 {
363  struct list_head *pool = &port->write_pool;
364  struct usb_ep *in = port->port_usb->in;
365  int status = 0;
366  bool do_tty_wake = false;
367 
368  while (!list_empty(pool)) {
369  struct usb_request *req;
370  int len;
371 
372  if (port->write_started >= QUEUE_SIZE)
373  break;
374 
375  req = list_entry(pool->next, struct usb_request, list);
376  len = gs_send_packet(port, req->buf, in->maxpacket);
377  if (len == 0) {
378  wake_up_interruptible(&port->drain_wait);
379  break;
380  }
381  do_tty_wake = true;
382 
383  req->length = len;
384  list_del(&req->list);
385  req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
386 
387  pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
388  port->port_num, len, *((u8 *)req->buf),
389  *((u8 *)req->buf+1), *((u8 *)req->buf+2));
390 
391  /* Drop lock while we call out of driver; completions
392  * could be issued while we do so. Disconnection may
393  * happen too; maybe immediately before we queue this!
394  *
395  * NOTE that we may keep sending data for a while after
396  * the TTY closed (port->port.tty is NULL).
397  */
398  spin_unlock(&port->port_lock);
399  status = usb_ep_queue(in, req, GFP_ATOMIC);
400  spin_lock(&port->port_lock);
401 
402  if (status) {
403  pr_debug("%s: %s %s err %d\n",
404  __func__, "queue", in->name, status);
405  list_add(&req->list, pool);
406  break;
407  }
408 
409  port->write_started++;
410 
411  /* abort immediately after disconnect */
412  if (!port->port_usb)
413  break;
414  }
415 
416  if (do_tty_wake && port->port.tty)
417  tty_wakeup(port->port.tty);
418  return status;
419 }
420 
421 /*
422  * Context: caller owns port_lock, and port_usb is set
423  */
424 static unsigned gs_start_rx(struct gs_port *port)
425 /*
426 __releases(&port->port_lock)
427 __acquires(&port->port_lock)
428 */
429 {
430  struct list_head *pool = &port->read_pool;
431  struct usb_ep *out = port->port_usb->out;
432 
433  while (!list_empty(pool)) {
434  struct usb_request *req;
435  int status;
436  struct tty_struct *tty;
437 
438  /* no more rx if closed */
439  tty = port->port.tty;
440  if (!tty)
441  break;
442 
443  if (port->read_started >= QUEUE_SIZE)
444  break;
445 
446  req = list_entry(pool->next, struct usb_request, list);
447  list_del(&req->list);
448  req->length = out->maxpacket;
449 
450  /* drop lock while we call out; the controller driver
451  * may need to call us back (e.g. for disconnect)
452  */
453  spin_unlock(&port->port_lock);
454  status = usb_ep_queue(out, req, GFP_ATOMIC);
455  spin_lock(&port->port_lock);
456 
457  if (status) {
458  pr_debug("%s: %s %s err %d\n",
459  __func__, "queue", out->name, status);
460  list_add(&req->list, pool);
461  break;
462  }
463  port->read_started++;
464 
465  /* abort immediately after disconnect */
466  if (!port->port_usb)
467  break;
468  }
469  return port->read_started;
470 }
471 
472 /*
473  * RX tasklet takes data out of the RX queue and hands it up to the TTY
474  * layer until it refuses to take any more data (or is throttled back).
475  * Then it issues reads for any further data.
476  *
477  * If the RX queue becomes full enough that no usb_request is queued,
478  * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
479  * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
480  * can be buffered before the TTY layer's buffers (currently 64 KB).
481  */
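/* For scale, assuming a high-speed bulk OUT endpoint (512-byte packets):
 * QUEUE_SIZE (16) queued requests hold 16 * 512 = 8192 bytes, plus about
 * two more packets in the endpoint FIFO, so roughly 9 KB can pile up
 * below the TTY layer's ~64 KB before the host starts seeing NAKs.
 */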
482 static void gs_rx_push(unsigned long _port)
483 {
484  struct gs_port *port = (void *)_port;
485  struct tty_struct *tty;
486  struct list_head *queue = &port->read_queue;
487  bool disconnect = false;
488  bool do_push = false;
489 
490  /* hand any queued data to the tty */
491  spin_lock_irq(&port->port_lock);
492  tty = port->port.tty;
493  while (!list_empty(queue)) {
494  struct usb_request *req;
495 
496  req = list_first_entry(queue, struct usb_request, list);
497 
498  /* discard data if tty was closed */
499  if (!tty)
500  goto recycle;
501 
502  /* leave data queued if tty was rx throttled */
503  if (test_bit(TTY_THROTTLED, &tty->flags))
504  break;
505 
506  switch (req->status) {
507  case -ESHUTDOWN:
508  disconnect = true;
509  pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
510  break;
511 
512  default:
513  /* presumably a transient fault */
514  pr_warning(PREFIX "%d: unexpected RX status %d\n",
515  port->port_num, req->status);
516  /* FALLTHROUGH */
517  case 0:
518  /* normal completion */
519  break;
520  }
521 
522  /* push data to (open) tty */
523  if (req->actual) {
524  char *packet = req->buf;
525  unsigned size = req->actual;
526  unsigned n;
527  int count;
528 
529  /* we may have pushed part of this packet already... */
530  n = port->n_read;
531  if (n) {
532  packet += n;
533  size -= n;
534  }
535 
536  count = tty_insert_flip_string(tty, packet, size);
537  if (count)
538  do_push = true;
539  if (count != size) {
540  /* stop pushing; TTY layer can't handle more */
541  port->n_read += count;
542  pr_vdebug(PREFIX "%d: rx block %d/%d\n",
543  port->port_num,
544  count, req->actual);
545  break;
546  }
547  port->n_read = 0;
548  }
549 recycle:
550  list_move(&req->list, &port->read_pool);
551  port->read_started--;
552  }
553 
554  /* Push from tty to ldisc; without low_latency set this is handled by
555  * a workqueue, so we won't get callbacks and can hold port_lock
556  */
557  if (tty && do_push)
558  tty_flip_buffer_push(tty);
559 
560 
561  /* We want our data queue to become empty ASAP, keeping data
562  * in the tty and ldisc (not here). If we couldn't push any
563  * this time around, there may be trouble unless there's an
564  * implicit tty_unthrottle() call on its way...
565  *
566  * REVISIT we should probably add a timer to keep the tasklet
567  * from starving ... but it's not clear that case ever happens.
568  */
569  if (!list_empty(queue) && tty) {
570  if (!test_bit(TTY_THROTTLED, &tty->flags)) {
571  if (do_push)
572  tasklet_schedule(&port->push);
573  else
574  pr_warning(PREFIX "%d: RX not scheduled?\n",
575  port->port_num);
576  }
577  }
578 
579  /* If we're still connected, refill the USB RX queue. */
580  if (!disconnect && port->port_usb)
581  gs_start_rx(port);
582 
583  spin_unlock_irq(&port->port_lock);
584 }
585 
586 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
587 {
588  struct gs_port *port = ep->driver_data;
589 
590  /* Queue all received data until the tty layer is ready for it. */
591  spin_lock(&port->port_lock);
592  list_add_tail(&req->list, &port->read_queue);
593  tasklet_schedule(&port->push);
594  spin_unlock(&port->port_lock);
595 }
596 
597 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
598 {
599  struct gs_port *port = ep->driver_data;
600 
601  spin_lock(&port->port_lock);
602  list_add(&req->list, &port->write_pool);
603  port->write_started--;
604 
605  switch (req->status) {
606  default:
607  /* presumably a transient fault */
608  pr_warning("%s: unexpected %s status %d\n",
609  __func__, ep->name, req->status);
610  /* FALL THROUGH */
611  case 0:
612  /* normal completion */
613  gs_start_tx(port);
614  break;
615 
616  case -ESHUTDOWN:
617  /* disconnect */
618  pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
619  break;
620  }
621 
622  spin_unlock(&port->port_lock);
623 }
624 
625 static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
626  int *allocated)
627 {
628  struct usb_request *req;
629 
630  while (!list_empty(head)) {
631  req = list_entry(head->next, struct usb_request, list);
632  list_del(&req->list);
633  gs_free_req(ep, req);
634  if (allocated)
635  (*allocated)--;
636  }
637 }
638 
639 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
640  void (*fn)(struct usb_ep *, struct usb_request *),
641  int *allocated)
642 {
643  int i;
644  struct usb_request *req;
645  int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
646 
647  /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
648  * do quite that many this time, don't fail ... we just won't
649  * be as speedy as we might otherwise be.
650  */
651  for (i = 0; i < n; i++) {
652  req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
653  if (!req)
654  return list_empty(head) ? -ENOMEM : 0;
655  req->complete = fn;
656  list_add_tail(&req->list, head);
657  if (allocated)
658  (*allocated)++;
659  }
660  return 0;
661 }
662 
672 static int gs_start_io(struct gs_port *port)
673 {
674  struct list_head *head = &port->read_pool;
675  struct usb_ep *ep = port->port_usb->out;
676  int status;
677  unsigned started;
678 
679  /* Allocate RX and TX I/O buffers. We can't easily do this much
680  * earlier (with GFP_KERNEL) because the requests are coupled to
681  * endpoints, as are the packet sizes we'll be using. Different
682  * configurations may use different endpoints with a given port;
683  * and high speed vs full speed changes packet sizes too.
684  */
685  status = gs_alloc_requests(ep, head, gs_read_complete,
686  &port->read_allocated);
687  if (status)
688  return status;
689 
690  status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
691  gs_write_complete, &port->write_allocated);
692  if (status) {
693  gs_free_requests(ep, head, &port->read_allocated);
694  return status;
695  }
696 
697  /* queue read requests */
698  port->n_read = 0;
699  started = gs_start_rx(port);
700 
701  /* unblock any pending writes into our circular buffer */
702  if (started) {
703  tty_wakeup(port->port.tty);
704  } else {
705  gs_free_requests(ep, head, &port->read_allocated);
706  gs_free_requests(port->port_usb->in, &port->write_pool,
707  &port->write_allocated);
708  status = -EIO;
709  }
710 
711  return status;
712 }
713 
714 /*-------------------------------------------------------------------------*/
715 
716 /* TTY Driver */
717 
718 /*
719  * gs_open sets up the link between a gs_port and its associated TTY.
720  * That link is broken *only* by TTY close(), and all driver methods
721  * know that.
722  */
723 static int gs_open(struct tty_struct *tty, struct file *file)
724 {
725  int port_num = tty->index;
726  struct gs_port *port;
727  int status;
728 
729  do {
730  mutex_lock(&ports[port_num].lock);
731  port = ports[port_num].port;
732  if (!port)
733  status = -ENODEV;
734  else {
735  spin_lock_irq(&port->port_lock);
736 
737  /* already open? Great. */
738  if (port->port.count) {
739  status = 0;
740  port->port.count++;
741 
742  /* currently opening/closing? wait ... */
743  } else if (port->openclose) {
744  status = -EBUSY;
745 
746  /* ... else we do the work */
747  } else {
748  status = -EAGAIN;
749  port->openclose = true;
750  }
751  spin_unlock_irq(&port->port_lock);
752  }
753  mutex_unlock(&ports[port_num].lock);
754 
755  switch (status) {
756  default:
757  /* fully handled */
758  return status;
759  case -EAGAIN:
760  /* must do the work */
761  break;
762  case -EBUSY:
763  /* wait for EAGAIN task to finish */
764  msleep(1);
765  /* REVISIT could have a waitchannel here, if
766  * concurrent open performance is important
767  */
768  break;
769  }
770  } while (status != -EAGAIN);
771 
772  /* Do the "real open" */
773  spin_lock_irq(&port->port_lock);
774 
775  /* allocate circular buffer on first open */
776  if (port->port_write_buf.buf_buf == NULL) {
777 
778  spin_unlock_irq(&port->port_lock);
779  status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
780  spin_lock_irq(&port->port_lock);
781 
782  if (status) {
783  pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
784  port->port_num, tty, file);
785  port->openclose = false;
786  goto exit_unlock_port;
787  }
788  }
789 
790  /* REVISIT if REMOVED (ports[].port NULL), abort the open
791  * to let rmmod work faster (but this way isn't wrong).
792  */
793 
794  /* REVISIT maybe wait for "carrier detect" */
795 
796  tty->driver_data = port;
797  port->port.tty = tty;
798 
799  port->port.count = 1;
800  port->openclose = false;
801 
802  /* if connected, start the I/O stream */
803  if (port->port_usb) {
804  struct gserial *gser = port->port_usb;
805 
806  pr_debug("gs_open: start ttyGS%d\n", port->port_num);
807  gs_start_io(port);
808 
809  if (gser->connect)
810  gser->connect(gser);
811  }
812 
813  pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
814 
815  status = 0;
816 
817 exit_unlock_port:
818  spin_unlock_irq(&port->port_lock);
819  return status;
820 }
821 
822 static int gs_writes_finished(struct gs_port *p)
823 {
824  int cond;
825 
826  /* return true on disconnect or empty buffer */
827  spin_lock_irq(&p->port_lock);
828  cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
829  spin_unlock_irq(&p->port_lock);
830 
831  return cond;
832 }
833 
834 static void gs_close(struct tty_struct *tty, struct file *file)
835 {
836  struct gs_port *port = tty->driver_data;
837  struct gserial *gser;
838 
839  spin_lock_irq(&port->port_lock);
840 
841  if (port->port.count != 1) {
842  if (port->port.count == 0)
843  WARN_ON(1);
844  else
845  --port->port.count;
846  goto exit;
847  }
848 
849  pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
850 
851  /* mark port as closing but in use; we can drop port lock
852  * and sleep if necessary
853  */
854  port->openclose = true;
855  port->port.count = 0;
856 
857  gser = port->port_usb;
858  if (gser && gser->disconnect)
859  gser->disconnect(gser);
860 
861  /* wait for circular write buffer to drain, disconnect, or at
862  * most GS_CLOSE_TIMEOUT seconds; then discard the rest
863  */
864  if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
865  spin_unlock_irq(&port->port_lock);
866  wait_event_interruptible_timeout(port->drain_wait,
867  gs_writes_finished(port),
868  GS_CLOSE_TIMEOUT * HZ);
869  spin_lock_irq(&port->port_lock);
870  gser = port->port_usb;
871  }
872 
873  /* Iff we're disconnected, there can be no I/O in flight so it's
874  * ok to free the circular buffer; else just scrub it. And don't
875  * let the push tasklet fire again until we're re-opened.
876  */
877  if (gser == NULL)
878  gs_buf_free(&port->port_write_buf);
879  else
880  gs_buf_clear(&port->port_write_buf);
881 
882  tty->driver_data = NULL;
883  port->port.tty = NULL;
884 
885  port->openclose = false;
886 
887  pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
888  port->port_num, tty, file);
889 
890  wake_up_interruptible(&port->port.close_wait);
891 exit:
892  spin_unlock_irq(&port->port_lock);
893 }
894 
895 static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
896 {
897  struct gs_port *port = tty->driver_data;
898  unsigned long flags;
899  int status;
900 
901  pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
902  port->port_num, tty, count);
903 
904  spin_lock_irqsave(&port->port_lock, flags);
905  if (count)
906  count = gs_buf_put(&port->port_write_buf, buf, count);
907  /* treat count == 0 as flush_chars() */
908  if (port->port_usb)
909  status = gs_start_tx(port);
910  spin_unlock_irqrestore(&port->port_lock, flags);
911 
912  return count;
913 }
914 
915 static int gs_put_char(struct tty_struct *tty, unsigned char ch)
916 {
917  struct gs_port *port = tty->driver_data;
918  unsigned long flags;
919  int status;
920 
921  pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n",
922  port->port_num, tty, ch, __builtin_return_address(0));
923 
924  spin_lock_irqsave(&port->port_lock, flags);
925  status = gs_buf_put(&port->port_write_buf, &ch, 1);
926  spin_unlock_irqrestore(&port->port_lock, flags);
927 
928  return status;
929 }
930 
931 static void gs_flush_chars(struct tty_struct *tty)
932 {
933  struct gs_port *port = tty->driver_data;
934  unsigned long flags;
935 
936  pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
937 
938  spin_lock_irqsave(&port->port_lock, flags);
939  if (port->port_usb)
940  gs_start_tx(port);
941  spin_unlock_irqrestore(&port->port_lock, flags);
942 }
943 
944 static int gs_write_room(struct tty_struct *tty)
945 {
946  struct gs_port *port = tty->driver_data;
947  unsigned long flags;
948  int room = 0;
949 
950  spin_lock_irqsave(&port->port_lock, flags);
951  if (port->port_usb)
952  room = gs_buf_space_avail(&port->port_write_buf);
953  spin_unlock_irqrestore(&port->port_lock, flags);
954 
955  pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
956  port->port_num, tty, room);
957 
958  return room;
959 }
960 
961 static int gs_chars_in_buffer(struct tty_struct *tty)
962 {
963  struct gs_port *port = tty->driver_data;
964  unsigned long flags;
965  int chars = 0;
966 
967  spin_lock_irqsave(&port->port_lock, flags);
968  chars = gs_buf_data_avail(&port->port_write_buf);
969  spin_unlock_irqrestore(&port->port_lock, flags);
970 
971  pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
972  port->port_num, tty, chars);
973 
974  return chars;
975 }
976 
977 /* undo side effects of setting TTY_THROTTLED */
978 static void gs_unthrottle(struct tty_struct *tty)
979 {
980  struct gs_port *port = tty->driver_data;
981  unsigned long flags;
982 
983  spin_lock_irqsave(&port->port_lock, flags);
984  if (port->port_usb) {
985  /* Kickstart read queue processing. We don't do xon/xoff,
986  * rts/cts, or other handshaking with the host, but if the
987  * read queue backs up enough we'll be NAKing OUT packets.
988  */
989  tasklet_schedule(&port->push);
990  pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
991  }
992  spin_unlock_irqrestore(&port->port_lock, flags);
993 }
994 
995 static int gs_break_ctl(struct tty_struct *tty, int duration)
996 {
997  struct gs_port *port = tty->driver_data;
998  int status = 0;
999  struct gserial *gser;
1000 
1001  pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
1002  port->port_num, duration);
1003 
1004  spin_lock_irq(&port->port_lock);
1005  gser = port->port_usb;
1006  if (gser && gser->send_break)
1007  status = gser->send_break(gser, duration);
1008  spin_unlock_irq(&port->port_lock);
1009 
1010  return status;
1011 }
1012 
1013 static const struct tty_operations gs_tty_ops = {
1014  .open = gs_open,
1015  .close = gs_close,
1016  .write = gs_write,
1017  .put_char = gs_put_char,
1018  .flush_chars = gs_flush_chars,
1019  .write_room = gs_write_room,
1020  .chars_in_buffer = gs_chars_in_buffer,
1021  .unthrottle = gs_unthrottle,
1022  .break_ctl = gs_break_ctl,
1023 };
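/* From userspace a port bound by these operations behaves like any other
 * TTY; a minimal sketch of a program using /dev/ttyGS0 (plain POSIX
 * termios calls, nothing here is specific to this driver):
 *
 *	#include <fcntl.h>
 *	#include <termios.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);
 *	struct termios tio;
 *	tcgetattr(fd, &tio);
 *	cfmakeraw(&tio);			// raw byte stream, no echo or line editing
 *	tcsetattr(fd, TCSANOW, &tio);
 *	write(fd, "hello host\n", 11);		// lands in gs_write() above
 *	close(fd);
 */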
1024 
1025 /*-------------------------------------------------------------------------*/
1026 
1027 static struct tty_driver *gs_tty_driver;
1028 
1029 static int
1030 gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
1031 {
1032  struct gs_port *port;
1033 
1034  port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
1035  if (port == NULL)
1036  return -ENOMEM;
1037 
1038  tty_port_init(&port->port);
1039  spin_lock_init(&port->port_lock);
1040  init_waitqueue_head(&port->drain_wait);
1041 
1042  tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
1043 
1044  INIT_LIST_HEAD(&port->read_pool);
1045  INIT_LIST_HEAD(&port->read_queue);
1046  INIT_LIST_HEAD(&port->write_pool);
1047 
1048  port->port_num = port_num;
1049  port->port_line_coding = *coding;
1050 
1051  ports[port_num].port = port;
1052 
1053  return 0;
1054 }
1055 
1075 int gserial_setup(struct usb_gadget *g, unsigned count)
1076 {
1077  unsigned i;
1078  struct usb_cdc_line_coding coding;
1079  int status;
1080 
1081  if (count == 0 || count > N_PORTS)
1082  return -EINVAL;
1083 
1084  gs_tty_driver = alloc_tty_driver(count);
1085  if (!gs_tty_driver)
1086  return -ENOMEM;
1087 
1088  gs_tty_driver->driver_name = "g_serial";
1089  gs_tty_driver->name = PREFIX;
1090  /* uses dynamically assigned dev_t values */
1091 
1092  gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1093  gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
1094  gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1095  gs_tty_driver->init_termios = tty_std_termios;
1096 
1097  /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
1098  * MS-Windows. Otherwise, most of these flags shouldn't affect
1099  * anything unless we were to actually hook up to a serial line.
1100  */
1101  gs_tty_driver->init_termios.c_cflag =
1102  B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1103  gs_tty_driver->init_termios.c_ispeed = 9600;
1104  gs_tty_driver->init_termios.c_ospeed = 9600;
1105 
1106  coding.dwDTERate = cpu_to_le32(9600);
1107  coding.bCharFormat = 8;
1108  coding.bParityType = USB_CDC_NO_PARITY;
1109  coding.bDataBits = USB_CDC_1_STOP_BITS;
1110 
1111  tty_set_operations(gs_tty_driver, &gs_tty_ops);
1112 
1113  /* make devices be openable */
1114  for (i = 0; i < count; i++) {
1115  mutex_init(&ports[i].lock);
1116  status = gs_port_alloc(i, &coding);
1117  if (status) {
1118  count = i;
1119  goto fail;
1120  }
1121  }
1122  n_ports = count;
1123 
1124  /* export the driver ... */
1125  status = tty_register_driver(gs_tty_driver);
1126  if (status) {
1127  pr_err("%s: cannot register, err %d\n",
1128  __func__, status);
1129  goto fail;
1130  }
1131 
1132  /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
1133  for (i = 0; i < count; i++) {
1134  struct device *tty_dev;
1135 
1136  tty_dev = tty_port_register_device(&ports[i].port->port,
1137  gs_tty_driver, i, &g->dev);
1138  if (IS_ERR(tty_dev))
1139  pr_warning("%s: no classdev for port %d, err %ld\n",
1140  __func__, i, PTR_ERR(tty_dev));
1141  }
1142 
1143  pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
1144  count, (count == 1) ? "" : "s");
1145 
1146  return status;
1147 fail:
1148  while (count--)
1149  kfree(ports[count].port);
1150  put_tty_driver(gs_tty_driver);
1151  gs_tty_driver = NULL;
1152  return status;
1153 }
1154 
1155 static int gs_closed(struct gs_port *port)
1156 {
1157  int cond;
1158 
1159  spin_lock_irq(&port->port_lock);
1160  cond = (port->port.count == 0) && !port->openclose;
1161  spin_unlock_irq(&port->port_lock);
1162  return cond;
1163 }
1164 
1177 void gserial_cleanup(void)
1178 {
1179  unsigned i;
1180  struct gs_port *port;
1181 
1182  if (!gs_tty_driver)
1183  return;
1184 
1185  /* start sysfs and /dev/ttyGS* node removal */
1186  for (i = 0; i < n_ports; i++)
1187  tty_unregister_device(gs_tty_driver, i);
1188 
1189  for (i = 0; i < n_ports; i++) {
1190  /* prevent new opens */
1191  mutex_lock(&ports[i].lock);
1192  port = ports[i].port;
1193  ports[i].port = NULL;
1194  mutex_unlock(&ports[i].lock);
1195 
1196  tasklet_kill(&port->push);
1197 
1198  /* wait for old opens to finish */
1199  wait_event(port->port.close_wait, gs_closed(port));
1200 
1201  WARN_ON(port->port_usb != NULL);
1202 
1203  kfree(port);
1204  }
1205  n_ports = 0;
1206 
1207  tty_unregister_driver(gs_tty_driver);
1208  put_tty_driver(gs_tty_driver);
1209  gs_tty_driver = NULL;
1210 
1211  pr_debug("%s: cleaned up ttyGS* support\n", __func__);
1212 }
1213 
1235 int gserial_connect(struct gserial *gser, u8 port_num)
1236 {
1237  struct gs_port *port;
1238  unsigned long flags;
1239  int status;
1240 
1241  if (!gs_tty_driver || port_num >= n_ports)
1242  return -ENXIO;
1243 
1244  /* we "know" gserial_cleanup() hasn't been called */
1245  port = ports[port_num].port;
1246 
1247  /* activate the endpoints */
1248  status = usb_ep_enable(gser->in);
1249  if (status < 0)
1250  return status;
1251  gser->in->driver_data = port;
1252 
1253  status = usb_ep_enable(gser->out);
1254  if (status < 0)
1255  goto fail_out;
1256  gser->out->driver_data = port;
1257 
1258  /* then tell the tty glue that I/O can work */
1259  spin_lock_irqsave(&port->port_lock, flags);
1260  gser->ioport = port;
1261  port->port_usb = gser;
1262 
1263  /* REVISIT unclear how best to handle this state...
1264  * we don't really couple it with the Linux TTY.
1265  */
1266  gser->port_line_coding = port->port_line_coding;
1267 
1268  /* REVISIT if waiting on "carrier detect", signal. */
1269 
1270  /* if it's already open, start I/O ... and notify the serial
1271  * protocol about open/close status (connect/disconnect).
1272  */
1273  if (port->port.count) {
1274  pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
1275  gs_start_io(port);
1276  if (gser->connect)
1277  gser->connect(gser);
1278  } else {
1279  if (gser->disconnect)
1280  gser->disconnect(gser);
1281  }
1282 
1283  spin_unlock_irqrestore(&port->port_lock, flags);
1284 
1285  return status;
1286 
1287 fail_out:
1288  usb_ep_disable(gser->in);
1289  gser->in->driver_data = NULL;
1290  return status;
1291 }
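/* A minimal sketch of where gserial_connect()/gserial_disconnect() are
 * typically called from, assuming a function driver along the lines of
 * f_serial or f_acm; my_set_alt(), my_disable(), my_port and my_port_num
 * are illustrative. The function driver must have filled in my_port.in /
 * my_port.out (with descriptors chosen for the current speed) before
 * connecting, since gserial_connect() enables those endpoints itself.
 */
static struct gserial my_port;		/* normally embedded in the function's state */
static u8 my_port_num;			/* which ttyGS* this function drives */

static int my_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	/* host selected our data interface: bring the TTY link up */
	return gserial_connect(&my_port, my_port_num);
}

static void my_disable(struct usb_function *f)
{
	/* host reconfigured, suspended, or unplugged: stop I/O */
	gserial_disconnect(&my_port);
}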
1292 
1304 void gserial_disconnect(struct gserial *gser)
1305 {
1306  struct gs_port *port = gser->ioport;
1307  unsigned long flags;
1308 
1309  if (!port)
1310  return;
1311 
1312  /* tell the TTY glue not to do I/O here any more */
1313  spin_lock_irqsave(&port->port_lock, flags);
1314 
1315  /* REVISIT as above: how best to track this? */
1316  port->port_line_coding = gser->port_line_coding;
1317 
1318  port->port_usb = NULL;
1319  gser->ioport = NULL;
1320  if (port->port.count > 0 || port->openclose) {
1321  wake_up_interruptible(&port->drain_wait);
1322  if (port->port.tty)
1323  tty_hangup(port->port.tty);
1324  }
1325  spin_unlock_irqrestore(&port->port_lock, flags);
1326 
1327  /* disable endpoints, aborting down any active I/O */
1328  usb_ep_disable(gser->out);
1329  gser->out->driver_data = NULL;
1330 
1331  usb_ep_disable(gser->in);
1332  gser->in->driver_data = NULL;
1333 
1334  /* finally, free any unused/unusable I/O buffers */
1335  spin_lock_irqsave(&port->port_lock, flags);
1336  if (port->port.count == 0 && !port->openclose)
1337  gs_buf_free(&port->port_write_buf);
1338  gs_free_requests(gser->out, &port->read_pool, NULL);
1339  gs_free_requests(gser->out, &port->read_queue, NULL);
1340  gs_free_requests(gser->in, &port->write_pool, NULL);
1341 
1342  port->read_allocated = port->read_started =
1343  port->write_allocated = port->write_started = 0;
1344 
1345  spin_unlock_irqrestore(&port->port_lock, flags);
1346 }