Linux Kernel 3.7.1
tty_buffer.c
/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>

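/**
 *	tty_buffer_free_all		-	free buffers used by a tty
 *	@tty: tty to free from
 *
 *	Remove all the buffers pending on a tty, whether queued with data
 *	or sitting in the free ring. Called when the tty is no longer in use.
 *
 *	Locking: none
 */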
void tty_buffer_free_all(struct tty_struct *tty)
{
	struct tty_buffer *thead;
	while ((thead = tty->buf.head) != NULL) {
		tty->buf.head = thead->next;
		kfree(thead);
	}
	while ((thead = tty->buf.free) != NULL) {
		tty->buf.free = thead->next;
		kfree(thead);
	}
	tty->buf.tail = NULL;
	tty->buf.memory_used = 0;
}

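/**
 *	tty_buffer_alloc	-	allocate a tty buffer
 *	@tty: tty device
 *	@size: desired size (characters)
 *
 *	Allocate a new tty buffer to hold the desired number of characters.
 *	Return NULL if out of memory or the allocation would exceed the
 *	per device queue limit. The character and flag arrays share one
 *	allocation: size bytes of characters followed by size bytes of flags,
 *	which is why 2 * size bytes are requested beyond the header.
 *
 *	Locking: Caller must hold tty->buf.lock
 */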
static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
{
	struct tty_buffer *p;

	if (tty->buf.memory_used + size > 65536)
		return NULL;
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->read = 0;
	p->char_buf_ptr = (char *)(p->data);
	p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
	tty->buf.memory_used += size;
	return p;
}

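/**
 *	tty_buffer_free		-	free a tty buffer
 *	@tty: tty owning the buffer
 *	@b: the buffer to free
 *
 *	Free a tty buffer, or add it to the free list according to our
 *	internal strategy.
 *
 *	Locking: Caller must hold tty->buf.lock
 */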
static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
{
	/* Dumb strategy for now - should keep some stats */
	tty->buf.memory_used -= b->size;
	WARN_ON(tty->buf.memory_used < 0);

	if (b->size >= 512)
		kfree(b);
	else {
		b->next = tty->buf.free;
		tty->buf.free = b;
	}
}

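/**
 *	__tty_buffer_flush	-	flush full tty buffers
 *	@tty: tty to flush
 *
 *	Flush all the buffers containing receive data.
 *
 *	Locking: Caller must hold tty->buf.lock
 */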
static void __tty_buffer_flush(struct tty_struct *tty)
{
	struct tty_buffer *thead;

	while ((thead = tty->buf.head) != NULL) {
		tty->buf.head = thead->next;
		tty_buffer_free(tty, thead);
	}
	tty->buf.tail = NULL;
}

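/**
 *	tty_buffer_flush	-	flush full tty buffers
 *	@tty: tty to flush
 *
 *	Flush all the buffers containing receive data. If the buffers are
 *	being processed by flush_to_ldisc then we defer the processing to
 *	that function.
 *
 *	Locking: none
 */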
void tty_buffer_flush(struct tty_struct *tty)
{
	unsigned long flags;
	spin_lock_irqsave(&tty->buf.lock, flags);

	/* If the data is being pushed to the tty layer then we can't
	   process it here. Instead set a flag and the flush_to_ldisc
	   path will process the flush request before it exits */
	if (test_bit(TTY_FLUSHING, &tty->flags)) {
		set_bit(TTY_FLUSHPENDING, &tty->flags);
		spin_unlock_irqrestore(&tty->buf.lock, flags);
		wait_event(tty->read_wait,
				test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
		return;
	} else
		__tty_buffer_flush(tty);
	spin_unlock_irqrestore(&tty->buf.lock, flags);
}

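/**
 *	tty_buffer_find		-	find a free tty buffer
 *	@tty: tty owning the buffer
 *	@size: characters wanted
 *
 *	Locate an existing suitable tty buffer or, if we are lacking one,
 *	allocate a new one. We round our buffers off in 256 character chunks
 *	to get better allocation behaviour.
 *
 *	Locking: Caller must hold tty->buf.lock
 */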
static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
{
	struct tty_buffer **tbh = &tty->buf.free;
	while ((*tbh) != NULL) {
		struct tty_buffer *t = *tbh;
		if (t->size >= size) {
			*tbh = t->next;
			t->next = NULL;
			t->used = 0;
			t->commit = 0;
			t->read = 0;
			tty->buf.memory_used += t->size;
			return t;
		}
		tbh = &((*tbh)->next);
	}
	/* Round the buffer size out */
	size = (size + 0xFF) & ~0xFF;
	return tty_buffer_alloc(tty, size);
	/* Should possibly check if this fails for the largest buffer we
	   have queued and recycle that? */
}
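
/**
 *	__tty_buffer_request_room	-	grow tty buffer if needed
 *	@tty: tty structure
 *	@size: size desired
 *
 *	Make at least size bytes of linear space available for the tty
 *	buffer. If we fail, return the size we managed to find.
 *
 *	Locking: Caller must hold tty->buf.lock
 */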
static int __tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
	struct tty_buffer *b, *n;
	int left;
	/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
	   remove this conditional if it's worth it. This would be invisible
	   to the callers */
	if ((b = tty->buf.tail) != NULL)
		left = b->size - b->used;
	else
		left = 0;

	if (left < size) {
		/* This is the slow path - looking for new buffers to use */
		if ((n = tty_buffer_find(tty, size)) != NULL) {
			if (b != NULL) {
				b->next = n;
				b->commit = b->used;
			} else
				tty->buf.head = n;
			tty->buf.tail = n;
		} else
			size = left;
	}

	return size;
}

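/**
 *	tty_buffer_request_room		-	grow tty buffer if needed
 *	@tty: tty structure
 *	@size: size desired
 *
 *	Make at least size bytes of linear space available for the tty
 *	buffer. If we fail, return the size we managed to find.
 *
 *	Locking: Takes tty->buf.lock
 */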
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
	unsigned long flags;
	int length;

	spin_lock_irqsave(&tty->buf.lock, flags);
	length = __tty_buffer_request_room(tty, size);
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	return length;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);

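/*
 * Illustrative sketch, not part of this file: a driver can reserve space
 * up front and then insert characters one at a time. tty_insert_flip_char()
 * is the inline helper from <linux/tty_flip.h>; example_rx_burst and its
 * parameters are hypothetical.
 */
static void example_rx_burst(struct tty_struct *tty,
			     const unsigned char *buf, size_t len)
{
	int i;
	/* Reserve space up front; we may be given less than requested */
	int space = tty_buffer_request_room(tty, len);

	for (i = 0; i < space; i++)
		tty_insert_flip_char(tty, buf[i], TTY_NORMAL);
	/* Hand the queued data to the line discipline */
	tty_flip_buffer_push(tty);
}

/**
 *	tty_insert_flip_string_fixed_flag - add characters to the tty buffer
 *	@tty: tty structure
 *	@chars: characters
 *	@flag: flag value for each character
 *	@size: size
 *
 *	Queue a series of bytes to the tty buffering. All the characters
 *	passed are marked with the supplied flag. Returns the number added.
 *
 *	Locking: Called functions may take tty->buf.lock
 */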
int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
		const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space;
		unsigned long flags;
		struct tty_buffer *tb;

		spin_lock_irqsave(&tty->buf.lock, flags);
		space = __tty_buffer_request_room(tty, goal);
		tb = tty->buf.tail;
		/* If there is no space then tb may be NULL */
		if (unlikely(space == 0)) {
			spin_unlock_irqrestore(&tty->buf.lock, flags);
			break;
		}
		memcpy(tb->char_buf_ptr + tb->used, chars, space);
		memset(tb->flag_buf_ptr + tb->used, flag, space);
		tb->used += space;
		spin_unlock_irqrestore(&tty->buf.lock, flags);
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);

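/*
 * Illustrative sketch, not part of this file: a typical receive path
 * queues a block of bytes with a uniform flag and then pushes it to the
 * line discipline. example_rx_block and its parameters are hypothetical.
 */
static void example_rx_block(struct tty_struct *tty,
			     const unsigned char *buf, size_t len)
{
	/* Queue the bytes; all are marked as normal received data */
	int copied = tty_insert_flip_string_fixed_flag(tty, buf,
						       TTY_NORMAL, len);
	if (copied < len)
		pr_warn("tty buffer full, dropped %zu bytes\n", len - copied);
	/* Let the workqueue feed the queued data to the line discipline */
	tty_flip_buffer_push(tty);
}

/**
 *	tty_insert_flip_string_flags	-	add characters to the tty buffer
 *	@tty: tty structure
 *	@chars: characters
 *	@flags: flag bytes
 *	@size: size
 *
 *	Queue a series of bytes to the tty buffering. For each character
 *	the flags array indicates the status of the character. Returns the
 *	number added.
 *
 *	Locking: Called functions may take tty->buf.lock
 */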
int tty_insert_flip_string_flags(struct tty_struct *tty,
		const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space;
		unsigned long __flags;
		struct tty_buffer *tb;

		spin_lock_irqsave(&tty->buf.lock, __flags);
		space = __tty_buffer_request_room(tty, goal);
		tb = tty->buf.tail;
		/* If there is no space then tb may be NULL */
		if (unlikely(space == 0)) {
			spin_unlock_irqrestore(&tty->buf.lock, __flags);
			break;
		}
		memcpy(tb->char_buf_ptr + tb->used, chars, space);
		memcpy(tb->flag_buf_ptr + tb->used, flags, space);
		tb->used += space;
		spin_unlock_irqrestore(&tty->buf.lock, __flags);
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

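/*
 * Illustrative sketch, not part of this file: the per-character flag
 * variant lets a driver mark individual bytes, e.g. one byte that arrived
 * with a parity error. The TTY_* flag values come from <linux/tty.h>;
 * example_rx_with_parity_error and its parameters are hypothetical.
 */
static void example_rx_with_parity_error(struct tty_struct *tty,
					 const unsigned char *buf,
					 size_t len, size_t bad)
{
	char flags[16];
	size_t i;

	if (len > sizeof(flags))
		len = sizeof(flags);
	for (i = 0; i < len; i++)
		flags[i] = (i == bad) ? TTY_PARITY : TTY_NORMAL;
	tty_insert_flip_string_flags(tty, buf, flags, len);
	tty_flip_buffer_push(tty);
}

/**
 *	tty_schedule_flip	-	push characters to ldisc
 *	@tty: tty to push from
 *
 *	Takes any pending buffers and transfers their ownership to the
 *	ldisc side of the queue. It then schedules those characters for
 *	processing by the line discipline.
 *
 *	Locking: Takes tty->buf.lock
 */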
void tty_schedule_flip(struct tty_struct *tty)
{
	unsigned long flags;
	spin_lock_irqsave(&tty->buf.lock, flags);
	if (tty->buf.tail != NULL)
		tty->buf.tail->commit = tty->buf.tail->used;
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_schedule_flip);

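/**
 *	tty_prepare_flip_string		-	make room for characters
 *	@tty: tty
 *	@chars: return pointer for character write area
 *	@size: desired size
 *
 *	Prepare a block of space in the buffer for data. Returns the length
 *	available and a pointer to the space which is now reserved and marked
 *	with TTY_NORMAL flags. The caller can then fill in the characters
 *	itself and ask the tty layer to push the data.
 *
 *	Locking: May call functions taking tty->buf.lock
 */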
int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
								size_t size)
{
	int space;
	unsigned long flags;
	struct tty_buffer *tb;

	spin_lock_irqsave(&tty->buf.lock, flags);
	space = __tty_buffer_request_room(tty, size);

	tb = tty->buf.tail;
	if (likely(space)) {
		*chars = tb->char_buf_ptr + tb->used;
		memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
		tb->used += space;
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);

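/*
 * Illustrative sketch, not part of this file: reserve buffer space and let
 * the driver copy into it directly, avoiding an intermediate bounce buffer.
 * hw_read_fifo() and example_rx_direct are hypothetical driver helpers.
 */
void hw_read_fifo(unsigned char *dst, size_t count);	/* hypothetical */

static void example_rx_direct(struct tty_struct *tty, size_t len)
{
	unsigned char *p;
	int space = tty_prepare_flip_string(tty, &p, len);

	if (space <= 0)
		return;		/* no room now; retry from a later interrupt */
	hw_read_fifo(p, space);	/* hypothetical: fill the reserved area */
	tty_flip_buffer_push(tty);
}

/**
 *	tty_prepare_flip_string_flags	-	make room for characters
 *	@tty: tty
 *	@chars: return pointer for character write area
 *	@flags: return pointer for flags write area
 *	@size: desired size
 *
 *	Prepare a block of space in the buffer for data. Returns the length
 *	available and pointers to the space which is now reserved. The caller
 *	fills in both the characters and the flags and then asks the tty
 *	layer to push the data.
 *
 *	Locking: May call functions taking tty->buf.lock
 */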
int tty_prepare_flip_string_flags(struct tty_struct *tty,
			unsigned char **chars, char **flags, size_t size)
{
	int space;
	unsigned long __flags;
	struct tty_buffer *tb;

	spin_lock_irqsave(&tty->buf.lock, __flags);
	space = __tty_buffer_request_room(tty, size);

	tb = tty->buf.tail;
	if (likely(space)) {
		*chars = tb->char_buf_ptr + tb->used;
		*flags = tb->flag_buf_ptr + tb->used;
		tb->used += space;
	}
	spin_unlock_irqrestore(&tty->buf.lock, __flags);
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);

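/**
 *	flush_to_ldisc
 *	@work: tty structure passed from work queue
 *
 *	This routine is called out of the software interrupt to flush data
 *	from the buffer chain to the line discipline.
 *
 *	Locking: holds tty->buf.lock to guard the buffer list. Drops the
 *	lock while invoking the line discipline receive_buf method.
 */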
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_struct *tty =
		container_of(work, struct tty_struct, buf.work);
	unsigned long flags;
	struct tty_ldisc *disc;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)	/* !TTY_LDISC */
		return;

	spin_lock_irqsave(&tty->buf.lock, flags);

	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
		struct tty_buffer *head;
		while ((head = tty->buf.head) != NULL) {
			int count;
			char *char_buf;
			unsigned char *flag_buf;

			count = head->commit - head->read;
			if (!count) {
				if (head->next == NULL)
					break;
				tty->buf.head = head->next;
				tty_buffer_free(tty, head);
				continue;
			}
			/* Ldisc or user is trying to flush the buffers
			   we are feeding to the ldisc, stop feeding the
			   line discipline as we want to empty the queue */
			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
				break;
			if (!tty->receive_room)
				break;
			if (count > tty->receive_room)
				count = tty->receive_room;
			char_buf = head->char_buf_ptr + head->read;
			flag_buf = head->flag_buf_ptr + head->read;
			head->read += count;
			spin_unlock_irqrestore(&tty->buf.lock, flags);
			disc->ops->receive_buf(tty, char_buf,
					flag_buf, count);
			spin_lock_irqsave(&tty->buf.lock, flags);
		}
		clear_bit(TTY_FLUSHING, &tty->flags);
	}

	/* We may have a deferred request to flush the input buffer,
	   if so pull the chain under the lock and empty the queue */
	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
		__tty_buffer_flush(tty);
		clear_bit(TTY_FLUSHPENDING, &tty->flags);
		wake_up(&tty->read_wait);
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	tty_ldisc_deref(disc);
}

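/**
 *	tty_flush_to_ldisc
 *	@tty: tty to push
 *
 *	Push the terminal flip buffers to the line discipline.
 *
 *	Must not be called from IRQ context.
 */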
void tty_flush_to_ldisc(struct tty_struct *tty)
{
	flush_work(&tty->buf.work);
}

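/**
 *	tty_flip_buffer_push	-	terminal
 *	@tty: tty to push
 *
 *	Queue a push of the terminal flip buffers to the line discipline.
 *	This function must not be called from IRQ context if
 *	tty->low_latency is set, since it then calls the buffer flush
 *	synchronously rather than via the workqueue.
 *
 *	Locking: tty buffer lock. Driver locks in low latency mode.
 */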
void tty_flip_buffer_push(struct tty_struct *tty)
{
	unsigned long flags;
	spin_lock_irqsave(&tty->buf.lock, flags);
	if (tty->buf.tail != NULL)
		tty->buf.tail->commit = tty->buf.tail->used;
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	if (tty->low_latency)
		flush_to_ldisc(&tty->buf.work);
	else
		schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);

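/*
 * Illustrative sketch, not part of this file: the simplest receive path,
 * one character at a time. tty_insert_flip_char() is the inline helper
 * from <linux/tty_flip.h>; example_rx_char is hypothetical.
 */
static void example_rx_char(struct tty_struct *tty, unsigned char ch)
{
	tty_insert_flip_char(tty, ch, TTY_NORMAL);
	tty_flip_buffer_push(tty);
}

/**
 *	tty_buffer_init		-	prepare a tty buffer structure
 *	@tty: tty to initialise
 *
 *	Set up the initial state of the buffer management for a tty device.
 *	Must be called before the other tty buffer functions are used.
 *
 *	Locking: none
 */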
void tty_buffer_init(struct tty_struct *tty)
{
	spin_lock_init(&tty->buf.lock);
	tty->buf.head = NULL;
	tty->buf.tail = NULL;
	tty->buf.free = NULL;
	tty->buf.memory_used = 0;
	INIT_WORK(&tty->buf.work, flush_to_ldisc);
}