seq_memory.c

/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <[email protected]>
 *                        Jaroslav Kysela <[email protected]>
 *                2000 by Takashi Iwai <[email protected]>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

/* number of cells currently unused in the pool */
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
        return pool->total_elements - atomic_read(&pool->counter);
}

/* enough free cells for further output? (free count at or above the watermark) */
static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
        return snd_seq_pool_available(pool) >= pool->room;
}
/*
 * Variable length event:
 * An event such as sysex uses the variable length type.
 * The external data may be stored in three different formats:
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed into several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */
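
/*
 * Illustrative sketch (not part of the original file): how a kernel
 * caller could tag format 1 above.  The helper name
 * example_set_kernel_varlen is hypothetical; only the flag encoding
 * follows the scheme described in the comment.
 */
#if 0	/* example only, excluded from the build */
static void example_set_kernel_varlen(struct snd_seq_event *ev,
                                      void *kbuf, unsigned int length)
{
        /* mark the event as variable length */
        ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK;
        ev->flags |= SNDRV_SEQ_EVENT_LENGTH_VARIABLE;
        /* format 1: plain kernel-space buffer */
        ev->data.ext.len = length;
        ev->data.ext.ptr = kbuf;
        /* format 2 would OR SNDRV_SEQ_EXT_USRPTR into ext.len for a
         * user-space pointer; format 3 ORs in SNDRV_SEQ_EXT_CHAINED
         * and points ext.ptr at the first chained cell. */
}
#endif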

/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
        if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
                return -EINVAL;

        return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
                           snd_seq_dump_func_t func, void *private_data)
{
        int len, err;
        struct snd_seq_event_cell *cell;

        if ((len = get_var_len(event)) <= 0)
                return len;

        if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
                char buf[32];
                char __user *curptr = (char __force __user *)event->data.ext.ptr;
                while (len > 0) {
                        int size = sizeof(buf);
                        if (len < size)
                                size = len;
                        if (copy_from_user(buf, curptr, size))
                                return -EFAULT;
                        err = func(private_data, buf, size);
                        if (err < 0)
                                return err;
                        curptr += size;
                        len -= size;
                }
                return 0;
        } else if (! (event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) {
                /* plain kernel-space buffer: dump it in one call */
                return func(private_data, event->data.ext.ptr, len);
        }

        /* chained cells: walk the chain, one event-sized chunk per cell */
        cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
        for (; len > 0 && cell; cell = cell->next) {
                int size = sizeof(struct snd_seq_event);
                if (len < size)
                        size = len;
                err = func(private_data, &cell->event, size);
                if (err < 0)
                        return err;
                len -= size;
        }
        return 0;
}

EXPORT_SYMBOL(snd_seq_dump_var_event);

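/*
 * Illustrative sketch (not part of the original file): a minimal dump
 * callback that measures the total payload.  snd_seq_dump_var_event()
 * calls it once per chunk for all three storage formats.  The
 * example_* names are hypothetical.
 */
#if 0	/* example only, excluded from the build */
static int example_count_bytes(void *private_data, void *buf, int count)
{
        int *total = private_data;	/* running byte count */

        *total += count;
        return 0;	/* a negative return would abort the dump */
}

static int example_measure(const struct snd_seq_event *ev)
{
        int total = 0;
        int err = snd_seq_dump_var_event(ev, example_count_bytes, &total);

        return err < 0 ? err : total;
}
#endif
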
/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
        memcpy(*bufptr, src, size);
        *bufptr += size;
        return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
        if (copy_to_user(*bufptr, src, size))
                return -EFAULT;
        *bufptr += size;
        return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
                             int in_kernel, int size_aligned)
{
        int len, newlen;
        int err;

        if ((len = get_var_len(event)) < 0)
                return len;
        newlen = len;
        if (size_aligned > 0)
                newlen = roundup(len, size_aligned);
        if (count < newlen)
                return -EAGAIN;

        if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
                if (! in_kernel)
                        return -EINVAL;
                if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
                        return -EFAULT;
                return newlen;
        }
        err = snd_seq_dump_var_event(event,
                                     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
                                     (snd_seq_dump_func_t)seq_copy_in_user,
                                     &buf);
        return err < 0 ? err : newlen;
}

EXPORT_SYMBOL(snd_seq_expand_var_event);

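/*
 * Illustrative sketch (not part of the original file): flattening a
 * variable-length event into a kernel buffer.  The wrapper name
 * example_flatten is hypothetical.
 */
#if 0	/* example only, excluded from the build */
static int example_flatten(const struct snd_seq_event *ev,
                           char *buf, int bufsize)
{
        /* in_kernel = 1: buf is a kernel pointer;
         * size_aligned = 0: return the raw length, unpadded */
        return snd_seq_expand_var_event(ev, bufsize, buf, 1, 0);
        /* < 0 on error (-EINVAL, -EFAULT, or -EAGAIN when bufsize is
         * too small), otherwise the number of bytes written */
}
#endif
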
/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
                             struct snd_seq_event_cell *cell)
{
        cell->next = pool->free;
        pool->free = cell;
        atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
        unsigned long flags;
        struct snd_seq_pool *pool;

        if (snd_BUG_ON(!cell))
                return;
        pool = cell->pool;
        if (snd_BUG_ON(!pool))
                return;

        spin_lock_irqsave(&pool->lock, flags);
        free_cell(pool, cell);
        if (snd_seq_ev_is_variable(&cell->event)) {
                if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
                        /* return the whole chain of extra cells, too */
                        struct snd_seq_event_cell *curp, *nextptr;
                        curp = cell->event.data.ext.ptr;
                        for (; curp; curp = nextptr) {
                                nextptr = curp->next;
                                curp->next = pool->free;
                                free_cell(pool, curp);
                        }
                }
        }
        if (waitqueue_active(&pool->output_sleep)) {
                /* has enough space now? */
                if (snd_seq_output_ok(pool))
                        wake_up(&pool->output_sleep);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
                              struct snd_seq_event_cell **cellp,
                              int nonblock, struct file *file)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        int err = -EAGAIN;
        wait_queue_t wait;

        if (pool == NULL)
                return -EINVAL;

        *cellp = NULL;

        init_waitqueue_entry(&wait, current);
        spin_lock_irqsave(&pool->lock, flags);
        if (pool->ptr == NULL) {	/* not initialized */
                snd_printd("seq: pool is not initialized\n");
                err = -EINVAL;
                goto __error;
        }
        while (pool->free == NULL && ! nonblock && ! pool->closing) {

                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&pool->output_sleep, &wait);
                spin_unlock_irq(&pool->lock);
                schedule();
                spin_lock_irq(&pool->lock);
                remove_wait_queue(&pool->output_sleep, &wait);
                /* interrupted? */
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto __error;
                }
        }
        if (pool->closing) { /* closing.. */
                err = -ENOMEM;
                goto __error;
        }

        cell = pool->free;
        if (cell) {
                int used;
                pool->free = cell->next;
                atomic_inc(&pool->counter);
                used = atomic_read(&pool->counter);
                if (pool->max_used < used)
                        pool->max_used = used;
                pool->event_alloc_success++;
                /* clear cell pointers */
                cell->next = NULL;
                err = 0;
        } else
                pool->event_alloc_failures++;
        *cellp = cell;

__error:
        spin_unlock_irqrestore(&pool->lock, flags);
        return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
                      struct snd_seq_event_cell **cellp, int nonblock,
                      struct file *file)
{
        int ncells, err;
        unsigned int extlen;
        struct snd_seq_event_cell *cell;

        *cellp = NULL;

        ncells = 0;
        extlen = 0;
        if (snd_seq_ev_is_variable(event)) {
                extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
                ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
        }
        if (ncells >= pool->total_elements)
                return -ENOMEM;

        err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
        if (err < 0)
                return err;

        /* copy the event */
        cell->event = *event;

        /* decompose */
        if (snd_seq_ev_is_variable(event)) {
                int len = extlen;
                int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
                int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
                struct snd_seq_event_cell *src, *tmp, *tail;
                char *buf;

                cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
                cell->event.data.ext.ptr = NULL;

                src = (struct snd_seq_event_cell *)event->data.ext.ptr;
                buf = (char *)event->data.ext.ptr;
                tail = NULL;

                while (ncells-- > 0) {
                        int size = sizeof(struct snd_seq_event);
                        if (len < size)
                                size = len;
                        err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
                        if (err < 0)
                                goto __error;
                        if (cell->event.data.ext.ptr == NULL)
                                cell->event.data.ext.ptr = tmp;
                        if (tail)
                                tail->next = tmp;
                        tail = tmp;
                        /* copy chunk */
                        if (is_chained && src) {
                                tmp->event = src->event;
                                src = src->next;
                        } else if (is_usrptr) {
                                if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
                                        err = -EFAULT;
                                        goto __error;
                                }
                        } else {
                                memcpy(&tmp->event, buf, size);
                        }
                        buf += size;
                        len -= size;
                }
        }

        *cellp = cell;
        return 0;

__error:
        snd_seq_cell_free(cell);
        return err;
}
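
/*
 * Illustrative sketch (not part of the original file): how an output
 * path might duplicate an event into pool cells and give them back on
 * failure.  The function name is hypothetical; the real callers live
 * in seq_clientmgr.c and seq_fifo.c.
 */
#if 0	/* example only, excluded from the build */
static int example_deliver(struct snd_seq_pool *pool,
                           struct snd_seq_event *ev, int nonblock)
{
        struct snd_seq_event_cell *cell;
        int err;

        /* copies *ev and decomposes any external data into a chain */
        err = snd_seq_event_dup(pool, ev, &cell, nonblock, NULL);
        if (err < 0)
                return err;	/* -EINVAL, -ENOMEM, -EAGAIN or -ERESTARTSYS */

        /* ... enqueue the cell here; if that fails, return the cell
         * (and its whole chain) to the pool: */
        snd_seq_cell_free(cell);
        return 0;
}
#endif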


/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
                           poll_table *wait)
{
        poll_wait(file, &pool->output_sleep, wait);
        return snd_seq_output_ok(pool);
}
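
/*
 * Illustrative sketch (not part of the original file): a poll handler
 * reporting writability once the pool has room again.  Hypothetical
 * names; the real use is in seq_clientmgr.c.
 */
#if 0	/* example only, excluded from the build */
static unsigned int example_poll_out(struct file *file, poll_table *wait,
                                     struct snd_seq_pool *pool)
{
        unsigned int mask = 0;

        /* registers on pool->output_sleep and checks the watermark */
        if (snd_seq_pool_poll_wait(pool, file, wait))
                mask |= POLLOUT | POLLWRNORM;
        return mask;
}
#endif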


/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
        int cell;
        struct snd_seq_event_cell *cellptr;
        unsigned long flags;

        if (snd_BUG_ON(!pool))
                return -EINVAL;
        if (pool->ptr)	/* already initialized; should this be atomic? */
                return 0;

        pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
        if (pool->ptr == NULL) {
                snd_printd("seq: malloc for sequencer events failed\n");
                return -ENOMEM;
        }

        /* add new cells to the free cell list */
        spin_lock_irqsave(&pool->lock, flags);
        pool->free = NULL;

        for (cell = 0; cell < pool->size; cell++) {
                cellptr = pool->ptr + cell;
                cellptr->pool = pool;
                cellptr->next = pool->free;
                pool->free = cellptr;
        }
        pool->room = (pool->size + 1) / 2;

        /* init statistics */
        pool->max_used = 0;
        pool->total_elements = pool->size;
        spin_unlock_irqrestore(&pool->lock, flags);
        return 0;
}

/* remove events from the pool and free the cell array */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
        unsigned long flags;
        struct snd_seq_event_cell *ptr;
        int max_count = 5 * HZ;

        if (snd_BUG_ON(!pool))
                return -EINVAL;

        /* wait for closing all threads */
        spin_lock_irqsave(&pool->lock, flags);
        pool->closing = 1;
        spin_unlock_irqrestore(&pool->lock, flags);

        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);

        /* wait (up to 5 seconds) until all allocated cells are returned */
        while (atomic_read(&pool->counter) > 0) {
                if (max_count == 0) {
                        snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n",
                                   atomic_read(&pool->counter));
                        break;
                }
                schedule_timeout_uninterruptible(1);
                max_count--;
        }

        /* release all resources */
        spin_lock_irqsave(&pool->lock, flags);
        ptr = pool->ptr;
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        spin_unlock_irqrestore(&pool->lock, flags);

        vfree(ptr);

        spin_lock_irqsave(&pool->lock, flags);
        pool->closing = 0;
        spin_unlock_irqrestore(&pool->lock, flags);

        return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
        struct snd_seq_pool *pool;

        /* create pool block */
        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (pool == NULL) {
                snd_printd("seq: malloc failed for pool\n");
                return NULL;
        }
        spin_lock_init(&pool->lock);
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        atomic_set(&pool->counter, 0);
        pool->closing = 0;
        init_waitqueue_head(&pool->output_sleep);

        pool->size = poolsize;

        /* init statistics */
        pool->max_used = 0;
        return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
        struct snd_seq_pool *pool = *ppool;

        *ppool = NULL;
        if (pool == NULL)
                return 0;
        snd_seq_pool_done(pool);
        kfree(pool);
        return 0;
}
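
/*
 * Illustrative sketch (not part of the original file): the full pool
 * life cycle.  snd_seq_pool_new() only allocates the descriptor; the
 * cell array is vmalloc'ed lazily by snd_seq_pool_init().  The pool
 * size of 500 and the wrapper name are made up for the example.
 */
#if 0	/* example only, excluded from the build */
static int example_pool_lifecycle(void)
{
        struct snd_seq_pool *pool;
        int err;

        pool = snd_seq_pool_new(500);
        if (!pool)
                return -ENOMEM;

        err = snd_seq_pool_init(pool);	/* allocate and chain the cells */
        if (err < 0) {
                snd_seq_pool_delete(&pool);
                return err;
        }

        /* ... use snd_seq_event_dup() / snd_seq_cell_free() ... */

        /* snd_seq_pool_delete() runs snd_seq_pool_done() internally,
         * waiting up to 5 seconds for outstanding cells */
        return snd_seq_pool_delete(&pool);
}
#endif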

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
        return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}


/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
                       struct snd_seq_pool *pool, char *space)
{
        if (pool == NULL)
                return;
        snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
        snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
        snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
        snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
        snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}
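
/*
 * Illustrative output of the snd_iprintf() calls above as it appears
 * under /proc (the numbers are made up):
 *
 *   Pool size          : 500
 *   Cells in use       : 3
 *   Peak cells in use  : 16
 *   Alloc success      : 42
 *   Alloc failures     : 0
 */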