memory.c
/*
 *  Copyright (c) by Jaroslav Kysela <[email protected]>
 *  Copyright (c) by Takashi Iwai <[email protected]>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page argument of the macro below is an EMU page (4096 bytes), not an
 * aligned kernel page as used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
        (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
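/* Editorial note (not part of the original source): each 32-bit PTB entry
 * packs the DMA address of an EMU page together with the PTB index itself;
 * the address is shifted left by one bit and the index is OR'ed into the low
 * bits.  Illustrative arithmetic: for page 3 at DMA address 0x12345000 the
 * stored value is cpu_to_le32((0x12345000 << 1) | 3) = cpu_to_le32(0x2468A003).
 */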

#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES         (MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)
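/* Illustrative arithmetic (editorial note, assumed values): with
 * EMUPAGESIZE = 4096 and a 4 KiB kernel PAGE_SIZE, UNIT_PAGES is 1 and EMU
 * pages map 1:1 onto kernel pages.  With a 16 KiB PAGE_SIZE, UNIT_PAGES is 4,
 * so each aligned (kernel) page covers four consecutive PTB entries and
 * MAX_ALIGN_PAGES shrinks to MAXPAGES / 4.
 */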

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                addr += EMUPAGESIZE;
        }
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                /* do not increment the address (always the silent page) */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */


/*
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped
 * block in nextp
 * if not found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 0, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each (pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                if (blk->mapped_page < 0)
                        continue;
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = MAX_ALIGN_PAGES - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}
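/* Editorial summary of the function above (not part of the original source):
 * a gap that matches npages exactly is taken at once; otherwise the largest
 * gap between mapped blocks is remembered, and the free area after the last
 * mapped block wins whenever it is at least as large as that gap.  If no
 * region can hold npages entries, -ENOMEM is returned via found_page.
 */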

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        /* insert this block at the proper position in the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as the newest block to the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of the empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 0;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = MAX_ALIGN_PAGES;

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc, the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set the aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE-1)) {
                snd_printk(KERN_ERR "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_move_tail(&blk->mapped_order_link,
                               &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks */
                /* starting from the oldest block */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* OK, the empty region is large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}
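/* Editorial note (not part of the original source): when map_memblk() fails,
 * the loop above walks mapped_order_link_head from the oldest mapping to the
 * newest, unmapping every block that is not map_locked until the freed region
 * is large enough.  DMA buffers set map_locked and are therefore never
 * evicted on behalf of synth allocations.
 */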

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;

        idx = runtime->period_size >= runtime->buffer_size ?
                                        (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill buffer addresses; the page pointers are not stored, so that
         * snd_free_pci_page() is not called in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr;
                if (ofs >= runtime->dma_bytes)
                        addr = emu->silent_page.addr;
                else
                        addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                if (! is_valid_page(emu, addr)) {
                        printk(KERN_ERR "emu: failure page = %d\n", idx);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}
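/* Usage sketch (illustrative only, not from this file): a PCM hw_params
 * callback would typically pair these helpers roughly as follows, assuming
 * an epcm object that caches the allocated block:
 *
 *      if (epcm->memblk)
 *              snd_emu10k1_free_pages(emu, epcm->memblk);
 *      epcm->memblk = snd_emu10k1_alloc_pages(emu, substream);
 *      if (!epcm->memblk)
 *              return -ENOMEM;
 */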


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        if (snd_BUG_ON(!emu || !blk))
                return -EINVAL;
        return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* check new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;
        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--; /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                               int last_page)
{
        int page;

        for (page = first_page; page <= last_page; page++) {
                free_page((unsigned long)emu->page_ptr_table[page]);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                /* first try to allocate from <4GB zone */
                struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
                                            __GFP_NOWARN);
                if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
                        if (p)
                                __free_page(p);
                        /* try to allocate from <16MB zone */
                        p = alloc_page(GFP_ATOMIC | GFP_DMA |
                                       __GFP_NORETRY | /* no OOM-killer */
                                       __GFP_NOWARN);
                }
                if (!p) {
                        __synth_free_pages(emu, first_page, page - 1);
                        return -ENOMEM;
                }
                emu->page_addr_table[page] = page_to_phys(p);
                emu->page_ptr_table[page] = page_address(p);
        }
        return 0;
}
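/* Editorial note (not part of the original source): the allocation above
 * first tries a GFP_DMA32 page so the buffer stays below 4 GB and verifies
 * the resulting PFN against emu->dma_mask; only if that page is unusable does
 * it fall back to the <16MB GFP_DMA zone, with __GFP_NORETRY so exhausting
 * the scarce low-memory zone never triggers the OOM killer.
 */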

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int first_page, last_page;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        __synth_free_pages(emu, first_page, last_page);
        return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;
        if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
                return NULL;
        ptr = emu->page_ptr_table[page];
        if (! ptr) {
                printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void*)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}
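/* Editorial note (not part of the original source): because synth pages are
 * not physically contiguous, snd_emu10k1_synth_bzero() above and
 * snd_emu10k1_synth_copy_from_user() below walk the block one kernel page at
 * a time, translating each aligned page through offset_ptr() and touching at
 * most the bytes that remain within that page before advancing.
 */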

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}