#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
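/*
 * The macro above writes one page-table entry: a little-endian 32-bit
 * word holding the page's DMA address shifted left by one, OR'ed with
 * the entry's own index in the low bits.  Its page argument counts Emu
 * pages (EMUPAGESIZE, 4096 bytes), not host pages.
 */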
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
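/*
 * Worked example: with 4 KB host pages UNIT_PAGES is 1 and aligned pages
 * correspond 1:1 to PTB entries; with 16 KB host pages (as on some
 * non-x86 configurations) UNIT_PAGES is 4, each aligned page covers four
 * PTB entries, and MAX_ALIGN_PAGES shrinks to MAXPAGES / 4.
 */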
#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE: one PTB entry per aligned page */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill the UNIT_PAGES entries covered by one aligned page */
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* the silent page address is not advanced */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
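/*
 * Unused or unmapped PTB entries are pointed at the silent page instead
 * of being left stale, so the hardware always fetches valid (silent)
 * data even for pages that no longer belong to any mapped block.
 */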
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)
/* initialize emu10k1 part of the memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
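/*
 * Search the list of mapped blocks for an empty PTB area of at least
 * npages.  An exact fit is returned immediately; otherwise the largest
 * gap found is used.  *nextp receives the list position before which the
 * new block should be inserted.  Returns the start page or -ENOMEM.
 */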
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* found a larger empty area */
			candidate = pos;
			max_size = size;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		/* the last area is the largest */
		candidate = &emu->mapped_link_head;
		found_page = page;
	}
	*nextp = candidate;
	return found_page;
}
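/*
 * Map a memory block onto the PTB: find an empty area, link the block
 * into the mapped list and the LRU order list, and fill the
 * corresponding PTB entries with the block's page addresses.
 * Call with emu->memblk_lock held.
 */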
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block at the proper position of the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
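/*
 * Unmap the block: unlink it, point its PTB entries back at the silent
 * page, and return the size (in pages) of the resulting empty area,
 * including any free space adjacent to the block.
 * Call with emu->memblk_lock held.
 */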
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty area */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* size of the new empty area */
}
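/*
 * Search for a free range of aligned pages large enough for size bytes
 * on the memhdr block list, and create a new memory block there.
 * Unlike the synth allocation path, the block is aligned to a page start.
 */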
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr,
						psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
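/*
 * Check that a DMA address is usable for the PTB: it must fit within the
 * chip's DMA mask and be aligned to EMUPAGESIZE.
 */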
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
			   emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}
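/*
 * Map the given memory block onto the PTB.  If the block is already
 * mapped, only its position in the LRU order list is renewed.  If no
 * empty area is available, unused blocks are unmapped, oldest first,
 * until enough space has been freed to retry the mapping.
 */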
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err, size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* already mapped: just renew the order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* no room: unmap unused blocks, oldest first, and retry */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty area is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}
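/*
 * Allocate PTB pages for a PCM substream.  The buffer pages come from
 * the substream's scatter-gather buffer; any pages past the end of the
 * buffer are pointed at the silent page.
 */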
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	/* when the IRQ is delayed, reserve extra pages after the buffer */
	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill the page table; pages beyond the buffer point at silence */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (! is_valid_page(emu, addr)) {
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries; keep the block mapped while the PCM runs */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
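/*
 * Allocate a synth sample memory block and back it with freshly
 * allocated kernel pages.
 */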
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_util_memhdr *hdr = hw->memhdr;
	struct snd_emu10k1_memblk *blk;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
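/*
 * Free a synth memory block: unmap it from the PTB if necessary, release
 * its backing pages, and return the block to the memory header.
 */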
int snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}
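/*
 * Compute the range of pages used exclusively by blk.  A boundary page
 * shared with the previous or next block in the list is excluded, since
 * the neighbor still uses it.
 */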
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;	/* shared with the previous block */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--;	/* shared with the next block */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
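/* release the kernel pages backing the given page range */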
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}
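/*
 * Allocate kernel pages for the block's exclusive page range and record
 * their DMA and kernel-virtual addresses in the page tables, rolling
 * back on failure.
 */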
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate a DMA-capable kernel page per emu page in the range
	 * (the in-tree driver additionally falls back to the <16MB zone) */
	for (page = first_page; page <= last_page; page++) {
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_NOWARN);
		if (!p) {
			/* roll back the pages allocated so far */
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}
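/* free the kernel pages of the block's exclusive page range */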
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}
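/*
 * Translate a (page, offset) pair into the kernel-virtual address inside
 * the backing page, with bounds and NULL checks.
 */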
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (! ptr) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}
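/*
 * Copy data from user space into a synth memory block, page by page for
 * the same reason as the bzero variant above.
 */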
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}
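/*
 * Typical usage (a sketch, inferred from how the emux synth loader
 * drives this API; not verbatim from this file):
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, size);
 *	if (blk == NULL)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, data, size)) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 *	// the block is already mapped on the PTB by snd_emu10k1_synth_alloc
 */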