Linux Kernel 3.7.1
sound/pci/ctxfi/ctvmem.c
#include "ctvmem.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)

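/*
 * Worked example for the two macros above (assumptions: CT_PAGE_SIZE is the
 * 4 KiB page size defined in ctvmem.h and sizeof(void *) is 8 on a 64-bit
 * kernel): CT_PTES_PER_PAGE = 4096 / 8 = 512 entries, so a single page-table
 * page covers CT_ADDRS_PER_PAGE = 512 * 4096 = 2 MiB of device-addressable
 * space.
 */
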
/* *
 * Find or create vm block based on requested @size.
 * @size must be page aligned.
 * */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size)
{
        struct ct_vm_block *block = NULL, *entry;
        struct list_head *pos;

        size = CT_PAGE_ALIGN(size);
        if (size > vm->size) {
                printk(KERN_ERR "ctxfi: Fail! No sufficient device virtual "
                       "memory space available!\n");
                return NULL;
        }

        mutex_lock(&vm->lock);
        list_for_each(pos, &vm->unused) {
                entry = list_entry(pos, struct ct_vm_block, list);
                if (entry->size >= size)
                        break; /* found a block that is big enough */
        }
        if (pos == &vm->unused)
                goto out;

        if (entry->size == size) {
                /* Move the vm node from unused list to used list directly */
                list_move(&entry->list, &vm->used);
                vm->size -= size;
                block = entry;
                goto out;
        }

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block)
                goto out;

        block->addr = entry->addr;
        block->size = size;
        list_add(&block->list, &vm->used);
        entry->addr += size;
        entry->size -= size;
        vm->size -= size;

 out:
        mutex_unlock(&vm->lock);
        return block;
}

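/*
 * Worked example for get_vm_block() (hypothetical numbers): with a single
 * unused entry { .addr = 0, .size = 8 * CT_PAGE_SIZE }, a request for
 * 3 * CT_PAGE_SIZE creates a new block { .addr = 0, .size = 3 * CT_PAGE_SIZE }
 * on the used list and shrinks the free entry to
 * { .addr = 3 * CT_PAGE_SIZE, .size = 5 * CT_PAGE_SIZE }, with vm->size
 * reduced by the three pages. An exact-size hit skips the split and simply
 * moves the existing entry onto the used list.
 */
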
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
        struct ct_vm_block *entry, *pre_ent;
        struct list_head *pos, *pre;

        block->size = CT_PAGE_ALIGN(block->size);

        mutex_lock(&vm->lock);
        list_del(&block->list);
        vm->size += block->size;

        list_for_each(pos, &vm->unused) {
                entry = list_entry(pos, struct ct_vm_block, list);
                if (entry->addr >= (block->addr + block->size))
                        break; /* found a position */
        }
        if (pos == &vm->unused) {
                list_add_tail(&block->list, &vm->unused);
                entry = block;
        } else {
                if ((block->addr + block->size) == entry->addr) {
                        entry->addr = block->addr;
                        entry->size += block->size;
                        kfree(block);
                } else {
                        __list_add(&block->list, pos->prev, pos);
                        entry = block;
                }
        }

        pos = &entry->list;
        pre = pos->prev;
        while (pre != &vm->unused) {
                entry = list_entry(pos, struct ct_vm_block, list);
                pre_ent = list_entry(pre, struct ct_vm_block, list);
                if ((pre_ent->addr + pre_ent->size) > entry->addr)
                        break;

                pre_ent->size += entry->size;
                list_del(pos);
                kfree(entry);
                pos = pre;
                pre = pos->prev;
        }
        mutex_unlock(&vm->lock);
}

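/*
 * Worked example for put_vm_block() (hypothetical numbers): freeing a block
 * { .addr = 3 * CT_PAGE_SIZE, .size = 2 * CT_PAGE_SIZE } between unused
 * neighbours [0, 3 pages) and [5 pages, 8 pages) first merges it with the
 * following entry, because their boundaries touch, and the backward loop then
 * folds the result into the preceding entry, leaving a single unused block
 * { .addr = 0, .size = 8 * CT_PAGE_SIZE }.
 */
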
/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
        struct ct_vm_block *block;
        unsigned int pte_start;
        unsigned i, pages;
        unsigned long *ptp;

        block = get_vm_block(vm, size);
        if (block == NULL) {
                printk(KERN_ERR "ctxfi: No virtual memory block that is big "
                       "enough to allocate!\n");
                return NULL;
        }

        ptp = (unsigned long *)vm->ptp[0].area;
        pte_start = (block->addr >> CT_PAGE_SHIFT);
        pages = block->size >> CT_PAGE_SHIFT;
        for (i = 0; i < pages; i++) {
                unsigned long addr;
                addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
                ptp[pte_start + i] = addr;
        }

        block->size = size;
        return block;
}

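/*
 * Minimal usage sketch, not part of the original file: the function names
 * below are hypothetical, and the sketch assumes a struct ct_vm * obtained
 * from ct_vm_create() plus a PCM substream backed by a scatter-gather buffer.
 * Callers reach the static functions above through the vm->map and vm->unmap
 * hooks installed in ct_vm_create().
 */
static struct ct_vm_block *example_map(struct ct_vm *vm,
                                       struct snd_pcm_substream *substream,
                                       int bytes)
{
        struct ct_vm_block *blk;

        blk = vm->map(vm, substream, bytes);    /* fills PTEs in vm->ptp[0] */
        if (!blk)
                return NULL;
        /* ... program the device with blk->addr ... */
        return blk;
}

static void example_unmap(struct ct_vm *vm, struct ct_vm_block *blk)
{
        vm->unmap(vm, blk);     /* returns the range to the unused list */
}
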
static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
        /* do unmapping */
        put_vm_block(vm, block);
}

/* *
 * return the host physical addr of the @index-th device
 * page table page on success, or ~0UL on failure.
 * The first returned ~0UL indicates the termination.
 * */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
        dma_addr_t addr;

        addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;

        return addr;
}

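/*
 * Illustrative sketch, not part of the original file (the function name is
 * hypothetical): per the comment on ct_get_ptp_phys(), a hardware setup
 * routine could walk the page-table pages through vm->get_ptp_phys() until
 * the first ~0UL sentinel.
 */
static void example_walk_ptp_pages(struct ct_vm *vm)
{
        dma_addr_t phys;
        int i;

        for (i = 0; ; i++) {
                phys = vm->get_ptp_phys(vm, i);
                if (phys == (dma_addr_t)~0UL)
                        break;  /* no more page table pages */
                /* ... write phys into the device's PTP registers ... */
        }
}
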
int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
        struct ct_vm *vm;
        struct ct_vm_block *block;
        int i, err = 0;

        *rvm = NULL;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        mutex_init(&vm->lock);

        /* Allocate page table pages */
        for (i = 0; i < CT_PTP_NUM; i++) {
                err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                                          snd_dma_pci_data(pci),
                                          PAGE_SIZE, &vm->ptp[i]);
                if (err < 0)
                        break;
        }
        if (err < 0) {
                /* no page table pages are allocated */
                ct_vm_destroy(vm);
                return -ENOMEM;
        }
        vm->size = CT_ADDRS_PER_PAGE * i;
        vm->map = ct_vm_map;
        vm->unmap = ct_vm_unmap;
        vm->get_ptp_phys = ct_get_ptp_phys;
        INIT_LIST_HEAD(&vm->unused);
        INIT_LIST_HEAD(&vm->used);
        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (NULL != block) {
                block->addr = 0;
                block->size = vm->size;
                list_add(&block->list, &vm->unused);
        }

        *rvm = vm;
        return 0;
}

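/*
 * Lifecycle sketch, not part of the original file (the function name is
 * hypothetical; a real caller would keep the vm pointer in its card state and
 * pass the pci_dev from its probe routine): create the device address space,
 * use the map/unmap hooks while the card is alive, then tear it down.
 */
static int example_vm_lifecycle(struct pci_dev *pci)
{
        struct ct_vm *vm;
        int err;

        err = ct_vm_create(&vm, pci);
        if (err < 0)
                return err;

        /* ... vm->map()/vm->unmap() calls while streams run ... */

        ct_vm_destroy(vm);
        return 0;
}
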
/* The caller must ensure no mapping pages are being used
 * by hardware before calling this function */
void ct_vm_destroy(struct ct_vm *vm)
{
        int i;
        struct list_head *pos;
        struct ct_vm_block *entry;

        /* free used and unused list nodes */
        while (!list_empty(&vm->used)) {
                pos = vm->used.next;
                list_del(pos);
                entry = list_entry(pos, struct ct_vm_block, list);
                kfree(entry);
        }
        while (!list_empty(&vm->unused)) {
                pos = vm->unused.next;
                list_del(pos);
                entry = list_entry(pos, struct ct_vm_block, list);
                kfree(entry);
        }

        /* free allocated page table pages */
        for (i = 0; i < CT_PTP_NUM; i++)
                snd_dma_free_pages(&vm->ptp[i]);

        vm->size = 0;

        kfree(vm);
}