Linux Kernel 3.7.1
arch/mips/lantiq/xway/dma.c
/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2011 John Crispin <[email protected]>
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/clk.h>

#include <lantiq_soc.h>
#include <xway_dma.h>

#define LTQ_DMA_CTRL            0x10
#define LTQ_DMA_CPOLL           0x14
#define LTQ_DMA_CS              0x18
#define LTQ_DMA_CCTRL           0x1C
#define LTQ_DMA_CDBA            0x20
#define LTQ_DMA_CDLEN           0x24
#define LTQ_DMA_CIS             0x28
#define LTQ_DMA_CIE             0x2C
#define LTQ_DMA_PS              0x40
#define LTQ_DMA_PCTRL           0x44
#define LTQ_DMA_IRNEN           0xf4

#define DMA_DESCPT              BIT(3)          /* descriptor complete irq */
#define DMA_TX                  BIT(8)          /* TX channel direction */
#define DMA_CHAN_ON             BIT(0)          /* channel on / off bit */
#define DMA_PDEN                BIT(6)          /* enable packet drop */
#define DMA_CHAN_RST            BIT(1)          /* channel reset bit */
#define DMA_RESET               BIT(0)          /* reset the dma engine */
#define DMA_IRQ_ACK             0x7e            /* IRQ status register */
#define DMA_POLL                BIT(31)         /* turn on channel polling */
#define DMA_CLK_DIV4            BIT(6)          /* polling clock divider */
#define DMA_2W_BURST            BIT(1)          /* 2 word burst length */
#define DMA_MAX_CHANNEL         20              /* the soc has 20 channels */
#define DMA_ETOP_ENDIANESS      (0xf << 8)      /* endianness swap for etop channels */
#define DMA_WEIGHT              (BIT(17) | BIT(16))     /* default channel weight */

#define ltq_dma_r32(x)                  ltq_r32(ltq_dma_membase + (x))
#define ltq_dma_w32(x, y)               ltq_w32(x, ltq_dma_membase + (y))
#define ltq_dma_w32_mask(x, y, z)       ltq_w32_mask(x, y, \
                                                ltq_dma_membase + (z))

static void __iomem *ltq_dma_membase;

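/*
 * Per-channel registers sit behind a channel-select window: writing the
 * channel number to LTQ_DMA_CS selects which channel the subsequent
 * CCTRL/CIE/CIS/CDBA/CDLEN accesses refer to.  The helpers below therefore
 * perform each select-and-access sequence with local interrupts disabled.
 */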
void
ltq_dma_enable_irq(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        local_irq_save(flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);

void
ltq_dma_disable_irq(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        local_irq_save(flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);

void
ltq_dma_ack_irq(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        local_irq_save(flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);

void
ltq_dma_open(struct ltq_dma_channel *ch)
{
        unsigned long flag;

        local_irq_save(flag);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
        ltq_dma_enable_irq(ch);
        local_irq_restore(flag);
}
EXPORT_SYMBOL_GPL(ltq_dma_open);

void
ltq_dma_close(struct ltq_dma_channel *ch)
{
        unsigned long flag;

        local_irq_save(flag);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
        ltq_dma_disable_irq(ch);
        local_irq_restore(flag);
}
EXPORT_SYMBOL_GPL(ltq_dma_close);

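/*
 * Allocate a coherent descriptor ring for the channel, program its base
 * address and length, then reset the channel and spin until the reset
 * bit clears.
 */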
static void
ltq_dma_alloc(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        ch->desc = 0;
        ch->desc_base = dma_alloc_coherent(NULL,
                                LTQ_DESC_NUM * LTQ_DESC_SIZE,
                                &ch->phys, GFP_ATOMIC);
        memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);

        local_irq_save(flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
        ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
        ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
        wmb();
        ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
        while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
                ;
        local_irq_restore(flags);
}

void
ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        ltq_dma_alloc(ch);

        local_irq_save(flags);
        ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
        ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);

void
ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
{
        unsigned long flags;

        ltq_dma_alloc(ch);

        local_irq_save(flags);
        ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
        ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);

void
ltq_dma_free(struct ltq_dma_channel *ch)
{
        if (!ch->desc_base)
                return;
        ltq_dma_close(ch);
        dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
                ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);

void
ltq_dma_init_port(int p)
{
        ltq_dma_w32(p, LTQ_DMA_PS);
        switch (p) {
        case DMA_PORT_ETOP:
                /*
                 * Tell the DMA engine to swap the endianness of data frames
                 * and drop packets if the channel arbitration fails.
                 */
                ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN,
                        LTQ_DMA_PCTRL);
                break;

        case DMA_PORT_DEU:
                ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2),
                        LTQ_DMA_PCTRL);
                break;

        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(ltq_dma_init_port);

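/*
 * Probe: map the register range, enable the clock, reset the engine,
 * mask all channel interrupts, and bring every channel into a known
 * (reset, polled, disabled) state.
 */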
static int __devinit
ltq_dma_init(struct platform_device *pdev)
{
        struct clk *clk;
        struct resource *res;
        int i;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                panic("Failed to get dma resource");

        /* remap dma register range */
        ltq_dma_membase = devm_request_and_ioremap(&pdev->dev, res);
        if (!ltq_dma_membase)
                panic("Failed to remap dma resource");

        /* power up and reset the dma engine */
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                panic("Failed to get dma clock");

        clk_enable(clk);
        ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);

        /* disable all interrupts */
        ltq_dma_w32(0, LTQ_DMA_IRNEN);

        /* reset/configure each channel */
        for (i = 0; i < DMA_MAX_CHANNEL; i++) {
                ltq_dma_w32(i, LTQ_DMA_CS);
                ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
                ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
                ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
        }
        dev_info(&pdev->dev, "init done\n");
        return 0;
}

static const struct of_device_id dma_match[] = {
        { .compatible = "lantiq,dma-xway" },
        {},
};
MODULE_DEVICE_TABLE(of, dma_match);

static struct platform_driver dma_driver = {
        .probe = ltq_dma_init,
        .driver = {
                .name = "dma-xway",
                .owner = THIS_MODULE,
                .of_match_table = dma_match,
        },
};

int __init
dma_init(void)
{
        return platform_driver_register(&dma_driver);
}

postcore_initcall(dma_init);
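
The exported helpers above are meant to be consumed by the SoC's client drivers (such as the ETOP Ethernet driver), which own the descriptor contents while this file owns channel setup and teardown. The fragment below is a minimal, hypothetical sketch of that usage: the channel number, the my_* names, and the surrounding driver are illustrative assumptions; only struct ltq_dma_channel (from xway_dma.h) and the ltq_dma_* calls come from this file.

#include <xway_dma.h>

/* Hypothetical RX channel bring-up in a client driver (sketch only). */
static struct ltq_dma_channel my_rx_ch;         /* "my_*" names are illustrative */

static int my_driver_rx_setup(void)
{
        my_rx_ch.nr = 6;                /* assumed channel number, board specific */

        ltq_dma_alloc_rx(&my_rx_ch);    /* allocate descriptor ring, configure as RX */
        ltq_dma_open(&my_rx_ch);        /* switch the channel on, unmask its irq */
        return 0;
}

/* In the driver's interrupt handler: ltq_dma_ack_irq(&my_rx_ch); */

static void my_driver_rx_teardown(void)
{
        ltq_dma_free(&my_rx_ch);        /* closes the channel and frees the ring */
}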