#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#define PL330_MAX_CHAN 8
#define PL330_MAX_IRQS 32
#define PL330_MAX_PERI 32

#define DS_ST_STOP 0x0
#define DS_ST_EXEC 0x1
#define DS_ST_CMISS 0x2
#define DS_ST_UPDTPC 0x3
#define DS_ST_ATBRR 0x5
#define DS_ST_QBUSY 0x6
#define DS_ST_KILL 0x8
#define DS_ST_CMPLT 0x9
#define DS_ST_FLTCMP 0xe
#define DS_ST_FAULT 0xf

#define INTSTATUS 0x28

#define FTC(n) (_FTC + (n)*0x4)

#define CS(n) (_CS + (n)*0x8)
#define CS_CNS (1 << 21)

#define CPC(n) (_CPC + (n)*0x8)

#define SA(n) (_SA + (n)*0x20)

#define DA(n) (_DA + (n)*0x20)

#define CC(n) (_CC + (n)*0x20)

#define CC_SRCINC (1 << 0)
#define CC_DSTINC (1 << 14)
#define CC_SRCPRI (1 << 8)
#define CC_DSTPRI (1 << 22)
#define CC_SRCNS (1 << 9)
#define CC_DSTNS (1 << 23)
#define CC_SRCIA (1 << 10)
#define CC_DSTIA (1 << 24)
#define CC_SRCBRSTLEN_SHFT 4
#define CC_DSTBRSTLEN_SHFT 18
#define CC_SRCBRSTSIZE_SHFT 1
#define CC_DSTBRSTSIZE_SHFT 15
#define CC_SRCCCTRL_SHFT 11
#define CC_SRCCCTRL_MASK 0x7
#define CC_DSTCCTRL_SHFT 25
#define CC_DRCCCTRL_MASK 0x7
#define CC_SWAP_SHFT 28
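/*
 * Illustrative sketch, not part of the original driver: a channel control
 * register (CCR) value is assembled from the CC_* fields above. For
 * example, an incrementing transfer using 4-byte, 16-beat bursts on both
 * source and destination would be built roughly as:
 *
 *	ccr  = CC_SRCINC | CC_DSTINC;
 *	ccr |= (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT);
 *	ccr |= (15 << CC_SRCBRSTLEN_SHFT) | (15 << CC_DSTBRSTLEN_SHFT);
 *
 * Burst size is encoded as log2(bytes) and burst length as (beats - 1),
 * matching the BRST_SIZE()/BRST_LEN() decode macros further below.
 */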
#define LC0(n) (_LC0 + (n)*0x20)

#define LC1(n) (_LC1 + (n)*0x20)

#define DBGSTATUS 0xd00
#define DBG_BUSY (1 << 0)

#define DBGINST0 0xd08
#define DBGINST1 0xd0c

#define PERIPH_ID 0xfe0
#define PERIPH_REV_SHIFT 20
#define PERIPH_REV_MASK 0xf
#define PERIPH_REV_R0P0 0
#define PERIPH_REV_R1P0 1
#define PERIPH_REV_R1P1 2
#define PCELL_ID 0xff0

#define CR0_PERIPH_REQ_SET (1 << 0)
#define CR0_BOOT_EN_SET (1 << 1)
#define CR0_BOOT_MAN_NS (1 << 2)
#define CR0_NUM_CHANS_SHIFT 4
#define CR0_NUM_CHANS_MASK 0x7
#define CR0_NUM_PERIPH_SHIFT 12
#define CR0_NUM_PERIPH_MASK 0x1f
#define CR0_NUM_EVENTS_SHIFT 17
#define CR0_NUM_EVENTS_MASK 0x1f

#define CR1_ICACHE_LEN_SHIFT 0
#define CR1_ICACHE_LEN_MASK 0x7
#define CR1_NUM_ICACHELINES_SHIFT 4
#define CR1_NUM_ICACHELINES_MASK 0xf

#define CRD_DATA_WIDTH_SHIFT 0
#define CRD_DATA_WIDTH_MASK 0x7
#define CRD_WR_CAP_SHIFT 4
#define CRD_WR_CAP_MASK 0x7
#define CRD_WR_Q_DEP_SHIFT 8
#define CRD_WR_Q_DEP_MASK 0xf
#define CRD_RD_CAP_SHIFT 12
#define CRD_RD_CAP_MASK 0x7
#define CRD_RD_Q_DEP_SHIFT 16
#define CRD_RD_Q_DEP_MASK 0xf
#define CRD_DATA_BUFF_SHIFT 20
#define CRD_DATA_BUFF_MASK 0x3ff

#define DESIGNER 0x41

#define INTEG_CFG 0x0
#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))

#define PCELL_ID_VAL 0xb105f00d
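/* The value above is the standard ARM PrimeCell component ID ("b105f00d"). */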
#define PL330_STATE_STOPPED (1 << 0)
#define PL330_STATE_EXECUTING (1 << 1)
#define PL330_STATE_WFE (1 << 2)
#define PL330_STATE_FAULTING (1 << 3)
#define PL330_STATE_COMPLETING (1 << 4)
#define PL330_STATE_WFP (1 << 5)
#define PL330_STATE_KILLING (1 << 6)
#define PL330_STATE_FAULT_COMPLETING (1 << 7)
#define PL330_STATE_CACHEMISS (1 << 8)
#define PL330_STATE_UPDTPC (1 << 9)
#define PL330_STATE_ATBARRIER (1 << 10)
#define PL330_STATE_QUEUEBUSY (1 << 11)
#define PL330_STATE_INVALID (1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
			| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH 0x54
#define CMD_DMAEND 0x00
#define CMD_DMAFLUSHP 0x35
#define CMD_DMAGO 0xa0
#define CMD_DMALD 0x04
#define CMD_DMALDP 0x25
#define CMD_DMALP 0x20
#define CMD_DMALPEND 0x28
#define CMD_DMAKILL 0x01
#define CMD_DMAMOV 0xbc
#define CMD_DMANOP 0x18
#define CMD_DMARMB 0x12
#define CMD_DMASEV 0x34
#define CMD_DMAST 0x08
#define CMD_DMASTP 0x29
#define CMD_DMASTZ 0x0c
#define CMD_DMAWFE 0x36
#define CMD_DMAWFP 0x30
#define CMD_DMAWMB 0x13

#define SZ_DMAFLUSHP 2
#define SZ_DMALPEND 2

#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
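/*
 * Worked example (not from the original source): for a CCR encoding a
 * source burst size field of 2 (i.e. 4-byte beats) and a burst length
 * field of 15 (i.e. 16 beats), BRST_SIZE() is 4 and BRST_LEN() is 16, so
 * BYTE_TO_BURST(256, ccr) == 256 / 4 / 16 == 4 bursts and
 * BURST_TO_BYTE(4, ccr) == 256 bytes.
 */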
#define MCODE_BUFF_PER_REQ 256

#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
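/*
 * Descriptive note: IS_FREE() treats a request slot as idle when its
 * microcode buffer begins with DMAEND, and UNTIL() busy-waits (with
 * cpu_relax()) until thread 't' reaches one of the states in mask 's'.
 */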
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...) do { \
		printk("%x:", cmd_line); \
		printk(x); \
		cmd_line += off; \
	} while (0)
#define PL330_DBGMC_START(addr) (cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
#define PL330_DBGMC_START(addr) do {} while (0)
#endif
#define NR_DEFAULT_DESC 16

#define DMAC_MODE_NS (1 << 0)
static inline bool _queue_empty(struct pl330_thread *thrd)

static inline bool _queue_full(struct pl330_thread *thrd)

static inline bool is_manager(struct pl330_thread *thrd)
	if (thrd->id == pl330->pinfo->pcfg.num_chan)

static inline bool _manager_ns(struct pl330_thread *thrd)
	id |= (readb(regs + off + 0x0) << 0);
	id |= (readb(regs + off + 0x4) << 8);
	id |= (readb(regs + off + 0x8) << 16);
	id |= (readb(regs + off + 0xc) << 24);

static inline u32 get_revision(u32 periph_id)
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		da == 1 ? "DA" : "SA", val);

static inline u32 _emit_END(unsigned dry_run, u8 buf[])

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)

		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		cond == SINGLE ? 'S' : 'B', peri >> 3);

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
	unsigned loop = arg->loop;

		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

		forever ? "FE" : "END",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)

		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		cond == SINGLE ? 'S' : 'B', peri >> 3);

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		ev >> 3, invalidate ? ", I" : "");
static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],

		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
	unsigned ns = arg->ns;

	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;
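/*
 * Note on the _emit_*() helpers above (added summary, not verbatim source
 * text): each helper writes a PL330 opcode into buf[] only when dry_run is
 * zero, and always returns the opcode size in bytes. The driver therefore
 * runs the generator twice per request: first with dry_run = 1 to measure
 * the microcode size against the MCODE_BUFF_PER_REQ budget, then with
 * dry_run = 0 to actually emit it (see _setup_req() and pl330_submit_req()
 * below).
 */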
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

static bool _until_dmac_idle(struct pl330_thread *thrd)
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
	val = (insn[0] << 16) | (insn[1] << 24);

	val |= (thrd->id << 8);

	val = *((u32 *)&insn[2]);

	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");

	_emit_END(0, req->mc_cpu);

	if (is_manager(thrd))
	if (is_manager(thrd))
	if (is_manager(thrd))
	if (is_manager(thrd))
	if (is_manager(thrd))
	if (is_manager(thrd))
	if (is_manager(thrd))

	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	_emit_KILL(0, insn);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (!req || !req->r)

	ns = r->cfg->nonsecure ? 1 : 0;

	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	_emit_GO(0, insn, &go);

	_execute_DBGINSN(thrd, insn, true);

	switch (_state(thrd)) {
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],

		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);

		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_RMB(dry_run, &buf[off]);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_WMB(dry_run, &buf[off]);
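/*
 * Added note: _ldst_memtomem() emits either plain DMALD/DMAST pairs or the
 * DMALD/DMARMB/DMAST/DMAWMB variant shown above. In the full driver the
 * barrier-free form is chosen for lock-up-free DMAC revisions (r1p0 and
 * later, per get_revision()); older revisions keep the read/write memory
 * barriers between load and store.
 */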
static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],

		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],

		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
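/*
 * Added note: for peripheral transfers both directions generate the same
 * shape of microcode per burst: wait for a peripheral request (DMAWFP),
 * move one burst (DMALDP + DMAST for device-to-memory, DMALD + DMASTP for
 * memory-to-device), then clear the request state with DMAFLUSHP.
 */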
static int _bursts(unsigned dry_run, u8 buf[],

	switch (pxs->r->rqtype) {
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;

	if (*bursts >= 256*256) {
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt0 = *bursts / lcnt1;

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.forever = false;
	szlpend = _emit_LPEND(1, buf, &lpend);

	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off += _emit_LP(dry_run, &buf[off], 0, lcnt0);

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.forever = false;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	lpend.forever = false;
	lpend.bjump = off - ljmp0;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	*bursts = lcnt1 * cyc;
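/*
 * Added note: _loop() wraps the burst sequence in up to two nested
 * DMALP/DMALPEND loops. Each PL330 loop counter is 8 bits wide, so one
 * pass covers at most 256 * 256 bursts; _setup_loops() below simply keeps
 * calling _loop() until the requested number of bursts is exhausted.
 */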
static inline int _setup_loops(unsigned dry_run, u8 buf[],
		off += _loop(dry_run, &buf[off], &c, pxs);

static inline int _setup_xfer(unsigned dry_run, u8 buf[],
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	off += _setup_loops(dry_run, &buf[off], pxs);

static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	off += _setup_xfer(dry_run, &buf[off], pxs);

	off += _emit_SEV(dry_run, &buf[off], thrd->ev);

	off += _emit_END(dry_run, &buf[off]);
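/*
 * Added note: per request the emitted microcode is therefore a DMAMOV CCR,
 * then for each transfer a DMAMOV SAR/DAR pair followed by the looped
 * bursts, and finally a DMASEV on the thread's event (to signal
 * completion) and a terminating DMAEND.
 */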
static inline bool _is_valid(u32 ccr)
static int pl330_submit_req(void *ch_id, struct pl330_req *r)
	unsigned long flags;

	if (!r || !thrd || thrd->free)

		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
			__func__, __LINE__);

			"%s:%d Invalid peripheral(%u)!\n",
			__func__, __LINE__, r->peri);

	if (_queue_full(thrd)) {

	if (!_manager_ns(thrd))
		r->cfg->nonsecure = 0;
	else
		r->cfg->nonsecure = 1;

	ccr = _prepare_ccr(r->cfg);

	if (!_is_valid(ccr)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);

	ret = _setup_req(1, thrd, idx, &xs);
			"%s:%d Try increasing mcbufsz\n",
			__func__, __LINE__);

	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);

	spin_unlock_irqrestore(&pl330->lock, flags);
static void pl330_dotask(unsigned long data)
	unsigned long flags;

	pl330->dmac_tbd.reset_dmac = false;

	pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;

	pl330->dmac_tbd.reset_mngr = false;

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {

			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			pl330->dmac_tbd.reset_chan &= ~(1 << i);

	spin_unlock_irqrestore(&pl330->lock, flags);
static int pl330_update(const struct pl330_info *pi)
	unsigned long flags;
	int id, ev, ret = 0;

	pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);

		if (val & (1 << i)) {
			"Reset Channel-%d\t CS-%x FTC-%x\n",

	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) {
			if (inten & (1 << ev))

			mark_free(thrd, active);

	spin_unlock_irqrestore(&pl330->lock, flags);

	spin_unlock_irqrestore(&pl330->lock, flags);

	tasklet_schedule(&pl330->tasks);
	unsigned long flags;

		mark_free(thrd, active);

	if ((active == -1) && !_start(thrd))

	spin_unlock_irqrestore(&pl330->lock, flags);

static inline int _alloc_event(struct pl330_thread *thrd)
	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
static bool _chan_ns(const struct pl330_info *pi, int i)
	return pi->pcfg.irq_ns & (1 << i);

static void *pl330_request_channel(const struct pl330_info *pi)
	unsigned long flags;

	chans = pi->pcfg.num_chan;

	for (i = 0; i < chans; i++) {
		if ((thrd->free) && (!_manager_ns(thrd) ||
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {

	spin_unlock_irqrestore(&pl330->lock, flags);
static inline void _free_event(struct pl330_thread *thrd, int ev)

static void pl330_release_channel(void *ch_id)
	unsigned long flags;

	if (!thrd || thrd->free)

	_free_event(thrd, thrd->ev);

	spin_unlock_irqrestore(&pl330->lock, flags);

static void read_dmac_config(struct pl330_info *pi)
	pi->pcfg.data_bus_width = 8 * (1 << val);

	pi->pcfg.data_buf_dep = val + 1;

	pi->pcfg.num_peri = 0;
static inline void _reset_thread(struct pl330_thread *thrd)
	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
	thrd->req[1].mc_bus = thrd->req[0].mc_bus

static int dmac_alloc_threads(struct pl330_dmac *pl330)
	int chans = pi->pcfg.num_chan;

	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),

	for (i = 0; i < chans; i++) {
		_reset_thread(thrd);
static int dmac_alloc_resources(struct pl330_dmac *pl330)
	int chans = pi->pcfg.num_chan;

		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);

	ret = dmac_alloc_threads(pl330);
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
	if (!pi || !pi->dev)

		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",

	read_dmac_config(pi);

	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);

		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);

	for (i = 0; i < pi->pcfg.num_events; i++)

	ret = dmac_alloc_resources(pl330);
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
static int dmac_free_threads(struct pl330_dmac *pl330)
	int chans = pi->pcfg.num_chan;

	for (i = 0; i < chans; i++) {
		pl330_release_channel((void *)thrd);

static void dmac_free_resources(struct pl330_dmac *pl330)
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dmac_free_resources(pl330);
	unsigned long flags;

	callback = desc->txd.callback;
	param = desc->txd.callback_param;

	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

static inline void handle_cyclic_desc_list(struct list_head *list)
	unsigned long flags;

	callback = desc->txd.callback;

	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	} else if (ret == -EAGAIN) {
		dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
			__func__, __LINE__, desc->txd.cookie);
		tasklet_schedule(&pch->task);
static void pl330_tasklet(unsigned long data)
	unsigned long flags;

		dma_cookie_complete(&desc->txd);
		list_move_tail(&desc->node, &list);

	spin_unlock_irqrestore(&pch->lock, flags);

		handle_cyclic_desc_list(&list);
		free_desc_list(&list);

	unsigned long flags;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
	if (chan->device->dev->driver != &pl330_driver.drv)

	if (chan->device->dev->of_node) {
		const __be32 *prop_value;

		prop_value = ((struct property *)param)->value;

		return ((chan->private == node) &&

	return *peri_id == (unsigned)param;

static int pl330_alloc_chan_resources(struct dma_chan *chan)
	unsigned long flags;

	dma_cookie_init(chan);

	spin_unlock_irqrestore(&pch->lock, flags);

	spin_unlock_irqrestore(&pch->lock, flags);
	unsigned long flags;

		list_move_tail(&desc->node, &list);

	list_splice_tail_init(&list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pch->lock, flags);

		dev_err(pch->dmac->pif.dev, "Not supported command.\n");

static void pl330_free_chan_resources(struct dma_chan *chan)
	unsigned long flags;

	list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);

	return dma_cookie_status(chan, cookie, txstate);
static void pl330_issue_pending(struct dma_chan *chan)
	pl330_tasklet((unsigned long) to_pchan(chan));

	unsigned long flags;

	while (!list_empty(&last->node)) {
		dma_cookie_assign(&desc->txd);

	cookie = dma_cookie_assign(&last->txd);

	spin_unlock_irqrestore(&pch->lock, flags);
	desc->req.x = &desc->px;
	desc->req.token = desc;

	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;

	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);

	unsigned long flags;

	desc = kmalloc(count * sizeof(*desc), flg);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	unsigned long flags;

		list_del_init(&desc->node);

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
	u8 *peri_id = pch->chan.private;

	desc = pluck_desc(pdmac);

		desc = pluck_desc(pdmac);
			"%s:%d ALERT!\n", __func__, __LINE__);

	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;

		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);

	fill_px(&desc->px, dst, src, len);
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
		unsigned long flags, void *context)

	desc = pl330_get_desc(pch);
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);

	switch (direction) {
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;

		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;

		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);

	desc->rqcfg.brst_len = 1;

	fill_px(&desc->px, dst, src, period_len);
		dma_addr_t src, size_t len, unsigned long flags)

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	burst = pi->pcfg.data_bus_width / 8;

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);
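	/*
	 * Worked example (not from the original source): on a DMAC with a
	 * 64-bit data bus, burst = 8, so the loop above stops once
	 * (1 << brst_size) == 8, i.e. brst_size = 3 (log2 of the bus width
	 * in bytes). get_burst_len() then picks the largest burst length
	 * that both fits the data buffer depth and divides the transfer
	 * length evenly.
	 */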
		unsigned long flg, void *context)
	unsigned long flags;

	if (unlikely(!pch || !sgl || !sg_len))

		desc = pl330_get_desc(pch);
			"%s:%d Unable to fetch desc\n",
			__func__, __LINE__);

			while (!list_empty(&first->node)) {

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;

			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;

		desc->rqcfg.brst_len = 1;

	desc->txd.flags = flg;
static irqreturn_t pl330_irq_handler(int irq, void *data)
	if (pl330_update(data))

	pdat = adev->dev.platform_data;

		dev_err(&adev->dev, "unable to allocate mem\n");

			dev_name(&adev->dev), pi);

	ret = pl330_add(pi);

		dev_warn(&adev->dev, "unable to allocate desc\n");

	num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);

		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");

		if (!adev->dev.of_node)

			pch->chan.private = adev->dev.of_node;

		pch->chan.device = pd;

	if (pi->pcfg.num_peri) {

		dev_err(&adev->dev, "unable to register DMAC\n");

		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	pl330_free_chan_resources(&pch->chan);

static struct amba_id pl330_ids[] = {

	.name = "dma-pl330",
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,