10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/export.h>
28 #define D40_NAME "dma40"
30 #define D40_PHY_CHAN -1
33 #define D40_CHAN_POS(chan) (2 * (chan / 2))
34 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
37 #define D40_SUSPEND_MAX_IT 500
40 #define DMA40_AUTOSUSPEND_DELAY 100
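/*
 * LCLA (logical channel link address) area parameters: the block must sit on
 * a 256 KiB (18-bit) boundary, provides up to 128 link slots per event group,
 * and d40_lcla_allocate() below retries the page allocation up to
 * MAX_LCLA_ALLOC_ATTEMPTS times to find a suitably aligned block.
 */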
43 #define LCLA_ALIGNMENT 0x40000
46 #define D40_LCLA_LINK_PER_EVENT_GRP 128
47 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
50 #define MAX_LCLA_ALLOC_ATTEMPTS 256
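/*
 * Allocation markers for a physical channel's allocated_src/allocated_dst
 * state: D40_ALLOC_FREE means unused, D40_ALLOC_PHY means taken as a whole
 * physical channel, and logical clients set per-event-group bits on top of
 * D40_ALLOC_LOG_FREE (0).
 */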
53 #define D40_ALLOC_FREE (1 << 31)
54 #define D40_ALLOC_PHY (1 << 30)
55 #define D40_ALLOC_LOG_FREE 0
93 static u32 d40_backup_regs[] = {
102 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
105 static u32 d40_backup_regs_v3[] = {
124 #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
126 static u32 d40_backup_regs_chan[] = {
400 return &d40c->chan.dev->device;
410 return !chan_is_physical(chan);
419 #define d40_err(dev, format, arg...) \
420 dev_err(dev, "[%s] " format, __func__, ## arg)
422 #define chan_err(d40c, format, arg...) \
423 d40_err(chan2dev(d40c), format, ## arg)
428 bool is_log = chan_is_logical(d40c);
438 base = d40d->lli_pool.pre_alloc_lli;
492 static int d40_lcla_alloc_one(struct d40_chan *d40c,
509 if (!d40c->base->lcla_pool.alloc_map[p + i]) {
510 d40c->base->lcla_pool.alloc_map[p + i] = d40d;
517 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
522 static int d40_lcla_free_all(struct d40_chan *d40c,
529 if (chan_is_physical(d40c))
535 if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
547 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
553 static void d40_desc_remove(struct d40_desc *d40d)
562 if (!list_empty(&d40c->client)) {
567 if (async_tx_test_ack(&d->txd)) {
570 memset(desc, 0, sizeof(*desc));
580 INIT_LIST_HEAD(&desc->node);
588 d40_pool_lli_free(d40c, d40d);
589 d40_lcla_free_all(d40c, d40d);
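/*
 * Logical-channel loading below: the first link of a transfer goes into the
 * LCPA, and remaining links are chained through LCLA slots obtained with
 * d40_lcla_alloc_one(); when no LCLA slot is available (curr_lcla == -EINVAL)
 * the links are instead fed one at a time from the terminal-count interrupt.
 */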
621 bool cyclic = desc->cyclic;
624 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
631 linkback = cyclic && lli_current == 0;
637 if (linkback || (lli_len - lli_current > 1)) {
638 curr_lcla = d40_lcla_alloc_one(chan, desc);
639 first_lcla = curr_lcla;
648 if (!linkback || curr_lcla == -EINVAL) {
649 unsigned int flags = 0;
655 &lli->dst[lli_current],
656 &lli->src[lli_current],
665 for (; lli_current < lli_len; lli_current++) {
666 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
669 unsigned int flags = 0;
672 if (lli_current + 1 < lli_len)
673 next_lcla = d40_lcla_alloc_one(chan, desc);
675 next_lcla = linkback ? first_lcla : -EINVAL;
677 if (cyclic || next_lcla == -EINVAL)
680 if (linkback && curr_lcla == first_lcla) {
683 &lli->dst[lli_current],
684 &lli->src[lli_current],
693 &lli->dst[lli_current],
694 &lli->src[lli_current],
701 if (!use_esram_lcla) {
707 curr_lcla = next_lcla;
709 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
721 if (chan_is_physical(d40c)) {
722 d40_phy_lli_load(d40c, d40d);
725 d40_log_lli_to_lcxa(d40c, d40d);
732 if (list_empty(&d40c->active))
744 d40_desc_remove(desc);
766 if (list_empty(&d40c->queue))
775 static int d40_psize_2_burst_size(bool is_log, int psize)
793 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
796 u32 max_w = max(data_width1, data_width2);
797 u32 min_w = min(data_width1, data_width2);
801 seg_max -= (1 << max_w);
810 if (dmalen * seg_max < size)
817 u32 data_width1, u32 data_width2)
826 data_width1, data_width2);
836 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
837 u32 *regaddr, int num, bool save)
841 for (i = 0; i < num; i++) {
847 writel_relaxed(backup[i], addr);
851 static void d40_save_restore_registers(struct d40_base *base, bool save)
867 d40_backup_regs_chan,
885 static void d40_save_restore_registers(struct d40_base *base, bool save)
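/*
 * __d40_execute_command_phy() issues the command to the physical channel and,
 * for suspend/stop requests, polls the channel's active/status register until
 * it reports stopped or suspended, giving up after D40_SUSPEND_MAX_IT
 * iterations.
 */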
890 static int __d40_execute_command_phy(struct d40_chan *d40c,
914 status = (readl(active_reg) &
929 status = (readl(active_reg) &
945 if (i == D40_SUSPEND_MAX_IT) {
947 "unable to suspend the chl %d (log: %d) status %x\n",
956 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
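/* Release every active, queued, pending and client-held descriptor on the channel. */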
960 static void d40_term_all(struct d40_chan *d40c)
966 while ((d40d = d40_first_active_get(d40c))) {
967 d40_desc_remove(d40d);
968 d40_desc_free(d40c, d40d);
972 while ((d40d = d40_first_queued(d40c))) {
973 d40_desc_remove(d40d);
974 d40_desc_free(d40c, d40d);
978 while ((d40d = d40_first_pending(d40c))) {
979 d40_desc_remove(d40d);
980 d40_desc_free(d40c, d40d);
984 if (!list_empty(&d40c->client))
986 d40_desc_remove(d40d);
987 d40_desc_free(d40c, d40d);
994 d40_desc_remove(d40d);
995 d40_desc_free(d40c, d40d);
1001 static void __d40_config_set_event(struct d40_chan *d40c,
1009 switch (event_type) {
1044 if (tries == D40_SUSPEND_MAX_IT) {
1046 "unable to stop the event_line chl %d (log: %d)"
1047 "status %x\n", d40c->
phy_chan->num,
1070 "[%s] workaround enable S%cLNK (%d tries)\n",
1084 static void d40_config_set_event(struct d40_chan *d40c,
1092 __d40_config_set_event(d40c, event_type, event,
1099 __d40_config_set_event(d40c, event_type, event,
1104 static u32 d40_chan_has_events(struct d40_chan *d40c)
1106 void __iomem *chanbase = chan_base(d40c);
1118 unsigned long flags;
1135 active_status = (readl(active_reg) &
1144 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1145 ret = __d40_execute_command_phy(d40c, command);
1152 ret = __d40_execute_command_phy(d40c, command);
1160 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1164 static int d40_channel_execute_command(struct d40_chan *d40c,
1167 if (chan_is_logical(d40c))
1168 return __d40_execute_command_log(d40c, command);
1170 return __d40_execute_command_phy(d40c, command);
1175 static const unsigned int phy_map[] = {
1183 static const unsigned int log_map[] = {
1192 if (chan_is_physical(d40c))
1193 return phy_map[d40c->dma_cfg.mode_opt];
1195 return log_map[d40c->dma_cfg.mode_opt];
1198 static void d40_config_write(struct d40_chan *d40c)
1204 addr_base = (d40c->phy_chan->num % 2) * 4;
1206 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1215 if (chan_is_logical(d40c)) {
1218 void __iomem *chanbase = chan_base(d40c);
1238 if (chan_is_logical(d40c))
1247 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1250 static bool d40_tx_is_linked(struct d40_chan *d40c)
1254 if (chan_is_logical(d40c))
1263 static int d40_pause(struct d40_chan *d40c)
1266 unsigned long flags;
1271 pm_runtime_get_sync(d40c->base->dev);
1276 pm_runtime_mark_last_busy(d40c->base->dev);
1277 pm_runtime_put_autosuspend(d40c->base->dev);
1278 spin_unlock_irqrestore(&d40c->lock, flags);
1282 static int d40_resume(struct d40_chan *d40c)
1285 unsigned long flags;
1291 pm_runtime_get_sync(d40c->base->dev);
1294 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1295 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1297 pm_runtime_mark_last_busy(d40c->base->dev);
1298 pm_runtime_put_autosuspend(d40c->base->dev);
1299 spin_unlock_irqrestore(&d40c->lock, flags);
1309 unsigned long flags;
1313 cookie = dma_cookie_assign(tx);
1314 d40_desc_queue(d40c, d40d);
1315 spin_unlock_irqrestore(&d40c->lock, flags);
1320 static int d40_start(struct d40_chan *d40c)
1322 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1331 d40d = d40_first_queued(d40c);
1336 pm_runtime_get_sync(d40c->base->dev);
1340 d40_desc_remove(d40d);
1343 d40_desc_submit(d40c, d40d);
1346 d40_desc_load(d40c, d40d);
1349 err = d40_start(d40c);
1359 static void dma_tc_handle(struct d40_chan *d40c)
1364 d40d = d40_first_active_get(d40c);
1377 && !d40_tx_is_linked(d40c)
1378 && !d40_residue(d40c)) {
1379 d40_lcla_free_all(d40c, d40d);
1380 d40_desc_load(d40c, d40d);
1381 (void) d40_start(d40c);
1387 d40_lcla_free_all(d40c, d40d);
1390 d40_desc_load(d40c, d40d);
1392 (void) d40_start(d40c);
1396 if (d40_queue_start(d40c) == NULL)
1398 pm_runtime_mark_last_busy(d40c->base->dev);
1399 pm_runtime_put_autosuspend(d40c->base->dev);
1403 tasklet_schedule(&d40c->tasklet);
1407 static void dma_tasklet(unsigned long data)
1411 unsigned long flags;
1413 void *callback_param;
1418 d40d = d40_first_active_get(d40c);
1423 dma_cookie_complete(&d40d->txd);
1430 spin_unlock_irqrestore(&d40c->lock, flags);
1435 callback = d40d->txd.callback;
1436 callback_param = d40d->txd.callback_param;
1439 if (async_tx_test_ack(&d40d->txd)) {
1440 d40_desc_remove(d40d);
1441 d40_desc_free(d40c, d40d);
1444 d40_desc_remove(d40d);
1445 d40_lcla_free_all(d40c, d40d);
1455 tasklet_schedule(&d40c->tasklet);
1457 spin_unlock_irqrestore(&d40c->lock, flags);
1468 spin_unlock_irqrestore(&d40c->lock, flags);
1471 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1492 unsigned long flags;
1520 spin_lock(&d40c->lock);
1522 if (!il[row].is_error)
1523 dma_tc_handle(d40c);
1525 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1526 chan, il[row].offset, idx);
1528 spin_unlock(&d40c->lock);
1536 static int d40_validate_conf(struct d40_chan *d40c,
1545 chan_err(d40c, "Invalid direction.\n");
1553 chan_err(d40c, "Invalid TX channel address (%d)\n",
1561 chan_err(d40c, "Invalid RX channel address (%d)\n",
1585 (src_event_group != dst_event_group)) {
1586 chan_err(d40c, "Invalid event group\n");
1595 chan_err(d40c, "periph to periph not supported\n");
1599 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1600 (1 << conf->src_info.data_width) !=
1601 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1602 (1 << conf->dst_info.data_width)) {
1608 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1616 bool is_src, int log_event_line, bool is_log,
1619 unsigned long flags;
1664 spin_unlock_irqrestore(&phy->lock, flags);
1667 spin_unlock_irqrestore(&phy->lock, flags);
1671 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1674 unsigned long flags;
1675 bool is_free = false;
1678 if (!log_event_line) {
1700 spin_unlock_irqrestore(&phy->lock, flags);
1705 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1717 phys = d40c->base->phy_res;
1720 dev_type = d40c->dma_cfg.src_dev_type;
1726 dev_type = d40c->dma_cfg.dst_dev_type;
1727 log_num = 2 * dev_type + 1;
1738 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1740 if (d40_alloc_mask_set(&phys[i], is_src,
1746 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1747 int phy_num = j + event_group * 2;
1748 for (i = phy_num; i < phy_num + 2; i++) {
1749 if (d40_alloc_mask_set(&phys[i],
1767 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1768 int phy_num = j + event_group * 2;
1770 if (d40c->dma_cfg.use_fixed_channel) {
1771 i = d40c->dma_cfg.phy_channel;
1773 if ((i != phy_num) && (i != phy_num + 1)) {
1775 "invalid fixed phy channel %d\n", i);
1779 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1780 is_log, first_phy_user))
1784 "could not allocate fixed phy channel %d\n", i);
1794 for (i = phy_num; i < phy_num + 2; i++) {
1795 if (d40_alloc_mask_set(&phys[i], is_src,
1801 for (i = phy_num + 1; i >= phy_num; i--) {
1802 if (d40_alloc_mask_set(&phys[i], is_src,
1817 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1819 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1825 static int d40_config_memcpy(struct d40_chan *d40c)
1830 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1832 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1837 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1846 static int d40_free_dma(struct d40_chan *d40c)
1864 chan_err(d40c, "channel already free\n");
1876 chan_err(d40c, "Unknown direction\n");
1880 pm_runtime_get_sync(d40c->base->dev);
1887 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
1889 if (chan_is_logical(d40c))
1895 pm_runtime_mark_last_busy(d40c->base->dev);
1896 pm_runtime_put_autosuspend(d40c->base->dev);
1904 pm_runtime_mark_last_busy(d40c->base->dev);
1905 pm_runtime_put_autosuspend(d40c->base->dev);
1909 static bool d40_is_paused(struct d40_chan *d40c)
1911 void __iomem *chanbase = chan_base(d40c);
1912 bool is_paused = false;
1913 unsigned long flags;
1920 if (chan_is_physical(d40c)) {
1926 status = (readl(active_reg) &
1943 chan_err(d40c, "Unknown direction\n");
1953 spin_unlock_irqrestore(&d40c->lock, flags);
1959 static u32 stedma40_residue(struct dma_chan *chan)
1964 unsigned long flags;
1967 bytes_left = d40_residue(d40c);
1968 spin_unlock_irqrestore(&d40c->lock, flags);
1976 unsigned int sg_len, dma_addr_t src_dev_addr,
1998 return ret < 0 ? ret : 0;
2004 unsigned int sg_len, dma_addr_t src_dev_addr,
2010 unsigned long flags = 0;
2020 src_info, dst_info, flags);
2026 dst_info, src_info, flags);
2031 return ret < 0 ? ret : 0;
2037 unsigned int sg_len, unsigned long dma_flags)
2043 desc = d40_desc_get(chan);
2050 chan_err(chan, "Unaligned size\n");
2054 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2056 chan_err(chan, "Could not allocate lli\n");
2062 desc->txd.flags = dma_flags;
2063 desc->txd.tx_submit = d40_tx_submit;
2070 d40_desc_free(chan, desc);
2101 unsigned long flags;
2105 chan_err(chan, "Cannot prepare unallocated channel\n");
2112 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2116 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2123 src_dev_addr = dev_addr;
2125 dst_dev_addr = dev_addr;
2128 if (chan_is_logical(chan))
2129 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2130 sg_len, src_dev_addr, dst_dev_addr);
2132 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2133 sg_len, src_dev_addr, dst_dev_addr);
2136 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2137 chan_is_logical(chan) ? "log" : "phy", ret);
2147 spin_unlock_irqrestore(&chan->lock, flags);
2153 d40_desc_free(chan, desc);
2154 spin_unlock_irqrestore(&chan->lock, flags);
2166 err = d40_validate_conf(d40c, info);
2170 err = d40_config_memcpy(d40c);
2179 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2181 bool realtime = d40c->dma_cfg.realtime;
2182 bool highprio = d40c->dma_cfg.high_priority;
2193 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2194 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2197 static void d40_set_prio_realtime(struct d40_chan *d40c)
2199 if (d40c->base->rev < 3)
2204 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
2208 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
2212 static int d40_alloc_chan_resources(struct dma_chan *chan)
2215 unsigned long flags;
2221 dma_cookie_init(chan);
2225 err = d40_config_memcpy(d40c);
2227 chan_err(d40c, "Failed to configure memcpy channel\n");
2232 err = d40_allocate_channel(d40c, &is_free_phy);
2234 chan_err(d40c, "Failed to allocate channel\n");
2239 pm_runtime_get_sync(d40c->base->dev);
2244 d40_set_prio_realtime(d40c);
2246 if (chan_is_logical(d40c)) {
2251 d40c->lcpa = d40c->base->lcpa_base +
2254 d40c->lcpa = d40c->base->lcpa_base +
2259 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2260 chan_is_logical(d40c) ? "logical" : "physical",
2262 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2271 d40_config_write(d40c);
2273 pm_runtime_mark_last_busy(d40c->base->dev);
2274 pm_runtime_put_autosuspend(d40c->base->dev);
2275 spin_unlock_irqrestore(&d40c->lock, flags);
2279 static void d40_free_chan_resources(struct dma_chan *chan)
2284 unsigned long flags;
2287 chan_err(d40c, "Cannot free unallocated channel\n");
2294 err = d40_free_dma(d40c);
2297 chan_err(d40c, "Failed to free channel\n");
2298 spin_unlock_irqrestore(&d40c->lock, flags);
2305 unsigned long dma_flags)
2319 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2323 d40_prep_memcpy_sg(struct dma_chan *chan,
2324 struct scatterlist *dst_sg, unsigned int dst_nents,
2325 struct scatterlist *src_sg, unsigned int src_nents,
2326 unsigned long dma_flags)
2328 if (dst_nents != src_nents)
2331 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2336 unsigned int sg_len,
2338 unsigned long dma_flags,
2344 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2349 size_t buf_len,
size_t period_len,
2353 unsigned int periods = buf_len / period_len;
2359 for (i = 0; i < periods; i++) {
2362 dma_addr += period_len;
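/*
 * The extra scatterlist entry is turned into a chain pointer back to the
 * first entry (bit 0 set, bit 1 cleared in page_link), making the period
 * list circular for the cyclic transfer.
 */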
2368 ((unsigned long)sg | 0x01) & ~0x02;
2370 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2386 chan_err(d40c, "Cannot read status of unallocated channel\n");
2390 ret = dma_cookie_status(chan, cookie, txstate);
2392 dma_set_residue(txstate, stedma40_residue(chan));
2394 if (d40_is_paused(d40c))
2400 static void d40_issue_pending(struct dma_chan *chan)
2403 unsigned long flags;
2406 chan_err(d40c, "Channel is not allocated!\n");
2416 (void) d40_queue_start(d40c);
2418 spin_unlock_irqrestore(&d40c->lock, flags);
2421 static void d40_terminate_all(struct dma_chan *chan)
2423 unsigned long flags;
2429 pm_runtime_get_sync(d40c->base->dev);
2432 chan_err(d40c, "Failed to stop channel\n");
2435 pm_runtime_mark_last_busy(d40c->base->dev);
2436 pm_runtime_put_autosuspend(d40c->base->dev);
2438 pm_runtime_mark_last_busy(d40c->base->dev);
2439 pm_runtime_put_autosuspend(d40c->base->dev);
2443 spin_unlock_irqrestore(&d40c->lock, flags);
2447 dma40_config_to_halfchannel(struct d40_chan *d40c,
2470 "illegal peripheral address width "
2476 if (chan_is_logical(d40c)) {
2479 else if (maxburst >= 8)
2481 else if (maxburst >= 4)
2488 else if (maxburst >= 8)
2490 else if (maxburst >= 4)
2497 info->psize = psize;
2504 static int d40_set_runtime_config(struct dma_chan *chan,
2511 u32 src_maxburst, dst_maxburst;
2526 "channel has a pre-wired RX address %08x "
2527 "overriding with %08x\n",
2528 dev_addr_rx, config_addr);
2531 "channel was not configured for peripheral "
2532 "to memory transfer (%d) overriding\n",
2538 dst_addr_width = src_addr_width;
2539 if (dst_maxburst == 0)
2540 dst_maxburst = src_maxburst;
2549 "channel has a pre-wired TX address %08x "
2550 "overriding with %08x\n",
2551 dev_addr_tx, config_addr);
2554 "channel was not configured for memory "
2555 "to peripheral transfer (%d) overriding\n",
2561 src_addr_width = dst_addr_width;
2562 if (src_maxburst == 0)
2563 src_maxburst = dst_maxburst;
2566 "unrecognized channel direction %d\n",
2571 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2573 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2581 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2587 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2594 if (chan_is_logical(d40c))
2604 "configured channel %s for %s, data width %d/%d, "
2605 "maxburst %d/%d elements, LE, no flow control\n",
2606 dma_chan_name(chan),
2608 src_addr_width, dst_addr_width,
2609 src_maxburst, dst_maxburst);
2620 chan_err(d40c, "Channel is not allocated!\n");
2626 d40_terminate_all(chan);
2629 return d40_pause(d40c);
2631 return d40_resume(d40c);
2633 return d40_set_runtime_config(chan,
2654 for (i = offset; i < offset + num_chans; i++) {
2663 INIT_LIST_HEAD(&d40c->active);
2664 INIT_LIST_HEAD(&d40c->queue);
2666 INIT_LIST_HEAD(&d40c->client);
2670 (unsigned long) d40c);
2707 int num_reserved_chans)
2723 d40_err(base->dev, "Failed to register slave channels\n");
2740 "Failed to regsiter memcpy only channels\n");
2745 0, num_reserved_chans);
2753 d40_ops_init(base, &base->dma_both);
2758 "Failed to register logical and physical capable channels\n");
2772 static int dma40_pm_suspend(struct device *dev)
2775 struct d40_base *base = platform_get_drvdata(pdev);
2777 if (!pm_runtime_suspended(dev))
2785 static int dma40_runtime_suspend(struct device *dev)
2788 struct d40_base *base = platform_get_drvdata(pdev);
2790 d40_save_restore_registers(base, true);
2800 static int dma40_runtime_resume(struct device *dev)
2803 struct d40_base *base = platform_get_drvdata(pdev);
2806 d40_save_restore_registers(base, false);
2813 static int dma40_resume(struct device *dev)
2816 struct d40_base *base = platform_get_drvdata(pdev);
2825 static const struct dev_pm_ops dma40_pm_ops = {
2827 .runtime_suspend = dma40_runtime_suspend,
2828 .runtime_resume = dma40_runtime_resume,
2829 .resume = dma40_resume,
2831 #define DMA40_PM_OPS (&dma40_pm_ops)
2833 #define DMA40_PM_OPS NULL
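/* DMA40_PM_OPS resolves to &dma40_pm_ops when power management is built in and to NULL otherwise; it is what the platform driver's .pm field is set to. */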
2841 int num_phy_chans_avail = 0;
2843 int odd_even_bit = -2;
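/*
 * Each physical channel has a two-bit security mode, with even-numbered
 * channels read from one register and odd-numbered ones from the other; a
 * value of 1 marks a secure-only channel, which is reserved here rather than
 * counted as available.
 */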
2851 odd_even_bit += 2 * ((i % 2) == 0);
2852 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2867 num_phy_chans_avail++;
2873 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2874 int chan = base->plat_data->disabled_channels[i];
2878 base->phy_res[chan].reserved = true;
2883 num_phy_chans_avail--;
2886 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2895 (val[0] & 0x3) != 1)
2897 "[%s] INFO: channel %d is misconfigured (%d)\n",
2898 __func__, i, val[0] & 0x3);
2900 val[0] = val[0] >> 2;
2912 return num_phy_chans_avail;
2932 d40_err(&pdev->dev, "No matching clock found\n");
2936 clk_ret = clk_prepare_enable(clk);
2938 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
2956 for (pid = 0, i = 0; i < 4; i++)
2957 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
2959 for (cid = 0, i = 0; i < 4; i++)
2960 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
2964 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
2968 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2985 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2989 d40_err(&pdev->dev, "hardware revision: %d is not supported",
2994 plat_data = pdev->dev.platform_data;
2997 for (i = 0; i < plat_data->dev_len; i++)
2998 if (plat_data->dev_rx[i] != 0)
3001 for (i = 0; i < plat_data->dev_len; i++)
3002 if (plat_data->dev_tx[i] != 0)
3006 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
3019 base->phy_size = resource_size(res);
3050 sizeof(d40_backup_regs_chan),
3056 kzalloc(num_phy_chans * sizeof(struct d40_desc *)
3071 clk_disable_unprepare(clk);
3078 resource_size(res));
3116 u32 prmseo[2] = {0, 0};
3117 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3121 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
3128 activeo[i % 2] = activeo[i % 2] << 2;
3132 activeo[i % 2] |= 3;
3137 pcmis = (pcmis << 1) | 1;
3140 pcicr = (pcicr << 1) | 1;
3143 prmseo[i % 2] = prmseo[i % 2] << 2;
3164 unsigned long *page_list;
3187 if (!page_list[i]) {
3189 d40_err(base->dev, "Failed to allocate %d pages.\n",
3192 for (j = 0; j < i; j++)
3202 for (j = 0; j < i; j++)
3205 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3206 base->lcla_pool.base = (void *)page_list[i];
3213 "[%s] Failed to get %d pages @ 18 bit align.\n",
3250 int num_reserved_chans;
3253 base = d40_hw_detect_init(pdev);
3258 num_reserved_chans = d40_phy_res_init(base);
3260 platform_set_drvdata(pdev, base);
3269 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3279 "Failed to request LCPA region 0x%x-0x%x\n",
3286 if (res->start != val && val != 0) {
3288 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
3289 __func__, val, res->start);
3296 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3306 "No \"lcla_esram\" memory resource\n");
3310 resource_size(res));
3313 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3319 ret = d40_lcla_allocate(base);
3321 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3338 pm_runtime_use_autosuspend(base->dev);
3340 pm_runtime_resume(base->dev);
3346 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3354 "Failed to enable lcpa_regulator\n");
3362 err = d40_dmaengine_init(base, num_reserved_chans);
3429 static int __init stedma40_init(void)