#include <linux/module.h>
#include <linux/slab.h>
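/*
 * Conversion helpers (annotation): recover the driver-private mv_xor_*
 * structures from the generic dmaengine objects embedded inside them.
 */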
#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)
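/*
 * Descriptor init (fragment): bit 31 of the hardware status word is set
 * below; per the register layout this appears to mark the descriptor as
 * owned by the XOR engine (annotation, not in the original source).
 */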
	hw_desc->status = (1 << 31);
static int mv_chan_memset_slot_count(size_t len)
#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
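/*
 * Annotation: a memcpy needs the same single descriptor slot as a
 * memset, so the slot-count helper is simply reused.
 */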
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan,
				     u32 desc_addr)
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
static int mv_is_err_intr(u32 intr_cause)
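	/*
	 * Annotation: bits 4..9 of the per-channel interrupt cause field
	 * are the error conditions (decoded further in
	 * mv_xor_err_interrupt_handler below).
	 */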
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
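	/*
	 * Annotation: each channel owns a 16-bit field of the shared
	 * interrupt cause register; the write-to-clear mask below zeroes
	 * only the low bit of this channel's field (its completion
	 * cause), leaving every other channel's bits untouched.
	 */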
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	if (chain_old_tail->type != desc->type)
210 "error: unsupported operation %d.\n",
static void mv_chan_activate(struct mv_xor_chan *chan)
static char mv_chan_is_busy(struct mv_xor_chan *chan)
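	/*
	 * Annotation: bits 5:4 of the activation/status register encode
	 * the channel state; a value of 1 appears to mean "active".
	 */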
	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
static int mv_chan_xor_slot_count(size_t len, int src_cnt)
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)

		__func__, __LINE__, slot);
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)

		__func__, __LINE__, sw_desc);
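	/*
	 * Annotation: hardware (re)start sequence. Switch the engine to
	 * the descriptor's operation mode, program block size and fill
	 * value for the memset path, point the engine at the first
	 * descriptor, then kick it via mv_xor_issue_pending().
	 */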
		mv_set_mode(mv_chan, sw_desc->type);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	mv_xor_issue_pending(&mv_chan->common);
			&mv_chan->device->pdev->dev;
		dest = mv_desc_get_dest_addr(unmap);
			addr = mv_desc_get_src_addr(unmap, src_cnt);
static void mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
		if (async_tx_test_ack(&iter->async_tx)) {

			mv_xor_free_slots(mv_chan, iter);
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	if (!async_tx_test_ack(&desc->async_tx)) {

		mv_xor_free_slots(mv_chan, desc);
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;
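	/*
	 * Annotation: cleanup walks the chain in submission order. Every
	 * descriptor before the engine's current one has completed and
	 * can be cleaned; seen_current stops the walk at the in-flight
	 * descriptor while the channel is still busy.
	 */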
	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);
		if (iter->async_tx.phys == current_desc) {

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
	if ((busy == 0) && !list_empty(&mv_chan->chain)) {

		mv_xor_start_new_chain(mv_chan, chain_head);
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
static void mv_xor_tasklet(unsigned long data)

	mv_xor_slot_cleanup(chan);
	int slots_found, retry = 0;

		if (slots_found == num_slots) {
	mv_desc_clear_next_desc(alloc_start);
	mv_desc_clear_next_desc(alloc_tail);
	int new_hw_chain = 1;
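	/*
	 * Annotation: tx_submit either starts a fresh hardware chain
	 * (idle channel / empty list) or appends behind the running
	 * tail; new_hw_chain records whether the engine must be
	 * (re)started afterwards.
	 */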
551 "%s sw_desc %p: async_tx %p\n",
552 __func__, sw_desc, &sw_desc->
async_tx);
	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);
	if (list_empty(&mv_chan->chain))
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);
		if (!mv_can_chain(grp_start))
		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			if (current_desc == old_chain_tail->async_tx.phys)

		mv_xor_start_new_chain(mv_chan, grp_start);
	spin_unlock_bh(&mv_chan->lock);
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
		mv_chan->device->pdev->dev.platform_data;
	while (idx < num_descs_in_pool) {

				" %d descriptor slots", idx);
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->async_tx.tx_submit = mv_xor_tx_submit;

		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		spin_lock_bh(&mv_chan->lock);

		spin_unlock_bh(&mv_chan->lock);

		"allocated %d descriptor slots last_used: %p\n",
		size_t len, unsigned long flags)
660 "%s dest: %x src %x len: %u flags: %ld\n",
661 __func__, dest, src, len, flags);
	spin_lock_bh(&mv_chan->lock);

	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
	spin_unlock_bh(&mv_chan->lock);
684 "%s sw_desc %p async_tx %p\n",
685 __func__, sw_desc, sw_desc ? &sw_desc->
async_tx : 0);
		size_t len, unsigned long flags)
699 "%s dest: %x len: %u flags: %ld\n",
700 __func__, dest, len, flags);
	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
	spin_unlock_bh(&mv_chan->lock);
722 "%s sw_desc %p async_tx %p \n",
723 __func__, sw_desc, &sw_desc->
async_tx);
		unsigned int src_cnt, size_t len, unsigned long flags)
741 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
742 __func__, src_cnt, len, dest, flags);
	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
		mv_desc_init(grp_start, flags);

		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
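		/*
		 * Annotation: the surrounding loop (not shown here)
		 * counts src_cnt down, loading each source address into
		 * the descriptor:
		 */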
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	spin_unlock_bh(&mv_chan->lock);
762 "%s sw_desc %p async_tx %p \n",
763 __func__, sw_desc, &sw_desc->
async_tx);
static void mv_xor_free_chan_resources(struct dma_chan *chan)

	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);
	spin_lock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);
800 "freeing %d in use descriptors!\n", in_use_descs);
	ret = dma_cookie_status(chan, cookie, txstate);

		mv_xor_clean_completed_slots(mv_chan);

	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
static void mv_dump_xor_regs(struct mv_xor_chan *chan)
832 "config 0x%08x.\n", val);
836 "activation 0x%08x.\n", val);
840 "intr cause 0x%08x.\n", val);
844 "intr mask 0x%08x.\n", val);
848 "error cause 0x%08x.\n", val);
852 "error addr 0x%08x.\n", val);
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
	if (intr_cause & (1 << 4)) {

			"ignore this error\n");
865 "error on chan %d. intr cause 0x%08x.\n",
866 chan->
idx, intr_cause);
	mv_dump_xor_regs(chan);

	u32 intr_cause = mv_chan_get_intr_cause(chan);
	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	mv_xor_device_clear_eoc_cause(chan);
static void mv_xor_issue_pending(struct dma_chan *chan)

		mv_chan_activate(mv_chan);
#define MV_XOR_TEST_SIZE 2000
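/*
 * Annotation: memcpy self-test, run at probe time. Fill a source buffer
 * with a known byte pattern, DMA it to a destination buffer, poll for
 * completion, then memcmp the two buffers.
 */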
	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
		((u8 *) src)[i] = (u8)i;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {

			   "Self-test copy timed out, disabling\n");
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {

			   "Self-test copy failed compare, disabling\n");
	mv_xor_free_chan_resources(dma_chan);
#define MV_XOR_NUM_SRC_TEST 4
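/*
 * Annotation: XOR self-test. Source page n is filled with the constant
 * (1 << n), so the XOR of all MV_XOR_NUM_SRC_TEST pages is a fixed byte
 * (cmp_byte) that every word of the destination must equal.
 */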
		if (!xor_srcs[src_idx]) {

			ptr[i] = (1 << src_idx);
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;
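	/*
	 * Worked example (annotation): with MV_XOR_NUM_SRC_TEST == 4,
	 * cmp_byte is 0x01 ^ 0x02 ^ 0x04 ^ 0x08 = 0x0f and cmp_word is
	 * 0x0f0f0f0f.
	 */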
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {

			   "Self-test xor timed out, disabling\n");
		goto free_resources;

		if (ptr[i] != cmp_word) {
1064 "Self-test xor failed compare, disabling."
1065 " index %d, data %x, expected %x\n", i,
1068 goto free_resources;
	mv_xor_free_chan_resources(dma_chan);
	platform_set_drvdata(pdev, adev);
	adev->shared = platform_get_drvdata(plat_data->shared);
	INIT_LIST_HEAD(&dma_dev->channels);
	dma_dev->dev = &pdev->dev;
	mv_xor_device_clear_err_status(mv_chan);
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	mv_chan_unmask_interrupts(mv_chan);
	INIT_LIST_HEAD(&mv_chan->chain);
	mv_chan->common.device = dma_dev;
	dma_cookie_init(&mv_chan->common);
	ret = mv_xor_memcpy_self_test(adev);
	dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
	ret = mv_xor_xor_self_test(adev);
	dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
	for (i = 0; i < 8; i++) {
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
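	/*
	 * Annotation: bit i of win_enable turns on address window i; the
	 * pair of bits at 16 + 2*i appears to grant that window
	 * read/write access (both bits set, hence the value 3).
	 */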
	.probe = mv_xor_probe,
	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
			    resource_size(res));

			    resource_size(res));
	platform_set_drvdata(pdev, msp);

	mv_xor_conf_mbus_windows(msp, dram);
	if (!IS_ERR(msp->clk))
		clk_prepare_enable(msp->clk);
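	/*
	 * Annotation: the clock is optional. clk_get() may have failed
	 * on platforms without a gateable XOR clock, so the IS_ERR()
	 * checks let the driver run either way.
	 */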
	if (!IS_ERR(msp->clk)) {
		clk_disable_unprepare(msp->clk);
	.probe = mv_xor_shared_probe,
	.remove = mv_xor_shared_remove,
static int __init mv_xor_init(void)
static void __exit mv_xor_exit(void)