18 #include <linux/list.h>
21 #include <linux/string.h>
24 #include <linux/module.h>
28 #include "../dmaengine.h"
/* FS_PROC_FLOW bits: mark the viewfinder / encoder input task chains valid */
31 #define FS_VF_IN_VALID 0x00000002
32 #define FS_ENC_IN_VALID 0x00000001
/* Single-instance IPU state; this driver supports exactly one IPU device */
43 static struct ipu ipu_data;
/* Map an embedded struct idmac pointer back to its enclosing struct ipu */
45 #define to_ipu(id) container_of(id, struct ipu, idmac)
47 static u32 __idmac_read_icreg(
struct ipu *
ipu,
unsigned long reg)
/*
 * Read an Image Converter register; callers pass absolute IC_* register
 * addresses, which are rebased to offsets from IC_CONF for __idmac_read_icreg.
 * The macro argument is parenthesized so expression arguments expand safely.
 */
#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, (reg) - IC_CONF)
/*
 * Write value 'v' to an Image Converter register; the absolute IC_* address
 * is rebased to an offset from IC_CONF for __idmac_write_icreg.
 * The macro argument is parenthesized so expression arguments expand safely.
 */
#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, (reg) - IC_CONF)
61 static u32 idmac_read_ipureg(
struct ipu *
ipu,
unsigned long reg)
74 static void dump_idmac_reg(
struct ipu *
ipu)
76 dev_dbg(ipu->
dev,
"IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
77 "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
83 dev_dbg(ipu->
dev,
"BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
84 "DB_MODE 0x%x, TASKS_STAT 0x%x\n",
136 static void ipu_ic_disable_task(
struct ipu *ipu,
enum ipu_channel channel)
278 u32 u_offset,
u32 v_offset)
280 params->
pp.ubo_l = u_offset & 0x7ff;
281 params->
pp.ubo_h = u_offset >> 11;
282 params->
pp.vbo_l = v_offset & 0x1ffff;
283 params->
pp.vbo_h = v_offset >> 17;
293 params->
pp.fw = width - 1;
294 params->
pp.fh_l = height - 1;
295 params->
pp.fh_h = (height - 1) >> 8;
296 params->
pp.sl = stride - 1;
320 params->
ip.ofs2 = 11;
321 params->
ip.ofs3 = 16;
333 params->
ip.ofs2 = 16;
334 params->
ip.ofs3 = 24;
344 params->
ip.ofs0 = 16;
347 params->
ip.ofs3 = 24;
360 params->
ip.ofs1 = 16;
361 params->
ip.ofs2 = 24;
374 params->
ip.ofs0 = 24;
375 params->
ip.ofs1 = 16;
395 u_offset = stride *
height;
396 v_offset = u_offset + u_offset / 4;
397 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
404 v_offset = stride *
height;
405 u_offset = v_offset + v_offset / 2;
406 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
413 u_offset = stride *
height;
414 v_offset = u_offset + u_offset / 2;
415 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
419 "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
429 params->
pp.eba0 = buf0;
430 params->
pp.eba1 = buf1;
433 static void ipu_ch_param_set_rotation(
union chan_param_mem *params,
436 params->
pp.bam = rotate;
442 for (; num_words > 0; num_words--) {
444 "write param mem - addr = 0x%08X, data = 0x%08X\n",
449 if ((addr & 0x7) == 5) {
463 *resize_coeff = 1 << 13;
464 *downsize_coeff = 1 << 13;
467 if (out_size << 3 < in_size)
473 while (temp_size >= out_size * 2 && temp_downsize < 2) {
477 *downsize_coeff = temp_downsize;
484 *resize_coeff = (8192
L * (temp_size - 1)) / (out_size - 1);
485 if (*resize_coeff >= 16384L) {
486 dev_err(ipu_data.dev,
"Warning! Overflow on resize coeff.\n");
487 *resize_coeff = 0x3FFF;
490 dev_dbg(ipu_data.dev,
"resizing from %u -> %u pixels, "
491 "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
492 *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
493 ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
512 static int ipu_ic_init_prpenc(
struct ipu *ipu,
516 uint32_t downsize_coeff, resize_coeff;
520 calc_resize_coeffs(params->
video.in_height,
521 params->
video.out_height,
522 &resize_coeff, &downsize_coeff);
523 reg = (downsize_coeff << 30) | (resize_coeff << 16);
526 calc_resize_coeffs(params->
video.in_width,
527 params->
video.out_width,
528 &resize_coeff, &downsize_coeff);
529 reg |= (downsize_coeff << 14) | resize_coeff;
532 in_fmt = format_to_colorspace(params->
video.in_pixel_fmt);
533 out_fmt = format_to_colorspace(params->
video.out_pixel_fmt);
539 if (in_fmt != out_fmt) {
540 dev_err(ipu->
dev,
"Colourspace conversion unsupported!\n");
561 return 0x10000 | (dma_ch << 4);
564 static void ipu_channel_set_priority(
struct ipu *ipu,
enum ipu_channel channel,
608 struct ipu *ipu =
to_ipu(idmac);
624 ipu_channel_set_priority(ipu, channel,
true);
633 ipu_ic_enable_task(ipu, channel);
635 spin_unlock_irqrestore(&ipu->
lock, flags);
654 static int ipu_init_channel_buffer(
struct idmac_channel *ichan,
655 enum pixel_fmt pixel_fmt,
663 struct ipu *ipu =
to_ipu(idmac);
669 stride_bytes = stride * bytes_per_pixel(pixel_fmt);
671 if (stride_bytes % 4) {
673 "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
674 stride, stride_bytes);
680 dev_err(ipu->
dev,
"Stride must be 8 pixel multiple\n");
685 ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes);
686 ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1);
687 ipu_ch_param_set_rotation(¶ms, rot_mode);
691 ipu_write_param_mem(dma_param_addr(channel), (
uint32_t *)¶ms, 10);
704 spin_unlock_irqrestore(&ipu->
lock, flags);
714 static void ipu_select_buffer(
enum ipu_channel channel,
int buffer_n)
733 static void ipu_update_channel_buffer(
struct idmac_channel *ichan,
744 if (reg & (1
UL << channel)) {
745 ipu_ic_disable_task(&ipu_data, channel);
750 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
755 if (reg & (1
UL << channel)) {
756 ipu_ic_disable_task(&ipu_data, channel);
763 if (!(reg & (1
UL << channel)))
764 idmac_write_ipureg(&ipu_data, reg | (1
UL << channel),
768 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
773 spin_unlock_irqrestore(&ipu_data.lock, flags);
780 unsigned int chan_id = ichan->
dma_chan.chan_id;
783 if (async_tx_test_ack(&desc->
txd))
794 ipu_select_buffer(chan_id, buf_idx);
795 dev_dbg(dev,
"Updated sg %p on channel 0x%x buffer %d\n",
796 sg, chan_id, buf_idx);
802 static int ipu_submit_channel_buffers(
struct idmac_channel *ichan,
808 for (i = 0, sg = desc->
sg; i < 2 && sg; i++) {
812 ret = ipu_submit_buffer(ichan, desc, sg, i);
828 struct ipu *ipu =
to_ipu(idmac);
835 if (!list_empty(&desc->
list)) {
837 dev_err(dev,
"Descriptor %p not prepared!\n", tx);
843 async_tx_clear_ack(tx);
856 cookie = ipu_init_channel_buffer(ichan,
868 dev_dbg(dev,
"Submitting sg %p\n", &desc->
sg[0]);
870 cookie = dma_cookie_assign(tx);
877 ret = ipu_submit_channel_buffers(ichan, desc);
879 spin_unlock_irqrestore(&ichan->
lock, flags);
887 ret = ipu_enable_channel(idmac, ichan);
899 list_del_init(&desc->
list);
900 spin_unlock_irqrestore(&ichan->
lock, flags);
925 INIT_LIST_HEAD(&ichan->
queue);
931 memset(txd, 0,
sizeof(*txd));
951 static int ipu_init_channel(
struct idmac *idmac,
struct idmac_channel *ichan)
958 struct ipu *ipu =
to_ipu(idmac);
959 int ret = 0, n_desc = 0;
961 dev_dbg(ipu->
dev,
"init channel = %d\n", channel);
979 ret = ipu_ic_init_prpenc(ipu, params,
true);
991 ipu_conf = idmac_read_ipureg(ipu,
IPU_CONF) |
992 ipu_channel_conf_mask(channel);
993 idmac_write_ipureg(ipu, ipu_conf,
IPU_CONF);
995 spin_unlock_irqrestore(&ipu->
lock, flags);
997 if (n_desc && !ichan->
desc)
998 ret = idmac_desc_alloc(ichan, n_desc);
1000 dump_idmac_reg(ipu);
1010 static void ipu_uninit_channel(
struct idmac *idmac,
struct idmac_channel *ichan)
1013 unsigned long flags;
1015 unsigned long chan_mask = 1
UL <<
channel;
1017 struct ipu *ipu =
to_ipu(idmac);
1022 dev_err(ipu->
dev,
"Channel already uninitialized %d\n",
1024 spin_unlock_irqrestore(&ipu->
lock, flags);
1053 ipu_conf = idmac_read_ipureg(ipu,
IPU_CONF) &
1054 ~ipu_channel_conf_mask(channel);
1055 idmac_write_ipureg(ipu, ipu_conf,
IPU_CONF);
1057 spin_unlock_irqrestore(&ipu->
lock, flags);
1072 static int ipu_disable_channel(
struct idmac *idmac,
struct idmac_channel *ichan,
1076 struct ipu *ipu =
to_ipu(idmac);
1078 unsigned long flags;
1079 unsigned long chan_mask = 1
UL <<
channel;
1092 "Warning: timeout waiting for channel %u to "
1093 "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
1094 "busy = 0x%08X, tstat = 0x%08X\n", channel,
1102 dev_dbg(ipu->
dev,
"timeout = %d * 10ms\n", 40 - timeout);
1115 ipu_ic_disable_task(ipu, channel);
1121 spin_unlock_irqrestore(&ipu->
lock, flags);
1135 if ((*desc)->list.next == &ichan->
queue)
1160 unsigned int chan_id = ichan->
dma_chan.chan_id;
1165 void *callback_param;
1167 u32 ready0, ready1, curbuf,
err;
1168 unsigned long flags;
1181 if (err & (1 << chan_id)) {
1183 spin_unlock_irqrestore(&ipu_data.lock, flags);
1191 dev_warn(dev,
"NFB4EOF on channel %d, ready %x, %x, cur %x\n",
1192 chan_id, ready0, ready1, curbuf);
1195 spin_unlock_irqrestore(&ipu_data.lock, flags);
1198 spin_lock(&ichan->
lock);
1202 spin_unlock(&ichan->
lock);
1204 "IRQ with active buffer still ready on channel %x, "
1205 "active %d, ready %x, %x!\n", chan_id,
1212 spin_unlock(&ichan->
lock);
1214 "IRQ without queued buffers on channel %x, active %d, "
1215 "ready %x, %x!\n", chan_id,
1229 spin_unlock(&ichan->
lock);
1236 dev_dbg(dev,
"IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
1240 sgnew = idmac_sg_next(ichan, &descnew, *sg);
1241 if (sgnext != sgnew)
1242 dev_err(dev,
"Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
1250 dev_dbg(dev,
"Underrun on channel %x\n", chan_id);
1254 ipu_submit_buffer(ichan, descnew, sgnew, !ichan->
active_buffer);
1257 ipu_ic_disable_task(&ipu_data, chan_id);
1258 spin_unlock_irqrestore(&ipu_data.lock, flags);
1265 sgnew = idmac_sg_next(ichan, &descnew, sgnew);
1267 if (
unlikely(!sg_next(*sg)) || !sgnext) {
1272 list_del_init(&desc->
list);
1279 ipu_submit_buffer(ichan, descnew, sgnew, ichan->
active_buffer) < 0) {
1280 callback = descnew->
txd.callback;
1281 callback_param = descnew->
txd.callback_param;
1282 list_del_init(&descnew->
list);
1283 spin_unlock(&ichan->
lock);
1286 spin_lock(&ichan->
lock);
1292 dma_cookie_complete(&desc->
txd);
1294 callback = desc->
txd.callback;
1295 callback_param = desc->
txd.callback_param;
1297 spin_unlock(&ichan->
lock);
1305 static void ipu_gc_tasklet(
unsigned long arg)
1307 struct ipu *ipu = (
struct ipu *)arg;
1313 unsigned long flags;
1317 for (j = 0; j < ichan->
n_tx_desc; j++) {
1318 desc = ichan->
desc +
j;
1320 if (async_tx_test_ack(&desc->
txd)) {
1323 if (ichan->
sg[0] == sg)
1325 else if (ichan->
sg[1] == sg)
1328 async_tx_clear_ack(&desc->
txd);
1330 spin_unlock_irqrestore(&ichan->
lock, flags);
1344 unsigned long flags;
1352 dev_err(chan->
device->dev,
"Invalid DMA direction %d!\n", direction);
1363 list_del_init(&desc->
list);
1368 txd->
flags = tx_flags;
1370 spin_unlock_irqrestore(&ichan->
lock, flags);
1380 static void idmac_issue_pending(
struct dma_chan *chan)
1384 struct ipu *ipu =
to_ipu(idmac);
1385 unsigned long flags;
1390 spin_unlock_irqrestore(&ipu->
lock, flags);
1405 struct ipu *ipu =
to_ipu(idmac);
1407 unsigned long flags;
1413 ipu_ic_disable_task(ipu, chan->
chan_id);
1417 list_del_init(list);
1419 ichan->sg[0] =
NULL;
1420 ichan->sg[1] = NULL;
1422 spin_unlock_irqrestore(&ipu->lock, flags);
1427 ipu_disable_channel(idmac, ichan,
1430 tasklet_disable(&ipu->
tasklet);
1434 list_splice_init(&ichan->
queue, &ichan->free_list);
1437 for (i = 0; i < ichan->n_tx_desc; i++) {
1439 if (list_empty(&desc->
list))
1441 list_add(&desc->
list, &ichan->free_list);
1443 async_tx_clear_ack(&desc->
txd);
1446 ichan->sg[0] =
NULL;
1447 ichan->sg[1] =
NULL;
1448 spin_unlock_irqrestore(&ichan->lock, flags);
1450 tasklet_enable(&ipu->
tasklet);
1469 ret = __idmac_control(chan, cmd, arg);
1477 static irqreturn_t ic_sof_irq(
int irq,
void *dev_id)
1486 static irqreturn_t ic_eof_irq(
int irq,
void *dev_id)
1498 static int idmac_alloc_chan_resources(
struct dma_chan *chan)
1508 dma_cookie_init(chan);
1520 ipu_disable_channel(idmac, ichan,
true);
1522 ret = ipu_init_channel(idmac, ichan);
1535 request_irq(ic_sof, ic_sof_irq, 0,
"IC SOF", ichan);
1538 request_irq(ic_eof, ic_eof_irq, 0,
"IC EOF", ichan);
1544 dev_dbg(&chan->
dev->device,
"Found channel 0x%x, irq %d\n",
1550 ipu_uninit_channel(idmac, ichan);
1557 static void idmac_free_chan_resources(
struct dma_chan *chan)
1587 ipu_uninit_channel(idmac, ichan);
1598 if (cookie != chan->
cookie)
1603 static int __init ipu_idmac_init(
struct ipu *ipu)
1605 struct idmac *idmac = &ipu->
idmac;
1636 dma_cookie_init(dma_chan);
1646 static void __exit ipu_idmac_exit(
struct ipu *ipu)
1649 struct idmac *idmac = &ipu->
idmac;
1673 if (!mem_ipu || !mem_ic)
1676 ipu_data.dev = &pdev->
dev;
1678 platform_set_drvdata(pdev, &ipu_data);
1684 ipu_data.irq_fn =
ret;
1689 ipu_data.irq_err =
ret;
1691 dev_dbg(&pdev->
dev,
"fn irq %u, err irq %u\n",
1692 ipu_data.irq_fn, ipu_data.irq_err);
1695 ipu_data.reg_ipu =
ioremap(mem_ipu->
start, resource_size(mem_ipu));
1696 if (!ipu_data.reg_ipu) {
1698 goto err_ioremap_ipu;
1702 ipu_data.reg_ic =
ioremap(mem_ic->
start, resource_size(mem_ic));
1703 if (!ipu_data.reg_ic) {
1705 goto err_ioremap_ic;
1710 if (IS_ERR(ipu_data.ipu_clk)) {
1711 ret = PTR_ERR(ipu_data.ipu_clk);
1716 clk_prepare_enable(ipu_data.ipu_clk);
1725 dev_dbg(&pdev->
dev,
"%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->
name,
1726 (
unsigned long)mem_ipu->
start, ipu_data.irq_fn, ipu_data.irq_err);
1730 goto err_attach_irq;
1733 ret = ipu_idmac_init(&ipu_data);
1735 goto err_idmac_init;
1737 tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (
unsigned long)&ipu_data);
1739 ipu_data.dev = &pdev->
dev;
1741 dev_dbg(ipu_data.dev,
"IPU initialized\n");
1748 clk_disable_unprepare(ipu_data.ipu_clk);
1756 dev_err(&pdev->
dev,
"Failed to probe IPU: %d\n", ret);
1762 struct ipu *ipu = platform_get_drvdata(pdev);
1764 ipu_idmac_exit(ipu);
1766 clk_disable_unprepare(ipu->
ipu_clk);
1771 platform_set_drvdata(pdev,
NULL);
1788 static int __init ipu_init(
void)