Linux Kernel  3.7.1
ipu-common.c
1 /*
2  * Copyright (c) 2010 Sascha Hauer <[email protected]>
3  * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License as published by the
7  * Free Software Foundation; either version 2 of the License, or (at your
8  * option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12  * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13  * for more details.
14  */
15 #include <linux/module.h>
16 #include <linux/export.h>
17 #include <linux/types.h>
18 #include <linux/init.h>
19 #include <linux/platform_device.h>
20 #include <linux/err.h>
21 #include <linux/spinlock.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/clk.h>
26 #include <linux/list.h>
27 #include <linux/irq.h>
28 #include <linux/of_device.h>
29 #include <asm/mach/irq.h>
30 
31 #include "imx-ipu-v3.h"
32 #include "ipu-prv.h"
33 
34 static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
35 {
36  return readl(ipu->cm_reg + offset);
37 }
38 
39 static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
40 {
41  writel(value, ipu->cm_reg + offset);
42 }
43 
44 static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
45 {
46  return readl(ipu->idmac_reg + offset);
47 }
48 
49 static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
50  unsigned offset)
51 {
52  writel(value, ipu->idmac_reg + offset);
53 }
54 
55 void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
56 {
57  u32 val;
58 
59  val = ipu_cm_read(ipu, IPU_SRM_PRI2);
60  val |= 0x8;
61  ipu_cm_write(ipu, val, IPU_SRM_PRI2);
62 }
63 EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);
64 
65 struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel)
66 {
67  struct ipu_soc *ipu = channel->ipu;
68 
69  return ipu->cpmem_base + channel->num;
70 }
71 EXPORT_SYMBOL_GPL(ipu_get_cpmem);
72 
73 void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel)
74 {
75  struct ipu_soc *ipu = channel->ipu;
76  struct ipu_ch_param __iomem *p = ipu_get_cpmem(channel);
77  u32 val;
78 
79  if (ipu->ipu_type == IPUV3EX)
80  ipu_ch_param_write_field(p, IPU_FIELD_ID, 1);
81 
82  val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(channel->num));
83  val |= 1 << (channel->num % 32);
84  ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(channel->num));
85 };
86 EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
87 
88 void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v)
89 {
90  u32 bit = (wbs >> 8) % 160;
91  u32 size = wbs & 0xff;
92  u32 word = (wbs >> 8) / 160;
93  u32 i = bit / 32;
94  u32 ofs = bit % 32;
95  u32 mask = (1 << size) - 1;
96  u32 val;
97 
98  pr_debug("%s %d %d %d\n", __func__, word, bit , size);
99 
100  val = readl(&base->word[word].data[i]);
101  val &= ~(mask << ofs);
102  val |= v << ofs;
103  writel(val, &base->word[word].data[i]);
104 
105  if ((bit + size - 1) / 32 > i) {
106  val = readl(&base->word[word].data[i + 1]);
107  val &= ~(mask >> (ofs ? (32 - ofs) : 0));
108  val |= v >> (ofs ? (32 - ofs) : 0);
109  writel(val, &base->word[word].data[i + 1]);
110  }
111 }
112 EXPORT_SYMBOL_GPL(ipu_ch_param_write_field);
113 
114 u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs)
115 {
116  u32 bit = (wbs >> 8) % 160;
117  u32 size = wbs & 0xff;
118  u32 word = (wbs >> 8) / 160;
119  u32 i = bit / 32;
120  u32 ofs = bit % 32;
121  u32 mask = (1 << size) - 1;
122  u32 val = 0;
123 
124  pr_debug("%s %d %d %d\n", __func__, word, bit , size);
125 
126  val = (readl(&base->word[word].data[i]) >> ofs) & mask;
127 
128  if ((bit + size - 1) / 32 > i) {
129  u32 tmp;
130  tmp = readl(&base->word[word].data[i + 1]);
131  tmp &= mask >> (ofs ? (32 - ofs) : 0);
132  val |= tmp << (ofs ? (32 - ofs) : 0);
133  }
134 
135  return val;
136 }
137 EXPORT_SYMBOL_GPL(ipu_ch_param_read_field);
138 
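The wbs argument decoded above packs a field's position and width into one u32: the low 8 bits hold the width in bits, and the remaining bits hold the absolute bit offset within the 160-bit channel-parameter words (five 32-bit data words each). A minimal sketch of that encoding; the packing macro and the example field below are illustrative assumptions, the real IPU_FIELD_* definitions live in ipu-prv.h:

    /* Hypothetical restatement of the (word, offset, size) packing that
     * ipu_ch_param_write_field()/ipu_ch_param_read_field() expect. */
    #define EXAMPLE_CPMEM_WORD(word, ofs, size) \
            ((((word) * 160 + (ofs)) << 8) | (size))

    /* A 14-bit field starting at bit 125 of cpmem word 0 (made up for the
     * example): the write below lands partly in data[3] and partly in
     * data[4], exercising the cross-word path handled above. */
    #define EXAMPLE_FIELD   EXAMPLE_CPMEM_WORD(0, 125, 14)

    static void example_set_field(struct ipu_ch_param __iomem *p)
    {
            ipu_ch_param_write_field(p, EXAMPLE_FIELD, 0x1234);
    }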
139 int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *p,
140  struct ipu_rgb *rgb)
141 {
142  int bpp = 0, npb = 0, ro, go, bo, to;
143 
144  ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
145  go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
146  bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
147  to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
148 
149  ipu_ch_param_write_field(p, IPU_FIELD_WID0, rgb->red.length - 1);
150  ipu_ch_param_write_field(p, IPU_FIELD_OFS0, ro);
151  ipu_ch_param_write_field(p, IPU_FIELD_WID1, rgb->green.length - 1);
152  ipu_ch_param_write_field(p, IPU_FIELD_OFS1, go);
153  ipu_ch_param_write_field(p, IPU_FIELD_WID2, rgb->blue.length - 1);
154  ipu_ch_param_write_field(p, IPU_FIELD_OFS2, bo);
155 
156  if (rgb->transp.length) {
157  ipu_ch_param_write_field(p, IPU_FIELD_WID3,
158  rgb->transp.length - 1);
159  ipu_ch_param_write_field(p, IPU_FIELD_OFS3, to);
160  } else {
161  ipu_ch_param_write_field(p, IPU_FIELD_WID3, 7);
162  ipu_ch_param_write_field(p, IPU_FIELD_OFS3,
163  rgb->bits_per_pixel);
164  }
165 
166  switch (rgb->bits_per_pixel) {
167  case 32:
168  bpp = 0;
169  npb = 15;
170  break;
171  case 24:
172  bpp = 1;
173  npb = 19;
174  break;
175  case 16:
176  bpp = 3;
177  npb = 31;
178  break;
179  case 8:
180  bpp = 5;
181  npb = 63;
182  break;
183  default:
184  return -EINVAL;
185  }
186  ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
187  ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
188  ipu_ch_param_write_field(p, IPU_FIELD_PFS, 7); /* rgb mode */
189 
190  return 0;
191 }
192 EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);
193 
194 int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
195  int width)
196 {
197  int bpp = 0, npb = 0;
198 
199  switch (width) {
200  case 32:
201  bpp = 0;
202  npb = 15;
203  break;
204  case 24:
205  bpp = 1;
206  npb = 19;
207  break;
208  case 16:
209  bpp = 3;
210  npb = 31;
211  break;
212  case 8:
213  bpp = 5;
214  npb = 63;
215  break;
216  default:
217  return -EINVAL;
218  }
219 
220  ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
221  ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
222  ipu_ch_param_write_field(p, IPU_FIELD_PFS, 6); /* raw mode */
223 
224  return 0;
225 }
226 EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
227 
228 void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p,
229  u32 pixel_format, int stride, int u_offset, int v_offset)
230 {
231  switch (pixel_format) {
232  case V4L2_PIX_FMT_YUV420:
233  ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
234  ipu_ch_param_write_field(p, IPU_FIELD_UBO, u_offset / 8);
235  ipu_ch_param_write_field(p, IPU_FIELD_VBO, v_offset / 8);
236  break;
237  }
238 }
239 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
240 
241 void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
242  int stride, int height)
243 {
244  int u_offset, v_offset;
245  int uv_stride = 0;
246 
247  switch (pixel_format) {
248  case V4L2_PIX_FMT_YUV420:
249  uv_stride = stride / 2;
250  u_offset = stride * height;
251  v_offset = u_offset + (uv_stride * height / 2);
252  ipu_cpmem_set_yuv_planar_full(p, pixel_format, stride,
253  u_offset, v_offset);
254  break;
255  }
256 }
257 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
258 
259 static struct ipu_rgb def_rgb_32 = {
260  .red = { .offset = 16, .length = 8, },
261  .green = { .offset = 8, .length = 8, },
262  .blue = { .offset = 0, .length = 8, },
263  .transp = { .offset = 24, .length = 8, },
264  .bits_per_pixel = 32,
265 };
266 
267 static struct ipu_rgb def_bgr_32 = {
268  .red = { .offset = 16, .length = 8, },
269  .green = { .offset = 8, .length = 8, },
270  .blue = { .offset = 0, .length = 8, },
271  .transp = { .offset = 24, .length = 8, },
272  .bits_per_pixel = 32,
273 };
274 
275 static struct ipu_rgb def_rgb_24 = {
276  .red = { .offset = 0, .length = 8, },
277  .green = { .offset = 8, .length = 8, },
278  .blue = { .offset = 16, .length = 8, },
279  .transp = { .offset = 0, .length = 0, },
280  .bits_per_pixel = 24,
281 };
282 
283 static struct ipu_rgb def_bgr_24 = {
284  .red = { .offset = 16, .length = 8, },
285  .green = { .offset = 8, .length = 8, },
286  .blue = { .offset = 0, .length = 8, },
287  .transp = { .offset = 0, .length = 0, },
288  .bits_per_pixel = 24,
289 };
290 
291 static struct ipu_rgb def_rgb_16 = {
292  .red = { .offset = 11, .length = 5, },
293  .green = { .offset = 5, .length = 6, },
294  .blue = { .offset = 0, .length = 5, },
295  .transp = { .offset = 0, .length = 0, },
296  .bits_per_pixel = 16,
297 };
298 
299 #define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
300 #define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
301  (pix->width * (y) / 4) + (x) / 2)
302 #define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
303  (pix->width * pix->height / 4) + \
304  (pix->width * (y) / 4) + (x) / 2)
305 
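For V4L2_PIX_FMT_YUV420 the luma plane is followed by quarter-size U and V planes, which is what these three macros encode. A short worked example, assuming a hypothetical 640x480 frame cropped at (x, y) = (100, 50):

    /* pix->width = 640, pix->height = 480:
     *   Y plane is 640 * 480 = 307200 bytes, U and V planes 76800 bytes each.
     *   Y_OFFSET(pix, 100, 50) = 100 + 640 * 50                    =  32100
     *   U_OFFSET(pix, 100, 50) = 307200 + (640 * 50) / 4 + 100 / 2 = 315250
     *   V_OFFSET(pix, 100, 50) = 307200 + 76800
     *                            + (640 * 50) / 4 + 100 / 2        = 392050
     * ipu_cpmem_set_image() below uses these to derive the U/V offsets
     * relative to the cropped luma start.
     */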
306 int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat)
307 {
308  switch (pixelformat) {
309  case V4L2_PIX_FMT_YUV420:
310  /* pix format */
311  ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 2);
312  /* burst size */
313  ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 63);
314  break;
315  case V4L2_PIX_FMT_UYVY:
316  /* bits/pixel */
317  ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
318  /* pix format */
319  ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0xA);
320  /* burst size */
321  ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
322  break;
323  case V4L2_PIX_FMT_YUYV:
324  /* bits/pixel */
325  ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
326  /* pix format */
327  ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0x8);
328  /* burst size */
329  ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
330  break;
331  case V4L2_PIX_FMT_RGB32:
332  ipu_cpmem_set_format_rgb(cpmem, &def_rgb_32);
333  break;
334  case V4L2_PIX_FMT_RGB565:
335  ipu_cpmem_set_format_rgb(cpmem, &def_rgb_16);
336  break;
337  case V4L2_PIX_FMT_BGR32:
338  ipu_cpmem_set_format_rgb(cpmem, &def_bgr_32);
339  break;
340  case V4L2_PIX_FMT_RGB24:
341  ipu_cpmem_set_format_rgb(cpmem, &def_rgb_24);
342  break;
343  case V4L2_PIX_FMT_BGR24:
344  ipu_cpmem_set_format_rgb(cpmem, &def_bgr_24);
345  break;
346  default:
347  return -EINVAL;
348  }
349 
350  return 0;
351 }
352 EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
353 
354 int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
355  struct ipu_image *image)
356 {
357  struct v4l2_pix_format *pix = &image->pix;
358  int y_offset, u_offset, v_offset;
359 
360  pr_debug("%s: resolution: %dx%d stride: %d\n",
361  __func__, pix->width, pix->height,
362  pix->bytesperline);
363 
364  ipu_cpmem_set_resolution(cpmem, image->rect.width,
365  image->rect.height);
366  ipu_cpmem_set_stride(cpmem, pix->bytesperline);
367 
368  ipu_cpmem_set_fmt(cpmem, pix->pixelformat);
369 
370  switch (pix->pixelformat) {
371  case V4L2_PIX_FMT_YUV420:
372  y_offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
373  u_offset = U_OFFSET(pix, image->rect.left,
374  image->rect.top) - y_offset;
375  v_offset = V_OFFSET(pix, image->rect.left,
376  image->rect.top) - y_offset;
377 
378  ipu_cpmem_set_yuv_planar_full(cpmem, pix->pixelformat,
379  pix->bytesperline, u_offset, v_offset);
380  ipu_cpmem_set_buffer(cpmem, 0, image->phys + y_offset);
381  break;
382  case V4L2_PIX_FMT_UYVY:
383  ipu_cpmem_set_buffer(cpmem, 0, image->phys +
384  image->rect.left * 2 +
385  image->rect.top * image->pix.bytesperline);
386  break;
387  case V4L2_PIX_FMT_RGB32:
388  case V4L2_PIX_FMT_BGR32:
389  ipu_cpmem_set_buffer(cpmem, 0, image->phys +
390  image->rect.left * 4 +
391  image->rect.top * image->pix.bytesperline);
392  break;
393  case V4L2_PIX_FMT_RGB565:
394  ipu_cpmem_set_buffer(cpmem, 0, image->phys +
395  image->rect.left * 2 +
396  image->rect.top * image->pix.bytesperline);
397  break;
398  case V4L2_PIX_FMT_RGB24:
399  case V4L2_PIX_FMT_BGR24:
400  ipu_cpmem_set_buffer(cpmem, 0, image->phys +
401  image->rect.left * 3 +
402  image->rect.top * image->pix.bytesperline);
403  break;
404  default:
405  return -EINVAL;
406  }
407 
408  return 0;
409 }
410 EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
411 
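A minimal sketch of how a caller might fill struct ipu_image and program a channel's parameter memory with ipu_cpmem_set_image(); the resolution, pixel format and the dma_handle parameter are illustrative assumptions:

    static int example_scanout_setup(struct ipu_ch_param __iomem *cpmem,
                                     dma_addr_t dma_handle)
    {
            struct ipu_image image = {
                    .pix = {
                            .pixelformat  = V4L2_PIX_FMT_RGB565,
                            .width        = 640,
                            .height       = 480,
                            .bytesperline = 640 * 2,
                    },
                    /* full frame, no cropping */
                    .rect = { .left = 0, .top = 0, .width = 640, .height = 480 },
                    .phys = dma_handle,
            };

            return ipu_cpmem_set_image(cpmem, &image);
    }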
412 enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
413 {
414  switch (pixelformat) {
415  case V4L2_PIX_FMT_YUV420:
416  case V4L2_PIX_FMT_UYVY:
417  case V4L2_PIX_FMT_YVYU:
418  return IPUV3_COLORSPACE_YUV;
419  case V4L2_PIX_FMT_RGB32:
420  case V4L2_PIX_FMT_BGR32:
421  case V4L2_PIX_FMT_RGB24:
422  case V4L2_PIX_FMT_BGR24:
423  case V4L2_PIX_FMT_RGB565:
424  return IPUV3_COLORSPACE_RGB;
425  default:
426  return IPUV3_COLORSPACE_UNKNOWN;
427  }
428 }
429 EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
430 
431 struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
432 {
433  struct ipuv3_channel *channel;
434 
435  dev_dbg(ipu->dev, "%s %d\n", __func__, num);
436 
437  if (num > 63)
438  return ERR_PTR(-ENODEV);
439 
440  mutex_lock(&ipu->channel_lock);
441 
442  channel = &ipu->channel[num];
443 
444  if (channel->busy) {
445  channel = ERR_PTR(-EBUSY);
446  goto out;
447  }
448 
449  channel->busy = 1;
450  channel->num = num;
451 
452 out:
453  mutex_unlock(&ipu->channel_lock);
454 
455  return channel;
456 }
457 EXPORT_SYMBOL_GPL(ipu_idmac_get);
458 
459 void ipu_idmac_put(struct ipuv3_channel *channel)
460 {
461  struct ipu_soc *ipu = channel->ipu;
462 
463  dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
464 
465  mutex_lock(&ipu->channel_lock);
466 
467  channel->busy = 0;
468 
469  mutex_unlock(&ipu->channel_lock);
470 }
471 EXPORT_SYMBOL_GPL(ipu_idmac_put);
472 
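Typical usage of the pair above is to reserve a well-known channel number at setup time and release it on teardown; a sketch, with IPUV3_CHANNEL_MEM_BG_SYNC (also used in the client platform data further down) as the example channel:

    static struct ipuv3_channel *example_acquire_bg(struct ipu_soc *ipu)
    {
            struct ipuv3_channel *ch;

            ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_MEM_BG_SYNC);
            if (IS_ERR(ch))
                    return ch;      /* -EBUSY if already claimed */

            /* ... program cpmem and enable the channel ... */
            return ch;
    }

    static void example_release(struct ipuv3_channel *ch)
    {
            ipu_idmac_put(ch);
    }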
473 #define idma_mask(ch) (1 << (ch & 0x1f))
474 
475 void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
476  bool doublebuffer)
477 {
478  struct ipu_soc *ipu = channel->ipu;
479  unsigned long flags;
480  u32 reg;
481 
482  spin_lock_irqsave(&ipu->lock, flags);
483 
484  reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
485  if (doublebuffer)
486  reg |= idma_mask(channel->num);
487  else
488  reg &= ~idma_mask(channel->num);
489  ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
490 
491  spin_unlock_irqrestore(&ipu->lock, flags);
492 }
493 EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
494 
495 int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
496 {
497  unsigned long lock_flags;
498  u32 val;
499 
500  spin_lock_irqsave(&ipu->lock, lock_flags);
501 
502  val = ipu_cm_read(ipu, IPU_DISP_GEN);
503 
504  if (mask & IPU_CONF_DI0_EN)
505  val |= IPU_DI0_COUNTER_RELEASE;
506  if (mask & IPU_CONF_DI1_EN)
507  val |= IPU_DI1_COUNTER_RELEASE;
508 
509  ipu_cm_write(ipu, val, IPU_DISP_GEN);
510 
511  val = ipu_cm_read(ipu, IPU_CONF);
512  val |= mask;
513  ipu_cm_write(ipu, val, IPU_CONF);
514 
515  spin_unlock_irqrestore(&ipu->lock, lock_flags);
516 
517  return 0;
518 }
519 EXPORT_SYMBOL_GPL(ipu_module_enable);
520 
521 int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
522 {
523  unsigned long lock_flags;
524  u32 val;
525 
526  spin_lock_irqsave(&ipu->lock, lock_flags);
527 
528  val = ipu_cm_read(ipu, IPU_CONF);
529  val &= ~mask;
530  ipu_cm_write(ipu, val, IPU_CONF);
531 
532  val = ipu_cm_read(ipu, IPU_DISP_GEN);
533 
534  if (mask & IPU_CONF_DI0_EN)
535  val &= ~IPU_DI0_COUNTER_RELEASE;
536  if (mask & IPU_CONF_DI1_EN)
537  val &= ~IPU_DI1_COUNTER_RELEASE;
538 
539  ipu_cm_write(ipu, val, IPU_DISP_GEN);
540 
541  spin_unlock_irqrestore(&ipu->lock, lock_flags);
542 
543  return 0;
544 }
545 EXPORT_SYMBOL_GPL(ipu_module_disable);
546 
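ipu_module_enable()/ipu_module_disable() are meant to bracket use of the individual sub-modules selected by IPU_CONF_* bits; a sketch for a display path, where IPU_CONF_DP_EN is assumed to be defined alongside IPU_CONF_DI0_EN in ipu-prv.h:

    static void example_display_path_on(struct ipu_soc *ipu)
    {
            /* releases the DI0 counter and sets the DI0/DP enable bits */
            ipu_module_enable(ipu, IPU_CONF_DI0_EN | IPU_CONF_DP_EN);
    }

    static void example_display_path_off(struct ipu_soc *ipu)
    {
            ipu_module_disable(ipu, IPU_CONF_DI0_EN | IPU_CONF_DP_EN);
    }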
547 void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
548 {
549  struct ipu_soc *ipu = channel->ipu;
550  unsigned int chno = channel->num;
551  unsigned long flags;
552 
553  spin_lock_irqsave(&ipu->lock, flags);
554 
555  /* Mark buffer as ready. */
556  if (buf_num == 0)
557  ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
558  else
559  ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
560 
561  spin_unlock_irqrestore(&ipu->lock, flags);
562 }
563 EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
564 
565 int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
566 {
567  struct ipu_soc *ipu = channel->ipu;
568  u32 val;
569  unsigned long flags;
570 
571  spin_lock_irqsave(&ipu->lock, flags);
572 
573  val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
574  val |= idma_mask(channel->num);
575  ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
576 
577  spin_unlock_irqrestore(&ipu->lock, flags);
578 
579  return 0;
580 }
581 EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
582 
583 int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
584 {
585  struct ipu_soc *ipu = channel->ipu;
586  u32 val;
587  unsigned long flags;
588  unsigned long timeout;
589 
590  timeout = jiffies + msecs_to_jiffies(50);
591  while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
592  idma_mask(channel->num)) {
593  if (time_after(jiffies, timeout)) {
594  dev_warn(ipu->dev, "disabling busy idmac channel %d\n",
595  channel->num);
596  break;
597  }
598  cpu_relax();
599  }
600 
601  spin_lock_irqsave(&ipu->lock, flags);
602 
603  /* Disable DMA channel(s) */
604  val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
605  val &= ~idma_mask(channel->num);
606  ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
607 
608  /* Set channel buffers NOT to be ready */
609  ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
610 
611  if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
612  idma_mask(channel->num)) {
613  ipu_cm_write(ipu, idma_mask(channel->num),
614  IPU_CHA_BUF0_RDY(channel->num));
615  }
616 
617  if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
618  idma_mask(channel->num)) {
619  ipu_cm_write(ipu, idma_mask(channel->num),
620  IPU_CHA_BUF1_RDY(channel->num));
621  }
622 
623  ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
624 
625  /* Reset the double buffer */
626  val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
627  val &= ~idma_mask(channel->num);
628  ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));
629 
630  spin_unlock_irqrestore(&ipu->lock, flags);
631 
632  return 0;
633 }
634 EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
635 
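Taken together, the IDMAC helpers above give the usual double-buffered start/flip/stop sequence; a condensed sketch with error handling omitted, assuming both buffers were already programmed via ipu_cpmem_set_buffer():

    static void example_start_and_flip(struct ipuv3_channel *ch)
    {
            ipu_idmac_set_double_buffer(ch, true);  /* use BUF0 and BUF1 */
            ipu_idmac_select_buffer(ch, 0);         /* mark BUF0 ready */
            ipu_idmac_enable_channel(ch);

            /* later, typically from an EOF interrupt: hand BUF1 to the DMA */
            ipu_idmac_select_buffer(ch, 1);
    }

    static void example_stop(struct ipuv3_channel *ch)
    {
            ipu_idmac_disable_channel(ch);          /* waits for the channel to idle */
            ipu_idmac_set_double_buffer(ch, false);
    }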
636 static int ipu_reset(struct ipu_soc *ipu)
637 {
638  unsigned long timeout;
639 
640  ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
641 
642  timeout = jiffies + msecs_to_jiffies(1000);
643  while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
644  if (time_after(jiffies, timeout))
645  return -ETIME;
646  cpu_relax();
647  }
648 
649  mdelay(300);
650 
651  return 0;
652 }
653 
654 struct ipu_devtype {
655  const char *name;
656  unsigned long cm_ofs;
657  unsigned long cpmem_ofs;
658  unsigned long srm_ofs;
659  unsigned long tpm_ofs;
660  unsigned long disp0_ofs;
661  unsigned long disp1_ofs;
662  unsigned long dc_tmpl_ofs;
663  unsigned long vdi_ofs;
664  enum ipuv3_type type;
665 };
666 
667 static struct ipu_devtype ipu_type_imx51 = {
668  .name = "IPUv3EX",
669  .cm_ofs = 0x1e000000,
670  .cpmem_ofs = 0x1f000000,
671  .srm_ofs = 0x1f040000,
672  .tpm_ofs = 0x1f060000,
673  .disp0_ofs = 0x1e040000,
674  .disp1_ofs = 0x1e048000,
675  .dc_tmpl_ofs = 0x1f080000,
676  .vdi_ofs = 0x1e068000,
677  .type = IPUV3EX,
678 };
679 
680 static struct ipu_devtype ipu_type_imx53 = {
681  .name = "IPUv3M",
682  .cm_ofs = 0x06000000,
683  .cpmem_ofs = 0x07000000,
684  .srm_ofs = 0x07040000,
685  .tpm_ofs = 0x07060000,
686  .disp0_ofs = 0x06040000,
687  .disp1_ofs = 0x06048000,
688  .dc_tmpl_ofs = 0x07080000,
689  .vdi_ofs = 0x06068000,
690  .type = IPUV3M,
691 };
692 
693 static struct ipu_devtype ipu_type_imx6q = {
694  .name = "IPUv3H",
695  .cm_ofs = 0x00200000,
696  .cpmem_ofs = 0x00300000,
697  .srm_ofs = 0x00340000,
698  .tpm_ofs = 0x00360000,
699  .disp0_ofs = 0x00240000,
700  .disp1_ofs = 0x00248000,
701  .dc_tmpl_ofs = 0x00380000,
702  .vdi_ofs = 0x00268000,
703  .type = IPUV3H,
704 };
705 
706 static const struct of_device_id imx_ipu_dt_ids[] = {
707  { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
708  { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
709  { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
710  { /* sentinel */ }
711 };
712 MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
713 
714 static int ipu_submodules_init(struct ipu_soc *ipu,
715  struct platform_device *pdev, unsigned long ipu_base,
716  struct clk *ipu_clk)
717 {
718  char *unit;
719  int ret;
720  struct device *dev = &pdev->dev;
721  const struct ipu_devtype *devtype = ipu->devtype;
722 
723  ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
724  IPU_CONF_DI0_EN, ipu_clk);
725  if (ret) {
726  unit = "di0";
727  goto err_di_0;
728  }
729 
730  ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
731  IPU_CONF_DI1_EN, ipu_clk);
732  if (ret) {
733  unit = "di1";
734  goto err_di_1;
735  }
736 
737  ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
738  IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
739  if (ret) {
740  unit = "dc_template";
741  goto err_dc;
742  }
743 
744  ret = ipu_dmfc_init(ipu, dev, ipu_base +
745  devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
746  if (ret) {
747  unit = "dmfc";
748  goto err_dmfc;
749  }
750 
751  ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
752  if (ret) {
753  unit = "dp";
754  goto err_dp;
755  }
756 
757  return 0;
758 
759 err_dp:
760  ipu_dmfc_exit(ipu);
761 err_dmfc:
762  ipu_dc_exit(ipu);
763 err_dc:
764  ipu_di_exit(ipu, 1);
765 err_di_1:
766  ipu_di_exit(ipu, 0);
767 err_di_0:
768  dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
769  return ret;
770 }
771 
772 static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
773 {
774  unsigned long status;
775  int i, bit, irq_base;
776 
777  for (i = 0; i < num_regs; i++) {
778 
779  status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
780  status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
781 
782  irq_base = ipu->irq_start + regs[i] * 32;
783  for_each_set_bit(bit, &status, 32)
784  generic_handle_irq(irq_base + bit);
785  }
786 }
787 
788 static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
789 {
790  struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
791  const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
792  struct irq_chip *chip = irq_get_chip(irq);
793 
794  chained_irq_enter(chip, desc);
795 
796  ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
797 
798  chained_irq_exit(chip, desc);
799 }
800 
801 static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
802 {
803  struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
804  const int int_reg[] = { 4, 5, 8, 9};
805  struct irq_chip *chip = irq_get_chip(irq);
806 
807  chained_irq_enter(chip, desc);
808 
809  ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
810 
811  chained_irq_exit(chip, desc);
812 }
813 
814 static void ipu_ack_irq(struct irq_data *d)
815 {
816  struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
817  unsigned int irq = d->irq - ipu->irq_start;
818 
819  ipu_cm_write(ipu, 1 << (irq % 32), IPU_INT_STAT(irq / 32));
820 }
821 
822 static void ipu_unmask_irq(struct irq_data *d)
823 {
824  struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
825  unsigned int irq = d->irq - ipu->irq_start;
826  unsigned long flags;
827  u32 reg;
828 
829  spin_lock_irqsave(&ipu->lock, flags);
830 
831  reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32));
832  reg |= 1 << (irq % 32);
833  ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32));
834 
835  spin_unlock_irqrestore(&ipu->lock, flags);
836 }
837 
838 static void ipu_mask_irq(struct irq_data *d)
839 {
840  struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
841  unsigned int irq = d->irq - ipu->irq_start;
842  unsigned long flags;
843  u32 reg;
844 
845  spin_lock_irqsave(&ipu->lock, flags);
846 
847  reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32));
848  reg &= ~(1 << (irq % 32));
849  ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32));
850 
851  spin_unlock_irqrestore(&ipu->lock, flags);
852 }
853 
854 static struct irq_chip ipu_irq_chip = {
855  .name = "IPU",
856  .irq_ack = ipu_ack_irq,
857  .irq_mask = ipu_mask_irq,
858  .irq_unmask = ipu_unmask_irq,
859 };
860 
861 int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
862  enum ipu_channel_irq irq_type)
863 {
864  return ipu->irq_start + irq_type + channel->num;
865 }
866 EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
867 
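ipu_idmac_channel_irq() maps a channel and an interrupt type onto the linear IRQ range that ipu_irq_init() allocates below; a sketch of hooking a channel's end-of-frame interrupt, assuming IPU_IRQ_EOF from enum ipu_channel_irq in imx-ipu-v3.h and a made-up handler:

    static irqreturn_t example_eof_handler(int irq, void *dev_id)
    {
            /* e.g. flip buffers with ipu_idmac_select_buffer() */
            return IRQ_HANDLED;
    }

    static int example_request_eof(struct device *dev, struct ipu_soc *ipu,
                                   struct ipuv3_channel *ch)
    {
            int irq = ipu_idmac_channel_irq(ipu, ch, IPU_IRQ_EOF);

            return devm_request_irq(dev, irq, example_eof_handler, 0,
                                    "example-eof", NULL);
    }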
868 static void ipu_submodules_exit(struct ipu_soc *ipu)
869 {
870  ipu_dp_exit(ipu);
871  ipu_dmfc_exit(ipu);
872  ipu_dc_exit(ipu);
873  ipu_di_exit(ipu, 1);
874  ipu_di_exit(ipu, 0);
875 }
876 
877 static int platform_remove_devices_fn(struct device *dev, void *unused)
878 {
879  struct platform_device *pdev = to_platform_device(dev);
880 
881  platform_device_unregister(pdev);
882 
883  return 0;
884 }
885 
886 static void platform_device_unregister_children(struct platform_device *pdev)
887 {
888  device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
889 }
890 
891 struct ipu_platform_reg {
892  struct ipu_client_platformdata pdata;
893  const char *name;
894 };
895 
896 static const struct ipu_platform_reg client_reg[] = {
897  {
898  .pdata = {
899  .di = 0,
900  .dc = 5,
901  .dp = IPU_DP_FLOW_SYNC_BG,
902  .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
903  .dma[1] = -EINVAL,
904  },
905  .name = "imx-ipuv3-crtc",
906  }, {
907  .pdata = {
908  .di = 1,
909  .dc = 1,
910  .dp = -EINVAL,
911  .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
912  .dma[1] = -EINVAL,
913  },
914  .name = "imx-ipuv3-crtc",
915  },
916 };
917 
918 static int ipu_client_id;
919 
920 static int ipu_add_subdevice_pdata(struct device *dev,
921  const struct ipu_platform_reg *reg)
922 {
923  struct platform_device *pdev;
924 
925  pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
926  &reg->pdata, sizeof(struct ipu_platform_reg));
927 
928  return pdev ? 0 : -EINVAL;
929 }
930 
931 static int ipu_add_client_devices(struct ipu_soc *ipu)
932 {
933  int ret;
934  int i;
935 
936  for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
937  const struct ipu_platform_reg *reg = &client_reg[i];
938  ret = ipu_add_subdevice_pdata(ipu->dev, reg);
939  if (ret)
940  goto err_register;
941  }
942 
943  return 0;
944 
945 err_register:
946  platform_device_unregister_children(to_platform_device(ipu->dev));
947 
948  return ret;
949 }
950 
951 static int ipu_irq_init(struct ipu_soc *ipu)
952 {
953  int i;
954 
955  ipu->irq_start = irq_alloc_descs(-1, 0, IPU_NUM_IRQS, 0);
956  if (ipu->irq_start < 0)
957  return ipu->irq_start;
958 
959  for (i = ipu->irq_start; i < ipu->irq_start + IPU_NUM_IRQS; i++) {
960  irq_set_chip_and_handler(i, &ipu_irq_chip, handle_level_irq);
961  set_irq_flags(i, IRQF_VALID);
962  irq_set_chip_data(i, ipu);
963  }
964 
965  irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
966  irq_set_handler_data(ipu->irq_sync, ipu);
967  irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
968  irq_set_handler_data(ipu->irq_err, ipu);
969 
970  return 0;
971 }
972 
973 static void ipu_irq_exit(struct ipu_soc *ipu)
974 {
975  int i;
976 
977  irq_set_chained_handler(ipu->irq_err, NULL);
978  irq_set_handler_data(ipu->irq_err, NULL);
979  irq_set_chained_handler(ipu->irq_sync, NULL);
980  irq_set_handler_data(ipu->irq_sync, NULL);
981 
982  for (i = ipu->irq_start; i < ipu->irq_start + IPU_NUM_IRQS; i++) {
983  set_irq_flags(i, 0);
984  irq_set_chip(i, NULL);
985  irq_set_chip_data(i, NULL);
986  }
987 
988  irq_free_descs(ipu->irq_start, IPU_NUM_IRQS);
989 }
990 
991 static int __devinit ipu_probe(struct platform_device *pdev)
992 {
993  const struct of_device_id *of_id =
994  of_match_device(imx_ipu_dt_ids, &pdev->dev);
995  struct ipu_soc *ipu;
996  struct resource *res;
997  unsigned long ipu_base;
998  int i, ret, irq_sync, irq_err;
999  const struct ipu_devtype *devtype;
1000 
1001  devtype = of_id->data;
1002 
1003  dev_info(&pdev->dev, "Initializing %s\n", devtype->name);
1004 
1005  irq_sync = platform_get_irq(pdev, 0);
1006  irq_err = platform_get_irq(pdev, 1);
1007  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1008 
1009  dev_info(&pdev->dev, "irq_sync: %d irq_err: %d\n",
1010  irq_sync, irq_err);
1011 
1012  if (!res || irq_sync < 0 || irq_err < 0)
1013  return -ENODEV;
1014 
1015  ipu_base = res->start;
1016 
1017  ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
1018  if (!ipu)
1019  return -ENODEV;
1020 
1021  for (i = 0; i < 64; i++)
1022  ipu->channel[i].ipu = ipu;
1023  ipu->devtype = devtype;
1024  ipu->ipu_type = devtype->type;
1025 
1026  spin_lock_init(&ipu->lock);
1027  mutex_init(&ipu->channel_lock);
1028 
1029  dev_info(&pdev->dev, "cm_reg: 0x%08lx\n",
1030  ipu_base + devtype->cm_ofs);
1031  dev_info(&pdev->dev, "idmac: 0x%08lx\n",
1032  ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
1033  dev_info(&pdev->dev, "cpmem: 0x%08lx\n",
1034  ipu_base + devtype->cpmem_ofs);
1035  dev_info(&pdev->dev, "disp0: 0x%08lx\n",
1036  ipu_base + devtype->disp0_ofs);
1037  dev_info(&pdev->dev, "disp1: 0x%08lx\n",
1038  ipu_base + devtype->disp1_ofs);
1039  dev_info(&pdev->dev, "srm: 0x%08lx\n",
1040  ipu_base + devtype->srm_ofs);
1041  dev_info(&pdev->dev, "tpm: 0x%08lx\n",
1042  ipu_base + devtype->tpm_ofs);
1043  dev_info(&pdev->dev, "dc: 0x%08lx\n",
1044  ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
1045  dev_info(&pdev->dev, "ic: 0x%08lx\n",
1046  ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
1047  dev_info(&pdev->dev, "dmfc: 0x%08lx\n",
1048  ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
1049  dev_info(&pdev->dev, "vdi: 0x%08lx\n",
1050  ipu_base + devtype->vdi_ofs);
1051 
1052  ipu->cm_reg = devm_ioremap(&pdev->dev,
1053  ipu_base + devtype->cm_ofs, PAGE_SIZE);
1054  ipu->idmac_reg = devm_ioremap(&pdev->dev,
1055  ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
1056  PAGE_SIZE);
1057  ipu->cpmem_base = devm_ioremap(&pdev->dev,
1058  ipu_base + devtype->cpmem_ofs, PAGE_SIZE);
1059 
1060  if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base) {
1061  ret = -ENOMEM;
1062  goto failed_ioremap;
1063  }
1064 
1065  ipu->clk = devm_clk_get(&pdev->dev, "bus");
1066  if (IS_ERR(ipu->clk)) {
1067  ret = PTR_ERR(ipu->clk);
1068  dev_err(&pdev->dev, "clk_get failed with %d", ret);
1069  goto failed_clk_get;
1070  }
1071 
1072  platform_set_drvdata(pdev, ipu);
1073 
1074  clk_prepare_enable(ipu->clk);
1075 
1076  ipu->dev = &pdev->dev;
1077  ipu->irq_sync = irq_sync;
1078  ipu->irq_err = irq_err;
1079 
1080  ret = ipu_irq_init(ipu);
1081  if (ret)
1082  goto out_failed_irq;
1083 
1084  ipu_reset(ipu);
1085 
1086  /* Set MCU_T to divide MCU access window into 2 */
1087  ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
1088  IPU_DISP_GEN);
1089 
1090  ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
1091  if (ret)
1092  goto failed_submodules_init;
1093 
1094  ret = ipu_add_client_devices(ipu);
1095  if (ret) {
1096  dev_err(&pdev->dev, "adding client devices failed with %d\n",
1097  ret);
1098  goto failed_add_clients;
1099  }
1100 
1101  return 0;
1102 
1103 failed_add_clients:
1104  ipu_submodules_exit(ipu);
1105 failed_submodules_init:
1106  ipu_irq_exit(ipu);
1107 out_failed_irq:
1108  clk_disable_unprepare(ipu->clk);
1109 failed_clk_get:
1110 failed_ioremap:
1111  return ret;
1112 }
1113 
1114 static int __devexit ipu_remove(struct platform_device *pdev)
1115 {
1116  struct ipu_soc *ipu = platform_get_drvdata(pdev);
1117  struct resource *res;
1118 
1119  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1120 
1121  platform_device_unregister_children(pdev);
1122  ipu_submodules_exit(ipu);
1123  ipu_irq_exit(ipu);
1124 
1125  clk_disable_unprepare(ipu->clk);
1126 
1127  return 0;
1128 }
1129 
1130 static struct platform_driver imx_ipu_driver = {
1131  .driver = {
1132  .name = "imx-ipuv3",
1133  .of_match_table = imx_ipu_dt_ids,
1134  },
1135  .probe = ipu_probe,
1136  .remove = __devexit_p(ipu_remove),
1137 };
1138 
1139 module_platform_driver(imx_ipu_driver);
1140 
1141 MODULE_DESCRIPTION("i.MX IPU v3 driver");
1142 MODULE_AUTHOR("Sascha Hauer <[email protected]>");
1143 MODULE_LICENSE("GPL");