Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
dsi.c
Go to the documentation of this file.
1 /*
2  * linux/drivers/video/omap2/dss/dsi.c
3  *
4  * Copyright (C) 2009 Nokia Corporation
5  * Author: Tomi Valkeinen <[email protected]>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License version 2 as published by
9  * the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program. If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #define DSS_SUBSYS_NAME "DSI"
21 
22 #include <linux/kernel.h>
23 #include <linux/io.h>
24 #include <linux/clk.h>
25 #include <linux/device.h>
26 #include <linux/err.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/mutex.h>
30 #include <linux/module.h>
31 #include <linux/semaphore.h>
32 #include <linux/seq_file.h>
33 #include <linux/platform_device.h>
35 #include <linux/wait.h>
36 #include <linux/workqueue.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/debugfs.h>
40 #include <linux/pm_runtime.h>
41 
42 #include <video/omapdss.h>
43 #include <video/mipi_display.h>
44 
45 #include "dss.h"
46 #include "dss_features.h"
47 
48 /*#define VERBOSE_IRQ*/
49 #define DSI_CATCH_MISSING_TE
50 
51 struct dsi_reg { u16 idx; };
52 
53 #define DSI_REG(idx) ((const struct dsi_reg) { idx })
54 
55 #define DSI_SZ_REGS SZ_1K
56 /* DSI Protocol Engine */
57 
58 #define DSI_REVISION DSI_REG(0x0000)
59 #define DSI_SYSCONFIG DSI_REG(0x0010)
60 #define DSI_SYSSTATUS DSI_REG(0x0014)
61 #define DSI_IRQSTATUS DSI_REG(0x0018)
62 #define DSI_IRQENABLE DSI_REG(0x001C)
63 #define DSI_CTRL DSI_REG(0x0040)
64 #define DSI_GNQ DSI_REG(0x0044)
65 #define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
66 #define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
67 #define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
68 #define DSI_CLK_CTRL DSI_REG(0x0054)
69 #define DSI_TIMING1 DSI_REG(0x0058)
70 #define DSI_TIMING2 DSI_REG(0x005C)
71 #define DSI_VM_TIMING1 DSI_REG(0x0060)
72 #define DSI_VM_TIMING2 DSI_REG(0x0064)
73 #define DSI_VM_TIMING3 DSI_REG(0x0068)
74 #define DSI_CLK_TIMING DSI_REG(0x006C)
75 #define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
76 #define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
77 #define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
78 #define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
79 #define DSI_VM_TIMING4 DSI_REG(0x0080)
80 #define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
81 #define DSI_VM_TIMING5 DSI_REG(0x0088)
82 #define DSI_VM_TIMING6 DSI_REG(0x008C)
83 #define DSI_VM_TIMING7 DSI_REG(0x0090)
84 #define DSI_STOPCLK_TIMING DSI_REG(0x0094)
85 #define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
86 #define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
87 #define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
88 #define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
89 #define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
90 #define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
91 #define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
92 
93 /* DSIPHY_SCP */
94 
95 #define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
96 #define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
97 #define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
98 #define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
99 #define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028)
100 
101 /* DSI_PLL_CTRL_SCP */
102 
103 #define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
104 #define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
105 #define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
106 #define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
107 #define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
108 
109 #define REG_GET(dsidev, idx, start, end) \
110  FLD_GET(dsi_read_reg(dsidev, idx), start, end)
111 
112 #define REG_FLD_MOD(dsidev, idx, val, start, end) \
113  dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
114 
115 /* Global interrupts */
116 #define DSI_IRQ_VC0 (1 << 0)
117 #define DSI_IRQ_VC1 (1 << 1)
118 #define DSI_IRQ_VC2 (1 << 2)
119 #define DSI_IRQ_VC3 (1 << 3)
120 #define DSI_IRQ_WAKEUP (1 << 4)
121 #define DSI_IRQ_RESYNC (1 << 5)
122 #define DSI_IRQ_PLL_LOCK (1 << 7)
123 #define DSI_IRQ_PLL_UNLOCK (1 << 8)
124 #define DSI_IRQ_PLL_RECALL (1 << 9)
125 #define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
126 #define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
127 #define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
128 #define DSI_IRQ_TE_TRIGGER (1 << 16)
129 #define DSI_IRQ_ACK_TRIGGER (1 << 17)
130 #define DSI_IRQ_SYNC_LOST (1 << 18)
131 #define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
132 #define DSI_IRQ_TA_TIMEOUT (1 << 20)
/*
 * Union of the global interrupt bits treated as errors by the IRQ
 * handler. The original expression listed DSI_IRQ_SYNC_LOST twice;
 * OR-ing a bit twice is a no-op, so dropping the duplicate leaves
 * the mask value unchanged.
 */
#define DSI_IRQ_ERROR_MASK \
	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
	DSI_IRQ_TA_TIMEOUT)
136 #define DSI_IRQ_CHANNEL_MASK 0xf
137 
138 /* Virtual channel interrupts */
139 #define DSI_VC_IRQ_CS (1 << 0)
140 #define DSI_VC_IRQ_ECC_CORR (1 << 1)
141 #define DSI_VC_IRQ_PACKET_SENT (1 << 2)
142 #define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
143 #define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
144 #define DSI_VC_IRQ_BTA (1 << 5)
145 #define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
146 #define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
147 #define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
148 #define DSI_VC_IRQ_ERROR_MASK \
149  (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
150  DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
151  DSI_VC_IRQ_FIFO_TX_UDF)
152 
153 /* ComplexIO interrupts */
154 #define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
155 #define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
156 #define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
157 #define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
158 #define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
159 #define DSI_CIO_IRQ_ERRESC1 (1 << 5)
160 #define DSI_CIO_IRQ_ERRESC2 (1 << 6)
161 #define DSI_CIO_IRQ_ERRESC3 (1 << 7)
162 #define DSI_CIO_IRQ_ERRESC4 (1 << 8)
163 #define DSI_CIO_IRQ_ERRESC5 (1 << 9)
164 #define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
165 #define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
166 #define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
167 #define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
168 #define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
169 #define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
170 #define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
171 #define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
172 #define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
173 #define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
174 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
175 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
176 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
177 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
178 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
179 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
180 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
181 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
182 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
183 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
184 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
185 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
186 #define DSI_CIO_IRQ_ERROR_MASK \
187  (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
188  DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
189  DSI_CIO_IRQ_ERRSYNCESC5 | \
190  DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
191  DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
192  DSI_CIO_IRQ_ERRESC5 | \
193  DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
194  DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
195  DSI_CIO_IRQ_ERRCONTROL5 | \
196  DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
197  DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
198  DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
199  DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
200  DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
201 
202 typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
203 
204 #define DSI_MAX_NR_ISRS 2
205 #define DSI_MAX_NR_LANES 5
206 
214 };
215 
217  enum dsi_lane_function function;
219 };
220 
221 struct dsi_isr_data {
223  void *arg;
225 };
226 
227 enum fifo_size {
233 };
234 
238 };
239 
241  unsigned long last_reset;
242  unsigned irq_count;
243  unsigned dsi_irqs[32];
244  unsigned vc_irqs[4][32];
245  unsigned cio_irqs[32];
246 };
247 
252 };
253 
254 struct dsi_data {
256  void __iomem *base;
257 
259 
260  int irq;
261 
262  struct clk *dss_clk;
263  struct clk *sys_clk;
264 
266 
269 
270  struct {
274  int vc_id;
275  } vc[4];
276 
277  struct mutex lock;
279 
280  unsigned pll_locked;
281 
284  /* space for a copy used by the interrupt handler */
286 
288 #ifdef DEBUG
289  unsigned update_bytes;
290 #endif
291 
294 
297 
299 
300 #ifdef DSI_CATCH_MISSING_TE
302 #endif
303 
304  unsigned long cache_req_pck;
305  unsigned long cache_clk_freq;
307 
310 #ifdef DEBUG
311  ktime_t perf_setup_time;
312  ktime_t perf_start_time;
313 #endif
316 
317 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
318  spinlock_t irq_stats_lock;
319  struct dsi_irq_stats irq_stats;
320 #endif
321  /* DSI PLL Parameter Ranges */
322  unsigned long regm_max, regn_max;
323  unsigned long regm_dispc_max, regm_dsi_max;
324  unsigned long fint_min, fint_max;
325  unsigned long lpdiv_max;
326 
328 
330  unsigned num_lanes_used;
331 
333 
339 
341 };
342 
346 };
347 
348 #ifdef DEBUG
349 static bool dsi_perf;
350 module_param(dsi_perf, bool, 0644);
351 #endif
352 
/* Return the driver-private dsi_data stored in the platform device's drvdata. */
static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
{
	return dev_get_drvdata(&dsidev->dev);
}
357 
/* Map a display device to the DSI platform device behind its output. */
static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
{
	return dssdev->output->pdev;
}
362 
364 {
365  struct omap_dss_output *out;
366  enum omap_dss_output_id id;
367 
368  switch (module) {
369  case 0:
371  break;
372  case 1:
374  break;
375  default:
376  return NULL;
377  }
378 
379  out = omap_dss_get_output(id);
380 
381  return out ? out->pdev : NULL;
382 }
383 
/* Write a 32-bit value to the DSI register at offset idx.idx from the
 * module's ioremapped base. */
static inline void dsi_write_reg(struct platform_device *dsidev,
		const struct dsi_reg idx, u32 val)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	__raw_writel(val, dsi->base + idx.idx);
}
391 
/* Read the 32-bit DSI register at offset idx.idx from the module's
 * ioremapped base. */
static inline u32 dsi_read_reg(struct platform_device *dsidev,
		const struct dsi_reg idx)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return __raw_readl(dsi->base + idx.idx);
}
399 
401 {
402  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
403  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
404 
405  down(&dsi->bus_lock);
406 }
408 
410 {
411  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
412  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
413 
414  up(&dsi->bus_lock);
415 }
417 
/* Return true if the DSI bus semaphore is currently held. */
static bool dsi_bus_is_locked(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* the semaphore count drops to 0 while it is taken */
	return dsi->bus_lock.count == 0;
}
424 
425 static void dsi_completion_handler(void *data, u32 mask)
426 {
427  complete((struct completion *)data);
428 }
429 
430 static inline int wait_for_bit_change(struct platform_device *dsidev,
431  const struct dsi_reg idx, int bitnum, int value)
432 {
433  unsigned long timeout;
434  ktime_t wait;
435  int t;
436 
437  /* first busyloop to see if the bit changes right away */
438  t = 100;
439  while (t-- > 0) {
440  if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
441  return value;
442  }
443 
444  /* then loop for 500ms, sleeping for 1ms in between */
445  timeout = jiffies + msecs_to_jiffies(500);
446  while (time_before(jiffies, timeout)) {
447  if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
448  return value;
449 
450  wait = ns_to_ktime(1000 * 1000);
453  }
454 
455  return !value;
456 }
457 
459 {
460  switch (fmt) {
463  return 24;
465  return 18;
467  return 16;
468  default:
469  BUG();
470  return 0;
471  }
472 }
473 
474 #ifdef DEBUG
/* Record the timestamp at which frame setup began (DEBUG perf tracing). */
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_setup_time = ktime_get();
}
480 
/* Record the timestamp at which the transfer started (DEBUG perf tracing). */
static void dsi_perf_mark_start(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_start_time = ktime_get();
}
486 
487 static void dsi_perf_show(struct platform_device *dsidev, const char *name)
488 {
489  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
490  ktime_t t, setup_time, trans_time;
492  u32 setup_us, trans_us, total_us;
493 
494  if (!dsi_perf)
495  return;
496 
497  t = ktime_get();
498 
499  setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
500  setup_us = (u32)ktime_to_us(setup_time);
501  if (setup_us == 0)
502  setup_us = 1;
503 
504  trans_time = ktime_sub(t, dsi->perf_start_time);
505  trans_us = (u32)ktime_to_us(trans_time);
506  if (trans_us == 0)
507  trans_us = 1;
508 
509  total_us = setup_us + trans_us;
510 
511  total_bytes = dsi->update_bytes;
512 
513  printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
514  "%u bytes, %u kbytes/sec\n",
515  name,
516  setup_us,
517  trans_us,
518  total_us,
519  1000*1000 / total_us,
520  total_bytes,
521  total_bytes * 1000 / total_us);
522 }
523 #else
/* No-op stubs used when DEBUG (and thus DSI performance tracing) is off. */
static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
{
}

static inline void dsi_perf_mark_start(struct platform_device *dsidev)
{
}

static inline void dsi_perf_show(struct platform_device *dsidev,
		const char *name)
{
}
536 #endif
537 
538 static void print_irq_status(u32 status)
539 {
540  if (status == 0)
541  return;
542 
543 #ifndef VERBOSE_IRQ
544  if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
545  return;
546 #endif
547  printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
548 
549 #define PIS(x) \
550  if (status & DSI_IRQ_##x) \
551  printk(#x " ");
552 #ifdef VERBOSE_IRQ
553  PIS(VC0);
554  PIS(VC1);
555  PIS(VC2);
556  PIS(VC3);
557 #endif
558  PIS(WAKEUP);
559  PIS(RESYNC);
560  PIS(PLL_LOCK);
561  PIS(PLL_UNLOCK);
562  PIS(PLL_RECALL);
563  PIS(COMPLEXIO_ERR);
566  PIS(TE_TRIGGER);
567  PIS(ACK_TRIGGER);
568  PIS(SYNC_LOST);
569  PIS(LDO_POWER_GOOD);
570  PIS(TA_TIMEOUT);
571 #undef PIS
572 
573  printk("\n");
574 }
575 
/*
 * Dump a virtual-channel interrupt status bitmask to the kernel log.
 * Without VERBOSE_IRQ, statuses consisting only of the routine
 * PACKET_SENT bit are suppressed to avoid flooding the log.
 */
static void print_irq_status_vc(int channel, u32 status)
{
	if (status == 0)
		return;

#ifndef VERBOSE_IRQ
	/* only PACKET_SENT set: not worth printing */
	if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
		return;
#endif
	printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);

/* print the name of each status bit that is set */
#define PIS(x) \
	if (status & DSI_VC_IRQ_##x) \
		printk(#x " ");
	PIS(CS);
	PIS(ECC_CORR);
#ifdef VERBOSE_IRQ
	PIS(PACKET_SENT);
#endif
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS
	printk("\n");
}
604 
/* Dump a ComplexIO interrupt status bitmask to the kernel log. */
static void print_irq_status_cio(u32 status)
{
	if (status == 0)
		return;

	printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);

/* print the name of each status bit that is set */
#define PIS(x) \
	if (status & DSI_CIO_IRQ_##x) \
		printk(#x " ");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS

	printk("\n");
}
639 
640 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
641 static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
642  u32 *vcstatus, u32 ciostatus)
643 {
644  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
645  int i;
646 
647  spin_lock(&dsi->irq_stats_lock);
648 
649  dsi->irq_stats.irq_count++;
650  dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
651 
652  for (i = 0; i < 4; ++i)
653  dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
654 
655  dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
656 
657  spin_unlock(&dsi->irq_stats_lock);
658 }
659 #else
660 #define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
661 #endif
662 
663 static int debug_irq;
664 
665 static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
666  u32 *vcstatus, u32 ciostatus)
667 {
668  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
669  int i;
670 
671  if (irqstatus & DSI_IRQ_ERROR_MASK) {
672  DSSERR("DSI error, irqstatus %x\n", irqstatus);
673  print_irq_status(irqstatus);
674  spin_lock(&dsi->errors_lock);
675  dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
676  spin_unlock(&dsi->errors_lock);
677  } else if (debug_irq) {
678  print_irq_status(irqstatus);
679  }
680 
681  for (i = 0; i < 4; ++i) {
682  if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
683  DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
684  i, vcstatus[i]);
685  print_irq_status_vc(i, vcstatus[i]);
686  } else if (debug_irq) {
687  print_irq_status_vc(i, vcstatus[i]);
688  }
689  }
690 
691  if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
692  DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
693  print_irq_status_cio(ciostatus);
694  } else if (debug_irq) {
695  print_irq_status_cio(ciostatus);
696  }
697 }
698 
699 static void dsi_call_isrs(struct dsi_isr_data *isr_array,
700  unsigned isr_array_size, u32 irqstatus)
701 {
702  struct dsi_isr_data *isr_data;
703  int i;
704 
705  for (i = 0; i < isr_array_size; i++) {
706  isr_data = &isr_array[i];
707  if (isr_data->isr && isr_data->mask & irqstatus)
708  isr_data->isr(isr_data->arg, irqstatus);
709  }
710 }
711 
712 static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
713  u32 irqstatus, u32 *vcstatus, u32 ciostatus)
714 {
715  int i;
716 
717  dsi_call_isrs(isr_tables->isr_table,
718  ARRAY_SIZE(isr_tables->isr_table),
719  irqstatus);
720 
721  for (i = 0; i < 4; ++i) {
722  if (vcstatus[i] == 0)
723  continue;
724  dsi_call_isrs(isr_tables->isr_table_vc[i],
725  ARRAY_SIZE(isr_tables->isr_table_vc[i]),
726  vcstatus[i]);
727  }
728 
729  if (ciostatus != 0)
730  dsi_call_isrs(isr_tables->isr_table_cio,
731  ARRAY_SIZE(isr_tables->isr_table_cio),
732  ciostatus);
733 }
734 
/*
 * Top-half interrupt handler for the DSI module. Reads and acks the
 * global, per-VC and ComplexIO status registers (each write followed
 * by a read-back to flush the posted write), then dispatches to the
 * registered ISRs via a snapshot of the ISR tables so that handlers
 * are free to unregister themselves.
 */
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
	struct platform_device *dsidev;
	struct dsi_data *dsi;
	u32 irqstatus, vcstatus[4], ciostatus;
	int i;

	dsidev = (struct platform_device *) arg;
	dsi = dsi_get_dsidrv_data(dsidev);

	spin_lock(&dsi->irq_lock);

	irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);

	/* IRQ is not for us */
	if (!irqstatus) {
		spin_unlock(&dsi->irq_lock);
		return IRQ_NONE;
	}

	/* ack everything except the per-channel bits; those are cleared
	 * through the per-VC status registers below */
	dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
	/* flush posted write */
	dsi_read_reg(dsidev, DSI_IRQSTATUS);

	for (i = 0; i < 4; ++i) {
		if ((irqstatus & (1 << i)) == 0) {
			vcstatus[i] = 0;
			continue;
		}

		vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));

		dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
	}

	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
		ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);

		dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
	} else {
		ciostatus = 0;
	}

#ifdef DSI_CATCH_MISSING_TE
	/* a TE trigger arrived, so stop the missing-TE watchdog timer */
	if (irqstatus & DSI_IRQ_TE_TRIGGER)
		del_timer(&dsi->te_timer);
#endif

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
		sizeof(dsi->isr_tables));

	spin_unlock(&dsi->irq_lock);

	dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);

	dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);

	dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);

	return IRQ_HANDLED;
}
802 
/*
 * Compute the union of @default_mask and every registered ISR mask in
 * @isr_array, then program that union into @enable_reg. Stale status
 * bits of newly enabled interrupts are cleared first so old events do
 * not fire immediately.
 *
 * dsi->irq_lock has to be locked by the caller.
 */
static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
		struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 default_mask,
		const struct dsi_reg enable_reg,
		const struct dsi_reg status_reg)
{
	struct dsi_isr_data *isr_data;
	u32 mask;
	u32 old_mask;
	int i;

	mask = default_mask;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dsi_read_reg(dsidev, enable_reg);
	/* clear the irqstatus for newly enabled irqs */
	dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
	dsi_write_reg(dsidev, enable_reg, mask);

	/* flush posted writes */
	dsi_read_reg(dsidev, enable_reg);
	dsi_read_reg(dsidev, status_reg);
}
835 
836 /* dsi->irq_lock has to be locked by the caller */
837 static void _omap_dsi_set_irqs(struct platform_device *dsidev)
838 {
839  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
840  u32 mask = DSI_IRQ_ERROR_MASK;
841 #ifdef DSI_CATCH_MISSING_TE
842  mask |= DSI_IRQ_TE_TRIGGER;
843 #endif
844  _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
845  ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
847 }
848 
849 /* dsi->irq_lock has to be locked by the caller */
850 static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
851 {
852  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
853 
854  _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
855  ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
858 }
859 
860 /* dsi->irq_lock has to be locked by the caller */
861 static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
862 {
863  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
864 
865  _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
866  ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
869 }
870 
871 static void _dsi_initialize_irq(struct platform_device *dsidev)
872 {
873  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
874  unsigned long flags;
875  int vc;
876 
877  spin_lock_irqsave(&dsi->irq_lock, flags);
878 
879  memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
880 
881  _omap_dsi_set_irqs(dsidev);
882  for (vc = 0; vc < 4; ++vc)
883  _omap_dsi_set_irqs_vc(dsidev, vc);
884  _omap_dsi_set_irqs_cio(dsidev);
885 
886  spin_unlock_irqrestore(&dsi->irq_lock, flags);
887 }
888 
889 static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
890  struct dsi_isr_data *isr_array, unsigned isr_array_size)
891 {
892  struct dsi_isr_data *isr_data;
893  int free_idx;
894  int i;
895 
896  BUG_ON(isr == NULL);
897 
898  /* check for duplicate entry and find a free slot */
899  free_idx = -1;
900  for (i = 0; i < isr_array_size; i++) {
901  isr_data = &isr_array[i];
902 
903  if (isr_data->isr == isr && isr_data->arg == arg &&
904  isr_data->mask == mask) {
905  return -EINVAL;
906  }
907 
908  if (isr_data->isr == NULL && free_idx == -1)
909  free_idx = i;
910  }
911 
912  if (free_idx == -1)
913  return -EBUSY;
914 
915  isr_data = &isr_array[free_idx];
916  isr_data->isr = isr;
917  isr_data->arg = arg;
918  isr_data->mask = mask;
919 
920  return 0;
921 }
922 
923 static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
924  struct dsi_isr_data *isr_array, unsigned isr_array_size)
925 {
926  struct dsi_isr_data *isr_data;
927  int i;
928 
929  for (i = 0; i < isr_array_size; i++) {
930  isr_data = &isr_array[i];
931  if (isr_data->isr != isr || isr_data->arg != arg ||
932  isr_data->mask != mask)
933  continue;
934 
935  isr_data->isr = NULL;
936  isr_data->arg = NULL;
937  isr_data->mask = 0;
938 
939  return 0;
940  }
941 
942  return -EINVAL;
943 }
944 
945 static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
946  void *arg, u32 mask)
947 {
948  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
949  unsigned long flags;
950  int r;
951 
952  spin_lock_irqsave(&dsi->irq_lock, flags);
953 
954  r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
955  ARRAY_SIZE(dsi->isr_tables.isr_table));
956 
957  if (r == 0)
958  _omap_dsi_set_irqs(dsidev);
959 
960  spin_unlock_irqrestore(&dsi->irq_lock, flags);
961 
962  return r;
963 }
964 
965 static int dsi_unregister_isr(struct platform_device *dsidev,
966  omap_dsi_isr_t isr, void *arg, u32 mask)
967 {
968  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
969  unsigned long flags;
970  int r;
971 
972  spin_lock_irqsave(&dsi->irq_lock, flags);
973 
974  r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
975  ARRAY_SIZE(dsi->isr_tables.isr_table));
976 
977  if (r == 0)
978  _omap_dsi_set_irqs(dsidev);
979 
980  spin_unlock_irqrestore(&dsi->irq_lock, flags);
981 
982  return r;
983 }
984 
985 static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
986  omap_dsi_isr_t isr, void *arg, u32 mask)
987 {
988  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
989  unsigned long flags;
990  int r;
991 
992  spin_lock_irqsave(&dsi->irq_lock, flags);
993 
994  r = _dsi_register_isr(isr, arg, mask,
995  dsi->isr_tables.isr_table_vc[channel],
996  ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
997 
998  if (r == 0)
999  _omap_dsi_set_irqs_vc(dsidev, channel);
1000 
1001  spin_unlock_irqrestore(&dsi->irq_lock, flags);
1002 
1003  return r;
1004 }
1005 
1006 static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
1007  omap_dsi_isr_t isr, void *arg, u32 mask)
1008 {
1009  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1010  unsigned long flags;
1011  int r;
1012 
1013  spin_lock_irqsave(&dsi->irq_lock, flags);
1014 
1015  r = _dsi_unregister_isr(isr, arg, mask,
1016  dsi->isr_tables.isr_table_vc[channel],
1017  ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
1018 
1019  if (r == 0)
1020  _omap_dsi_set_irqs_vc(dsidev, channel);
1021 
1022  spin_unlock_irqrestore(&dsi->irq_lock, flags);
1023 
1024  return r;
1025 }
1026 
1027 static int dsi_register_isr_cio(struct platform_device *dsidev,
1028  omap_dsi_isr_t isr, void *arg, u32 mask)
1029 {
1030  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1031  unsigned long flags;
1032  int r;
1033 
1034  spin_lock_irqsave(&dsi->irq_lock, flags);
1035 
1036  r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1037  ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1038 
1039  if (r == 0)
1040  _omap_dsi_set_irqs_cio(dsidev);
1041 
1042  spin_unlock_irqrestore(&dsi->irq_lock, flags);
1043 
1044  return r;
1045 }
1046 
1047 static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1048  omap_dsi_isr_t isr, void *arg, u32 mask)
1049 {
1050  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1051  unsigned long flags;
1052  int r;
1053 
1054  spin_lock_irqsave(&dsi->irq_lock, flags);
1055 
1056  r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1057  ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1058 
1059  if (r == 0)
1060  _omap_dsi_set_irqs_cio(dsidev);
1061 
1062  spin_unlock_irqrestore(&dsi->irq_lock, flags);
1063 
1064  return r;
1065 }
1066 
1067 static u32 dsi_get_errors(struct platform_device *dsidev)
1068 {
1069  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1070  unsigned long flags;
1071  u32 e;
1072  spin_lock_irqsave(&dsi->errors_lock, flags);
1073  e = dsi->errors;
1074  dsi->errors = 0;
1075  spin_unlock_irqrestore(&dsi->errors_lock, flags);
1076  return e;
1077 }
1078 
1080 {
1081  int r;
1082  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1083 
1084  DSSDBG("dsi_runtime_get\n");
1085 
1086  r = pm_runtime_get_sync(&dsi->pdev->dev);
1087  WARN_ON(r < 0);
1088  return r < 0 ? r : 0;
1089 }
1090 
/* Drop a runtime-PM reference on the DSI device. */
void dsi_runtime_put(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	DSSDBG("dsi_runtime_put\n");

	r = pm_runtime_put_sync(&dsi->pdev->dev);
	/* -ENOSYS means runtime PM is not enabled, which is tolerated here */
	WARN_ON(r < 0 && r != -ENOSYS);
}
1101 
1102 /* source clock for DSI PLL. this could also be PCLKFREE */
/* Gate the DSI PLL's source clock (sys_clk) on or off; when re-enabling
 * with the PLL previously locked, wait for it to report lock again. */
static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
		bool enable)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (enable)
		clk_prepare_enable(dsi->sys_clk);
	else
		clk_disable_unprepare(dsi->sys_clk);

	if (enable && dsi->pll_locked) {
		/* the PLL should regain lock once its source clock is back */
		if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
			DSSERR("cannot lock PLL when enabling clocks\n");
	}
}
1118 
1119 #ifdef DEBUG
1120 static void _dsi_print_reset_status(struct platform_device *dsidev)
1121 {
1122  u32 l;
1123  int b0, b1, b2;
1124 
1125  if (!dss_debug)
1126  return;
1127 
1128  /* A dummy read using the SCP interface to any DSIPHY register is
1129  * required after DSIPHY reset to complete the reset of the DSI complex
1130  * I/O. */
1131  l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1132 
1133  printk(KERN_DEBUG "DSI resets: ");
1134 
1135  l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
1136  printk("PLL (%d) ", FLD_GET(l, 0, 0));
1137 
1138  l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
1139  printk("CIO (%d) ", FLD_GET(l, 29, 29));
1140 
1142  b0 = 28;
1143  b1 = 27;
1144  b2 = 26;
1145  } else {
1146  b0 = 24;
1147  b1 = 25;
1148  b2 = 26;
1149  }
1150 
1151  l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1152  printk("PHY (%x%x%x, %d, %d, %d)\n",
1153  FLD_GET(l, b0, b0),
1154  FLD_GET(l, b1, b1),
1155  FLD_GET(l, b2, b2),
1156  FLD_GET(l, 29, 29),
1157  FLD_GET(l, 30, 30),
1158  FLD_GET(l, 31, 31));
1159 }
1160 #else
1161 #define _dsi_print_reset_status(x)
1162 #endif
1163 
/*
 * Enable or disable the DSI protocol engine interface (IF_EN, DSI_CTRL
 * bit 0) and wait for the hardware to reflect the new state.
 * Returns 0 on success, -EIO if the bit does not change in time.
 */
static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
{
	DSSDBG("dsi_if_enable(%d)\n", enable);

	enable = enable ? 1 : 0;
	REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */

	if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
		DSSERR("Failed to set dsi_if_enable to %d\n", enable);
		return -EIO;
	}

	return 0;
}
1178 
/* Return the cached rate of the PLL's HSDIV output feeding DISPC. */
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
}
1185 
/* Return the cached rate of the PLL's HSDIV output feeding the DSI engine. */
static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
}
1192 
/* TxByteClkHS rate: the cached 4x-DDR clock (clkin4ddr) divided by 16. */
static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.clkin4ddr / 16;
}
1199 
1200 static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1201 {
1202  unsigned long r;
1203  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1204 
1206  /* DSI FCLK source is DSS_CLK_FCK */
1207  r = clk_get_rate(dsi->dss_clk);
1208  } else {
1209  /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1210  r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1211  }
1212 
1213  return r;
1214 }
1215 
/*
 * Program the LP (escape-mode) clock divisor taken from the device's
 * clock configuration, caching the resulting divisor and LP clock rate
 * in current_cinfo. Returns -EINVAL if the requested divisor is zero
 * or above the platform maximum.
 */
static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long dsi_fclk;
	unsigned lp_clk_div;
	unsigned long lp_clk;

	lp_clk_div = dssdev->clocks.dsi.lp_clk_div;

	if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
		return -EINVAL;

	dsi_fclk = dsi_fclk_rate(dsidev);

	/* the LP clock runs at fclk / 2 / divisor */
	lp_clk = dsi_fclk / 2 / lp_clk_div;

	DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
	dsi->current_cinfo.lp_clk = lp_clk;
	dsi->current_cinfo.lp_clk_div = lp_clk_div;

	/* LP_CLK_DIVISOR */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);

	/* LP_RX_SYNCHRO_ENABLE */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);

	return 0;
}
1245 
1246 static void dsi_enable_scp_clk(struct platform_device *dsidev)
1247 {
1248  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1249 
1250  if (dsi->scp_clk_refcount++ == 0)
1251  REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1252 }
1253 
1254 static void dsi_disable_scp_clk(struct platform_device *dsidev)
1255 {
1256  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1257 
1258  WARN_ON(dsi->scp_clk_refcount == 0);
1259  if (--dsi->scp_clk_refcount == 0)
1260  REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1261 }
1262 
1268 };
1269 
1270 static int dsi_pll_power(struct platform_device *dsidev,
1272 {
1273  int t = 0;
1274 
1275  /* DSI-PLL power command 0x3 is not working */
1277  state == DSI_PLL_POWER_ON_DIV)
1278  state = DSI_PLL_POWER_ON_ALL;
1279 
1280  /* PLL_PWR_CMD */
1281  REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1282 
1283  /* PLL_PWR_STATUS */
1284  while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1285  if (++t > 1000) {
1286  DSSERR("Failed to set DSI PLL power mode to %d\n",
1287  state);
1288  return -ENODEV;
1289  }
1290  udelay(1);
1291  }
1292 
1293  return 0;
1294 }
1295 
1296 /* calculate clock rates using dividers in cinfo */
1297 static int dsi_calc_clock_rates(struct platform_device *dsidev,
1298  struct dsi_clock_info *cinfo)
1299 {
1300  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1301 
1302  if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1303  return -EINVAL;
1304 
1305  if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1306  return -EINVAL;
1307 
1308  if (cinfo->regm_dispc > dsi->regm_dispc_max)
1309  return -EINVAL;
1310 
1311  if (cinfo->regm_dsi > dsi->regm_dsi_max)
1312  return -EINVAL;
1313 
1314  cinfo->clkin = clk_get_rate(dsi->sys_clk);
1315  cinfo->fint = cinfo->clkin / cinfo->regn;
1316 
1317  if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1318  return -EINVAL;
1319 
1320  cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
1321 
1322  if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
1323  return -EINVAL;
1324 
1325  if (cinfo->regm_dispc > 0)
1326  cinfo->dsi_pll_hsdiv_dispc_clk =
1327  cinfo->clkin4ddr / cinfo->regm_dispc;
1328  else
1329  cinfo->dsi_pll_hsdiv_dispc_clk = 0;
1330 
1331  if (cinfo->regm_dsi > 0)
1332  cinfo->dsi_pll_hsdiv_dsi_clk =
1333  cinfo->clkin4ddr / cinfo->regm_dsi;
1334  else
1335  cinfo->dsi_pll_hsdiv_dsi_clk = 0;
1336 
1337  return 0;
1338 }
1339 
1341  unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
1342  struct dispc_clock_info *dispc_cinfo)
1343 {
1344  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1345  struct dsi_clock_info cur, best;
1346  struct dispc_clock_info best_dispc;
1347  int min_fck_per_pck;
1348  int match = 0;
1349  unsigned long dss_sys_clk, max_dss_fck;
1350 
1351  dss_sys_clk = clk_get_rate(dsi->sys_clk);
1352 
1354 
1355  if (req_pck == dsi->cache_req_pck &&
1356  dsi->cache_cinfo.clkin == dss_sys_clk) {
1357  DSSDBG("DSI clock info found from cache\n");
1358  *dsi_cinfo = dsi->cache_cinfo;
1359  dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk,
1360  dispc_cinfo);
1361  return 0;
1362  }
1363 
1364  min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
1365 
1366  if (min_fck_per_pck &&
1367  req_pck * min_fck_per_pck > max_dss_fck) {
1368  DSSERR("Requested pixel clock not possible with the current "
1369  "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
1370  "the constraint off.\n");
1371  min_fck_per_pck = 0;
1372  }
1373 
1374  DSSDBG("dsi_pll_calc\n");
1375 
1376 retry:
1377  memset(&best, 0, sizeof(best));
1378  memset(&best_dispc, 0, sizeof(best_dispc));
1379 
1380  memset(&cur, 0, sizeof(cur));
1381  cur.clkin = dss_sys_clk;
1382 
1383  /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
1384  /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1385  for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1386  cur.fint = cur.clkin / cur.regn;
1387 
1388  if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1389  continue;
1390 
1391  /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
1392  for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1393  unsigned long a, b;
1394 
1395  a = 2 * cur.regm * (cur.clkin/1000);
1396  b = cur.regn;
1397  cur.clkin4ddr = a / b * 1000;
1398 
1399  if (cur.clkin4ddr > 1800 * 1000 * 1000)
1400  break;
1401 
1402  /* dsi_pll_hsdiv_dispc_clk(MHz) =
1403  * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */
1404  for (cur.regm_dispc = 1; cur.regm_dispc <
1405  dsi->regm_dispc_max; ++cur.regm_dispc) {
1406  struct dispc_clock_info cur_dispc;
1408  cur.clkin4ddr / cur.regm_dispc;
1409 
1410  /* this will narrow down the search a bit,
1411  * but still give pixclocks below what was
1412  * requested */
1413  if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
1414  break;
1415 
1416  if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
1417  continue;
1418 
1419  if (min_fck_per_pck &&
1421  req_pck * min_fck_per_pck)
1422  continue;
1423 
1424  match = 1;
1425 
1426  dispc_find_clk_divs(req_pck,
1428  &cur_dispc);
1429 
1430  if (abs(cur_dispc.pck - req_pck) <
1431  abs(best_dispc.pck - req_pck)) {
1432  best = cur;
1433  best_dispc = cur_dispc;
1434 
1435  if (cur_dispc.pck == req_pck)
1436  goto found;
1437  }
1438  }
1439  }
1440  }
1441 found:
1442  if (!match) {
1443  if (min_fck_per_pck) {
1444  DSSERR("Could not find suitable clock settings.\n"
1445  "Turning FCK/PCK constraint off and"
1446  "trying again.\n");
1447  min_fck_per_pck = 0;
1448  goto retry;
1449  }
1450 
1451  DSSERR("Could not find suitable clock settings.\n");
1452 
1453  return -EINVAL;
1454  }
1455 
1456  /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
1457  best.regm_dsi = 0;
1458  best.dsi_pll_hsdiv_dsi_clk = 0;
1459 
1460  if (dsi_cinfo)
1461  *dsi_cinfo = best;
1462  if (dispc_cinfo)
1463  *dispc_cinfo = best_dispc;
1464 
1465  dsi->cache_req_pck = req_pck;
1466  dsi->cache_clk_freq = 0;
1467  dsi->cache_cinfo = best;
1468 
1469  return 0;
1470 }
1471 
/*
 * Search the regn/regm divider space for the CLKIN4DDR rate closest to
 * req_clkin4ddr and store the winning configuration in *cinfo (if non-NULL).
 * regm_dispc/regm_dsi are left zero here (cleared by the memset); callers
 * presumably fill them separately — TODO confirm against callers.
 * Always returns 0; *cinfo may be all-zero if no divider pair passed the
 * Fint window check.
 */
static int dsi_pll_calc_ddrfreq(struct platform_device *dsidev,
		unsigned long req_clkin4ddr, struct dsi_clock_info *cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info cur, best;

	DSSDBG("dsi_pll_calc_ddrfreq\n");

	memset(&best, 0, sizeof(best));
	memset(&cur, 0, sizeof(cur));

	cur.clkin = clk_get_rate(dsi->sys_clk);

	for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
		/* candidate PLL input: Fint = clkin / regn */
		cur.fint = cur.clkin / cur.regn;

		if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
			continue;

		/* DSIPHY(MHz) = (2 * regm / regn) * clkin */
		for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
			unsigned long a, b;

			/* arithmetic done in kHz — likely to avoid
			 * overflowing unsigned long; TODO confirm */
			a = 2 * cur.regm * (cur.clkin/1000);
			b = cur.regn;
			cur.clkin4ddr = a / b * 1000;

			/* regm only grows from here, so higher values can
			 * only overshoot further: stop this regn */
			if (cur.clkin4ddr > 1800 * 1000 * 1000)
				break;

			/* keep the candidate closest to the request so far */
			if (abs(cur.clkin4ddr - req_clkin4ddr) <
					abs(best.clkin4ddr - req_clkin4ddr)) {
				best = cur;
				DSSDBG("best %ld\n", best.clkin4ddr);
			}

			/* exact hit: best was just updated above */
			if (cur.clkin4ddr == req_clkin4ddr)
				goto found;
		}
	}
found:
	if (cinfo)
		*cinfo = best;

	return 0;
}
1518 
1519 static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
1520  struct dsi_clock_info *cinfo)
1521 {
1522  unsigned long max_dsi_fck;
1523 
1525 
1526  cinfo->regm_dsi = DIV_ROUND_UP(cinfo->clkin4ddr, max_dsi_fck);
1527  cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi;
1528 }
1529 
1530 static int dsi_pll_calc_dispc_fck(struct platform_device *dsidev,
1531  unsigned long req_pck, struct dsi_clock_info *cinfo,
1532  struct dispc_clock_info *dispc_cinfo)
1533 {
1534  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1535  unsigned regm_dispc, best_regm_dispc;
1536  unsigned long dispc_clk, best_dispc_clk;
1537  int min_fck_per_pck;
1538  unsigned long max_dss_fck;
1539  struct dispc_clock_info best_dispc;
1540  bool match;
1541 
1543 
1544  min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
1545 
1546  if (min_fck_per_pck &&
1547  req_pck * min_fck_per_pck > max_dss_fck) {
1548  DSSERR("Requested pixel clock not possible with the current "
1549  "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
1550  "the constraint off.\n");
1551  min_fck_per_pck = 0;
1552  }
1553 
1554 retry:
1555  best_regm_dispc = 0;
1556  best_dispc_clk = 0;
1557  memset(&best_dispc, 0, sizeof(best_dispc));
1558  match = false;
1559 
1560  for (regm_dispc = 1; regm_dispc < dsi->regm_dispc_max; ++regm_dispc) {
1561  struct dispc_clock_info cur_dispc;
1562 
1563  dispc_clk = cinfo->clkin4ddr / regm_dispc;
1564 
1565  /* this will narrow down the search a bit,
1566  * but still give pixclocks below what was
1567  * requested */
1568  if (dispc_clk < req_pck)
1569  break;
1570 
1571  if (dispc_clk > max_dss_fck)
1572  continue;
1573 
1574  if (min_fck_per_pck && dispc_clk < req_pck * min_fck_per_pck)
1575  continue;
1576 
1577  match = true;
1578 
1579  dispc_find_clk_divs(req_pck, dispc_clk, &cur_dispc);
1580 
1581  if (abs(cur_dispc.pck - req_pck) <
1582  abs(best_dispc.pck - req_pck)) {
1583  best_regm_dispc = regm_dispc;
1584  best_dispc_clk = dispc_clk;
1585  best_dispc = cur_dispc;
1586 
1587  if (cur_dispc.pck == req_pck)
1588  goto found;
1589  }
1590  }
1591 
1592  if (!match) {
1593  if (min_fck_per_pck) {
1594  DSSERR("Could not find suitable clock settings.\n"
1595  "Turning FCK/PCK constraint off and"
1596  "trying again.\n");
1597  min_fck_per_pck = 0;
1598  goto retry;
1599  }
1600 
1601  DSSERR("Could not find suitable clock settings.\n");
1602 
1603  return -EINVAL;
1604  }
1605 found:
1606  cinfo->regm_dispc = best_regm_dispc;
1607  cinfo->dsi_pll_hsdiv_dispc_clk = best_dispc_clk;
1608 
1609  *dispc_cinfo = best_dispc;
1610 
1611  return 0;
1612 }
1613 
1615  struct dsi_clock_info *cinfo)
1616 {
1617  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1618  int r = 0;
1619  u32 l;
1620  int f = 0;
1621  u8 regn_start, regn_end, regm_start, regm_end;
1622  u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
1623 
1624  DSSDBGF();
1625 
1626  dsi->current_cinfo.clkin = cinfo->clkin;
1627  dsi->current_cinfo.fint = cinfo->fint;
1628  dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1629  dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1630  cinfo->dsi_pll_hsdiv_dispc_clk;
1631  dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1632  cinfo->dsi_pll_hsdiv_dsi_clk;
1633 
1634  dsi->current_cinfo.regn = cinfo->regn;
1635  dsi->current_cinfo.regm = cinfo->regm;
1636  dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1637  dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1638 
1639  DSSDBG("DSI Fint %ld\n", cinfo->fint);
1640 
1641  DSSDBG("clkin rate %ld\n", cinfo->clkin);
1642 
1643  /* DSIPHY == CLKIN4DDR */
1644  DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
1645  cinfo->regm,
1646  cinfo->regn,
1647  cinfo->clkin,
1648  cinfo->clkin4ddr);
1649 
1650  DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1651  cinfo->clkin4ddr / 1000 / 1000 / 2);
1652 
1653  DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1654 
1655  DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
1658  cinfo->dsi_pll_hsdiv_dispc_clk);
1659  DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
1662  cinfo->dsi_pll_hsdiv_dsi_clk);
1663 
1664  dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
1665  dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
1667  &regm_dispc_end);
1669  &regm_dsi_end);
1670 
1671  /* DSI_PLL_AUTOMODE = manual */
1672  REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
1673 
1674  l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
1675  l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1676  /* DSI_PLL_REGN */
1677  l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
1678  /* DSI_PLL_REGM */
1679  l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
1680  /* DSI_CLOCK_DIV */
1681  l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
1682  regm_dispc_start, regm_dispc_end);
1683  /* DSIPROTO_CLOCK_DIV */
1684  l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
1685  regm_dsi_start, regm_dsi_end);
1686  dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1687 
1688  BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1689 
1690  l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1691 
1693  f = cinfo->fint < 1000000 ? 0x3 :
1694  cinfo->fint < 1250000 ? 0x4 :
1695  cinfo->fint < 1500000 ? 0x5 :
1696  cinfo->fint < 1750000 ? 0x6 :
1697  0x7;
1698 
1699  l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1701  f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;
1702 
1703  l = FLD_MOD(l, f, 4, 1); /* PLL_SELFREQDCO */
1704  }
1705 
1706  l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1707  l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1708  l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1710  l = FLD_MOD(l, 3, 22, 21); /* REF_SYSCLK = sysclk */
1711  dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1712 
1713  REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1714 
1715  if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
1716  DSSERR("dsi pll go bit not going down.\n");
1717  r = -EIO;
1718  goto err;
1719  }
1720 
1721  if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
1722  DSSERR("cannot lock PLL\n");
1723  r = -EIO;
1724  goto err;
1725  }
1726 
1727  dsi->pll_locked = 1;
1728 
1729  l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1730  l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1731  l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1732  l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1733  l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1734  l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1735  l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1736  l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1737  l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1738  l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1739  l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1740  l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1741  l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1742  l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1743  l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1744  dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1745 
1746  DSSDBG("PLL config done\n");
1747 err:
1748  return r;
1749 }
1750 
1751 int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1752  bool enable_hsdiv)
1753 {
1754  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1755  int r = 0;
1756  enum dsi_pll_power_state pwstate;
1757 
1758  DSSDBG("PLL init\n");
1759 
1760  if (dsi->vdds_dsi_reg == NULL) {
1761  struct regulator *vdds_dsi;
1762 
1763  vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
1764 
1765  if (IS_ERR(vdds_dsi)) {
1766  DSSERR("can't get VDDS_DSI regulator\n");
1767  return PTR_ERR(vdds_dsi);
1768  }
1769 
1770  dsi->vdds_dsi_reg = vdds_dsi;
1771  }
1772 
1773  dsi_enable_pll_clock(dsidev, 1);
1774  /*
1775  * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1776  */
1777  dsi_enable_scp_clk(dsidev);
1778 
1779  if (!dsi->vdds_dsi_enabled) {
1780  r = regulator_enable(dsi->vdds_dsi_reg);
1781  if (r)
1782  goto err0;
1783  dsi->vdds_dsi_enabled = true;
1784  }
1785 
1786  /* XXX PLL does not come out of reset without this... */
1788 
1789  if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
1790  DSSERR("PLL not coming out of reset.\n");
1791  r = -ENODEV;
1793  goto err1;
1794  }
1795 
1796  /* XXX ... but if left on, we get problems when planes do not
1797  * fill the whole display. No idea about this */
1799 
1800  if (enable_hsclk && enable_hsdiv)
1801  pwstate = DSI_PLL_POWER_ON_ALL;
1802  else if (enable_hsclk)
1803  pwstate = DSI_PLL_POWER_ON_HSCLK;
1804  else if (enable_hsdiv)
1805  pwstate = DSI_PLL_POWER_ON_DIV;
1806  else
1807  pwstate = DSI_PLL_POWER_OFF;
1808 
1809  r = dsi_pll_power(dsidev, pwstate);
1810 
1811  if (r)
1812  goto err1;
1813 
1814  DSSDBG("PLL init done\n");
1815 
1816  return 0;
1817 err1:
1818  if (dsi->vdds_dsi_enabled) {
1820  dsi->vdds_dsi_enabled = false;
1821  }
1822 err0:
1823  dsi_disable_scp_clk(dsidev);
1824  dsi_enable_pll_clock(dsidev, 0);
1825  return r;
1826 }
1827 
1828 void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1829 {
1830  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1831 
1832  dsi->pll_locked = 0;
1833  dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1834  if (disconnect_lanes) {
1835  WARN_ON(!dsi->vdds_dsi_enabled);
1837  dsi->vdds_dsi_enabled = false;
1838  }
1839 
1840  dsi_disable_scp_clk(dsidev);
1841  dsi_enable_pll_clock(dsidev, 0);
1842 
1843  DSSDBG("PLL uninit done\n");
1844 }
1845 
1846 static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1847  struct seq_file *s)
1848 {
1849  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1850  struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1851  enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1852  int dsi_module = dsi->module_id;
1853 
1854  dispc_clk_src = dss_get_dispc_clk_source();
1855  dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1856 
1857  if (dsi_runtime_get(dsidev))
1858  return;
1859 
1860  seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1861 
1862  seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin);
1863 
1864  seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1865 
1866  seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
1867  cinfo->clkin4ddr, cinfo->regm);
1868 
1869  seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16luregm_dispc %u\t(%s)\n",
1870  dss_feat_get_clk_source_name(dsi_module == 0 ?
1873  cinfo->dsi_pll_hsdiv_dispc_clk,
1874  cinfo->regm_dispc,
1875  dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1876  "off" : "on");
1877 
1878  seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16luregm_dsi %u\t(%s)\n",
1879  dss_feat_get_clk_source_name(dsi_module == 0 ?
1882  cinfo->dsi_pll_hsdiv_dsi_clk,
1883  cinfo->regm_dsi,
1884  dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1885  "off" : "on");
1886 
1887  seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1888 
1889  seq_printf(s, "dsi fclk source = %s (%s)\n",
1890  dss_get_generic_clk_source_name(dsi_clk_src),
1891  dss_feat_get_clk_source_name(dsi_clk_src));
1892 
1893  seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1894 
1895  seq_printf(s, "DDR_CLK\t\t%lu\n",
1896  cinfo->clkin4ddr / 4);
1897 
1898  seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1899 
1900  seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1901 
1902  dsi_runtime_put(dsidev);
1903 }
1904 
1905 void dsi_dump_clocks(struct seq_file *s)
1906 {
1907  struct platform_device *dsidev;
1908  int i;
1909 
1910  for (i = 0; i < MAX_NUM_DSI; i++) {
1911  dsidev = dsi_get_dsidev_from_id(i);
1912  if (dsidev)
1913  dsi_dump_dsidev_clocks(dsidev, s);
1914  }
1915 }
1916 
1917 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
/*
 * Dump the accumulated IRQ statistics of one DSI module into the seq_file
 * and reset the counters. The stats are snapshotted and cleared atomically
 * under irq_stats_lock so the IRQ handler can keep counting meanwhile.
 */
static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	struct dsi_irq_stats stats;

	spin_lock_irqsave(&dsi->irq_stats_lock, flags);

	/* take a local copy, then restart the accounting period */
	stats = dsi->irq_stats;
	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
	dsi->irq_stats.last_reset = jiffies;

	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);

	seq_printf(s, "period %u ms\n",
			jiffies_to_msecs(jiffies - stats.last_reset));

	seq_printf(s, "irqs %d\n", stats.irq_count);
/* print one top-level DSI interrupt counter, indexed by its bit position */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);

	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

/* print one virtual-channel interrupt counter for all four VCs side by side */
#define PIS(x) \
	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);

	seq_printf(s, "-- VC interrupts --\n");
	PIS(CS);
	PIS(ECC_CORR);
	PIS(PACKET_SENT);
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS

/* print one ComplexIO interrupt counter, indexed by its bit position */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, \
			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);

	seq_printf(s, "-- CIO interrupts --\n");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS
}
2006 
/* debugfs callback: IRQ statistics for the first DSI module */
static void dsi1_dump_irqs(struct seq_file *s)
{
	dsi_dump_dsidev_irqs(dsi_get_dsidev_from_id(0), s);
}
2013 
/* debugfs callback: IRQ statistics for the second DSI module */
static void dsi2_dump_irqs(struct seq_file *s)
{
	dsi_dump_dsidev_irqs(dsi_get_dsidev_from_id(1), s);
}
2020 #endif
2021 
2022 static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
2023  struct seq_file *s)
2024 {
2025 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
2026 
2027  if (dsi_runtime_get(dsidev))
2028  return;
2029  dsi_enable_scp_clk(dsidev);
2030 
2036  DUMPREG(DSI_CTRL);
2057 
2058  DUMPREG(DSI_VC_CTRL(0));
2059  DUMPREG(DSI_VC_TE(0));
2065 
2066  DUMPREG(DSI_VC_CTRL(1));
2067  DUMPREG(DSI_VC_TE(1));
2073 
2074  DUMPREG(DSI_VC_CTRL(2));
2075  DUMPREG(DSI_VC_TE(2));
2081 
2082  DUMPREG(DSI_VC_CTRL(3));
2083  DUMPREG(DSI_VC_TE(3));
2089 
2094 
2100 
2101  dsi_disable_scp_clk(dsidev);
2102  dsi_runtime_put(dsidev);
2103 #undef DUMPREG
2104 }
2105 
/* debugfs callback: register dump for the first DSI module */
static void dsi1_dump_regs(struct seq_file *s)
{
	dsi_dump_dsidev_regs(dsi_get_dsidev_from_id(0), s);
}
2112 
/* debugfs callback: register dump for the second DSI module */
static void dsi2_dump_regs(struct seq_file *s)
{
	dsi_dump_dsidev_regs(dsi_get_dsidev_from_id(1), s);
}
2119 
2124 };
2125 
2126 static int dsi_cio_power(struct platform_device *dsidev,
2128 {
2129  int t = 0;
2130 
2131  /* PWR_CMD */
2132  REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
2133 
2134  /* PWR_STATUS */
2135  while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
2136  26, 25) != state) {
2137  if (++t > 1000) {
2138  DSSERR("failed to set complexio power state to "
2139  "%d\n", state);
2140  return -ENODEV;
2141  }
2142  udelay(1);
2143  }
2144 
2145  return 0;
2146 }
2147 
2148 static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2149 {
2150  int val;
2151 
2152  /* line buffer on OMAP3 is 1024 x 24bits */
2153  /* XXX: for some reason using full buffer size causes
2154  * considerable TX slowdown with update sizes that fill the
2155  * whole buffer */
2157  return 1023 * 3;
2158 
2159  val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
2160 
2161  switch (val) {
2162  case 1:
2163  return 512 * 3; /* 512x24 bits */
2164  case 2:
2165  return 682 * 3; /* 682x24 bits */
2166  case 3:
2167  return 853 * 3; /* 853x24 bits */
2168  case 4:
2169  return 1024 * 3; /* 1024x24 bits */
2170  case 5:
2171  return 1194 * 3; /* 1194x24 bits */
2172  case 6:
2173  return 1365 * 3; /* 1365x24 bits */
2174  case 7:
2175  return 1920 * 3; /* 1920x24 bits */
2176  default:
2177  BUG();
2178  return 0;
2179  }
2180 }
2181 
2182 static int dsi_set_lane_config(struct platform_device *dsidev)
2183 {
2184  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2185  static const u8 offsets[] = { 0, 4, 8, 12, 16 };
2186  static const enum dsi_lane_function functions[] = {
2187  DSI_LANE_CLK,
2192  };
2193  u32 r;
2194  int i;
2195 
2196  r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
2197 
2198  for (i = 0; i < dsi->num_lanes_used; ++i) {
2199  unsigned offset = offsets[i];
2200  unsigned polarity, lane_number;
2201  unsigned t;
2202 
2203  for (t = 0; t < dsi->num_lanes_supported; ++t)
2204  if (dsi->lanes[t].function == functions[i])
2205  break;
2206 
2207  if (t == dsi->num_lanes_supported)
2208  return -EINVAL;
2209 
2210  lane_number = t;
2211  polarity = dsi->lanes[t].polarity;
2212 
2213  r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
2214  r = FLD_MOD(r, polarity, offset + 3, offset + 3);
2215  }
2216 
2217  /* clear the unused lanes */
2218  for (; i < dsi->num_lanes_supported; ++i) {
2219  unsigned offset = offsets[i];
2220 
2221  r = FLD_MOD(r, 0, offset + 2, offset);
2222  r = FLD_MOD(r, 0, offset + 3, offset + 3);
2223  }
2224 
2225  dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
2226 
2227  return 0;
2228 }
2229 
2230 static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
2231 {
2232  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2233 
2234  /* convert time in ns to ddr ticks, rounding up */
2235  unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2236  return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
2237 }
2238 
2239 static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
2240 {
2241  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2242 
2243  unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2244  return ddr * 1000 * 1000 / (ddr_clk / 1000);
2245 }
2246 
2247 static void dsi_cio_timings(struct platform_device *dsidev)
2248 {
2249  u32 r;
2250  u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
2251  u32 tlpx_half, tclk_trail, tclk_zero;
2252  u32 tclk_prepare;
2253 
2254  /* calculate timings */
2255 
2256  /* 1 * DDR_CLK = 2 * UI */
2257 
2258  /* min 40ns + 4*UI max 85ns + 6*UI */
2259  ths_prepare = ns2ddr(dsidev, 70) + 2;
2260 
2261  /* min 145ns + 10*UI */
2262  ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
2263 
2264  /* min max(8*UI, 60ns+4*UI) */
2265  ths_trail = ns2ddr(dsidev, 60) + 5;
2266 
2267  /* min 100ns */
2268  ths_exit = ns2ddr(dsidev, 145);
2269 
2270  /* tlpx min 50n */
2271  tlpx_half = ns2ddr(dsidev, 25);
2272 
2273  /* min 60ns */
2274  tclk_trail = ns2ddr(dsidev, 60) + 2;
2275 
2276  /* min 38ns, max 95ns */
2277  tclk_prepare = ns2ddr(dsidev, 65);
2278 
2279  /* min tclk-prepare + tclk-zero = 300ns */
2280  tclk_zero = ns2ddr(dsidev, 260);
2281 
2282  DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
2283  ths_prepare, ddr2ns(dsidev, ths_prepare),
2284  ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
2285  DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
2286  ths_trail, ddr2ns(dsidev, ths_trail),
2287  ths_exit, ddr2ns(dsidev, ths_exit));
2288 
2289  DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
2290  "tclk_zero %u (%uns)\n",
2291  tlpx_half, ddr2ns(dsidev, tlpx_half),
2292  tclk_trail, ddr2ns(dsidev, tclk_trail),
2293  tclk_zero, ddr2ns(dsidev, tclk_zero));
2294  DSSDBG("tclk_prepare %u (%uns)\n",
2295  tclk_prepare, ddr2ns(dsidev, tclk_prepare));
2296 
2297  /* program timings */
2298 
2299  r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
2300  r = FLD_MOD(r, ths_prepare, 31, 24);
2301  r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
2302  r = FLD_MOD(r, ths_trail, 15, 8);
2303  r = FLD_MOD(r, ths_exit, 7, 0);
2304  dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
2305 
2306  r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
2307  r = FLD_MOD(r, tlpx_half, 20, 16);
2308  r = FLD_MOD(r, tclk_trail, 15, 8);
2309  r = FLD_MOD(r, tclk_zero, 7, 0);
2310 
2312  r = FLD_MOD(r, 0, 21, 21); /* DCCEN = disable */
2313  r = FLD_MOD(r, 1, 22, 22); /* CLKINP_DIVBY2EN = enable */
2314  r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
2315  }
2316 
2317  dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
2318 
2319  r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
2320  r = FLD_MOD(r, tclk_prepare, 7, 0);
2321  dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
2322 }
2323 
2324 /* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
2325 static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
2326  unsigned mask_p, unsigned mask_n)
2327 {
2328  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2329  int i;
2330  u32 l;
2331  u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
2332 
2333  l = 0;
2334 
2335  for (i = 0; i < dsi->num_lanes_supported; ++i) {
2336  unsigned p = dsi->lanes[i].polarity;
2337 
2338  if (mask_p & (1 << i))
2339  l |= 1 << (i * 2 + (p ? 0 : 1));
2340 
2341  if (mask_n & (1 << i))
2342  l |= 1 << (i * 2 + (p ? 1 : 0));
2343  }
2344 
2345  /*
2346  * Bits in REGLPTXSCPDAT4TO0DXDY:
2347  * 17: DY0 18: DX0
2348  * 19: DY1 20: DX1
2349  * 21: DY2 22: DX2
2350  * 23: DY3 24: DX3
2351  * 25: DY4 26: DX4
2352  */
2353 
2354  /* Set the lane override configuration */
2355 
2356  /* REGLPTXSCPDAT4TO0DXDY */
2357  REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
2358 
2359  /* Enable lane override */
2360 
2361  /* ENLPTXSCPDAT */
2362  REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
2363 }
2364 
2365 static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
2366 {
2367  /* Disable lane override */
2368  REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2369  /* Reset the lane override configuration */
2370  /* REGLPTXSCPDAT4TO0DXDY */
2371  REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2372 }
2373 
2374 static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
2375 {
2376  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2377  int t, i;
2378  bool in_use[DSI_MAX_NR_LANES];
2379  static const u8 offsets_old[] = { 28, 27, 26 };
2380  static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
2381  const u8 *offsets;
2382 
2384  offsets = offsets_old;
2385  else
2386  offsets = offsets_new;
2387 
2388  for (i = 0; i < dsi->num_lanes_supported; ++i)
2389  in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;
2390 
2391  t = 100000;
2392  while (true) {
2393  u32 l;
2394  int ok;
2395 
2396  l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2397 
2398  ok = 0;
2399  for (i = 0; i < dsi->num_lanes_supported; ++i) {
2400  if (!in_use[i] || (l & (1 << offsets[i])))
2401  ok++;
2402  }
2403 
2404  if (ok == dsi->num_lanes_supported)
2405  break;
2406 
2407  if (--t == 0) {
2408  for (i = 0; i < dsi->num_lanes_supported; ++i) {
2409  if (!in_use[i] || (l & (1 << offsets[i])))
2410  continue;
2411 
2412  DSSERR("CIO TXCLKESC%d domain not coming " \
2413  "out of reset\n", i);
2414  }
2415  return -EIO;
2416  }
2417  }
2418 
2419  return 0;
2420 }
2421 
2422 /* return bitmask of enabled lanes, lane0 being the lsb */
2423 static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
2424 {
2425  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2426  unsigned mask = 0;
2427  int i;
2428 
2429  for (i = 0; i < dsi->num_lanes_supported; ++i) {
2430  if (dsi->lanes[i].function != DSI_LANE_UNUSED)
2431  mask |= 1 << i;
2432  }
2433 
2434  return mask;
2435 }
2436 
2437 static int dsi_cio_init(struct platform_device *dsidev)
2438 {
2439  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2440  int r;
2441  u32 l;
2442 
2443  DSSDBGF();
2444 
2445  r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2446  if (r)
2447  return r;
2448 
2449  dsi_enable_scp_clk(dsidev);
2450 
2451  /* A dummy read using the SCP interface to any DSIPHY register is
2452  * required after DSIPHY reset to complete the reset of the DSI complex
2453  * I/O. */
2454  dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2455 
2456  if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
2457  DSSERR("CIO SCP Clock domain not coming out of reset.\n");
2458  r = -EIO;
2459  goto err_scp_clk_dom;
2460  }
2461 
2462  r = dsi_set_lane_config(dsidev);
2463  if (r)
2464  goto err_scp_clk_dom;
2465 
2466  /* set TX STOP MODE timer to maximum for this operation */
2467  l = dsi_read_reg(dsidev, DSI_TIMING1);
2468  l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2469  l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
2470  l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
2471  l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
2472  dsi_write_reg(dsidev, DSI_TIMING1, l);
2473 
2474  if (dsi->ulps_enabled) {
2475  unsigned mask_p;
2476  int i;
2477 
2478  DSSDBG("manual ulps exit\n");
2479 
2480  /* ULPS is exited by Mark-1 state for 1ms, followed by
2481  * stop state. DSS HW cannot do this via the normal
2482  * ULPS exit sequence, as after reset the DSS HW thinks
2483  * that we are not in ULPS mode, and refuses to send the
2484  * sequence. So we need to send the ULPS exit sequence
2485  * manually by setting positive lines high and negative lines
2486  * low for 1ms.
2487  */
2488 
2489  mask_p = 0;
2490 
2491  for (i = 0; i < dsi->num_lanes_supported; ++i) {
2492  if (dsi->lanes[i].function == DSI_LANE_UNUSED)
2493  continue;
2494  mask_p |= 1 << i;
2495  }
2496 
2497  dsi_cio_enable_lane_override(dsidev, mask_p, 0);
2498  }
2499 
2500  r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
2501  if (r)
2502  goto err_cio_pwr;
2503 
2504  if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
2505  DSSERR("CIO PWR clock domain not coming out of reset.\n");
2506  r = -ENODEV;
2507  goto err_cio_pwr_dom;
2508  }
2509 
2510  dsi_if_enable(dsidev, true);
2511  dsi_if_enable(dsidev, false);
2512  REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
2513 
2514  r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
2515  if (r)
2516  goto err_tx_clk_esc_rst;
2517 
2518  if (dsi->ulps_enabled) {
2519  /* Keep Mark-1 state for 1ms (as per DSI spec) */
2520  ktime_t wait = ns_to_ktime(1000 * 1000);
2523 
2524  /* Disable the override. The lanes should be set to Mark-11
2525  * state by the HW */
2526  dsi_cio_disable_lane_override(dsidev);
2527  }
2528 
2529  /* FORCE_TX_STOP_MODE_IO */
2530  REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
2531 
2532  dsi_cio_timings(dsidev);
2533 
2534  if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
2535  /* DDR_CLK_ALWAYS_ON */
2536  REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
2537  dsi->vm_timings.ddr_clk_always_on, 13, 13);
2538  }
2539 
2540  dsi->ulps_enabled = false;
2541 
2542  DSSDBG("CIO init done\n");
2543 
2544  return 0;
2545 
2546 err_tx_clk_esc_rst:
2547  REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2548 err_cio_pwr_dom:
2549  dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2550 err_cio_pwr:
2551  if (dsi->ulps_enabled)
2552  dsi_cio_disable_lane_override(dsidev);
2553 err_scp_clk_dom:
2554  dsi_disable_scp_clk(dsidev);
2555  dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2556  return r;
2557 }
2558 
2559 static void dsi_cio_uninit(struct platform_device *dsidev)
2560 {
2561  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2562 
2563  /* DDR_CLK_ALWAYS_ON */
2564  REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
2565 
2566  dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2567  dsi_disable_scp_clk(dsidev);
2568  dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
2569 }
2570 
2571 static void dsi_config_tx_fifo(struct platform_device *dsidev,
2572  enum fifo_size size1, enum fifo_size size2,
2573  enum fifo_size size3, enum fifo_size size4)
2574 {
2575  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2576  u32 r = 0;
2577  int add = 0;
2578  int i;
2579 
2580  dsi->vc[0].fifo_size = size1;
2581  dsi->vc[1].fifo_size = size2;
2582  dsi->vc[2].fifo_size = size3;
2583  dsi->vc[3].fifo_size = size4;
2584 
2585  for (i = 0; i < 4; i++) {
2586  u8 v;
2587  int size = dsi->vc[i].fifo_size;
2588 
2589  if (add + size > 4) {
2590  DSSERR("Illegal FIFO configuration\n");
2591  BUG();
2592  return;
2593  }
2594 
2595  v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2596  r |= v << (8 * i);
2597  /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
2598  add += size;
2599  }
2600 
2601  dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
2602 }
2603 
2604 static void dsi_config_rx_fifo(struct platform_device *dsidev,
2605  enum fifo_size size1, enum fifo_size size2,
2606  enum fifo_size size3, enum fifo_size size4)
2607 {
2608  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2609  u32 r = 0;
2610  int add = 0;
2611  int i;
2612 
2613  dsi->vc[0].fifo_size = size1;
2614  dsi->vc[1].fifo_size = size2;
2615  dsi->vc[2].fifo_size = size3;
2616  dsi->vc[3].fifo_size = size4;
2617 
2618  for (i = 0; i < 4; i++) {
2619  u8 v;
2620  int size = dsi->vc[i].fifo_size;
2621 
2622  if (add + size > 4) {
2623  DSSERR("Illegal FIFO configuration\n");
2624  BUG();
2625  return;
2626  }
2627 
2628  v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2629  r |= v << (8 * i);
2630  /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
2631  add += size;
2632  }
2633 
2634  dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
2635 }
2636 
2637 static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2638 {
2639  u32 r;
2640 
2641  r = dsi_read_reg(dsidev, DSI_TIMING1);
2642  r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2643  dsi_write_reg(dsidev, DSI_TIMING1, r);
2644 
2645  if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2646  DSSERR("TX_STOP bit not going down\n");
2647  return -EIO;
2648  }
2649 
2650  return 0;
2651 }
2652 
2653 static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2654 {
2655  return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
2656 }
2657 
2658 static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2659 {
2660  struct dsi_packet_sent_handler_data *vp_data =
2661  (struct dsi_packet_sent_handler_data *) data;
2662  struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2663  const int channel = dsi->update_channel;
2664  u8 bit = dsi->te_enabled ? 30 : 31;
2665 
2666  if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2667  complete(vp_data->completion);
2668 }
2669 
2670 static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2671 {
2672  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2674  struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
2675  int r = 0;
2676  u8 bit;
2677 
2678  bit = dsi->te_enabled ? 30 : 31;
2679 
2680  r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2681  &vp_data, DSI_VC_IRQ_PACKET_SENT);
2682  if (r)
2683  goto err0;
2684 
2685  /* Wait for completion only if TE_EN/TE_START is still set */
2686  if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2688  msecs_to_jiffies(10)) == 0) {
2689  DSSERR("Failed to complete previous frame transfer\n");
2690  r = -EIO;
2691  goto err1;
2692  }
2693  }
2694 
2695  dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2696  &vp_data, DSI_VC_IRQ_PACKET_SENT);
2697 
2698  return 0;
2699 err1:
2700  dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2701  &vp_data, DSI_VC_IRQ_PACKET_SENT);
2702 err0:
2703  return r;
2704 }
2705 
2706 static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2707 {
2708  struct dsi_packet_sent_handler_data *l4_data =
2709  (struct dsi_packet_sent_handler_data *) data;
2710  struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2711  const int channel = dsi->update_channel;
2712 
2713  if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2714  complete(l4_data->completion);
2715 }
2716 
2717 static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2718 {
2720  struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
2721  int r = 0;
2722 
2723  r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2724  &l4_data, DSI_VC_IRQ_PACKET_SENT);
2725  if (r)
2726  goto err0;
2727 
2728  /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2729  if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2731  msecs_to_jiffies(10)) == 0) {
2732  DSSERR("Failed to complete previous l4 transfer\n");
2733  r = -EIO;
2734  goto err1;
2735  }
2736  }
2737 
2738  dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2739  &l4_data, DSI_VC_IRQ_PACKET_SENT);
2740 
2741  return 0;
2742 err1:
2743  dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2744  &l4_data, DSI_VC_IRQ_PACKET_SENT);
2745 err0:
2746  return r;
2747 }
2748 
2749 static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2750 {
2751  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2752 
2753  WARN_ON(!dsi_bus_is_locked(dsidev));
2754 
2755  WARN_ON(in_interrupt());
2756 
2757  if (!dsi_vc_is_enabled(dsidev, channel))
2758  return 0;
2759 
2760  switch (dsi->vc[channel].source) {
2761  case DSI_VC_SOURCE_VP:
2762  return dsi_sync_vc_vp(dsidev, channel);
2763  case DSI_VC_SOURCE_L4:
2764  return dsi_sync_vc_l4(dsidev, channel);
2765  default:
2766  BUG();
2767  return -EINVAL;
2768  }
2769 }
2770 
2771 static int dsi_vc_enable(struct platform_device *dsidev, int channel,
2772  bool enable)
2773 {
2774  DSSDBG("dsi_vc_enable channel %d, enable %d\n",
2775  channel, enable);
2776 
2777  enable = enable ? 1 : 0;
2778 
2779  REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
2780 
2781  if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
2782  0, enable) != enable) {
2783  DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
2784  return -EIO;
2785  }
2786 
2787  return 0;
2788 }
2789 
2790 static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2791 {
2792  u32 r;
2793 
2794  DSSDBGF("%d", channel);
2795 
2796  r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2797 
2798  if (FLD_GET(r, 15, 15)) /* VC_BUSY */
2799  DSSERR("VC(%d) busy when trying to configure it!\n",
2800  channel);
2801 
2802  r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
2803  r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
2804  r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
2805  r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
2806  r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
2807  r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
2808  r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2810  r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
2811 
2812  r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
2813  r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
2814 
2815  dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2816 }
2817 
2818 static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
2819  enum dsi_vc_source source)
2820 {
2821  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2822 
2823  if (dsi->vc[channel].source == source)
2824  return 0;
2825 
2826  DSSDBGF("%d", channel);
2827 
2828  dsi_sync_vc(dsidev, channel);
2829 
2830  dsi_vc_enable(dsidev, channel, 0);
2831 
2832  /* VC_BUSY */
2833  if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2834  DSSERR("vc(%d) busy when trying to config for VP\n", channel);
2835  return -EIO;
2836  }
2837 
2838  /* SOURCE, 0 = L4, 1 = video port */
2839  REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
2840 
2841  /* DCS_CMD_ENABLE */
2843  bool enable = source == DSI_VC_SOURCE_VP;
2844  REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
2845  }
2846 
2847  dsi_vc_enable(dsidev, channel, 1);
2848 
2849  dsi->vc[channel].source = source;
2850 
2851  return 0;
2852 }
2853 
2854 void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2855  bool enable)
2856 {
2857  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2858  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2859 
2860  DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2861 
2862  WARN_ON(!dsi_bus_is_locked(dsidev));
2863 
2864  dsi_vc_enable(dsidev, channel, 0);
2865  dsi_if_enable(dsidev, 0);
2866 
2867  REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
2868 
2869  dsi_vc_enable(dsidev, channel, 1);
2870  dsi_if_enable(dsidev, 1);
2871 
2872  dsi_force_tx_stop_mode_io(dsidev);
2873 
2874  /* start the DDR clock by sending a NULL packet */
2875  if (dsi->vm_timings.ddr_clk_always_on && enable)
2876  dsi_vc_send_null(dssdev, channel);
2877 }
2879 
2880 static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
2881 {
2882  while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2883  u32 val;
2884  val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2885  DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
2886  (val >> 0) & 0xff,
2887  (val >> 8) & 0xff,
2888  (val >> 16) & 0xff,
2889  (val >> 24) & 0xff);
2890  }
2891 }
2892 
2893 static void dsi_show_rx_ack_with_err(u16 err)
2894 {
2895  DSSERR("\tACK with ERROR (%#x):\n", err);
2896  if (err & (1 << 0))
2897  DSSERR("\t\tSoT Error\n");
2898  if (err & (1 << 1))
2899  DSSERR("\t\tSoT Sync Error\n");
2900  if (err & (1 << 2))
2901  DSSERR("\t\tEoT Sync Error\n");
2902  if (err & (1 << 3))
2903  DSSERR("\t\tEscape Mode Entry Command Error\n");
2904  if (err & (1 << 4))
2905  DSSERR("\t\tLP Transmit Sync Error\n");
2906  if (err & (1 << 5))
2907  DSSERR("\t\tHS Receive Timeout Error\n");
2908  if (err & (1 << 6))
2909  DSSERR("\t\tFalse Control Error\n");
2910  if (err & (1 << 7))
2911  DSSERR("\t\t(reserved7)\n");
2912  if (err & (1 << 8))
2913  DSSERR("\t\tECC Error, single-bit (corrected)\n");
2914  if (err & (1 << 9))
2915  DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
2916  if (err & (1 << 10))
2917  DSSERR("\t\tChecksum Error\n");
2918  if (err & (1 << 11))
2919  DSSERR("\t\tData type not recognized\n");
2920  if (err & (1 << 12))
2921  DSSERR("\t\tInvalid VC ID\n");
2922  if (err & (1 << 13))
2923  DSSERR("\t\tInvalid Transmission Length\n");
2924  if (err & (1 << 14))
2925  DSSERR("\t\t(reserved14)\n");
2926  if (err & (1 << 15))
2927  DSSERR("\t\tDSI Protocol Violation\n");
2928 }
2929 
2930 static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2931  int channel)
2932 {
2933  /* RX_FIFO_NOT_EMPTY */
2934  while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2935  u32 val;
2936  u8 dt;
2937  val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2938  DSSERR("\trawval %#08x\n", val);
2939  dt = FLD_GET(val, 5, 0);
2941  u16 err = FLD_GET(val, 23, 8);
2942  dsi_show_rx_ack_with_err(err);
2943  } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
2944  DSSERR("\tDCS short response, 1 byte: %#x\n",
2945  FLD_GET(val, 23, 8));
2946  } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
2947  DSSERR("\tDCS short response, 2 byte: %#x\n",
2948  FLD_GET(val, 23, 8));
2949  } else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
2950  DSSERR("\tDCS long response, len %d\n",
2951  FLD_GET(val, 23, 8));
2952  dsi_vc_flush_long_data(dsidev, channel);
2953  } else {
2954  DSSERR("\tunknown datatype 0x%02x\n", dt);
2955  }
2956  }
2957  return 0;
2958 }
2959 
2960 static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2961 {
2962  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2963 
2964  if (dsi->debug_write || dsi->debug_read)
2965  DSSDBG("dsi_vc_send_bta %d\n", channel);
2966 
2967  WARN_ON(!dsi_bus_is_locked(dsidev));
2968 
2969  /* RX_FIFO_NOT_EMPTY */
2970  if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2971  DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2972  dsi_vc_flush_receive_data(dsidev, channel);
2973  }
2974 
2975  REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
2976 
2977  /* flush posted write */
2978  dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2979 
2980  return 0;
2981 }
2982 
2983 int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
2984 {
2985  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2987  int r = 0;
2988  u32 err;
2989 
2990  r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2992  if (r)
2993  goto err0;
2994 
2995  r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2997  if (r)
2998  goto err1;
2999 
3000  r = dsi_vc_send_bta(dsidev, channel);
3001  if (r)
3002  goto err2;
3003 
3005  msecs_to_jiffies(500)) == 0) {
3006  DSSERR("Failed to receive BTA\n");
3007  r = -EIO;
3008  goto err2;
3009  }
3010 
3011  err = dsi_get_errors(dsidev);
3012  if (err) {
3013  DSSERR("Error while sending BTA: %x\n", err);
3014  r = -EIO;
3015  goto err2;
3016  }
3017 err2:
3018  dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
3020 err1:
3021  dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
3023 err0:
3024  return r;
3025 }
3027 
3028 static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
3029  int channel, u8 data_type, u16 len, u8 ecc)
3030 {
3031  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3032  u32 val;
3033  u8 data_id;
3034 
3035  WARN_ON(!dsi_bus_is_locked(dsidev));
3036 
3037  data_id = data_type | dsi->vc[channel].vc_id << 6;
3038 
3039  val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
3040  FLD_VAL(ecc, 31, 24);
3041 
3042  dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
3043 }
3044 
3045 static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
3046  int channel, u8 b1, u8 b2, u8 b3, u8 b4)
3047 {
3048  u32 val;
3049 
3050  val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
3051 
3052 /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
3053  b1, b2, b3, b4, val); */
3054 
3055  dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
3056 }
3057 
3058 static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
3059  u8 data_type, u8 *data, u16 len, u8 ecc)
3060 {
3061  /*u32 val; */
3062  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3063  int i;
3064  u8 *p;
3065  int r = 0;
3066  u8 b1, b2, b3, b4;
3067 
3068  if (dsi->debug_write)
3069  DSSDBG("dsi_vc_send_long, %d bytes\n", len);
3070 
3071  /* len + header */
3072  if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
3073  DSSERR("unable to send long packet: packet too long.\n");
3074  return -EINVAL;
3075  }
3076 
3077  dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
3078 
3079  dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
3080 
3081  p = data;
3082  for (i = 0; i < len >> 2; i++) {
3083  if (dsi->debug_write)
3084  DSSDBG("\tsending full packet %d\n", i);
3085 
3086  b1 = *p++;
3087  b2 = *p++;
3088  b3 = *p++;
3089  b4 = *p++;
3090 
3091  dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
3092  }
3093 
3094  i = len % 4;
3095  if (i) {
3096  b1 = 0; b2 = 0; b3 = 0;
3097 
3098  if (dsi->debug_write)
3099  DSSDBG("\tsending remainder bytes %d\n", i);
3100 
3101  switch (i) {
3102  case 3:
3103  b1 = *p++;
3104  b2 = *p++;
3105  b3 = *p++;
3106  break;
3107  case 2:
3108  b1 = *p++;
3109  b2 = *p++;
3110  break;
3111  case 1:
3112  b1 = *p++;
3113  break;
3114  }
3115 
3116  dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
3117  }
3118 
3119  return r;
3120 }
3121 
3122 static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
3123  u8 data_type, u16 data, u8 ecc)
3124 {
3125  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3126  u32 r;
3127  u8 data_id;
3128 
3129  WARN_ON(!dsi_bus_is_locked(dsidev));
3130 
3131  if (dsi->debug_write)
3132  DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
3133  channel,
3134  data_type, data & 0xff, (data >> 8) & 0xff);
3135 
3136  dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
3137 
3138  if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
3139  DSSERR("ERROR FIFO FULL, aborting transfer\n");
3140  return -EINVAL;
3141  }
3142 
3143  data_id = data_type | dsi->vc[channel].vc_id << 6;
3144 
3145  r = (data_id << 0) | (data << 8) | (ecc << 24);
3146 
3147  dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
3148 
3149  return 0;
3150 }
3151 
3152 int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
3153 {
3154  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3155 
3156  return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
3157  0, 0);
3158 }
3160 
3161 static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
3162  int channel, u8 *data, int len, enum dss_dsi_content_type type)
3163 {
3164  int r;
3165 
3166  if (len == 0) {
3167  BUG_ON(type == DSS_DSI_CONTENT_DCS);
3168  r = dsi_vc_send_short(dsidev, channel,
3170  } else if (len == 1) {
3171  r = dsi_vc_send_short(dsidev, channel,
3172  type == DSS_DSI_CONTENT_GENERIC ?
3174  MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
3175  } else if (len == 2) {
3176  r = dsi_vc_send_short(dsidev, channel,
3177  type == DSS_DSI_CONTENT_GENERIC ?
3180  data[0] | (data[1] << 8), 0);
3181  } else {
3182  r = dsi_vc_send_long(dsidev, channel,
3183  type == DSS_DSI_CONTENT_GENERIC ?
3185  MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
3186  }
3187 
3188  return r;
3189 }
3190 
3191 int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
3192  u8 *data, int len)
3193 {
3194  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3195 
3196  return dsi_vc_write_nosync_common(dsidev, channel, data, len,
3198 }
3200 
3201 int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
3202  u8 *data, int len)
3203 {
3204  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3205 
3206  return dsi_vc_write_nosync_common(dsidev, channel, data, len,
3208 }
3210 
3211 static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
3212  u8 *data, int len, enum dss_dsi_content_type type)
3213 {
3214  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3215  int r;
3216 
3217  r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
3218  if (r)
3219  goto err;
3220 
3221  r = dsi_vc_send_bta_sync(dssdev, channel);
3222  if (r)
3223  goto err;
3224 
3225  /* RX_FIFO_NOT_EMPTY */
3226  if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
3227  DSSERR("rx fifo not empty after write, dumping data:\n");
3228  dsi_vc_flush_receive_data(dsidev, channel);
3229  r = -EIO;
3230  goto err;
3231  }
3232 
3233  return 0;
3234 err:
3235  DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
3236  channel, data[0], len);
3237  return r;
3238 }
3239 
3240 int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3241  int len)
3242 {
3243  return dsi_vc_write_common(dssdev, channel, data, len,
3245 }
3247 
3248 int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3249  int len)
3250 {
3251  return dsi_vc_write_common(dssdev, channel, data, len,
3253 }
3255 
3256 int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
3257 {
3258  return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
3259 }
3261 
3262 int dsi_vc_generic_write_0(struct omap_dss_device *dssdev, int channel)
3263 {
3264  return dsi_vc_generic_write(dssdev, channel, NULL, 0);
3265 }
3267 
3268 int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3269  u8 param)
3270 {
3271  u8 buf[2];
3272  buf[0] = dcs_cmd;
3273  buf[1] = param;
3274  return dsi_vc_dcs_write(dssdev, channel, buf, 2);
3275 }
3277 
3278 int dsi_vc_generic_write_1(struct omap_dss_device *dssdev, int channel,
3279  u8 param)
3280 {
3281  return dsi_vc_generic_write(dssdev, channel, &param, 1);
3282 }
3284 
3285 int dsi_vc_generic_write_2(struct omap_dss_device *dssdev, int channel,
3286  u8 param1, u8 param2)
3287 {
3288  u8 buf[2];
3289  buf[0] = param1;
3290  buf[1] = param2;
3291  return dsi_vc_generic_write(dssdev, channel, buf, 2);
3292 }
3294 
3295 static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
3296  int channel, u8 dcs_cmd)
3297 {
3298  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3299  int r;
3300 
3301  if (dsi->debug_read)
3302  DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
3303  channel, dcs_cmd);
3304 
3305  r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
3306  if (r) {
3307  DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
3308  " failed\n", channel, dcs_cmd);
3309  return r;
3310  }
3311 
3312  return 0;
3313 }
3314 
3315 static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
3316  int channel, u8 *reqdata, int reqlen)
3317 {
3318  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3319  u16 data;
3320  u8 data_type;
3321  int r;
3322 
3323  if (dsi->debug_read)
3324  DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
3325  channel, reqlen);
3326 
3327  if (reqlen == 0) {
3329  data = 0;
3330  } else if (reqlen == 1) {
3332  data = reqdata[0];
3333  } else if (reqlen == 2) {
3335  data = reqdata[0] | (reqdata[1] << 8);
3336  } else {
3337  BUG();
3338  return -EINVAL;
3339  }
3340 
3341  r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
3342  if (r) {
3343  DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
3344  " failed\n", channel, reqlen);
3345  return r;
3346  }
3347 
3348  return 0;
3349 }
3350 
3351 static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
3352  u8 *buf, int buflen, enum dss_dsi_content_type type)
3353 {
3354  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3355  u32 val;
3356  u8 dt;
3357  int r;
3358 
3359  /* RX_FIFO_NOT_EMPTY */
3360  if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
3361  DSSERR("RX fifo empty when trying to read.\n");
3362  r = -EIO;
3363  goto err;
3364  }
3365 
3366  val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
3367  if (dsi->debug_read)
3368  DSSDBG("\theader: %08x\n", val);
3369  dt = FLD_GET(val, 5, 0);
3371  u16 err = FLD_GET(val, 23, 8);
3372  dsi_show_rx_ack_with_err(err);
3373  r = -EIO;
3374  goto err;
3375 
3376  } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3379  u8 data = FLD_GET(val, 15, 8);
3380  if (dsi->debug_read)
3381  DSSDBG("\t%s short response, 1 byte: %02x\n",
3382  type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3383  "DCS", data);
3384 
3385  if (buflen < 1) {
3386  r = -EIO;
3387  goto err;
3388  }
3389 
3390  buf[0] = data;
3391 
3392  return 1;
3393  } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3396  u16 data = FLD_GET(val, 23, 8);
3397  if (dsi->debug_read)
3398  DSSDBG("\t%s short response, 2 byte: %04x\n",
3399  type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3400  "DCS", data);
3401 
3402  if (buflen < 2) {
3403  r = -EIO;
3404  goto err;
3405  }
3406 
3407  buf[0] = data & 0xff;
3408  buf[1] = (data >> 8) & 0xff;
3409 
3410  return 2;
3411  } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3414  int w;
3415  int len = FLD_GET(val, 23, 8);
3416  if (dsi->debug_read)
3417  DSSDBG("\t%s long response, len %d\n",
3418  type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3419  "DCS", len);
3420 
3421  if (len > buflen) {
3422  r = -EIO;
3423  goto err;
3424  }
3425 
3426  /* two byte checksum ends the packet, not included in len */
3427  for (w = 0; w < len + 2;) {
3428  int b;
3429  val = dsi_read_reg(dsidev,
3430  DSI_VC_SHORT_PACKET_HEADER(channel));
3431  if (dsi->debug_read)
3432  DSSDBG("\t\t%02x %02x %02x %02x\n",
3433  (val >> 0) & 0xff,
3434  (val >> 8) & 0xff,
3435  (val >> 16) & 0xff,
3436  (val >> 24) & 0xff);
3437 
3438  for (b = 0; b < 4; ++b) {
3439  if (w < len)
3440  buf[w] = (val >> (b * 8)) & 0xff;
3441  /* we discard the 2 byte checksum */
3442  ++w;
3443  }
3444  }
3445 
3446  return len;
3447  } else {
3448  DSSERR("\tunknown datatype 0x%02x\n", dt);
3449  r = -EIO;
3450  goto err;
3451  }
3452 
3453 err:
3454  DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
3455  type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
3456 
3457  return r;
3458 }
3459 
3460 int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3461  u8 *buf, int buflen)
3462 {
3463  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3464  int r;
3465 
3466  r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
3467  if (r)
3468  goto err;
3469 
3470  r = dsi_vc_send_bta_sync(dssdev, channel);
3471  if (r)
3472  goto err;
3473 
3474  r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
3476  if (r < 0)
3477  goto err;
3478 
3479  if (r != buflen) {
3480  r = -EIO;
3481  goto err;
3482  }
3483 
3484  return 0;
3485 err:
3486  DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
3487  return r;
3488 }
3490 
3491 static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
3492  u8 *reqdata, int reqlen, u8 *buf, int buflen)
3493 {
3494  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3495  int r;
3496 
3497  r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
3498  if (r)
3499  return r;
3500 
3501  r = dsi_vc_send_bta_sync(dssdev, channel);
3502  if (r)
3503  return r;
3504 
3505  r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
3507  if (r < 0)
3508  return r;
3509 
3510  if (r != buflen) {
3511  r = -EIO;
3512  return r;
3513  }
3514 
3515  return 0;
3516 }
3517 
3518 int dsi_vc_generic_read_0(struct omap_dss_device *dssdev, int channel, u8 *buf,
3519  int buflen)
3520 {
3521  int r;
3522 
3523  r = dsi_vc_generic_read(dssdev, channel, NULL, 0, buf, buflen);
3524  if (r) {
3525  DSSERR("dsi_vc_generic_read_0(ch %d) failed\n", channel);
3526  return r;
3527  }
3528 
3529  return 0;
3530 }
3532 
3533 int dsi_vc_generic_read_1(struct omap_dss_device *dssdev, int channel, u8 param,
3534  u8 *buf, int buflen)
3535 {
3536  int r;
3537 
3538  r = dsi_vc_generic_read(dssdev, channel, &param, 1, buf, buflen);
3539  if (r) {
3540  DSSERR("dsi_vc_generic_read_1(ch %d) failed\n", channel);
3541  return r;
3542  }
3543 
3544  return 0;
3545 }
3547 
3548 int dsi_vc_generic_read_2(struct omap_dss_device *dssdev, int channel,
3549  u8 param1, u8 param2, u8 *buf, int buflen)
3550 {
3551  int r;
3552  u8 reqdata[2];
3553 
3554  reqdata[0] = param1;
3555  reqdata[1] = param2;
3556 
3557  r = dsi_vc_generic_read(dssdev, channel, reqdata, 2, buf, buflen);
3558  if (r) {
3559  DSSERR("dsi_vc_generic_read_2(ch %d) failed\n", channel);
3560  return r;
3561  }
3562 
3563  return 0;
3564 }
3566 
3567 int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
3568  u16 len)
3569 {
3570  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3571 
3572  return dsi_vc_send_short(dsidev, channel,
3574 }
3576 
3577 static int dsi_enter_ulps(struct platform_device *dsidev)
3578 {
3579  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3581  int r, i;
3582  unsigned mask;
3583 
3584  DSSDBGF();
3585 
3586  WARN_ON(!dsi_bus_is_locked(dsidev));
3587 
3588  WARN_ON(dsi->ulps_enabled);
3589 
3590  if (dsi->ulps_enabled)
3591  return 0;
3592 
3593  /* DDR_CLK_ALWAYS_ON */
3594  if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3595  dsi_if_enable(dsidev, 0);
3596  REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
3597  dsi_if_enable(dsidev, 1);
3598  }
3599 
3600  dsi_sync_vc(dsidev, 0);
3601  dsi_sync_vc(dsidev, 1);
3602  dsi_sync_vc(dsidev, 2);
3603  dsi_sync_vc(dsidev, 3);
3604 
3605  dsi_force_tx_stop_mode_io(dsidev);
3606 
3607  dsi_vc_enable(dsidev, 0, false);
3608  dsi_vc_enable(dsidev, 1, false);
3609  dsi_vc_enable(dsidev, 2, false);
3610  dsi_vc_enable(dsidev, 3, false);
3611 
3612  if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
3613  DSSERR("HS busy when enabling ULPS\n");
3614  return -EIO;
3615  }
3616 
3617  if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
3618  DSSERR("LP busy when enabling ULPS\n");
3619  return -EIO;
3620  }
3621 
3622  r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3624  if (r)
3625  return r;
3626 
3627  mask = 0;
3628 
3629  for (i = 0; i < dsi->num_lanes_supported; ++i) {
3630  if (dsi->lanes[i].function == DSI_LANE_UNUSED)
3631  continue;
3632  mask |= 1 << i;
3633  }
3634  /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
3635  /* LANEx_ULPS_SIG2 */
3636  REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
3637 
3638  /* flush posted write and wait for SCP interface to finish the write */
3639  dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
3640 
3642  msecs_to_jiffies(1000)) == 0) {
3643  DSSERR("ULPS enable timeout\n");
3644  r = -EIO;
3645  goto err;
3646  }
3647 
3648  dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3650 
3651  /* Reset LANEx_ULPS_SIG2 */
3652  REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
3653 
3654  /* flush posted write and wait for SCP interface to finish the write */
3655  dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
3656 
3657  dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3658 
3659  dsi_if_enable(dsidev, false);
3660 
3661  dsi->ulps_enabled = true;
3662 
3663  return 0;
3664 
3665 err:
3666  dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3668  return r;
3669 }
3670 
3671 static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
3672  unsigned ticks, bool x4, bool x16)
3673 {
3674  unsigned long fck;
3675  unsigned long total_ticks;
3676  u32 r;
3677 
3678  BUG_ON(ticks > 0x1fff);
3679 
3680  /* ticks in DSI_FCK */
3681  fck = dsi_fclk_rate(dsidev);
3682 
3683  r = dsi_read_reg(dsidev, DSI_TIMING2);
3684  r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
3685  r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
3686  r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
3687  r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
3688  dsi_write_reg(dsidev, DSI_TIMING2, r);
3689 
3690  total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3691 
3692  DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3693  total_ticks,
3694  ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3695  (total_ticks * 1000) / (fck / 1000 / 1000));
3696 }
3697 
3698 static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
3699  bool x8, bool x16)
3700 {
3701  unsigned long fck;
3702  unsigned long total_ticks;
3703  u32 r;
3704 
3705  BUG_ON(ticks > 0x1fff);
3706 
3707  /* ticks in DSI_FCK */
3708  fck = dsi_fclk_rate(dsidev);
3709 
3710  r = dsi_read_reg(dsidev, DSI_TIMING1);
3711  r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
3712  r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
3713  r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
3714  r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
3715  dsi_write_reg(dsidev, DSI_TIMING1, r);
3716 
3717  total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
3718 
3719  DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
3720  total_ticks,
3721  ticks, x8 ? " x8" : "", x16 ? " x16" : "",
3722  (total_ticks * 1000) / (fck / 1000 / 1000));
3723 }
3724 
3725 static void dsi_set_stop_state_counter(struct platform_device *dsidev,
3726  unsigned ticks, bool x4, bool x16)
3727 {
3728  unsigned long fck;
3729  unsigned long total_ticks;
3730  u32 r;
3731 
3732  BUG_ON(ticks > 0x1fff);
3733 
3734  /* ticks in DSI_FCK */
3735  fck = dsi_fclk_rate(dsidev);
3736 
3737  r = dsi_read_reg(dsidev, DSI_TIMING1);
3738  r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
3739  r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
3740  r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
3741  r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
3742  dsi_write_reg(dsidev, DSI_TIMING1, r);
3743 
3744  total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3745 
3746  DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
3747  total_ticks,
3748  ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3749  (total_ticks * 1000) / (fck / 1000 / 1000));
3750 }
3751 
3752 static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3753  unsigned ticks, bool x4, bool x16)
3754 {
3755  unsigned long fck;
3756  unsigned long total_ticks;
3757  u32 r;
3758 
3759  BUG_ON(ticks > 0x1fff);
3760 
3761  /* ticks in TxByteClkHS */
3762  fck = dsi_get_txbyteclkhs(dsidev);
3763 
3764  r = dsi_read_reg(dsidev, DSI_TIMING2);
3765  r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
3766  r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
3767  r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
3768  r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
3769  dsi_write_reg(dsidev, DSI_TIMING2, r);
3770 
3771  total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3772 
3773  DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3774  total_ticks,
3775  ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3776  (total_ticks * 1000) / (fck / 1000 / 1000));
3777 }
3778 
3779 static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
3780 {
3781  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3782  int num_line_buffers;
3783 
3784  if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3785  int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3786  unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
3787  struct omap_video_timings *timings = &dsi->timings;
3788  /*
3789  * Don't use line buffers if width is greater than the video
3790  * port's line buffer size
3791  */
3792  if (line_buf_size <= timings->x_res * bpp / 8)
3793  num_line_buffers = 0;
3794  else
3795  num_line_buffers = 2;
3796  } else {
3797  /* Use maximum number of line buffers in command mode */
3798  num_line_buffers = 2;
3799  }
3800 
3801  /* LINE_BUFFER */
3802  REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
3803 }
3804 
3805 static void dsi_config_vp_sync_events(struct platform_device *dsidev)
3806 {
3807  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3808  bool vsync_end = dsi->vm_timings.vp_vsync_end;
3809  bool hsync_end = dsi->vm_timings.vp_hsync_end;
3810  u32 r;
3811 
3812  r = dsi_read_reg(dsidev, DSI_CTRL);
3813  r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
3814  r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */
3815  r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
3816  r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */
3817  r = FLD_MOD(r, vsync_end, 16, 16); /* VP_VSYNC_END */
3818  r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
3819  r = FLD_MOD(r, hsync_end, 18, 18); /* VP_HSYNC_END */
3820  dsi_write_reg(dsidev, DSI_CTRL, r);
3821 }
3822 
3823 static void dsi_config_blanking_modes(struct platform_device *dsidev)
3824 {
3825  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3826  int blanking_mode = dsi->vm_timings.blanking_mode;
3827  int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
3828  int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
3829  int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode;
3830  u32 r;
3831 
3832  /*
3833  * 0 = TX FIFO packets sent or LPS in corresponding blanking periods
3834  * 1 = Long blanking packets are sent in corresponding blanking periods
3835  */
3836  r = dsi_read_reg(dsidev, DSI_CTRL);
3837  r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */
3838  r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */
3839  r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */
3840  r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */
3841  dsi_write_reg(dsidev, DSI_CTRL, r);
3842 }
3843 
3844 /*
3845  * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
3846  * results in maximum transition time for data and clock lanes to enter and
3847  * exit HS mode. Hence, this is the scenario where the least amount of command
3848  * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
3849  * clock cycles that can be used to interleave command mode data in HS so that
3850  * all scenarios are satisfied.
3851  */
3852 static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
3853  int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
3854 {
3855  int transition;
3856 
3857  /*
3858  * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
3859  * time of data lanes only, if it isn't set, we need to consider HS
3860  * transition time of both data and clock lanes. HS transition time
3861  * of Scenario 3 is considered.
3862  */
3863  if (ddr_alwon) {
3864  transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
3865  } else {
3866  int trans1, trans2;
3867  trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
3868  trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
3869  enter_hs + 1;
3870  transition = max(trans1, trans2);
3871  }
3872 
3873  return blank > transition ? blank - transition : 0;
3874 }
3875 
3876 /*
3877  * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
3878  * results in maximum transition time for data lanes to enter and exit LP mode.
3879  * Hence, this is the scenario where the least amount of command mode data can
3880  * be interleaved. We program the minimum amount of bytes that can be
3881  * interleaved in LP so that all scenarios are satisfied.
3882  */
3883 static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
3884  int lp_clk_div, int tdsi_fclk)
3885 {
3886  int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */
3887  int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */
3888  int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */
3889  int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
3890  int lp_inter; /* cmd mode data that can be interleaved, in bytes */
3891 
3892  /* maximum LP transition time according to Scenario 1 */
3893  trans_lp = exit_hs + max(enter_hs, 2) + 1;
3894 
3895  /* CLKIN4DDR = 16 * TXBYTECLKHS */
3896  tlp_avail = thsbyte_clk * (blank - trans_lp);
3897 
3898  ttxclkesc = tdsi_fclk * lp_clk_div;
3899 
3900  lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
3901  26) / 16;
3902 
3903  return max(lp_inter, 0);
3904 }
3905 
/*
 * Compute and program how much HS/LP command mode data may be
 * interleaved into each video-mode blanking period (HSA, HFP, HBP and
 * the total blanking BLLP). All inputs are read back from registers
 * already programmed by dsi_proto_config()/dsi_proto_timings();
 * results go into DSI_VM_TIMING4..6.
 */
static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int blanking_mode;
	int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
	int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
	int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
	int tclk_trail, ths_exit, exiths_clk;
	bool ddr_alwon;
	struct omap_video_timings *timings = &dsi->timings;
	int bpp = dsi_get_pixel_size(dsi->pix_fmt);
	int ndl = dsi->num_lanes_used - 1; /* data lanes only */
	int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
	int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
	int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
	int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
	int bl_interleave_hs = 0, bl_interleave_lp = 0;
	u32 r;

	/* blanking-period behaviour bits, see dsi_config_blanking_modes() */
	r = dsi_read_reg(dsidev, DSI_CTRL);
	blanking_mode = FLD_GET(r, 20, 20);
	hfp_blanking_mode = FLD_GET(r, 21, 21);
	hbp_blanking_mode = FLD_GET(r, 22, 22);
	hsa_blanking_mode = FLD_GET(r, 23, 23);

	/* horizontal blanking widths, in TXBYTECLKHS */
	r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
	hbp = FLD_GET(r, 11, 0);
	hfp = FLD_GET(r, 23, 12);
	hsa = FLD_GET(r, 31, 24);

	r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
	ddr_clk_post = FLD_GET(r, 7, 0);
	ddr_clk_pre = FLD_GET(r, 15, 8);

	/* HS mode entry/exit latencies programmed by dsi_proto_timings() */
	r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
	exit_hs_mode_lat = FLD_GET(r, 15, 0);
	enter_hs_mode_lat = FLD_GET(r, 31, 16);

	r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
	lp_clk_div = FLD_GET(r, 12, 0);
	ddr_alwon = FLD_GET(r, 13, 13);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	ths_exit = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	tclk_trail = FLD_GET(r, 15, 8);

	exiths_clk = ths_exit + tclk_trail;

	width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
	/* total blanking: all three periods plus the pixel packet time */
	bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);

	/*
	 * Interleaving is only possible in periods where the TX FIFO may
	 * transmit (blanking mode bit clear); a set bit means long
	 * blanking packets are sent instead.
	 */
	if (!hsa_blanking_mode) {
		hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!hfp_blanking_mode) {
		hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!hbp_blanking_mode) {
		hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);

		hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!blanking_mode) {
		bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);

		bl_interleave_lp = dsi_compute_interleave_lp(bllp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
		hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
		bl_interleave_hs);

	DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
		hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
		bl_interleave_lp);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
	r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
	r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
	r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING4, r);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
	r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
	r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
	r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING5, r);

	/*
	 * NOTE(review): the 31:15 and 16:0 field ranges below overlap at
	 * bits 15-16 — verify the BL_HS/BL_LP_INTERLEAVING field widths
	 * against the TRM's DSI_VM_TIMING6 description.
	 */
	r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
	r = FLD_MOD(r, bl_interleave_hs, 31, 15);
	r = FLD_MOD(r, bl_interleave_lp, 16, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
}
4023 
4024 static int dsi_proto_config(struct omap_dss_device *dssdev)
4025 {
4026  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4027  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4028  u32 r;
4029  int buswidth = 0;
4030 
4031  dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
4035 
4036  dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
4040 
4041  /* XXX what values for the timeouts? */
4042  dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
4043  dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
4044  dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
4045  dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
4046 
4047  switch (dsi_get_pixel_size(dsi->pix_fmt)) {
4048  case 16:
4049  buswidth = 0;
4050  break;
4051  case 18:
4052  buswidth = 1;
4053  break;
4054  case 24:
4055  buswidth = 2;
4056  break;
4057  default:
4058  BUG();
4059  return -EINVAL;
4060  }
4061 
4062  r = dsi_read_reg(dsidev, DSI_CTRL);
4063  r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
4064  r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
4065  r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
4066  r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/
4067  r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
4068  r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
4069  r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
4070  r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
4072  r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
4073  /* DCS_CMD_CODE, 1=start, 0=continue */
4074  r = FLD_MOD(r, 0, 25, 25);
4075  }
4076 
4077  dsi_write_reg(dsidev, DSI_CTRL, r);
4078 
4079  dsi_config_vp_num_line_buffers(dsidev);
4080 
4081  if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4082  dsi_config_vp_sync_events(dsidev);
4083  dsi_config_blanking_modes(dsidev);
4084  dsi_config_cmd_mode_interleaving(dssdev);
4085  }
4086 
4087  dsi_vc_initial_config(dsidev, 0);
4088  dsi_vc_initial_config(dsidev, 1);
4089  dsi_vc_initial_config(dsidev, 2);
4090  dsi_vc_initial_config(dsidev, 3);
4091 
4092  return 0;
4093 }
4094 
/*
 * Derive and program the DDR clock pre/post counts (DSI_CLK_TIMING),
 * the HS mode entry/exit latencies (DSI_VM_TIMING7) and, in video
 * mode, the blanking/line timings (DSI_VM_TIMING1..3) from the DPHY
 * timing fields programmed in DSI_DSIPHY_CFG0..2.
 */
static void dsi_proto_timings(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
	unsigned tclk_pre, tclk_post;
	unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
	unsigned ths_trail, ths_exit;
	unsigned ddr_clk_pre, ddr_clk_post;
	unsigned enter_hs_mode_lat, exit_hs_mode_lat;
	unsigned ths_eot;
	int ndl = dsi->num_lanes_used - 1; /* data lanes only */
	u32 r;

	/* data lane timings, as programmed into the PHY config */
	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	ths_prepare = FLD_GET(r, 31, 24);
	ths_prepare_ths_zero = FLD_GET(r, 23, 16);
	/* the CFG0 field holds prepare+zero combined; recover zero alone */
	ths_zero = ths_prepare_ths_zero - ths_prepare;
	ths_trail = FLD_GET(r, 15, 8);
	ths_exit = FLD_GET(r, 7, 0);

	/* clock lane timings */
	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	tlpx = FLD_GET(r, 20, 16) * 2;
	tclk_trail = FLD_GET(r, 15, 8);
	tclk_zero = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
	tclk_prepare = FLD_GET(r, 7, 0);

	/* min 8*UI */
	tclk_pre = 20;
	/* min 60ns + 52*UI */
	tclk_post = ns2ddr(dsidev, 60) + 26;

	/* end-of-transmission time, spread over the data lanes */
	ths_eot = DIV_ROUND_UP(4, ndl);

	/* convert to DDR clock cycles (4 PHY time units per DDR cycle) */
	ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
			4);
	ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;

	/* the hardware fields are 8 bits wide and zero is invalid */
	BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
	BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);

	r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
	r = FLD_MOD(r, ddr_clk_pre, 15, 8);
	r = FLD_MOD(r, ddr_clk_post, 7, 0);
	dsi_write_reg(dsidev, DSI_CLK_TIMING, r);

	DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
			ddr_clk_pre,
			ddr_clk_post);

	/* latencies for switching a lane into and out of HS mode */
	enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
		DIV_ROUND_UP(ths_prepare, 4) +
		DIV_ROUND_UP(ths_zero + 3, 4);

	exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;

	r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
		FLD_VAL(exit_hs_mode_lat, 15, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING7, r);

	DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
			enter_hs_mode_lat, exit_hs_mode_lat);

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		/* TODO: Implement a video mode check_timings function */
		int hsa = dsi->vm_timings.hsa;
		int hfp = dsi->vm_timings.hfp;
		int hbp = dsi->vm_timings.hbp;
		int vsa = dsi->vm_timings.vsa;
		int vfp = dsi->vm_timings.vfp;
		int vbp = dsi->vm_timings.vbp;
		int window_sync = dsi->vm_timings.window_sync;
		bool hsync_end = dsi->vm_timings.vp_hsync_end;
		struct omap_video_timings *timings = &dsi->timings;
		int bpp = dsi_get_pixel_size(dsi->pix_fmt);
		int tl, t_he, width_bytes;

		/* sync-end pulse time, only when HSYNC end events are used */
		t_he = hsync_end ?
			((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;

		width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);

		/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
		tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
			DIV_ROUND_UP(width_bytes + 6, ndl) + hbp;

		DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
			hfp, hsync_end ? hsa : 0, tl);
		DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
			vsa, timings->y_res);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
		r = FLD_MOD(r, hbp, 11, 0);	/* HBP */
		r = FLD_MOD(r, hfp, 23, 12);	/* HFP */
		r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24);	/* HSA */
		dsi_write_reg(dsidev, DSI_VM_TIMING1, r);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
		r = FLD_MOD(r, vbp, 7, 0);	/* VBP */
		r = FLD_MOD(r, vfp, 15, 8);	/* VFP */
		r = FLD_MOD(r, vsa, 23, 16);	/* VSA */
		r = FLD_MOD(r, window_sync, 27, 24);	/* WINDOW_SYNC */
		dsi_write_reg(dsidev, DSI_VM_TIMING2, r);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
		r = FLD_MOD(r, timings->y_res, 14, 0);	/* VACT */
		r = FLD_MOD(r, tl, 31, 16);		/* TL */
		dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
	}
}
4206 
4208  const struct omap_dsi_pin_config *pin_cfg)
4209 {
4210  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4211  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4212  int num_pins;
4213  const int *pins;
4214  struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
4215  int num_lanes;
4216  int i;
4217 
4218  static const enum dsi_lane_function functions[] = {
4219  DSI_LANE_CLK,
4224  };
4225 
4226  num_pins = pin_cfg->num_pins;
4227  pins = pin_cfg->pins;
4228 
4229  if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
4230  || num_pins % 2 != 0)
4231  return -EINVAL;
4232 
4233  for (i = 0; i < DSI_MAX_NR_LANES; ++i)
4234  lanes[i].function = DSI_LANE_UNUSED;
4235 
4236  num_lanes = 0;
4237 
4238  for (i = 0; i < num_pins; i += 2) {
4239  u8 lane, pol;
4240  int dx, dy;
4241 
4242  dx = pins[i];
4243  dy = pins[i + 1];
4244 
4245  if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
4246  return -EINVAL;
4247 
4248  if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
4249  return -EINVAL;
4250 
4251  if (dx & 1) {
4252  if (dy != dx - 1)
4253  return -EINVAL;
4254  pol = 1;
4255  } else {
4256  if (dy != dx + 1)
4257  return -EINVAL;
4258  pol = 0;
4259  }
4260 
4261  lane = dx / 2;
4262 
4263  lanes[lane].function = functions[i / 2];
4264  lanes[lane].polarity = pol;
4265  num_lanes++;
4266  }
4267 
4268  memcpy(dsi->lanes, lanes, sizeof(dsi->lanes));
4269  dsi->num_lanes_used = num_lanes;
4270 
4271  return 0;
4272 }
4274 
4276  unsigned long ddr_clk, unsigned long lp_clk)
4277 {
4278  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4279  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4280  struct dsi_clock_info cinfo;
4281  struct dispc_clock_info dispc_cinfo;
4282  unsigned lp_clk_div;
4283  unsigned long dsi_fclk;
4284  int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
4285  unsigned long pck;
4286  int r;
4287 
4288  DSSDBGF("ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
4289 
4290  mutex_lock(&dsi->lock);
4291 
4292  /* Calculate PLL output clock */
4293  r = dsi_pll_calc_ddrfreq(dsidev, ddr_clk * 4, &cinfo);
4294  if (r)
4295  goto err;
4296 
4297  /* Calculate PLL's DSI clock */
4298  dsi_pll_calc_dsi_fck(dsidev, &cinfo);
4299 
4300  /* Calculate PLL's DISPC clock and pck & lck divs */
4301  pck = cinfo.clkin4ddr / 16 * (dsi->num_lanes_used - 1) * 8 / bpp;
4302  DSSDBG("finding dispc dividers for pck %lu\n", pck);
4303  r = dsi_pll_calc_dispc_fck(dsidev, pck, &cinfo, &dispc_cinfo);
4304  if (r)
4305  goto err;
4306 
4307  /* Calculate LP clock */
4308  dsi_fclk = cinfo.dsi_pll_hsdiv_dsi_clk;
4309  lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk * 2);
4310 
4311  dssdev->clocks.dsi.regn = cinfo.regn;
4312  dssdev->clocks.dsi.regm = cinfo.regm;
4313  dssdev->clocks.dsi.regm_dispc = cinfo.regm_dispc;
4314  dssdev->clocks.dsi.regm_dsi = cinfo.regm_dsi;
4315 
4316  dssdev->clocks.dsi.lp_clk_div = lp_clk_div;
4317 
4318  dssdev->clocks.dispc.channel.lck_div = dispc_cinfo.lck_div;
4319  dssdev->clocks.dispc.channel.pck_div = dispc_cinfo.pck_div;
4320 
4321  dssdev->clocks.dispc.dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK;
4322 
4323  dssdev->clocks.dispc.channel.lcd_clk_src =
4324  dsi->module_id == 0 ?
4327 
4328  dssdev->clocks.dsi.dsi_fclk_src =
4329  dsi->module_id == 0 ?
4332 
4333  mutex_unlock(&dsi->lock);
4334  return 0;
4335 err:
4336  mutex_unlock(&dsi->lock);
4337  return r;
4338 }
4340 
4341 int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
4342 {
4343  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4344  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4345  struct omap_overlay_manager *mgr = dssdev->output->manager;
4346  int bpp = dsi_get_pixel_size(dsi->pix_fmt);
4347  u8 data_type;
4348  u16 word_count;
4349  int r;
4350 
4351  if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4352  switch (dsi->pix_fmt) {
4354  data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
4355  break;
4357  data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
4358  break;
4360  data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
4361  break;
4363  data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
4364  break;
4365  default:
4366  BUG();
4367  return -EINVAL;
4368  };
4369 
4370  dsi_if_enable(dsidev, false);
4371  dsi_vc_enable(dsidev, channel, false);
4372 
4373  /* MODE, 1 = video mode */
4374  REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
4375 
4376  word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
4377 
4378  dsi_vc_write_long_header(dsidev, channel, data_type,
4379  word_count, 0);
4380 
4381  dsi_vc_enable(dsidev, channel, true);
4382  dsi_if_enable(dsidev, true);
4383  }
4384 
4385  r = dss_mgr_enable(mgr);
4386  if (r) {
4387  if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
4388  dsi_if_enable(dsidev, false);
4389  dsi_vc_enable(dsidev, channel, false);
4390  }
4391 
4392  return r;
4393  }
4394 
4395  return 0;
4396 }
4398 
/*
 * Stop video output on the given virtual channel. In video mode the
 * interface and VC are quiesced, the VC is switched back to command
 * mode, and both are re-enabled before the manager is disabled. The
 * disable/reprogram/enable ordering is deliberate — keep it.
 */
void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_overlay_manager *mgr = dssdev->output->manager;

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_if_enable(dsidev, false);
		dsi_vc_enable(dsidev, channel, false);

		/* MODE, 0 = command mode */
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);

		dsi_vc_enable(dsidev, channel, true);
		dsi_if_enable(dsidev, true);
	}

	dss_mgr_disable(mgr);
}
4419 
4420 static void dsi_update_screen_dispc(struct omap_dss_device *dssdev)
4421 {
4422  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4423  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4424  struct omap_overlay_manager *mgr = dssdev->output->manager;
4425  unsigned bytespp;
4426  unsigned bytespl;
4427  unsigned bytespf;
4428  unsigned total_len;
4429  unsigned packet_payload;
4430  unsigned packet_len;
4431  u32 l;
4432  int r;
4433  const unsigned channel = dsi->update_channel;
4434  const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
4435  u16 w = dsi->timings.x_res;
4436  u16 h = dsi->timings.y_res;
4437 
4438  DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
4439 
4440  dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
4441 
4442  bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
4443  bytespl = w * bytespp;
4444  bytespf = bytespl * h;
4445 
4446  /* NOTE: packet_payload has to be equal to N * bytespl, where N is
4447  * number of lines in a packet. See errata about VP_CLK_RATIO */
4448 
4449  if (bytespf < line_buf_size)
4450  packet_payload = bytespf;
4451  else
4452  packet_payload = (line_buf_size) / bytespl * bytespl;
4453 
4454  packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
4455  total_len = (bytespf / packet_payload) * packet_len;
4456 
4457  if (bytespf % packet_payload)
4458  total_len += (bytespf % packet_payload) + 1;
4459 
4460  l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
4461  dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
4462 
4463  dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
4464  packet_len, 0);
4465 
4466  if (dsi->te_enabled)
4467  l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
4468  else
4469  l = FLD_MOD(l, 1, 31, 31); /* TE_START */
4470  dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
4471 
4472  /* We put SIDLEMODE to no-idle for the duration of the transfer,
4473  * because DSS interrupts are not capable of waking up the CPU and the
4474  * framedone interrupt could be delayed for quite a long time. I think
4475  * the same goes for any DSS interrupts, but for some reason I have not
4476  * seen the problem anywhere else than here.
4477  */
4479 
4480  dsi_perf_mark_start(dsidev);
4481 
4483  msecs_to_jiffies(250));
4484  BUG_ON(r == 0);
4485 
4486  dss_mgr_set_timings(mgr, &dsi->timings);
4487 
4488  dss_mgr_start_update(mgr);
4489 
4490  if (dsi->te_enabled) {
4491  /* disable LP_RX_TO, so that we can receive TE. Time to wait
4492  * for TE is longer than the timer allows */
4493  REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
4494 
4495  dsi_vc_send_bta(dsidev, channel);
4496 
4497 #ifdef DSI_CATCH_MISSING_TE
4498  mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
4499 #endif
4500  }
4501 }
4502 
#ifdef DSI_CATCH_MISSING_TE
/* Watchdog timer handler: fires if no tearing-effect trigger arrived. */
static void dsi_te_timeout(unsigned long arg)
{
	DSSERR("TE not received for 250ms!\n");
}
#endif
4509 
4510 static void dsi_handle_framedone(struct platform_device *dsidev, int error)
4511 {
4512  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4513 
4514  /* SIDLEMODE back to smart-idle */
4516 
4517  if (dsi->te_enabled) {
4518  /* enable LP_RX_TO again after the TE */
4519  REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
4520  }
4521 
4522  dsi->framedone_callback(error, dsi->framedone_data);
4523 
4524  if (!error)
4525  dsi_perf_show(dsidev, "DISPC");
4526 }
4527 
4528 static void dsi_framedone_timeout_work_callback(struct work_struct *work)
4529 {
4530  struct dsi_data *dsi = container_of(work, struct dsi_data,
4531  framedone_timeout_work.work);
4532  /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
4533  * 250ms which would conflict with this timeout work. What should be
4534  * done is first cancel the transfer on the HW, and then cancel the
4535  * possibly scheduled framedone work. However, cancelling the transfer
4536  * on the HW is buggy, and would probably require resetting the whole
4537  * DSI */
4538 
4539  DSSERR("Framedone not received for 250ms!\n");
4540 
4541  dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
4542 }
4543 
4544 static void dsi_framedone_irq_callback(void *data, u32 mask)
4545 {
4546  struct platform_device *dsidev = (struct platform_device *) data;
4547  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4548 
4549  /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
4550  * turns itself off. However, DSI still has the pixels in its buffers,
4551  * and is sending the data.
4552  */
4553 
4555 
4556  dsi_handle_framedone(dsidev, 0);
4557 }
4558 
4559 int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
4560  void (*callback)(int, void *), void *data)
4561 {
4562  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4563  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4564  u16 dw, dh;
4565 
4566  dsi_perf_mark_setup(dsidev);
4567 
4568  dsi->update_channel = channel;
4569 
4571  dsi->framedone_data = data;
4572 
4573  dw = dsi->timings.x_res;
4574  dh = dsi->timings.y_res;
4575 
4576 #ifdef DEBUG
4577  dsi->update_bytes = dw * dh *
4578  dsi_get_pixel_size(dsi->pix_fmt) / 8;
4579 #endif
4580  dsi_update_screen_dispc(dssdev);
4581 
4582  return 0;
4583 }
4585 
4586 /* Display funcs */
4587 
4588 static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
4589 {
4590  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4591  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4592  struct dispc_clock_info dispc_cinfo;
4593  int r;
4594  unsigned long long fck;
4595 
4596  fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
4597 
4598  dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
4599  dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
4600 
4601  r = dispc_calc_clock_rates(fck, &dispc_cinfo);
4602  if (r) {
4603  DSSERR("Failed to calc dispc clocks\n");
4604  return r;
4605  }
4606 
4607  dsi->mgr_config.clock_info = dispc_cinfo;
4608 
4609  return 0;
4610 }
4611 
4612 static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4613 {
4614  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4615  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4616  struct omap_overlay_manager *mgr = dssdev->output->manager;
4617  int r;
4618  u32 irq = 0;
4619 
4620  if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4621  dsi->timings.hsw = 1;
4622  dsi->timings.hfp = 1;
4623  dsi->timings.hbp = 1;
4624  dsi->timings.vsw = 1;
4625  dsi->timings.vfp = 0;
4626  dsi->timings.vbp = 0;
4627 
4628  irq = dispc_mgr_get_framedone_irq(mgr->id);
4629 
4630  r = omap_dispc_register_isr(dsi_framedone_irq_callback,
4631  (void *) dsidev, irq);
4632  if (r) {
4633  DSSERR("can't get FRAMEDONE irq\n");
4634  goto err;
4635  }
4636 
4637  dsi->mgr_config.stallmode = true;
4638  dsi->mgr_config.fifohandcheck = true;
4639  } else {
4640  dsi->mgr_config.stallmode = false;
4641  dsi->mgr_config.fifohandcheck = false;
4642  }
4643 
4644  /*
4645  * override interlace, logic level and edge related parameters in
4646  * omap_video_timings with default values
4647  */
4648  dsi->timings.interlace = false;
4649  dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
4650  dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
4651  dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
4652  dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
4653  dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
4654 
4655  dss_mgr_set_timings(mgr, &dsi->timings);
4656 
4657  r = dsi_configure_dispc_clocks(dssdev);
4658  if (r)
4659  goto err1;
4660 
4661  dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
4662  dsi->mgr_config.video_port_width =
4664  dsi->mgr_config.lcden_sig_polarity = 0;
4665 
4666  dss_mgr_set_lcd_config(mgr, &dsi->mgr_config);
4667 
4668  return 0;
4669 err1:
4670  if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
4671  omap_dispc_unregister_isr(dsi_framedone_irq_callback,
4672  (void *) dsidev, irq);
4673 err:
4674  return r;
4675 }
4676 
4677 static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
4678 {
4679  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4680  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4681  struct omap_overlay_manager *mgr = dssdev->output->manager;
4682 
4683  if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4684  u32 irq;
4685 
4686  irq = dispc_mgr_get_framedone_irq(mgr->id);
4687 
4688  omap_dispc_unregister_isr(dsi_framedone_irq_callback,
4689  (void *) dsidev, irq);
4690  }
4691 }
4692 
4693 static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
4694 {
4695  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4696  struct dsi_clock_info cinfo;
4697  int r;
4698 
4699  cinfo.regn = dssdev->clocks.dsi.regn;
4700  cinfo.regm = dssdev->clocks.dsi.regm;
4701  cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
4702  cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
4703  r = dsi_calc_clock_rates(dsidev, &cinfo);
4704  if (r) {
4705  DSSERR("Failed to calc dsi clocks\n");
4706  return r;
4707  }
4708 
4709  r = dsi_pll_set_clock_div(dsidev, &cinfo);
4710  if (r) {
4711  DSSERR("Failed to set dsi clocks\n");
4712  return r;
4713  }
4714 
4715  return 0;
4716 }
4717 
4718 static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4719 {
4720  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4721  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4722  struct omap_overlay_manager *mgr = dssdev->output->manager;
4723  int r;
4724 
4725  r = dsi_pll_init(dsidev, true, true);
4726  if (r)
4727  goto err0;
4728 
4729  r = dsi_configure_dsi_clocks(dssdev);
4730  if (r)
4731  goto err1;
4732 
4733  dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
4734  dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
4736  dssdev->clocks.dispc.channel.lcd_clk_src);
4737 
4738  DSSDBG("PLL OK\n");
4739 
4740  r = dsi_cio_init(dsidev);
4741  if (r)
4742  goto err2;
4743 
4744  _dsi_print_reset_status(dsidev);
4745 
4746  dsi_proto_timings(dsidev);
4747  dsi_set_lp_clk_divisor(dssdev);
4748 
4749  if (1)
4750  _dsi_print_reset_status(dsidev);
4751 
4752  r = dsi_proto_config(dssdev);
4753  if (r)
4754  goto err3;
4755 
4756  /* enable interface */
4757  dsi_vc_enable(dsidev, 0, 1);
4758  dsi_vc_enable(dsidev, 1, 1);
4759  dsi_vc_enable(dsidev, 2, 1);
4760  dsi_vc_enable(dsidev, 3, 1);
4761  dsi_if_enable(dsidev, 1);
4762  dsi_force_tx_stop_mode_io(dsidev);
4763 
4764  return 0;
4765 err3:
4766  dsi_cio_uninit(dsidev);
4767 err2:
4771 
4772 err1:
4773  dsi_pll_uninit(dsidev, true);
4774 err0:
4775  return r;
4776 }
4777 
4778 static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4779  bool disconnect_lanes, bool enter_ulps)
4780 {
4781  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4782  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4783  struct omap_overlay_manager *mgr = dssdev->output->manager;
4784 
4785  if (enter_ulps && !dsi->ulps_enabled)
4786  dsi_enter_ulps(dsidev);
4787 
4788  /* disable interface */
4789  dsi_if_enable(dsidev, 0);
4790  dsi_vc_enable(dsidev, 0, 0);
4791  dsi_vc_enable(dsidev, 1, 0);
4792  dsi_vc_enable(dsidev, 2, 0);
4793  dsi_vc_enable(dsidev, 3, 0);
4794 
4798  dsi_cio_uninit(dsidev);
4799  dsi_pll_uninit(dsidev, disconnect_lanes);
4800 }
4801 
4803 {
4804  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4805  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4806  struct omap_dss_output *out = dssdev->output;
4807  int r = 0;
4808 
4809  DSSDBG("dsi_display_enable\n");
4810 
4811  WARN_ON(!dsi_bus_is_locked(dsidev));
4812 
4813  mutex_lock(&dsi->lock);
4814 
4815  if (out == NULL || out->manager == NULL) {
4816  DSSERR("failed to enable display: no output/manager\n");
4817  r = -ENODEV;
4818  goto err_start_dev;
4819  }
4820 
4821  r = omap_dss_start_device(dssdev);
4822  if (r) {
4823  DSSERR("failed to start device\n");
4824  goto err_start_dev;
4825  }
4826 
4827  r = dsi_runtime_get(dsidev);
4828  if (r)
4829  goto err_get_dsi;
4830 
4831  dsi_enable_pll_clock(dsidev, 1);
4832 
4833  _dsi_initialize_irq(dsidev);
4834 
4835  r = dsi_display_init_dispc(dssdev);
4836  if (r)
4837  goto err_init_dispc;
4838 
4839  r = dsi_display_init_dsi(dssdev);
4840  if (r)
4841  goto err_init_dsi;
4842 
4843  mutex_unlock(&dsi->lock);
4844 
4845  return 0;
4846 
4847 err_init_dsi:
4848  dsi_display_uninit_dispc(dssdev);
4849 err_init_dispc:
4850  dsi_enable_pll_clock(dsidev, 0);
4851  dsi_runtime_put(dsidev);
4852 err_get_dsi:
4853  omap_dss_stop_device(dssdev);
4854 err_start_dev:
4855  mutex_unlock(&dsi->lock);
4856  DSSDBG("dsi_display_enable FAILED\n");
4857  return r;
4858 }
4860 
4862  bool disconnect_lanes, bool enter_ulps)
4863 {
4864  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4865  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4866 
4867  DSSDBG("dsi_display_disable\n");
4868 
4869  WARN_ON(!dsi_bus_is_locked(dsidev));
4870 
4871  mutex_lock(&dsi->lock);
4872 
4873  dsi_sync_vc(dsidev, 0);
4874  dsi_sync_vc(dsidev, 1);
4875  dsi_sync_vc(dsidev, 2);
4876  dsi_sync_vc(dsidev, 3);
4877 
4878  dsi_display_uninit_dispc(dssdev);
4879 
4880  dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
4881 
4882  dsi_runtime_put(dsidev);
4883  dsi_enable_pll_clock(dsidev, 0);
4884 
4885  omap_dss_stop_device(dssdev);
4886 
4887  mutex_unlock(&dsi->lock);
4888 }
4890 
4891 int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4892 {
4893  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4894  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4895 
4896  dsi->te_enabled = enable;
4897  return 0;
4898 }
4900 
4902  struct omap_video_timings *timings)
4903 {
4904  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4905  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4906 
4907  mutex_lock(&dsi->lock);
4908 
4909  dsi->timings = *timings;
4910 
4911  mutex_unlock(&dsi->lock);
4912 }
4914 
4915 void omapdss_dsi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
4916 {
4917  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4918  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4919 
4920  mutex_lock(&dsi->lock);
4921 
4922  dsi->timings.x_res = w;
4923  dsi->timings.y_res = h;
4924 
4925  mutex_unlock(&dsi->lock);
4926 }
4928 
4931 {
4932  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4933  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4934 
4935  mutex_lock(&dsi->lock);
4936 
4937  dsi->pix_fmt = fmt;
4938 
4939  mutex_unlock(&dsi->lock);
4940 }
4942 
4944  enum omap_dss_dsi_mode mode)
4945 {
4946  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4947  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4948 
4949  mutex_lock(&dsi->lock);
4950 
4951  dsi->mode = mode;
4952 
4953  mutex_unlock(&dsi->lock);
4954 }
4956 
4958  struct omap_dss_dsi_videomode_timings *timings)
4959 {
4960  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4961  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4962 
4963  mutex_lock(&dsi->lock);
4964 
4965  dsi->vm_timings = *timings;
4966 
4967  mutex_unlock(&dsi->lock);
4968 }
4970 
4971 static int __init dsi_init_display(struct omap_dss_device *dssdev)
4972 {
4973  struct platform_device *dsidev =
4974  dsi_get_dsidev_from_id(dssdev->phy.dsi.module);
4975  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4976 
4977  DSSDBG("DSI init\n");
4978 
4979  if (dsi->vdds_dsi_reg == NULL) {
4980  struct regulator *vdds_dsi;
4981 
4982  vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
4983 
4984  if (IS_ERR(vdds_dsi)) {
4985  DSSERR("can't get VDDS_DSI regulator\n");
4986  return PTR_ERR(vdds_dsi);
4987  }
4988 
4989  dsi->vdds_dsi_reg = vdds_dsi;
4990  }
4991 
4992  return 0;
4993 }
4994 
4995 int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
4996 {
4997  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4998  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4999  int i;
5000 
5001  for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
5002  if (!dsi->vc[i].dssdev) {
5003  dsi->vc[i].dssdev = dssdev;
5004  *channel = i;
5005  return 0;
5006  }
5007  }
5008 
5009  DSSERR("cannot get VC for display %s", dssdev->name);
5010  return -ENOSPC;
5011 }
5013 
5014 int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
5015 {
5016  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
5017  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5018 
5019  if (vc_id < 0 || vc_id > 3) {
5020  DSSERR("VC ID out of range\n");
5021  return -EINVAL;
5022  }
5023 
5024  if (channel < 0 || channel > 3) {
5025  DSSERR("Virtual Channel out of range\n");
5026  return -EINVAL;
5027  }
5028 
5029  if (dsi->vc[channel].dssdev != dssdev) {
5030  DSSERR("Virtual Channel not allocated to display %s\n",
5031  dssdev->name);
5032  return -EINVAL;
5033  }
5034 
5035  dsi->vc[channel].vc_id = vc_id;
5036 
5037  return 0;
5038 }
5040 
5041 void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
5042 {
5043  struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
5044  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5045 
5046  if ((channel >= 0 && channel <= 3) &&
5047  dsi->vc[channel].dssdev == dssdev) {
5048  dsi->vc[channel].dssdev = NULL;
5049  dsi->vc[channel].vc_id = 0;
5050  }
5051 }
5053 
5055 {
5056  if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
5057  DSSERR("%s (%s) not active\n",
5060 }
5061 
5063 {
5064  if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
5065  DSSERR("%s (%s) not active\n",
5068 }
5069 
5070 static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
5071 {
5072  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5073 
5076  dsi->regm_dispc_max =
5082 }
5083 
5084 static int dsi_get_clocks(struct platform_device *dsidev)
5085 {
5086  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5087  struct clk *clk;
5088 
5089  clk = clk_get(&dsidev->dev, "fck");
5090  if (IS_ERR(clk)) {
5091  DSSERR("can't get fck\n");
5092  return PTR_ERR(clk);
5093  }
5094 
5095  dsi->dss_clk = clk;
5096 
5097  clk = clk_get(&dsidev->dev, "sys_clk");
5098  if (IS_ERR(clk)) {
5099  DSSERR("can't get sys_clk\n");
5100  clk_put(dsi->dss_clk);
5101  dsi->dss_clk = NULL;
5102  return PTR_ERR(clk);
5103  }
5104 
5105  dsi->sys_clk = clk;
5106 
5107  return 0;
5108 }
5109 
5110 static void dsi_put_clocks(struct platform_device *dsidev)
5111 {
5112  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5113 
5114  if (dsi->dss_clk)
5115  clk_put(dsi->dss_clk);
5116  if (dsi->sys_clk)
5117  clk_put(dsi->sys_clk);
5118 }
5119 
5120 static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *pdev)
5121 {
5122  struct omap_dss_board_info *pdata = pdev->dev.platform_data;
5123  struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5124  const char *def_disp_name = dss_get_default_display_name();
5125  struct omap_dss_device *def_dssdev;
5126  int i;
5127 
5128  def_dssdev = NULL;
5129 
5130  for (i = 0; i < pdata->num_devices; ++i) {
5131  struct omap_dss_device *dssdev = pdata->devices[i];
5132 
5133  if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
5134  continue;
5135 
5136  if (dssdev->phy.dsi.module != dsi->module_id)
5137  continue;
5138 
5139  if (def_dssdev == NULL)
5140  def_dssdev = dssdev;
5141 
5142  if (def_disp_name != NULL &&
5143  strcmp(dssdev->name, def_disp_name) == 0) {
5144  def_dssdev = dssdev;
5145  break;
5146  }
5147  }
5148 
5149  return def_dssdev;
5150 }
5151 
5152 static void __init dsi_probe_pdata(struct platform_device *dsidev)
5153 {
5154  struct omap_dss_device *plat_dssdev;
5155  struct omap_dss_device *dssdev;
5156  int r;
5157 
5158  plat_dssdev = dsi_find_dssdev(dsidev);
5159 
5160  if (!plat_dssdev)
5161  return;
5162 
5163  dssdev = dss_alloc_and_init_device(&dsidev->dev);
5164  if (!dssdev)
5165  return;
5166 
5167  dss_copy_device_pdata(dssdev, plat_dssdev);
5168 
5169  r = dsi_init_display(dssdev);
5170  if (r) {
5171  DSSERR("device %s init failed: %d\n", dssdev->name, r);
5172  dss_put_device(dssdev);
5173  return;
5174  }
5175 
5176  r = dss_add_device(dssdev);
5177  if (r) {
5178  DSSERR("device %s register failed: %d\n", dssdev->name, r);
5179  dss_put_device(dssdev);
5180  return;
5181  }
5182 }
5183 
5184 static void __init dsi_init_output(struct platform_device *dsidev)
5185 {
5186  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5187  struct omap_dss_output *out = &dsi->output;
5188 
5189  out->pdev = dsidev;
5190  out->id = dsi->module_id == 0 ?
5192 
5193  out->type = OMAP_DISPLAY_TYPE_DSI;
5194 
5195  dss_register_output(out);
5196 }
5197 
5198 static void __exit dsi_uninit_output(struct platform_device *dsidev)
5199 {
5200  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5201  struct omap_dss_output *out = &dsi->output;
5202 
5203  dss_unregister_output(out);
5204 }
5205 
5206 /* DSI1 HW IP initialisation */
5207 static int __init omap_dsihw_probe(struct platform_device *dsidev)
5208 {
5209  u32 rev;
5210  int r, i;
5211  struct resource *dsi_mem;
5212  struct dsi_data *dsi;
5213 
5214  dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
5215  if (!dsi)
5216  return -ENOMEM;
5217 
5218  dsi->module_id = dsidev->id;
5219  dsi->pdev = dsidev;
5220  dev_set_drvdata(&dsidev->dev, dsi);
5221 
5222  spin_lock_init(&dsi->irq_lock);
5223  spin_lock_init(&dsi->errors_lock);
5224  dsi->errors = 0;
5225 
5226 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
5227  spin_lock_init(&dsi->irq_stats_lock);
5228  dsi->irq_stats.last_reset = jiffies;
5229 #endif
5230 
5231  mutex_init(&dsi->lock);
5232  sema_init(&dsi->bus_lock, 1);
5233 
5235  dsi_framedone_timeout_work_callback);
5236 
5237 #ifdef DSI_CATCH_MISSING_TE
5238  init_timer(&dsi->te_timer);
5239  dsi->te_timer.function = dsi_te_timeout;
5240  dsi->te_timer.data = 0;
5241 #endif
5242  dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
5243  if (!dsi_mem) {
5244  DSSERR("can't get IORESOURCE_MEM DSI\n");
5245  return -EINVAL;
5246  }
5247 
5248  dsi->base = devm_ioremap(&dsidev->dev, dsi_mem->start,
5249  resource_size(dsi_mem));
5250  if (!dsi->base) {
5251  DSSERR("can't ioremap DSI\n");
5252  return -ENOMEM;
5253  }
5254 
5255  dsi->irq = platform_get_irq(dsi->pdev, 0);
5256  if (dsi->irq < 0) {
5257  DSSERR("platform_get_irq failed\n");
5258  return -ENODEV;
5259  }
5260 
5261  r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler,
5262  IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev);
5263  if (r < 0) {
5264  DSSERR("request_irq failed\n");
5265  return r;
5266  }
5267 
5268  /* DSI VCs initialization */
5269  for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
5270  dsi->vc[i].source = DSI_VC_SOURCE_L4;
5271  dsi->vc[i].dssdev = NULL;
5272  dsi->vc[i].vc_id = 0;
5273  }
5274 
5275  dsi_calc_clock_param_ranges(dsidev);
5276 
5277  r = dsi_get_clocks(dsidev);
5278  if (r)
5279  return r;
5280 
5281  pm_runtime_enable(&dsidev->dev);
5282 
5283  r = dsi_runtime_get(dsidev);
5284  if (r)
5285  goto err_runtime_get;
5286 
5287  rev = dsi_read_reg(dsidev, DSI_REVISION);
5288  dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
5289  FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
5290 
5291  /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
5292  * of data to 3 by default */
5294  /* NB_DATA_LANES */
5295  dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
5296  else
5297  dsi->num_lanes_supported = 3;
5298 
5299  dsi_init_output(dsidev);
5300 
5301  dsi_probe_pdata(dsidev);
5302 
5303  dsi_runtime_put(dsidev);
5304 
5305  if (dsi->module_id == 0)
5306  dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
5307  else if (dsi->module_id == 1)
5308  dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
5309 
5310 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
5311  if (dsi->module_id == 0)
5312  dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
5313  else if (dsi->module_id == 1)
5314  dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
5315 #endif
5316  return 0;
5317 
5318 err_runtime_get:
5319  pm_runtime_disable(&dsidev->dev);
5320  dsi_put_clocks(dsidev);
5321  return r;
5322 }
5323 
5324 static int __exit omap_dsihw_remove(struct platform_device *dsidev)
5325 {
5326  struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5327 
5328  WARN_ON(dsi->scp_clk_refcount > 0);
5329 
5331 
5332  dsi_uninit_output(dsidev);
5333 
5334  pm_runtime_disable(&dsidev->dev);
5335 
5336  dsi_put_clocks(dsidev);
5337 
5338  if (dsi->vdds_dsi_reg != NULL) {
5339  if (dsi->vdds_dsi_enabled) {
5341  dsi->vdds_dsi_enabled = false;
5342  }
5343 
5345  dsi->vdds_dsi_reg = NULL;
5346  }
5347 
5348  return 0;
5349 }
5350 
/* Runtime suspend: drop the DISPC reference taken in dsi_runtime_resume(). */
static int dsi_runtime_suspend(struct device *dev)
{
	dispc_runtime_put();

	return 0;
}
5357 
/* Runtime resume: DSI depends on DISPC being powered, so take a DISPC
 * runtime reference; its result is our result. */
static int dsi_runtime_resume(struct device *dev)
{
	return dispc_runtime_get();
}
5368 
/* Runtime PM callbacks: DSI piggybacks on DISPC's runtime power state. */
static const struct dev_pm_ops dsi_pm_ops = {
	.runtime_suspend = dsi_runtime_suspend,
	.runtime_resume = dsi_runtime_resume,
};
5373 
/* Platform driver; probed via platform_driver_probe(), hence no .probe here. */
static struct platform_driver omap_dsihw_driver = {
	.remove = __exit_p(omap_dsihw_remove),
	.driver = {
		.name = "omapdss_dsi",
		.owner = THIS_MODULE,
		.pm = &dsi_pm_ops,
	},
};
5382 
5384 {
5385  return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
5386 }
5387 
5389 {
5390  platform_driver_unregister(&omap_dsihw_driver);
5391 }