omap_dmm_tiler.c
1 /*
2  * DMM IOMMU driver support functions for TI OMAP processors.
3  *
4  * Author: Rob Clark <[email protected]>
5  * Andy Gross <[email protected]>
6  *
7  * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License as
11  * published by the Free Software Foundation version 2.
12  *
13  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14  * kind, whether express or implied; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  */
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/platform_device.h> /* platform_device() */
21 #include <linux/errno.h>
22 #include <linux/sched.h>
23 #include <linux/wait.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/slab.h>
27 #include <linux/vmalloc.h>
28 #include <linux/delay.h>
29 #include <linux/mm.h>
30 #include <linux/time.h>
31 #include <linux/list.h>
32 #include <linux/semaphore.h>
33 
34 #include "omap_dmm_tiler.h"
35 #include "omap_dmm_priv.h"
36 
37 #define DMM_DRIVER_NAME "dmm"
38 
39 /* mappings for associating views to luts */
40 static struct tcm *containers[TILFMT_NFORMATS];
41 static struct dmm *omap_dmm;
42 
43 /* global spinlock for protecting lists */
44 static DEFINE_SPINLOCK(list_lock);
45 
46 /* Geometry table */
47 #define GEOM(xshift, yshift, bytes_per_pixel) { \
48  .x_shft = (xshift), \
49  .y_shft = (yshift), \
50  .cpp = (bytes_per_pixel), \
51  .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
52  .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
53  }
54 
55 static const struct {
56  uint32_t x_shft; /* unused X-bits (as part of bpp) */
57  uint32_t y_shft; /* unused Y-bits (as part of bpp) */
58  uint32_t cpp; /* bytes/chars per pixel */
59  uint32_t slot_w; /* width of each slot (in pixels) */
60  uint32_t slot_h; /* height of each slot (in pixels) */
61 } geom[TILFMT_NFORMATS] = {
62  [TILFMT_8BIT] = GEOM(0, 0, 1),
63  [TILFMT_16BIT] = GEOM(0, 1, 2),
64  [TILFMT_32BIT] = GEOM(1, 1, 4),
65  [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
66 };
67 
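/*
 * Illustration (not part of the driver): assuming SLOT_WIDTH_BITS and
 * SLOT_HEIGHT_BITS are both 6, as defined in omap_dmm_priv.h, the table
 * above works out to one 4 KiB page per slot for every format:
 *
 *   geom[TILFMT_8BIT]  -> slot_w = 1 << 6 = 64, slot_h = 1 << 6 = 64, cpp = 1  (64*64*1 = 4096)
 *   geom[TILFMT_16BIT] -> slot_w = 1 << 6 = 64, slot_h = 1 << 5 = 32, cpp = 2  (64*32*2 = 4096)
 *   geom[TILFMT_32BIT] -> slot_w = 1 << 5 = 32, slot_h = 1 << 5 = 32, cpp = 4  (32*32*4 = 4096)
 */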
68 
69 /* lookup table for registers w/ per-engine instances */
70 static const uint32_t reg[][4] = {
71  [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
72  DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
73  [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
74  DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
75 };
76 
77 /* simple allocator to grab next 16 byte aligned memory from txn */
78 static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
79 {
80  void *ptr;
81  struct refill_engine *engine = txn->engine_handle;
82 
83  /* dmm programming requires 16 byte aligned addresses */
84  txn->current_pa = round_up(txn->current_pa, 16);
85  txn->current_va = (void *)round_up((long)txn->current_va, 16);
86 
87  ptr = txn->current_va;
88  *pa = txn->current_pa;
89 
90  txn->current_pa += sz;
91  txn->current_va += sz;
92 
93  BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
94 
95  return ptr;
96 }
97 
98 /* check status and spin until wait_mask comes true */
99 static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
100 {
101  struct dmm *dmm = engine->dmm;
102  uint32_t r = 0, err, i;
103 
104  i = DMM_FIXED_RETRY_COUNT;
105  while (true) {
106  r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
107  err = r & DMM_PATSTATUS_ERR;
108  if (err)
109  return -EFAULT;
110 
111  if ((r & wait_mask) == wait_mask)
112  break;
113 
114  if (--i == 0)
115  return -ETIMEDOUT;
116 
117  udelay(1);
118  }
119 
120  return 0;
121 }
122 
123 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
124 {
125  struct dmm *dmm = arg;
126  uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
127  int i;
128 
129  /* ack IRQ */
130  writel(status, dmm->base + DMM_PAT_IRQSTATUS);
131 
132  for (i = 0; i < dmm->num_engines; i++) {
133  if (status & DMM_IRQSTAT_LST)
134  wake_up_interruptible(&dmm->engines[i].wait_for_refill);
135 
136  status >>= 8;
137  }
138 
139  return IRQ_HANDLED;
140 }
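/*
 * Sketch (illustrative, not from the driver): each refill engine owns one
 * byte of DMM_PAT_IRQSTATUS, so the shift-per-iteration loop above is
 * equivalent to testing DMM_IRQSTAT_LST in engine i's byte directly:
 *
 *   if ((status >> (8 * i)) & DMM_IRQSTAT_LST)
 *           wake_up_interruptible(&dmm->engines[i].wait_for_refill);
 */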
141 
142 /**
143  * Get a handle for a DMM transaction
144  */
145 static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
146 {
147  struct dmm_txn *txn = NULL;
148  struct refill_engine *engine = NULL;
149 
150  down(&dmm->engine_sem);
151 
152  /* grab an idle engine */
153  spin_lock(&list_lock);
154  if (!list_empty(&dmm->idle_head)) {
155  engine = list_entry(dmm->idle_head.next, struct refill_engine,
156  idle_node);
157  list_del(&engine->idle_node);
158  }
159  spin_unlock(&list_lock);
160 
161  BUG_ON(!engine);
162 
163  txn = &engine->txn;
164  engine->tcm = tcm;
165  txn->engine_handle = engine;
166  txn->last_pat = NULL;
167  txn->current_va = engine->refill_va;
168  txn->current_pa = engine->refill_pa;
169 
170  return txn;
171 }
172 
173 /**
174  * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
175  * corresponding slot is cleared (ie. dummy_pa is programmed)
176  */
177 static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
178  struct page **pages, uint32_t npages, uint32_t roll)
179 {
180  dma_addr_t pat_pa = 0;
181  uint32_t *data;
182  struct pat *pat;
183  struct refill_engine *engine = txn->engine_handle;
184  int columns = (1 + area->x1 - area->x0);
185  int rows = (1 + area->y1 - area->y0);
186  int i = columns*rows;
187  u32 *lut = omap_dmm->lut + (engine->tcm->lut_id * omap_dmm->lut_width *
188  omap_dmm->lut_height) +
189  (area->y0 * omap_dmm->lut_width) + area->x0;
190 
191  pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
192 
193  if (txn->last_pat)
194  txn->last_pat->next_pa = (uint32_t)pat_pa;
195 
196  pat->area = *area;
197  pat->ctrl = (struct pat_ctrl){
198  .start = 1,
199  .lut_id = engine->tcm->lut_id,
200  };
201 
202  data = alloc_dma(txn, 4*i, &pat->data_pa);
203 
204  while (i--) {
205  int n = i + roll;
206  if (n >= npages)
207  n -= npages;
208  data[i] = (pages && pages[n]) ?
209  page_to_phys(pages[n]) : engine->dmm->dummy_pa;
210  }
211 
212  /* fill in lut with new addresses */
213  for (i = 0; i < rows; i++, lut += omap_dmm->lut_width)
214  memcpy(lut, &data[i*columns], columns * sizeof(u32));
215 
216  txn->last_pat = pat;
217 
218  return 0;
219 }
220 
221 /**
222  * Commit the DMM transaction.
223  */
224 static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
225 {
226  int ret = 0;
227  struct refill_engine *engine = txn->engine_handle;
228  struct dmm *dmm = engine->dmm;
229 
230  if (!txn->last_pat) {
231  dev_err(engine->dmm->dev, "need at least one txn\n");
232  ret = -EINVAL;
233  goto cleanup;
234  }
235 
236  txn->last_pat->next_pa = 0;
237 
238  /* write to PAT_DESCR to clear out any pending transaction */
239  writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
240 
241  /* wait for engine ready: */
242  ret = wait_status(engine, DMM_PATSTATUS_READY);
243  if (ret) {
244  ret = -EFAULT;
245  goto cleanup;
246  }
247 
248  /* kick reload */
249  writel(engine->refill_pa,
250  dmm->base + reg[PAT_DESCR][engine->id]);
251 
252  if (wait) {
253  if (wait_event_interruptible_timeout(engine->wait_for_refill,
254  wait_status(engine, DMM_PATSTATUS_READY) == 0,
255  msecs_to_jiffies(1)) <= 0) {
256  dev_err(dmm->dev, "timed out waiting for done\n");
257  ret = -ETIMEDOUT;
258  }
259  }
260 
261 cleanup:
262  spin_lock(&list_lock);
263  list_add(&engine->idle_node, &dmm->idle_head);
264  spin_unlock(&list_lock);
265 
266  up(&omap_dmm->engine_sem);
267  return ret;
268 }
269 
270 /*
271  * DMM programming
272  */
273 static int fill(struct tcm_area *area, struct page **pages,
274  uint32_t npages, uint32_t roll, bool wait)
275 {
276  int ret = 0;
277  struct tcm_area slice, area_s;
278  struct dmm_txn *txn;
279 
280  txn = dmm_txn_init(omap_dmm, area->tcm);
281  if (IS_ERR_OR_NULL(txn))
282  return PTR_ERR(txn);
283 
284  tcm_for_each_slice(slice, *area, area_s) {
285  struct pat_area p_area = {
286  .x0 = slice.p0.x, .y0 = slice.p0.y,
287  .x1 = slice.p1.x, .y1 = slice.p1.y,
288  };
289 
290  ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
291  if (ret)
292  goto fail;
293 
294  roll += tcm_sizeof(slice);
295  }
296 
297  ret = dmm_txn_commit(txn, wait);
298 
299 fail:
300  return ret;
301 }
302 
303 /*
304  * Pin/unpin
305  */
306 
307 /* note: slots for which pages[i] == NULL are filled w/ dummy page
308  */
309 int tiler_pin(struct tiler_block *block, struct page **pages,
310  uint32_t npages, uint32_t roll, bool wait)
311 {
312  int ret;
313 
314  ret = fill(&block->area, pages, npages, roll, wait);
315 
316  if (ret)
317  tiler_unpin(block);
318 
319  return ret;
320 }
321 
322 int tiler_unpin(struct tiler_block *block)
323 {
324  return fill(&block->area, NULL, 0, 0, false);
325 }
326 
327 /*
328  * Reserve/release
329  */
330 struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
331  uint16_t h, uint16_t align)
332 {
333  struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
334  u32 min_align = 128;
335  int ret;
336 
337  BUG_ON(!validfmt(fmt));
338 
339  /* convert width/height to slots */
340  w = DIV_ROUND_UP(w, geom[fmt].slot_w);
341  h = DIV_ROUND_UP(h, geom[fmt].slot_h);
342 
343  /* convert alignment to slots */
344  min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
345  align = ALIGN(align, min_align);
346  align /= geom[fmt].slot_w * geom[fmt].cpp;
347 
348  block->fmt = fmt;
349 
350  ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
351  if (ret) {
352  kfree(block);
353  return ERR_PTR(-ENOMEM);
354  }
355 
356  /* add to allocation list */
357  spin_lock(&list_lock);
358  list_add(&block->alloc_node, &omap_dmm->alloc_head);
359  spin_unlock(&list_lock);
360 
361  return block;
362 }
363 
364 struct tiler_block *tiler_reserve_1d(size_t size)
365 {
366  struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
367  int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
368 
369  if (!block)
370  return ERR_PTR(-ENOMEM);
371 
372  block->fmt = TILFMT_PAGE;
373 
374  if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
375  &block->area)) {
376  kfree(block);
377  return ERR_PTR(-ENOMEM);
378  }
379 
380  spin_lock(&list_lock);
381  list_add(&block->alloc_node, &omap_dmm->alloc_head);
382  spin_unlock(&list_lock);
383 
384  return block;
385 }
386 
387 /* note: if you have pin'd pages, you should have already unpin'd first! */
388 int tiler_release(struct tiler_block *block)
389 {
390  int ret = tcm_free(&block->area);
391 
392  if (block->area.tcm)
393  dev_err(omap_dmm->dev, "failed to release block\n");
394 
395  spin_lock(&list_lock);
396  list_del(&block->alloc_node);
397  spin_unlock(&list_lock);
398 
399  kfree(block);
400  return ret;
401 }
402 
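/*
 * Usage sketch (illustrative only, not part of this file): a caller such as
 * the omapdrm GEM code typically pairs these helpers roughly as follows.
 * "my_pages" and "my_npages" are hypothetical names; error handling is
 * omitted for brevity.
 *
 *   struct tiler_block *block;
 *   dma_addr_t paddr;
 *   int ret;
 *
 *   block = tiler_reserve_2d(TILFMT_16BIT, width, height, 0);
 *   if (IS_ERR(block))
 *           return PTR_ERR(block);
 *
 *   ret = tiler_pin(block, my_pages, my_npages, 0, true);
 *   ...
 *   paddr = tiler_ssptr(block);   // scan out / DMA from the tiled view
 *   ...
 *   tiler_unpin(block);           // slots fall back to the dummy page
 *   tiler_release(block);
 */
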
403 /*
404  * Utils
405  */
406 
407 /* calculate the tiler space address of a pixel in a view orientation...
408  * below description copied from the display subsystem section of TRM:
409  *
410  * When the TILER is addressed, the bits:
411  * [28:27] = 0x0 for 8-bit tiled
412  * 0x1 for 16-bit tiled
413  * 0x2 for 32-bit tiled
414  * 0x3 for page mode
415  * [31:29] = 0x0 for 0-degree view
416  * 0x1 for 180-degree view + mirroring
417  * 0x2 for 0-degree view + mirroring
418  * 0x3 for 180-degree view
419  * 0x4 for 270-degree view + mirroring
420  * 0x5 for 270-degree view
421  * 0x6 for 90-degree view
422  * 0x7 for 90-degree view + mirroring
423  * Otherwise the bits indicated the corresponding bit address to access
424  * the SDRAM.
425  */
426 static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
427 {
428  u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
429 
430  x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
431  y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
432  alignment = geom[fmt].x_shft + geom[fmt].y_shft;
433 
434  /* validate coordinate */
435  x_mask = MASK(x_bits);
436  y_mask = MASK(y_bits);
437 
438  if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
439  DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
440  x, x, x_mask, y, y, y_mask);
441  return 0;
442  }
443 
444  /* account for mirroring */
445  if (orient & MASK_X_INVERT)
446  x ^= x_mask;
447  if (orient & MASK_Y_INVERT)
448  y ^= y_mask;
449 
450  /* get coordinate address */
451  if (orient & MASK_XY_FLIP)
452  tmp = ((x << y_bits) + y);
453  else
454  tmp = ((y << x_bits) + x);
455 
456  return TIL_ADDR((tmp << alignment), orient, fmt);
457 }
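/*
 * Worked example (illustrative): for TILFMT_8BIT (x_shft = y_shft = 0) in
 * the 0-degree view, the function reduces to a row-major byte offset,
 *
 *   tmp  = (y << CONT_WIDTH_BITS) + x;
 *   addr = TIL_ADDR(tmp << 0, 0, TILFMT_8BIT);
 *
 * i.e. plain (y * container_width + x) with the view/format bits [31:27]
 * described above OR'd in by TIL_ADDR().  For the 16- and 32-bit formats,
 * x_shft/y_shft drop the address bits that the larger pixel size already
 * accounts for.
 */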
458 
459 dma_addr_t tiler_ssptr(struct tiler_block *block)
460 {
461  BUG_ON(!validfmt(block->fmt));
462 
463  return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
464  block->area.p0.x * geom[block->fmt].slot_w,
465  block->area.p0.y * geom[block->fmt].slot_h);
466 }
467 
468 dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
469  uint32_t x, uint32_t y)
470 {
471  struct tcm_pt *p = &block->area.p0;
472  BUG_ON(!validfmt(block->fmt));
473 
474  return tiler_get_address(block->fmt, orient,
475  (p->x * geom[block->fmt].slot_w) + x,
476  (p->y * geom[block->fmt].slot_h) + y);
477 }
478 
479 void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
480 {
481  BUG_ON(!validfmt(fmt));
482  *w = round_up(*w, geom[fmt].slot_w);
483  *h = round_up(*h, geom[fmt].slot_h);
484 }
485 
486 uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
487 {
488  BUG_ON(!validfmt(fmt));
489 
490  if (orient & MASK_XY_FLIP)
491  return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
492  else
493  return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
494 }
495 
496 size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
497 {
498  tiler_align(fmt, &w, &h);
499  return geom[fmt].cpp * w * h;
500 }
501 
502 size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
503 {
504  BUG_ON(!validfmt(fmt));
505  return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
506 }
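/*
 * Example (assuming 64x32-pixel slots for TILFMT_16BIT and a 4 KiB
 * PAGE_SIZE; both values come from headers outside this file): for a
 * 640x480 16-bit buffer,
 *
 *   tiler_size(TILFMT_16BIT, 640, 480)  = 2 * 640 * 480        (already slot-aligned)
 *   tiler_vsize(TILFMT_16BIT, 640, 480) = round_up(2 * 640, 4096) * 480
 *                                       = 4096 * 480
 *
 * i.e. tiler_size() is the footprint in the tiled container, while
 * tiler_vsize() is the size of the page-aligned virtual view in which each
 * row starts on a page boundary.
 */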
507 
508 bool dmm_is_available(void)
509 {
510  return omap_dmm ? true : false;
511 }
512 
513 static int omap_dmm_remove(struct platform_device *dev)
514 {
515  struct tiler_block *block, *_block;
516  int i;
517 
518  if (omap_dmm) {
519  /* free all area regions */
520  spin_lock(&list_lock);
521  list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
522  alloc_node) {
523  list_del(&block->alloc_node);
524  kfree(block);
525  }
526  spin_unlock(&list_lock);
527 
528  for (i = 0; i < omap_dmm->num_lut; i++)
529  if (omap_dmm->tcm && omap_dmm->tcm[i])
530  omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
531  kfree(omap_dmm->tcm);
532 
533  kfree(omap_dmm->engines);
534  if (omap_dmm->refill_va)
535  dma_free_coherent(omap_dmm->dev,
536  REFILL_BUFFER_SIZE * omap_dmm->num_engines,
537  omap_dmm->refill_va,
538  omap_dmm->refill_pa);
539  if (omap_dmm->dummy_page)
540  __free_page(omap_dmm->dummy_page);
541 
542  vfree(omap_dmm->lut);
543 
544  if (omap_dmm->irq > 0)
545  free_irq(omap_dmm->irq, omap_dmm);
546 
547  iounmap(omap_dmm->base);
548  kfree(omap_dmm);
549  omap_dmm = NULL;
550  }
551 
552  return 0;
553 }
554 
555 static int omap_dmm_probe(struct platform_device *dev)
556 {
557  int ret = -EFAULT, i;
558  struct tcm_area area = {0};
559  u32 hwinfo, pat_geom, lut_table_size;
560  struct resource *mem;
561 
562  omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
563  if (!omap_dmm) {
564  dev_err(&dev->dev, "failed to allocate driver data section\n");
565  goto fail;
566  }
567 
568  /* initialize lists */
569  INIT_LIST_HEAD(&omap_dmm->alloc_head);
570  INIT_LIST_HEAD(&omap_dmm->idle_head);
571 
572  /* lookup hwmod data - base address and irq */
573  mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
574  if (!mem) {
575  dev_err(&dev->dev, "failed to get base address resource\n");
576  goto fail;
577  }
578 
579  omap_dmm->base = ioremap(mem->start, SZ_2K);
580 
581  if (!omap_dmm->base) {
582  dev_err(&dev->dev, "failed to get dmm base address\n");
583  goto fail;
584  }
585 
586  omap_dmm->irq = platform_get_irq(dev, 0);
587  if (omap_dmm->irq < 0) {
588  dev_err(&dev->dev, "failed to get IRQ resource\n");
589  goto fail;
590  }
591 
592  omap_dmm->dev = &dev->dev;
593 
594  hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
595  omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
596  omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
597  omap_dmm->container_width = 256;
598  omap_dmm->container_height = 128;
599 
600  /* read out actual LUT width and height */
601  pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
602  omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
603  omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
604 
605  /* initialize DMM registers */
606  writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
607  writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
608  writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
609  writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
610  writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
611  writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
612 
613  ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
614  "omap_dmm_irq_handler", omap_dmm);
615 
616  if (ret) {
617  dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
618  omap_dmm->irq, ret);
619  omap_dmm->irq = -1;
620  goto fail;
621  }
622 
623  /* Enable all interrupts for each refill engine except
624  * ERR_LUT_MISS<n> (which is just advisory, and we don't care
625  * about because we want to be able to refill live scanout
626  * buffers for accelerated pan/scroll) and FILL_DSC<n> which
627  * we just generally don't care about.
628  */
629  writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
630 
631  lut_table_size = omap_dmm->lut_width * omap_dmm->lut_height *
632  omap_dmm->num_lut;
633 
634  omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut));
635  if (!omap_dmm->lut) {
636  dev_err(&dev->dev, "could not allocate lut table\n");
637  ret = -ENOMEM;
638  goto fail;
639  }
640 
641  omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
642  if (!omap_dmm->dummy_page) {
643  dev_err(&dev->dev, "could not allocate dummy page\n");
644  ret = -ENOMEM;
645  goto fail;
646  }
647 
648  /* set dma mask for device */
649  /* NOTE: this is a workaround for the hwmod not initializing properly */
650  dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
651 
652  omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
653 
654  /* alloc refill memory */
655  omap_dmm->refill_va = dma_alloc_coherent(&dev->dev,
656  REFILL_BUFFER_SIZE * omap_dmm->num_engines,
657  &omap_dmm->refill_pa, GFP_KERNEL);
658  if (!omap_dmm->refill_va) {
659  dev_err(&dev->dev, "could not allocate refill memory\n");
660  goto fail;
661  }
662 
663  /* alloc engines */
664  omap_dmm->engines = kzalloc(
665  omap_dmm->num_engines * sizeof(struct refill_engine),
666  GFP_KERNEL);
667  if (!omap_dmm->engines) {
668  dev_err(&dev->dev, "could not allocate engines\n");
669  ret = -ENOMEM;
670  goto fail;
671  }
672 
673  sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
674  for (i = 0; i < omap_dmm->num_engines; i++) {
675  omap_dmm->engines[i].id = i;
676  omap_dmm->engines[i].dmm = omap_dmm;
677  omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
678  (REFILL_BUFFER_SIZE * i);
679  omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
680  (REFILL_BUFFER_SIZE * i);
681  init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
682 
683  list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
684  }
685 
686  omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
687  GFP_KERNEL);
688  if (!omap_dmm->tcm) {
689  dev_err(&dev->dev, "failed to allocate lut ptrs\n");
690  ret = -ENOMEM;
691  goto fail;
692  }
693 
694  /* init containers */
695  for (i = 0; i < omap_dmm->num_lut; i++) {
696  omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
697  omap_dmm->container_height,
698  NULL);
699 
700  if (!omap_dmm->tcm[i]) {
701  dev_err(&dev->dev, "failed to allocate container\n");
702  ret = -ENOMEM;
703  goto fail;
704  }
705 
706  omap_dmm->tcm[i]->lut_id = i;
707  }
708 
709  /* assign access mode containers to applicable tcm container */
710  /* OMAP 4 has 1 container for all 4 views */
711  containers[TILFMT_8BIT] = omap_dmm->tcm[0];
712  containers[TILFMT_16BIT] = omap_dmm->tcm[0];
713  containers[TILFMT_32BIT] = omap_dmm->tcm[0];
714  containers[TILFMT_PAGE] = omap_dmm->tcm[0];
715 
716  area = (struct tcm_area) {
717  .is2d = true,
718  .tcm = NULL,
719  .p1.x = omap_dmm->container_width - 1,
720  .p1.y = omap_dmm->container_height - 1,
721  };
722 
723  for (i = 0; i < lut_table_size; i++)
724  omap_dmm->lut[i] = omap_dmm->dummy_pa;
725 
726  /* initialize all LUTs to dummy page entries */
727  for (i = 0; i < omap_dmm->num_lut; i++) {
728  area.tcm = omap_dmm->tcm[i];
729  if (fill(&area, NULL, 0, 0, true))
730  dev_err(omap_dmm->dev, "refill failed");
731  }
732 
733  dev_info(omap_dmm->dev, "initialized all PAT entries\n");
734 
735  return 0;
736 
737 fail:
738  if (omap_dmm_remove(dev))
739  dev_err(&dev->dev, "cleanup failed\n");
740  return ret;
741 }
742 
743 /*
744  * debugfs support
745  */
746 
747 #ifdef CONFIG_DEBUG_FS
748 
749 static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
750  "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
751 static const char *special = ".,:;'\"`~!^-+";
752 
753 static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
754  char c, bool ovw)
755 {
756  int x, y;
757  for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
758  for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
759  if (map[y][x] == ' ' || ovw)
760  map[y][x] = c;
761 }
762 
763 static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
764  char c)
765 {
766  map[p->y / ydiv][p->x / xdiv] = c;
767 }
768 
769 static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
770 {
771  return map[p->y / ydiv][p->x / xdiv];
772 }
773 
774 static int map_width(int xdiv, int x0, int x1)
775 {
776  return (x1 / xdiv) - (x0 / xdiv) + 1;
777 }
778 
779 static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
780 {
781  char *p = map[yd] + (x0 / xdiv);
782  int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
783  if (w >= 0) {
784  p += w;
785  while (*nice)
786  *p++ = *nice++;
787  }
788 }
789 
790 static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
791  struct tcm_area *a)
792 {
793  sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
794  if (a->p0.y + 1 < a->p1.y) {
795  text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
796  256 - 1);
797  } else if (a->p0.y < a->p1.y) {
798  if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
799  text_map(map, xdiv, nice, a->p0.y / ydiv,
800  a->p0.x + xdiv, 256 - 1);
801  else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
802  text_map(map, xdiv, nice, a->p1.y / ydiv,
803  0, a->p1.y - xdiv);
804  } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
805  text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
806  }
807 }
808 
809 static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
810  struct tcm_area *a)
811 {
812  sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
813  if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
814  text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
815  a->p0.x, a->p1.x);
816 }
817 
818 int tiler_map_show(struct seq_file *s, void *arg)
819 {
820  int xdiv = 2, ydiv = 1;
821  char **map = NULL, *global_map;
822  struct tiler_block *block;
823  struct tcm_area a, p;
824  int i;
825  const char *m2d = alphabet;
826  const char *a2d = special;
827  const char *m2dp = m2d, *a2dp = a2d;
828  char nice[128];
829  int h_adj;
830  int w_adj;
831  unsigned long flags;
832 
833  if (!omap_dmm) {
834  /* early return if dmm/tiler device is not initialized */
835  return 0;
836  }
837 
838  h_adj = omap_dmm->lut_height / ydiv;
839  w_adj = omap_dmm->lut_width / xdiv;
840 
841  map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
842  global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
843 
844  if (!map || !global_map)
845  goto error;
846 
847  memset(global_map, ' ', (w_adj + 1) * h_adj);
848  for (i = 0; i < omap_dmm->lut_height; i++) {
849  map[i] = global_map + i * (w_adj + 1);
850  map[i][w_adj] = 0;
851  }
852  spin_lock_irqsave(&list_lock, flags);
853 
854  list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
855  if (block->fmt != TILFMT_PAGE) {
856  fill_map(map, xdiv, ydiv, &block->area, *m2dp, true);
857  if (!*++a2dp)
858  a2dp = a2d;
859  if (!*++m2dp)
860  m2dp = m2d;
861  map_2d_info(map, xdiv, ydiv, nice, &block->area);
862  } else {
863  bool start = read_map_pt(map, xdiv, ydiv,
864  &block->area.p0)
865  == ' ';
866  bool end = read_map_pt(map, xdiv, ydiv, &block->area.p1)
867  == ' ';
868  tcm_for_each_slice(a, block->area, p)
869  fill_map(map, xdiv, ydiv, &a, '=', true);
870  fill_map_pt(map, xdiv, ydiv, &block->area.p0,
871  start ? '<' : 'X');
872  fill_map_pt(map, xdiv, ydiv, &block->area.p1,
873  end ? '>' : 'X');
874  map_1d_info(map, xdiv, ydiv, nice, &block->area);
875  }
876  }
877 
878  spin_unlock_irqrestore(&list_lock, flags);
879 
880  if (s) {
881  seq_printf(s, "BEGIN DMM TILER MAP\n");
882  for (i = 0; i < 128; i++)
883  seq_printf(s, "%03d:%s\n", i, map[i]);
884  seq_printf(s, "END TILER MAP\n");
885  } else {
886  dev_dbg(omap_dmm->dev, "BEGIN DMM TILER MAP\n");
887  for (i = 0; i < 128; i++)
888  dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
889  dev_dbg(omap_dmm->dev, "END TILER MAP\n");
890  }
891 
892 error:
893  kfree(map);
894  kfree(global_map);
895 
896  return 0;
897 }
898 #endif
899 
900 struct platform_driver omap_dmm_driver = {
901  .probe = omap_dmm_probe,
902  .remove = omap_dmm_remove,
903  .driver = {
904  .owner = THIS_MODULE,
905  .name = DMM_DRIVER_NAME,
906  },
907 };
908 
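/*
 * Note (illustrative): this file only defines omap_dmm_driver; in the
 * omapdrm stack the core driver registers it, along the lines of the
 * following sketch:
 *
 *   ret = platform_driver_register(&omap_dmm_driver);
 *   ...
 *   platform_driver_unregister(&omap_dmm_driver);
 */
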
909 MODULE_LICENSE("GPL v2");
910 MODULE_AUTHOR("Andy Gross <[email protected]>");
911 MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
912 MODULE_ALIAS("platform:" DMM_DRIVER_NAME);