19 #include <linux/module.h>
21 #include <linux/errno.h>
22 #include <linux/sched.h>
23 #include <linux/wait.h>
26 #include <linux/slab.h>
30 #include <linux/time.h>
31 #include <linux/list.h>
/* Name under which this platform driver registers. */
37 #define DMM_DRIVER_NAME "dmm"
/* Driver-wide state pointer; NOTE(review): code below treats the DMM as a
 * singleton (single global instance) — confirm the platform never probes two. */
41 static struct dmm *omap_dmm;
/* Per-pixel-format geometry initializer: derives container slot width/height
 * from the format's x/y shifts and records bytes-per-pixel (.cpp).
 * NOTE(review): original lines 48-49 (presumably the shift fields) are not
 * visible in this excerpt — macro is shown incomplete here. */
47 #define GEOM(xshift, yshift, bytes_per_pixel) { \
50 .cpp = (bytes_per_pixel), \
51 .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
52 .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
/* NOTE(review): fragments of the refill-transaction path (status wait, IRQ
 * handler entry, dmm_txn_init, dmm_txn_append). Large gaps in the original
 * line numbering — do not treat as a complete listing. */
/* Completion test: all bits in wait_mask must be set in the status register. */
111 if ((r & wait_mask) == wait_mask)
/* IRQ handler: recover driver state from the opaque cookie. */
125 struct dmm *dmm =
arg;
145 static struct dmm_txn *dmm_txn_init(
struct dmm *dmm,
struct tcm *
tcm)
/* Area dimensions are inclusive on both ends, hence the +1. */
184 int columns = (1 + area->
x1 - area->
x0);
185 int rows = (1 + area->
y1 - area->
y0);
186 int i = columns*
rows;
/* PAT descriptor lives in DMA-coherent memory; pat_pa is its bus address. */
191 pat = alloc_dma(txn,
sizeof(
struct pat), &pat_pa);
199 .lut_id = engine->
tcm->lut_id,
/* 4 bytes per PAT entry; presumably 32-bit physical addresses — confirm. */
202 data = alloc_dma(txn, 4*i, &pat->
data_pa);
208 data[
i] = (pages && pages[
n]) ?
/* Mirror the freshly written rows into the shadow LUT copy. */
214 memcpy(lut, &data[i*columns], columns *
sizeof(
u32));
/* NOTE(review): fragments of dmm_txn_commit() and the fill() helper that
 * drives init/append/commit. Interior lines are missing from this excerpt. */
224 static int dmm_txn_commit(
struct dmm_txn *txn,
bool wait)
228 struct dmm *dmm = engine->
dmm;
/* Committing an empty transaction is a caller bug. */
231 dev_err(engine->
dmm->dev,
"need at least one txn\n");
/* Reached when the synchronous (wait=true) path times out. */
256 dev_err(dmm->
dev,
"timed out waiting for done\n");
280 txn = dmm_txn_init(omap_dmm, area->
tcm);
281 if (IS_ERR_OR_NULL(txn))
290 ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
297 ret = dmm_txn_commit(txn, wait);
314 ret =
fill(&block->
area, pages, npages, roll, wait);
/* NOTE(review): fragments of the 2D/1D reservation and release paths. */
/* Alignment is first computed in bytes (slot width x bytes-per-pixel)... */
344 min_align =
max(min_align, (geom[fmt].slot_w * geom[fmt].
cpp));
345 align =
ALIGN(align, min_align);
/* ...then converted to units of slots for the container allocator. */
346 align /= geom[
fmt].slot_w * geom[
fmt].cpp;
350 ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->
area);
/* 1D (page-mode) reservations always use the TILFMT_PAGE container. */
374 if (tcm_reserve_1d(containers[
TILFMT_PAGE], num_pages,
393 dev_err(omap_dmm->
dev,
"failed to release block\n");
/* NOTE(review): fragments of tiler_get_address(). Lines 441-451 etc. are
 * missing from this excerpt. */
432 alignment = geom[
fmt].x_shft + geom[
fmt].y_shft;
435 x_mask =
MASK(x_bits);
436 y_mask =
MASK(y_bits);
/* Bounds check; NOTE(review): x/y must be signed for the < 0 tests to be
 * meaningful, yet the DBG format below prints them with %u — verify the
 * declared types and format specifiers agree. */
438 if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
439 DBG(
"invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
440 x, x, x_mask, y, y, y_mask);
/* Linear offset: one branch is x-major, the other y-major (orientation
 * dependent — the selecting condition is not visible in this excerpt). */
452 tmp = ((x << y_bits) + y);
454 tmp = ((y << x_bits) + x);
456 return TIL_ADDR((tmp << alignment), orient, fmt);
/* NOTE(review): fragments of the ssptr/tsptr address helpers, a size helper,
 * and an availability predicate. Signatures are not visible here. */
/* Block origin in pixels = container point x slot dimensions for the format. */
464 block->
area.p0.x * geom[block->
fmt].slot_w,
465 block->
area.p0.y * geom[block->
fmt].slot_h);
/* Per-pixel tiler address: scale the block origin to pixels, then add the
 * caller's (x, y) offset within the block. */
474 return tiler_get_address(block->
fmt, orient,
475 (p->
x * geom[block->
fmt].slot_w) + x,
476 (p->
y * geom[block->
fmt].slot_h) + y);
490 if (orient & MASK_XY_FLIP)
/* Byte size of a w x h region for the format. */
499 return geom[
fmt].cpp * w *
h;
/* Available iff probe has populated the singleton. */
510 return omap_dmm ?
true :
false;
/* NOTE(review): fragments of omap_dmm_remove() teardown. The tcm pointer is
 * re-checked inside the loop, so teardown is safe to run after a partially
 * failed probe. */
528 for (i = 0; i < omap_dmm->
num_lut; i++)
529 if (omap_dmm->
tcm && omap_dmm->
tcm[i])
530 omap_dmm->
tcm[
i]->deinit(omap_dmm->
tcm[i]);
/* Only release the IRQ if one was successfully obtained. */
544 if (omap_dmm->
irq > 0)
/* NOTE(review): fragments of omap_dmm_probe(). Many interior lines (resource
 * lookup, register reads, error-path gotos) are missing from this excerpt;
 * each dev_err below presumably sits on a cleanup path — confirm. */
559 u32 hwinfo, pat_geom, lut_table_size;
562 omap_dmm = kzalloc(
sizeof(*omap_dmm),
GFP_KERNEL);
564 dev_err(&dev->
dev,
"failed to allocate driver data section\n");
575 dev_err(&dev->
dev,
"failed to get base address resource\n");
581 if (!omap_dmm->
base) {
582 dev_err(&dev->
dev,
"failed to get dmm base address\n");
587 if (omap_dmm->
irq < 0) {
588 dev_err(&dev->
dev,
"failed to get IRQ resource\n");
592 omap_dmm->
dev = &dev->
dev;
/* Number of LUTs: 5-bit field at bits [20:16] of the hardware-info register. */
596 omap_dmm->
num_lut = (hwinfo >> 16) & 0x1F;
/* PAT geometry: 4-bit fields scaled by 32 (<< 5) give LUT width/height. */
602 omap_dmm->
lut_width = ((pat_geom >> 16) & 0xF) << 5;
603 omap_dmm->
lut_height = ((pat_geom >> 24) & 0xF) << 5;
614 "omap_dmm_irq_handler", omap_dmm);
617 dev_err(&dev->
dev,
"couldn't register IRQ %d, error %d\n",
/* Shadow LUT is vmalloc'd: it can be large and needs no DMA coherence. */
634 omap_dmm->
lut =
vmalloc(lut_table_size *
sizeof(*omap_dmm->
lut));
635 if (!omap_dmm->
lut) {
636 dev_err(&dev->
dev,
"could not allocate lut table\n");
643 dev_err(&dev->
dev,
"could not allocate dummy page\n");
659 dev_err(&dev->
dev,
"could not allocate refill memory\n");
668 dev_err(&dev->
dev,
"could not allocate engines\n");
/* Each refill engine keeps a back-pointer to the driver state. */
676 omap_dmm->
engines[
i].dmm = omap_dmm;
686 omap_dmm->
tcm = kzalloc(omap_dmm->
num_lut *
sizeof(*omap_dmm->
tcm),
688 if (!omap_dmm->
tcm) {
689 dev_err(&dev->
dev,
"failed to allocate lut ptrs\n");
/* One container (tcm) is created per hardware LUT. */
695 for (i = 0; i < omap_dmm->
num_lut; i++) {
700 if (!omap_dmm->
tcm[i]) {
701 dev_err(&dev->
dev,
"failed to allocate container\n");
706 omap_dmm->
tcm[
i]->lut_id =
i;
/* Seed every PAT entry (presumably with the dummy page — confirm, the loop
 * body is not visible here). */
723 for (i = 0; i < lut_table_size; i++)
733 dev_info(omap_dmm->
dev,
"initialized all PAT entries\n");
/* Error path: reuse remove() for cleanup of whatever probe got done. */
738 if (omap_dmm_remove(dev))
747 #ifdef CONFIG_DEBUG_FS
/* Glyph pools for the debugfs ASCII map: letters/digits label 2D blocks,
 * punctuation labels 1D (page-mode) blocks. */
749 static const char *alphabet =
"abcdefghijklmnopqrstuvwxyz"
750 "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
751 static const char *special =
".,:;'\"`~!^-+";
/* Paint a tcm_area onto the downscaled ASCII map (xdiv/ydiv cells per slot).
 * Only blank cells are written unless ovw ("overwrite") is set.
 * NOTE(review): original lines 754-756 and the loop body (line 760) are not
 * visible in this excerpt — the trailing parameters are not confirmable here. */
753 static void fill_map(
char **
map,
int xdiv,
int ydiv,
struct tcm_area *
a,
757 for (y = a->
p0.y / ydiv; y <= a->
p1.y / ydiv; y++)
758 for (x = a->
p0.x / xdiv; x <= a->
p1.x / xdiv; x++)
759 if (map[y][x] ==
' ' || ovw)
/* Write character c into the single map cell covering tiler point *p.
 * NOTE(review): the parameter list is truncated in this excerpt (original
 * line 764 missing); the body uses `c`, presumably a trailing char parameter
 * — confirm against the full source. */
763 static void fill_map_pt(
char **map,
int xdiv,
int ydiv,
struct tcm_pt *
p,
766 map[p->
y / ydiv][p->
x / xdiv] =
c;
769 static char read_map_pt(
char **map,
int xdiv,
int ydiv,
struct tcm_pt *
p)
771 return map[p->
y / ydiv][p->
x / xdiv];
/* Number of character cells an inclusive slot range [x0, x1] occupies in
 * the downscaled ASCII map, where each cell covers xdiv slots. */
static int map_width(int xdiv, int x0, int x1)
{
	int left = x0 / xdiv;
	int right = x1 / xdiv;

	return right - left + 1;
}
/* Center the label `nice` within map row yd over cell range [x0, x1].
 * NOTE(review): original lines 783-789 (the copy loop and clipping) are not
 * visible in this excerpt. w is the left padding needed to center the label;
 * presumably negative w (label wider than range) is handled by the missing
 * lines — confirm. */
779 static void text_map(
char **map,
int xdiv,
char *nice,
int yd,
int x0,
int x1)
781 char *
p = map[yd] + (x0 / xdiv);
782 int w = (map_width(xdiv, x0, x1) -
strlen(nice)) / 2;
/* Place a label on a 1D (page-mode) area, which wraps across full map rows:
 * tall areas get the label centered on a middle row; two-row areas try the
 * first row's tail, then the last row's head; otherwise fall back to inline
 * placement when the label fits between the endpoints.
 * NOTE(review): the hard-coded `256 - 1` is presumably the fixed map width
 * in slots — confirm it matches the container geometry rather than being a
 * stale magic number. Lines 791-793/796/803 missing from this excerpt. */
790 static void map_1d_info(
char **map,
int xdiv,
int ydiv,
char *nice,
794 if (a->
p0.y + 1 < a->
p1.y) {
795 text_map(map, xdiv, nice, (a->
p0.y + a->
p1.y) / 2 / ydiv, 0,
797 }
else if (a->
p0.y < a->
p1.y) {
798 if (
strlen(nice) < map_width(xdiv, a->
p0.x, 256 - 1))
799 text_map(map, xdiv, nice, a->
p0.y / ydiv,
800 a->
p0.x + xdiv, 256 - 1);
801 else if (
strlen(nice) < map_width(xdiv, 0, a->
p1.x))
802 text_map(map, xdiv, nice, a->
p1.y / ydiv,
804 }
else if (
strlen(nice) + 1 < map_width(xdiv, a->
p0.x, a->
p1.x)) {
805 text_map(map, xdiv, nice, a->
p0.y / ydiv, a->
p0.x, a->
p1.x);
/* Place a label centered inside a rectangular 2D area, but only when the
 * label (plus one spare cell) fits the area's width; otherwise draw nothing.
 * NOTE(review): lines 810-812 and 815+ missing from this excerpt. */
809 static void map_2d_info(
char **map,
int xdiv,
int ydiv,
char *nice,
813 if (
strlen(nice) + 1 < map_width(xdiv, a->
p0.x, a->
p1.x))
814 text_map(map, xdiv, nice, (a->
p0.y + a->
p1.y) / 2 / ydiv,
/* NOTE(review): fragments of tiler_map_show(), the debugfs seq_file dump of
 * container occupancy as an ASCII map. Many interior lines are missing. */
818 int tiler_map_show(
struct seq_file *
s,
void *arg)
/* Each map character covers 2 slots horizontally, 1 vertically. */
820 int xdiv = 2, ydiv = 1;
821 char **map =
NULL, *global_map;
/* Cursors into the 2D/1D glyph pools; advanced per block (not shown here). */
825 const char *m2d = alphabet;
826 const char *a2d = special;
827 const char *m2dp = m2d, *a2dp = a2d;
/* map is an array of row pointers into one flat buffer; +1 per row leaves
 * room for a NUL so each row prints as a C string. */
841 map = kzalloc(h_adj *
sizeof(*map),
GFP_KERNEL);
842 global_map = kzalloc((w_adj + 1) * h_adj,
GFP_KERNEL);
844 if (!map || !global_map)
847 memset(global_map,
' ', (w_adj + 1) * h_adj);
849 map[
i] = global_map + i * (w_adj + 1);
/* 2D blocks: flood the area with the block's glyph, then label it. */
856 fill_map(map, xdiv, ydiv, &block->
area, *m2dp,
true);
861 map_2d_info(map, xdiv, ydiv, nice, &block->
area);
/* 1D blocks: probe whether the endpoints were already painted... */
863 bool start = read_map_pt(map, xdiv, ydiv,
866 bool end = read_map_pt(map, xdiv, ydiv, &block->
area.p1)
/* ...fill the span with '=', then restore/mark the endpoint cells. */
869 fill_map(map, xdiv, ydiv, &a, '=',
true);
870 fill_map_pt(map, xdiv, ydiv, &block->area.
p0,
872 fill_map_pt(map, xdiv, ydiv, &block->area.
p1,
874 map_1d_info(map, xdiv, ydiv, nice, &block->area);
878 spin_unlock_irqrestore(&
list_lock, flags);
/* NOTE(review): these loops hard-code 128 rows; confirm that matches h_adj
 * rather than silently truncating or overrunning the map. */
882 for (i = 0; i < 128; i++)
886 dev_dbg(omap_dmm->
dev,
"BEGIN DMM TILER MAP\n");
887 for (i = 0; i < 128; i++)
888 dev_dbg(omap_dmm->
dev,
"%03d:%s\n", i, map[i]);
/* Platform-driver callbacks (struct initializer continues outside this
 * excerpt). */
901 .probe = omap_dmm_probe,
902 .remove = omap_dmm_remove,