Linux Kernel 3.7.1
vermilion.c
1 /*
2  * Copyright (c) Intel Corp. 2007.
3  * All Rights Reserved.
4  *
5  * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
6  * develop this driver.
7  *
8  * This file is part of the Vermilion Range fb driver.
9  * The Vermilion Range fb driver is free software;
10  * you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * The Vermilion Range fb driver is distributed
16  * in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this driver; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24  *
25  * Authors:
26  * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
27  * Michel Dänzer <michel-at-tungstengraphics-dot-com>
28  * Alan Hourihane <alanh-at-tungstengraphics-dot-com>
29  */
30 
31 #include <linux/module.h>
32 #include <linux/kernel.h>
33 #include <linux/errno.h>
34 #include <linux/string.h>
35 #include <linux/delay.h>
36 #include <linux/slab.h>
37 #include <linux/mm.h>
38 #include <linux/fb.h>
39 #include <linux/pci.h>
40 #include <asm/cacheflush.h>
41 #include <asm/tlbflush.h>
42 #include <linux/mmzone.h>
43 
44 /* #define VERMILION_DEBUG */
45 
46 #include "vermilion.h"
47 
48 #define MODULE_NAME "vmlfb"
49 
50 #define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
51 
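VML_TOHW rescales a 16-bit fbdev color component into the topmost `_width` bits of a hardware field, rounding rather than truncating; vmlfb_setcolreg() below feeds the red, green, blue and transp components through it. A small stand-alone sketch (user-space C, not driver code) of what the macro evaluates to:

	#include <stdio.h>

	#define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

	int main(void)
	{
		printf("%#x\n", VML_TOHW(0xFFFF, 5));	/* full-scale 16-bit value -> 0x1f, the 5-bit maximum */
		printf("%#x\n", VML_TOHW(0x8000, 5));	/* mid-scale -> 0xf */
		printf("%#x\n", VML_TOHW(0xFFFF, 8));	/* full-scale -> 0xff for an 8-bit field */
		return 0;
	}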
52 static struct mutex vml_mutex;
53 static struct list_head global_no_mode;
54 static struct list_head global_has_mode;
55 static struct fb_ops vmlfb_ops;
56 static struct vml_sys *subsys = NULL;
57 static char *vml_default_mode = "1024x768@60";
58 static struct fb_videomode defaultmode = {
59  NULL, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6,
60  0, FB_VMODE_NONINTERLACED
61 };
62 
63 static u32 vml_mem_requested = (10 * 1024 * 1024);
64 static u32 vml_mem_contig = (4 * 1024 * 1024);
65 static u32 vml_mem_min = (4 * 1024 * 1024);
66 
67 static u32 vml_clocks[] = {
68  6750,
69  13500,
70  27000,
71  29700,
72  37125,
73  54000,
74  59400,
75  74250,
76  120000,
77  148500
78 };
79 
80 static u32 vml_num_clocks = ARRAY_SIZE(vml_clocks);
81 
82 /*
83  * Allocate a contiguous vram area and make its linear kernel map
84  * uncached.
85  */
86 
87 static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
88  unsigned min_order)
89 {
90  gfp_t flags;
91  unsigned long i;
92 
93  max_order++;
94  do {
95  /*
96  * Really try hard to get the needed memory.
97  * We need memory below the first 32MB, so we
98  * add the __GFP_DMA flag that guarantees that we are
99  * below the first 16MB.
100  */
101 
102  flags = __GFP_DMA | __GFP_HIGH;
103  va->logical =
104  __get_free_pages(flags, --max_order);
105  } while (va->logical == 0 && max_order > min_order);
106 
107  if (!va->logical)
108  return -ENOMEM;
109 
110  va->phys = virt_to_phys((void *)va->logical);
111  va->size = PAGE_SIZE << max_order;
112  va->order = max_order;
113 
114  /*
115  * It seems like __get_free_pages only ups the usage count
116  * of the first page. This doesn't work with fault mapping, so
117  * up the usage count once more (XXX: should use split_page or
118  * compound page).
119  */
120 
121  memset((void *)va->logical, 0x00, va->size);
122  for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
123  get_page(virt_to_page(i));
124  }
125 
126  /*
127  * Change caching policy of the linear kernel map to avoid
128  * mapping type conflicts with user-space mappings.
129  */
130  set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);
131 
132  printk(KERN_DEBUG MODULE_NAME
133  ": Allocated %ld bytes vram area at 0x%08lx\n",
134  va->size, va->phys);
135 
136  return 0;
137 }
138 
139 /*
140  * Free a contiguous vram area and reset its linear kernel map
141  * mapping type.
142  */
143 
144 static void vmlfb_free_vram_area(struct vram_area *va)
145 {
146  unsigned long j;
147 
148  if (va->logical) {
149 
150  /*
151  * Reset the linear kernel map caching policy.
152  */
153 
154  set_pages_wb(virt_to_page(va->logical),
155  va->size >> PAGE_SHIFT);
156 
157  /*
158  * Decrease the usage count on the pages we've used
159  * to compensate for upping when allocating.
160  */
161 
162  for (j = va->logical; j < va->logical + va->size;
163  j += PAGE_SIZE) {
164  (void)put_page_testzero(virt_to_page(j));
165  }
166 
167  printk(KERN_DEBUG MODULE_NAME
168  ": Freeing %ld bytes vram area at 0x%08lx\n",
169  va->size, va->phys);
170  free_pages(va->logical, va->order);
171 
172  va->logical = 0;
173  }
174 }
175 
176 /*
177  * Free allocated vram.
178  */
179 
180 static void vmlfb_free_vram(struct vml_info *vinfo)
181 {
182  int i;
183 
184  for (i = 0; i < vinfo->num_areas; ++i) {
185  vmlfb_free_vram_area(&vinfo->vram[i]);
186  }
187  vinfo->num_areas = 0;
188 }
189 
190 /*
191  * Allocate vram. Currently we try to allocate contiguous areas from the
192  * __GFP_DMA zone and puzzle them together. A better approach would be to
193  * allocate one contiguous area for scanout and use one-page allocations for
194  * offscreen areas. This requires user-space and GPU virtual mappings.
195  */
196 
197 static int vmlfb_alloc_vram(struct vml_info *vinfo,
198  size_t requested,
199  size_t min_total, size_t min_contig)
200 {
201  int i, j;
202  int order;
203  int contiguous;
204  int err;
205  struct vram_area *va;
206  struct vram_area *va2;
207 
208  vinfo->num_areas = 0;
209  for (i = 0; i < VML_VRAM_AREAS; ++i) {
210  va = &vinfo->vram[i];
211  order = 0;
212 
213  while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
214  order++;
215 
216  err = vmlfb_alloc_vram_area(va, order, 0);
217 
218  if (err)
219  break;
220 
221  if (i == 0) {
222  vinfo->vram_start = va->phys;
223  vinfo->vram_logical = (void __iomem *) va->logical;
224  vinfo->vram_contig_size = va->size;
225  vinfo->num_areas = 1;
226  } else {
227  contiguous = 0;
228 
229  for (j = 0; j < i; ++j) {
230  va2 = &vinfo->vram[j];
231  if (va->phys + va->size == va2->phys ||
232  va2->phys + va2->size == va->phys) {
233  contiguous = 1;
234  break;
235  }
236  }
237 
238  if (contiguous) {
239  vinfo->num_areas++;
240  if (va->phys < vinfo->vram_start) {
241  vinfo->vram_start = va->phys;
242  vinfo->vram_logical =
243  (void __iomem *)va->logical;
244  }
245  vinfo->vram_contig_size += va->size;
246  } else {
247  vmlfb_free_vram_area(va);
248  break;
249  }
250  }
251 
252  if (requested < va->size)
253  break;
254  else
255  requested -= va->size;
256  }
257 
258  if (vinfo->vram_contig_size > min_total &&
259  vinfo->vram_contig_size > min_contig) {
260 
261  printk(KERN_DEBUG MODULE_NAME
262  ": Contiguous vram: %ld bytes at physical 0x%08lx.\n",
263  (unsigned long)vinfo->vram_contig_size,
264  (unsigned long)vinfo->vram_start);
265 
266  return 0;
267  }
268 
269  printk(KERN_ERR MODULE_NAME
270  ": Could not allocate requested minimal amount of vram.\n");
271 
272  vmlfb_free_vram(vinfo);
273 
274  return -ENOMEM;
275 }
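The loop above walks `order` up until one buddy block covers what is still requested, and it only keeps an additional area when it is physically adjacent to one already held. A stand-alone sketch of those two calculations (user-space C, assuming the x86 defaults PAGE_SIZE 4096 and MAX_ORDER 11; not driver code):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed x86 default */
	#define MAX_ORDER 11		/* assumed buddy-allocator limit */

	int main(void)
	{
		unsigned long requested = 10 * 1024 * 1024;	/* mirrors vml_mem_requested */
		unsigned long a_phys, a_size, b_phys;
		int order = 0;

		/* Smallest order whose block covers the remaining request, capped. */
		while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
			order++;
		printf("order %d -> %lu bytes per area\n", order, PAGE_SIZE << order);

		/* Two areas are only merged when they touch physically. */
		a_phys = 0x01000000UL;
		a_size = PAGE_SIZE << order;
		b_phys = a_phys + a_size;
		printf("adjacent: %d\n", a_phys + a_size == b_phys);
		return 0;
	}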
276 
277 /*
278  * Find the GPU to use with our display controller.
279  */
280 
281 static int vmlfb_get_gpu(struct vml_par *par)
282 {
283  mutex_lock(&vml_mutex);
284 
285  par->gpu = pci_get_device(PCI_VENDOR_ID_INTEL, VML_DEVICE_GPU, NULL);
286 
287  if (!par->gpu) {
288  mutex_unlock(&vml_mutex);
289  return -ENODEV;
290  }
291 
292  mutex_unlock(&vml_mutex);
293 
294  if (pci_enable_device(par->gpu) < 0)
295  return -ENODEV;
296 
297  return 0;
298 }
299 
300 /*
301  * Find a contiguous vram area that contains a given offset from vram start.
302  */
303 static int vmlfb_vram_offset(struct vml_info *vinfo, unsigned long offset)
304 {
305  unsigned long aoffset;
306  unsigned i;
307 
308  for (i = 0; i < vinfo->num_areas; ++i) {
309  aoffset = offset - (vinfo->vram[i].phys - vinfo->vram_start);
310 
311  if (aoffset < vinfo->vram[i].size) {
312  return 0;
313  }
314  }
315 
316  return -EINVAL;
317 }
318 
319 /*
320  * Remap the MMIO register spaces of the VDC and the GPU.
321  */
322 
323 static int vmlfb_enable_mmio(struct vml_par *par)
324 {
325  int err;
326 
327  par->vdc_mem_base = pci_resource_start(par->vdc, 0);
328  par->vdc_mem_size = pci_resource_len(par->vdc, 0);
329  if (!request_mem_region(par->vdc_mem_base, par->vdc_mem_size, "vmlfb")) {
330  printk(KERN_ERR MODULE_NAME
331  ": Could not claim display controller MMIO.\n");
332  return -EBUSY;
333  }
334  par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size);
335  if (par->vdc_mem == NULL) {
336  printk(KERN_ERR MODULE_NAME
337  ": Could not map display controller MMIO.\n");
338  err = -ENOMEM;
339  goto out_err_0;
340  }
341 
342  par->gpu_mem_base = pci_resource_start(par->gpu, 0);
343  par->gpu_mem_size = pci_resource_len(par->gpu, 0);
344  if (!request_mem_region(par->gpu_mem_base, par->gpu_mem_size, "vmlfb")) {
345  printk(KERN_ERR MODULE_NAME ": Could not claim GPU MMIO.\n");
346  err = -EBUSY;
347  goto out_err_1;
348  }
349  par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size);
350  if (par->gpu_mem == NULL) {
351  printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
352  err = -ENOMEM;
353  goto out_err_2;
354  }
355 
356  return 0;
357 
358 out_err_2:
359  release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
360 out_err_1:
361  iounmap(par->vdc_mem);
362 out_err_0:
363  release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
364  return err;
365 }
366 
367 /*
368  * Unmap the VDC and GPU register spaces.
369  */
370 
371 static void vmlfb_disable_mmio(struct vml_par *par)
372 {
373  iounmap(par->gpu_mem);
374  release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
375  iounmap(par->vdc_mem);
376  release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
377 }
378 
379 /*
380  * Release and uninit the VDC and GPU.
381  */
382 
383 static void vmlfb_release_devices(struct vml_par *par)
384 {
385  if (atomic_dec_and_test(&par->refcount)) {
386  pci_set_drvdata(par->vdc, NULL);
387  pci_disable_device(par->gpu);
388  pci_disable_device(par->vdc);
389  }
390 }
391 
392 /*
393  * Free up allocated resources for a device.
394  */
395 
396 static void __devexit vml_pci_remove(struct pci_dev *dev)
397 {
398  struct fb_info *info;
399  struct vml_info *vinfo;
400  struct vml_par *par;
401 
402  info = pci_get_drvdata(dev);
403  if (info) {
404  vinfo = container_of(info, struct vml_info, info);
405  par = vinfo->par;
406  mutex_lock(&vml_mutex);
407  unregister_framebuffer(info);
408  fb_dealloc_cmap(&info->cmap);
409  vmlfb_free_vram(vinfo);
410  vmlfb_disable_mmio(par);
411  vmlfb_release_devices(par);
412  kfree(vinfo);
413  kfree(par);
414  mutex_unlock(&vml_mutex);
415  }
416 }
417 
418 static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo *var)
419 {
420  switch (var->bits_per_pixel) {
421  case 16:
422  var->blue.offset = 0;
423  var->blue.length = 5;
424  var->green.offset = 5;
425  var->green.length = 5;
426  var->red.offset = 10;
427  var->red.length = 5;
428  var->transp.offset = 15;
429  var->transp.length = 1;
430  break;
431  case 32:
432  var->blue.offset = 0;
433  var->blue.length = 8;
434  var->green.offset = 8;
435  var->green.length = 8;
436  var->red.offset = 16;
437  var->red.length = 8;
438  var->transp.offset = 24;
439  var->transp.length = 0;
440  break;
441  default:
442  break;
443  }
444 
445  var->blue.msb_right = var->green.msb_right =
446  var->red.msb_right = var->transp.msb_right = 0;
447 }
448 
449 /*
450  * Device initialization.
451  * We initialize one vml_par struct per device and one vml_info
452  * struct per pipe. Currently we have only one pipe.
453  */
454 
455 static int __devinit vml_pci_probe(struct pci_dev *dev,
456  const struct pci_device_id *id)
457 {
458  struct vml_info *vinfo;
459  struct fb_info *info;
460  struct vml_par *par;
461  int err = 0;
462 
463  par = kzalloc(sizeof(*par), GFP_KERNEL);
464  if (par == NULL)
465  return -ENOMEM;
466 
467  vinfo = kzalloc(sizeof(*vinfo), GFP_KERNEL);
468  if (vinfo == NULL) {
469  err = -ENOMEM;
470  goto out_err_0;
471  }
472 
473  vinfo->par = par;
474  par->vdc = dev;
475  atomic_set(&par->refcount, 1);
476 
477  switch (id->device) {
478  case VML_DEVICE_VDC:
479  if ((err = vmlfb_get_gpu(par)))
480  goto out_err_1;
481  pci_set_drvdata(dev, &vinfo->info);
482  break;
483  default:
484  err = -ENODEV;
485  goto out_err_1;
486  break;
487  }
488 
489  info = &vinfo->info;
490  info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK;
491 
492  err = vmlfb_enable_mmio(par);
493  if (err)
494  goto out_err_2;
495 
496  err = vmlfb_alloc_vram(vinfo, vml_mem_requested,
497  vml_mem_contig, vml_mem_min);
498  if (err)
499  goto out_err_3;
500 
501  strcpy(info->fix.id, "Vermilion Range");
502  info->fix.mmio_start = 0;
503  info->fix.mmio_len = 0;
504  info->fix.smem_start = vinfo->vram_start;
505  info->fix.smem_len = vinfo->vram_contig_size;
506  info->fix.type = FB_TYPE_PACKED_PIXELS;
507  info->fix.visual = FB_VISUAL_TRUECOLOR;
508  info->fix.ypanstep = 1;
509  info->fix.xpanstep = 1;
510  info->fix.ywrapstep = 0;
511  info->fix.accel = FB_ACCEL_NONE;
512  info->screen_base = vinfo->vram_logical;
513  info->pseudo_palette = vinfo->pseudo_palette;
514  info->par = par;
515  info->fbops = &vmlfb_ops;
516  info->device = &dev->dev;
517 
518  INIT_LIST_HEAD(&vinfo->head);
519  vinfo->pipe_disabled = 1;
520  vinfo->cur_blank_mode = FB_BLANK_UNBLANK;
521 
522  info->var.grayscale = 0;
523  info->var.bits_per_pixel = 16;
524  vmlfb_set_pref_pixel_format(&info->var);
525 
526  if (!fb_find_mode
527  (&info->var, info, vml_default_mode, NULL, 0, &defaultmode, 16)) {
528  printk(KERN_ERR MODULE_NAME ": Could not find initial mode\n");
529  }
530 
531  if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
532  err = -ENOMEM;
533  goto out_err_4;
534  }
535 
536  err = register_framebuffer(info);
537  if (err) {
538  printk(KERN_ERR MODULE_NAME ": Register framebuffer error.\n");
539  goto out_err_5;
540  }
541 
542  printk("Initialized vmlfb\n");
543 
544  return 0;
545 
546 out_err_5:
547  fb_dealloc_cmap(&info->cmap);
548 out_err_4:
549  vmlfb_free_vram(vinfo);
550 out_err_3:
551  vmlfb_disable_mmio(par);
552 out_err_2:
553  vmlfb_release_devices(par);
554 out_err_1:
555  kfree(vinfo);
556 out_err_0:
557  kfree(par);
558  return err;
559 }
560 
561 static int vmlfb_open(struct fb_info *info, int user)
562 {
563  /*
564  * Save registers here?
565  */
566  return 0;
567 }
568 
569 static int vmlfb_release(struct fb_info *info, int user)
570 {
571  /*
572  * Restore registers here.
573  */
574 
575  return 0;
576 }
577 
578 static int vml_nearest_clock(int clock)
579 {
580 
581  int i;
582  int cur_index;
583  int cur_diff;
584  int diff;
585 
586  cur_index = 0;
587  cur_diff = clock - vml_clocks[0];
588  cur_diff = (cur_diff < 0) ? -cur_diff : cur_diff;
589  for (i = 1; i < vml_num_clocks; ++i) {
590  diff = clock - vml_clocks[i];
591  diff = (diff < 0) ? -diff : diff;
592  if (diff < cur_diff) {
593  cur_index = i;
594  cur_diff = diff;
595  }
596  }
597  return vml_clocks[cur_index];
598 }
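vml_nearest_clock() simply scans vml_clocks[] for the smallest absolute difference, and vmlfb_check_var_locked() below then rejects the mode when that match is more than 20% off the requested dot clock. A stand-alone sketch of both steps for a made-up 65000 kHz request (user-space C, not driver code):

	#include <stdio.h>
	#include <stdlib.h>

	static const int clocks[] = {
		6750, 13500, 27000, 29700, 37125, 54000, 59400, 74250, 120000, 148500
	};

	int main(void)
	{
		int requested = 65000;	/* kHz, a hypothetical request */
		int best = clocks[0];
		unsigned int i;

		for (i = 1; i < sizeof(clocks) / sizeof(clocks[0]); ++i)
			if (abs(requested - clocks[i]) < abs(requested - best))
				best = clocks[i];

		/* Nearest table entry: 59400 kHz. */
		printf("nearest: %d kHz\n", best);
		/* Accepted because |59400 - 65000| = 5600 <= 65000 / 5 = 13000. */
		printf("accepted: %d\n", abs(best - requested) <= requested / 5);
		return 0;
	}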
599 
600 static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
601  struct vml_info *vinfo)
602 {
603  u32 pitch;
604  u64 mem;
605  int nearest_clock;
606  int clock;
607  int clock_diff;
608  struct fb_var_screeninfo v;
609 
610  v = *var;
611  clock = PICOS2KHZ(var->pixclock);
612 
613  if (subsys && subsys->nearest_clock) {
614  nearest_clock = subsys->nearest_clock(subsys, clock);
615  } else {
616  nearest_clock = vml_nearest_clock(clock);
617  }
618 
619  /*
620  * Accept a 20% diff.
621  */
622 
623  clock_diff = nearest_clock - clock;
624  clock_diff = (clock_diff < 0) ? -clock_diff : clock_diff;
625  if (clock_diff > clock / 5) {
626 #if 0
627  printk(KERN_DEBUG MODULE_NAME ": Diff failure. %d %d\n",clock_diff,clock);
628 #endif
629  return -EINVAL;
630  }
631 
632  v.pixclock = KHZ2PICOS(nearest_clock);
633 
634  if (var->xres > VML_MAX_XRES || var->yres > VML_MAX_YRES) {
635  printk(KERN_DEBUG MODULE_NAME ": Resolution failure.\n");
636  return -EINVAL;
637  }
638  if (var->xres_virtual > VML_MAX_XRES_VIRTUAL) {
639  printk(KERN_DEBUG MODULE_NAME
640  ": Virtual resolution failure.\n");
641  return -EINVAL;
642  }
643  switch (v.bits_per_pixel) {
644  case 0 ... 16:
645  v.bits_per_pixel = 16;
646  break;
647  case 17 ... 32:
648  v.bits_per_pixel = 32;
649  break;
650  default:
651  printk(KERN_DEBUG MODULE_NAME ": Invalid bpp: %d.\n",
652  var->bits_per_pixel);
653  return -EINVAL;
654  }
655 
656  pitch = ALIGN((var->xres * var->bits_per_pixel) >> 3, 0x40);
657  mem = pitch * var->yres_virtual;
658  if (mem > vinfo->vram_contig_size) {
659  return -ENOMEM;
660  }
661 
662  switch (v.bits_per_pixel) {
663  case 16:
664  if (var->blue.offset != 0 ||
665  var->blue.length != 5 ||
666  var->green.offset != 5 ||
667  var->green.length != 5 ||
668  var->red.offset != 10 ||
669  var->red.length != 5 ||
670  var->transp.offset != 15 || var->transp.length != 1) {
671  vmlfb_set_pref_pixel_format(&v);
672  }
673  break;
674  case 32:
675  if (var->blue.offset != 0 ||
676  var->blue.length != 8 ||
677  var->green.offset != 8 ||
678  var->green.length != 8 ||
679  var->red.offset != 16 ||
680  var->red.length != 8 ||
681  (var->transp.length != 0 && var->transp.length != 8) ||
682  (var->transp.length == 8 && var->transp.offset != 24)) {
683  vmlfb_set_pref_pixel_format(&v);
684  }
685  break;
686  default:
687  return -EINVAL;
688  }
689 
690  *var = v;
691 
692  return 0;
693 }
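The pitch/size check above aligns each scanline to 64 bytes and then requires the whole virtual screen to fit in the contiguous vram reserved at probe time. Worked out for the default 1024x768-16 mode (stand-alone sketch, not driver code):

	#include <stdio.h>

	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long xres = 1024, yres_virtual = 768, bpp = 16;
		unsigned long vram_contig = 4 * 1024 * 1024;	/* mirrors vml_mem_contig */
		unsigned long pitch = ALIGN_UP((xres * bpp) >> 3, 0x40UL);
		unsigned long mem = pitch * yres_virtual;

		/* pitch 2048 bytes, framebuffer 1572864 bytes: fits in 4 MB. */
		printf("pitch %lu, framebuffer %lu, fits: %d\n",
		       pitch, mem, mem <= vram_contig);
		return 0;
	}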
694 
695 static int vmlfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
696 {
697  struct vml_info *vinfo = container_of(info, struct vml_info, info);
698  int ret;
699 
700  mutex_lock(&vml_mutex);
701  ret = vmlfb_check_var_locked(var, vinfo);
702  mutex_unlock(&vml_mutex);
703 
704  return ret;
705 }
706 
707 static void vml_wait_vblank(struct vml_info *vinfo)
708 {
709  /* Wait for vblank. For now, just wait for a 50Hz cycle (20ms) */
710  mdelay(20);
711 }
712 
713 static void vmlfb_disable_pipe(struct vml_info *vinfo)
714 {
715  struct vml_par *par = vinfo->par;
716 
717  /* Disable the MDVO pad */
718  VML_WRITE32(par, VML_RCOMPSTAT, 0);
719  while (!(VML_READ32(par, VML_RCOMPSTAT) & VML_MDVO_VDC_I_RCOMP)) ;
720 
721  /* Disable display planes */
722  VML_WRITE32(par, VML_DSPCCNTR,
723  VML_READ32(par, VML_DSPCCNTR) & ~VML_GFX_ENABLE);
724  (void)VML_READ32(par, VML_DSPCCNTR);
725  /* Wait for vblank for the disable to take effect */
726  vml_wait_vblank(vinfo);
727 
728  /* Next, disable display pipes */
729  VML_WRITE32(par, VML_PIPEACONF, 0);
730  (void)VML_READ32(par, VML_PIPEACONF);
731 
732  vinfo->pipe_disabled = 1;
733 }
734 
735 #ifdef VERMILION_DEBUG
736 static void vml_dump_regs(struct vml_info *vinfo)
737 {
738  struct vml_par *par = vinfo->par;
739 
740  printk(KERN_DEBUG MODULE_NAME ": Modesetting register dump:\n");
741  printk(KERN_DEBUG MODULE_NAME ": \tHTOTAL_A : 0x%08x\n",
742  (unsigned)VML_READ32(par, VML_HTOTAL_A));
743  printk(KERN_DEBUG MODULE_NAME ": \tHBLANK_A : 0x%08x\n",
744  (unsigned)VML_READ32(par, VML_HBLANK_A));
745  printk(KERN_DEBUG MODULE_NAME ": \tHSYNC_A : 0x%08x\n",
746  (unsigned)VML_READ32(par, VML_HSYNC_A));
747  printk(KERN_DEBUG MODULE_NAME ": \tVTOTAL_A : 0x%08x\n",
748  (unsigned)VML_READ32(par, VML_VTOTAL_A));
749  printk(KERN_DEBUG MODULE_NAME ": \tVBLANK_A : 0x%08x\n",
750  (unsigned)VML_READ32(par, VML_VBLANK_A));
751  printk(KERN_DEBUG MODULE_NAME ": \tVSYNC_A : 0x%08x\n",
752  (unsigned)VML_READ32(par, VML_VSYNC_A));
753  printk(KERN_DEBUG MODULE_NAME ": \tDSPCSTRIDE : 0x%08x\n",
754  (unsigned)VML_READ32(par, VML_DSPCSTRIDE));
755  printk(KERN_DEBUG MODULE_NAME ": \tDSPCSIZE : 0x%08x\n",
756  (unsigned)VML_READ32(par, VML_DSPCSIZE));
757  printk(KERN_DEBUG MODULE_NAME ": \tDSPCPOS : 0x%08x\n",
758  (unsigned)VML_READ32(par, VML_DSPCPOS));
759  printk(KERN_DEBUG MODULE_NAME ": \tDSPARB : 0x%08x\n",
760  (unsigned)VML_READ32(par, VML_DSPARB));
761  printk(KERN_DEBUG MODULE_NAME ": \tDSPCADDR : 0x%08x\n",
762  (unsigned)VML_READ32(par, VML_DSPCADDR));
763  printk(KERN_DEBUG MODULE_NAME ": \tBCLRPAT_A : 0x%08x\n",
764  (unsigned)VML_READ32(par, VML_BCLRPAT_A));
765  printk(KERN_DEBUG MODULE_NAME ": \tCANVSCLR_A : 0x%08x\n",
766  (unsigned)VML_READ32(par, VML_CANVSCLR_A));
767  printk(KERN_DEBUG MODULE_NAME ": \tPIPEASRC : 0x%08x\n",
768  (unsigned)VML_READ32(par, VML_PIPEASRC));
769  printk(KERN_DEBUG MODULE_NAME ": \tPIPEACONF : 0x%08x\n",
770  (unsigned)VML_READ32(par, VML_PIPEACONF));
771  printk(KERN_DEBUG MODULE_NAME ": \tDSPCCNTR : 0x%08x\n",
772  (unsigned)VML_READ32(par, VML_DSPCCNTR));
773  printk(KERN_DEBUG MODULE_NAME ": \tRCOMPSTAT : 0x%08x\n",
774  (unsigned)VML_READ32(par, VML_RCOMPSTAT));
775  printk(KERN_DEBUG MODULE_NAME ": End of modesetting register dump.\n");
776 }
777 #endif
778 
779 static int vmlfb_set_par_locked(struct vml_info *vinfo)
780 {
781  struct vml_par *par = vinfo->par;
782  struct fb_info *info = &vinfo->info;
783  struct fb_var_screeninfo *var = &info->var;
784  u32 htotal, hactive, hblank_start, hblank_end, hsync_start, hsync_end;
785  u32 vtotal, vactive, vblank_start, vblank_end, vsync_start, vsync_end;
786  u32 dspcntr;
787  int clock;
788 
789  vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
790  vinfo->stride = ALIGN(var->xres_virtual * vinfo->bytes_per_pixel, 0x40);
791  info->fix.line_length = vinfo->stride;
792 
793  if (!subsys)
794  return 0;
795 
796  htotal =
797  var->xres + var->right_margin + var->hsync_len + var->left_margin;
798  hactive = var->xres;
799  hblank_start = var->xres;
800  hblank_end = htotal;
801  hsync_start = hactive + var->right_margin;
802  hsync_end = hsync_start + var->hsync_len;
803 
804  vtotal =
805  var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
806  vactive = var->yres;
807  vblank_start = var->yres;
808  vblank_end = vtotal;
809  vsync_start = vactive + var->lower_margin;
810  vsync_end = vsync_start + var->vsync_len;
811 
812  dspcntr = VML_GFX_ENABLE | VML_GFX_GAMMABYPASS;
813  clock = PICOS2KHZ(var->pixclock);
814 
815  if (subsys->nearest_clock) {
816  clock = subsys->nearest_clock(subsys, clock);
817  } else {
818  clock = vml_nearest_clock(clock);
819  }
820  printk(KERN_DEBUG MODULE_NAME
821  ": Set mode Hfreq : %d kHz, Vfreq : %d Hz.\n", clock / htotal,
822  ((clock / htotal) * 1000) / vtotal);
823 
824  switch (var->bits_per_pixel) {
825  case 16:
826  dspcntr |= VML_GFX_ARGB1555;
827  break;
828  case 32:
829  if (var->transp.length == 8)
830  dspcntr |= VML_GFX_ARGB8888 | VML_GFX_ALPHAMULT;
831  else
832  dspcntr |= VML_GFX_RGB0888;
833  break;
834  default:
835  return -EINVAL;
836  }
837 
838  vmlfb_disable_pipe(vinfo);
839  mb();
840 
841  if (subsys->set_clock)
842  subsys->set_clock(subsys, clock);
843  else
844  return -EINVAL;
845 
846  VML_WRITE32(par, VML_HTOTAL_A, ((htotal - 1) << 16) | (hactive - 1));
847  VML_WRITE32(par, VML_HBLANK_A,
848  ((hblank_end - 1) << 16) | (hblank_start - 1));
849  VML_WRITE32(par, VML_HSYNC_A,
850  ((hsync_end - 1) << 16) | (hsync_start - 1));
851  VML_WRITE32(par, VML_VTOTAL_A, ((vtotal - 1) << 16) | (vactive - 1));
852  VML_WRITE32(par, VML_VBLANK_A,
853  ((vblank_end - 1) << 16) | (vblank_start - 1));
854  VML_WRITE32(par, VML_VSYNC_A,
855  ((vsync_end - 1) << 16) | (vsync_start - 1));
856  VML_WRITE32(par, VML_DSPCSTRIDE, vinfo->stride);
857  VML_WRITE32(par, VML_DSPCSIZE,
858  ((var->yres - 1) << 16) | (var->xres - 1));
859  VML_WRITE32(par, VML_DSPCPOS, 0x00000000);
860  VML_WRITE32(par, VML_DSPARB, VML_FIFO_DEFAULT);
861  VML_WRITE32(par, VML_BCLRPAT_A, 0x00000000);
862  VML_WRITE32(par, VML_CANVSCLR_A, 0x00000000);
863  VML_WRITE32(par, VML_PIPEASRC,
864  ((var->xres - 1) << 16) | (var->yres - 1));
865 
866  wmb();
867  VML_WRITE32(par, VML_PIPEACONF, VML_PIPE_ENABLE);
868  wmb();
869  VML_WRITE32(par, VML_DSPCCNTR, dspcntr);
870  wmb();
871  VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
872  var->yoffset * vinfo->stride +
873  var->xoffset * vinfo->bytes_per_pixel);
874 
875  VML_WRITE32(par, VML_RCOMPSTAT, VML_MDVO_PAD_ENABLE);
876 
877  while (!(VML_READ32(par, VML_RCOMPSTAT) &
878  (VML_MDVO_VDC_I_RCOMP | VML_MDVO_PAD_ENABLE))) ;
879 
880  vinfo->pipe_disabled = 0;
881 #ifdef VERMILION_DEBUG
882  vml_dump_regs(vinfo);
883 #endif
884 
885  return 0;
886 }
887 
888 static int vmlfb_set_par(struct fb_info *info)
889 {
890  struct vml_info *vinfo = container_of(info, struct vml_info, info);
891  int ret;
892 
893  mutex_lock(&vml_mutex);
894  list_move(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
895  ret = vmlfb_set_par_locked(vinfo);
896 
897  mutex_unlock(&vml_mutex);
898  return ret;
899 }
900 
901 static int vmlfb_blank_locked(struct vml_info *vinfo)
902 {
903  struct vml_par *par = vinfo->par;
904  u32 cur = VML_READ32(par, VML_PIPEACONF);
905 
906  switch (vinfo->cur_blank_mode) {
907  case FB_BLANK_UNBLANK:
908  if (vinfo->pipe_disabled) {
909  vmlfb_set_par_locked(vinfo);
910  }
911  VML_WRITE32(par, VML_PIPEACONF, cur & ~VML_PIPE_FORCE_BORDER);
912  (void)VML_READ32(par, VML_PIPEACONF);
913  break;
914  case FB_BLANK_NORMAL:
915  if (vinfo->pipe_disabled) {
916  vmlfb_set_par_locked(vinfo);
917  }
918  VML_WRITE32(par, VML_PIPEACONF, cur | VML_PIPE_FORCE_BORDER);
919  (void)VML_READ32(par, VML_PIPEACONF);
920  break;
921  case FB_BLANK_VSYNC_SUSPEND:
922  case FB_BLANK_HSYNC_SUSPEND:
923  if (!vinfo->pipe_disabled) {
924  vmlfb_disable_pipe(vinfo);
925  }
926  break;
927  case FB_BLANK_POWERDOWN:
928  if (!vinfo->pipe_disabled) {
929  vmlfb_disable_pipe(vinfo);
930  }
931  break;
932  default:
933  return -EINVAL;
934  }
935 
936  return 0;
937 }
938 
939 static int vmlfb_blank(int blank_mode, struct fb_info *info)
940 {
941  struct vml_info *vinfo = container_of(info, struct vml_info, info);
942  int ret;
943 
944  mutex_lock(&vml_mutex);
945  vinfo->cur_blank_mode = blank_mode;
946  ret = vmlfb_blank_locked(vinfo);
947  mutex_unlock(&vml_mutex);
948  return ret;
949 }
950 
951 static int vmlfb_pan_display(struct fb_var_screeninfo *var,
952  struct fb_info *info)
953 {
954  struct vml_info *vinfo = container_of(info, struct vml_info, info);
955  struct vml_par *par = vinfo->par;
956 
957  mutex_lock(&vml_mutex);
958  VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
959  var->yoffset * vinfo->stride +
960  var->xoffset * vinfo->bytes_per_pixel);
961  (void)VML_READ32(par, VML_DSPCADDR);
962  mutex_unlock(&vml_mutex);
963 
964  return 0;
965 }
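Panning is driven from user space through the fbdev FBIOPAN_DISPLAY ioctl; the fb core validates the offsets against the virtual resolution and then lands in vmlfb_pan_display() above, which reprograms VML_DSPCADDR. A minimal user-space sketch, assuming the device shows up as /dev/fb0 and that yres_virtual leaves room to pan:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fb.h>

	int main(void)
	{
		struct fb_var_screeninfo var;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0) {
			perror("fb0");
			return 1;
		}
		var.xoffset = 0;
		var.yoffset = 1;	/* ypanstep is 1, so single-line steps are allowed */
		if (ioctl(fd, FBIOPAN_DISPLAY, &var) < 0)
			perror("FBIOPAN_DISPLAY");
		close(fd);
		return 0;
	}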
966 
967 static int vmlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
968  u_int transp, struct fb_info *info)
969 {
970  u32 v;
971 
972  if (regno >= 16)
973  return -EINVAL;
974 
975  if (info->var.grayscale) {
976  red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
977  }
978 
979  if (info->fix.visual != FB_VISUAL_TRUECOLOR)
980  return -EINVAL;
981 
982  red = VML_TOHW(red, info->var.red.length);
983  blue = VML_TOHW(blue, info->var.blue.length);
984  green = VML_TOHW(green, info->var.green.length);
985  transp = VML_TOHW(transp, info->var.transp.length);
986 
987  v = (red << info->var.red.offset) |
988  (green << info->var.green.offset) |
989  (blue << info->var.blue.offset) |
990  (transp << info->var.transp.offset);
991 
992  switch (info->var.bits_per_pixel) {
993  case 16:
994  ((u32 *) info->pseudo_palette)[regno] = v;
995  break;
996  case 24:
997  case 32:
998  ((u32 *) info->pseudo_palette)[regno] = v;
999  break;
1000  }
1001  return 0;
1002 }
1003 
1004 static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
1005 {
1006  struct vml_info *vinfo = container_of(info, struct vml_info, info);
1007  unsigned long size = vma->vm_end - vma->vm_start;
1008  unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1009  int ret;
1010 
1011  if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
1012  return -EINVAL;
1013  if (offset + size > vinfo->vram_contig_size)
1014  return -EINVAL;
1015  ret = vmlfb_vram_offset(vinfo, offset);
1016  if (ret)
1017  return -EINVAL;
1018  offset += vinfo->vram_start;
1019  pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
1020  pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
1021  if (remap_pfn_range(vma, vma->vm_start, offset >> PAGE_SHIFT,
1022  size, vma->vm_page_prot))
1023  return -EAGAIN;
1024  return 0;
1025 }
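A user-space mapping of the framebuffer ends up in vmlfb_mmap() above, which refuses ranges outside the contiguous vram and maps the pages uncached so they do not conflict with the uncached kernel linear map set up in vmlfb_alloc_vram_area(). A minimal user-space sketch, assuming the device is /dev/fb0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/fb.h>

	int main(void)
	{
		struct fb_fix_screeninfo fix;
		void *fb;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0 || ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0) {
			perror("fb0");
			return 1;
		}
		fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (fb == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		memset(fb, 0, fix.line_length);	/* clear the first scanline */
		munmap(fb, fix.smem_len);
		close(fd);
		return 0;
	}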
1026 
1027 static int vmlfb_sync(struct fb_info *info)
1028 {
1029  return 0;
1030 }
1031 
1032 static int vmlfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
1033 {
1034  return -EINVAL; /* just to force soft_cursor() call */
1035 }
1036 
1037 static struct fb_ops vmlfb_ops = {
1038  .owner = THIS_MODULE,
1039  .fb_open = vmlfb_open,
1040  .fb_release = vmlfb_release,
1041  .fb_check_var = vmlfb_check_var,
1042  .fb_set_par = vmlfb_set_par,
1043  .fb_blank = vmlfb_blank,
1044  .fb_pan_display = vmlfb_pan_display,
1045  .fb_fillrect = cfb_fillrect,
1046  .fb_copyarea = cfb_copyarea,
1047  .fb_imageblit = cfb_imageblit,
1048  .fb_cursor = vmlfb_cursor,
1049  .fb_sync = vmlfb_sync,
1050  .fb_mmap = vmlfb_mmap,
1051  .fb_setcolreg = vmlfb_setcolreg
1052 };
1053 
1054 static struct pci_device_id vml_ids[] = {
1055  {PCI_DEVICE(PCI_VENDOR_ID_INTEL, VML_DEVICE_VDC)},
1056  {0}
1057 };
1058 
1059 static struct pci_driver vmlfb_pci_driver = {
1060  .name = "vmlfb",
1061  .id_table = vml_ids,
1062  .probe = vml_pci_probe,
1063  .remove = __devexit_p(vml_pci_remove)
1064 };
1065 
1066 static void __exit vmlfb_cleanup(void)
1067 {
1068  pci_unregister_driver(&vmlfb_pci_driver);
1069 }
1070 
1071 static int __init vmlfb_init(void)
1072 {
1073 
1074 #ifndef MODULE
1075  char *option = NULL;
1076 
1077  if (fb_get_options(MODULE_NAME, &option))
1078  return -ENODEV;
1079 #endif
1080 
1081  printk(KERN_DEBUG MODULE_NAME ": initializing\n");
1082  mutex_init(&vml_mutex);
1083  INIT_LIST_HEAD(&global_no_mode);
1084  INIT_LIST_HEAD(&global_has_mode);
1085 
1086  return pci_register_driver(&vmlfb_pci_driver);
1087 }
1088 
1089 int vmlfb_register_subsys(struct vml_sys *sys)
1090 {
1091  struct vml_info *entry;
1092  struct list_head *list;
1093  u32 save_activate;
1094 
1095  mutex_lock(&vml_mutex);
1096  if (subsys != NULL) {
1097  subsys->restore(subsys);
1098  }
1099  subsys = sys;
1100  subsys->save(subsys);
1101 
1102  /*
1103  * We need to restart list traversal for each item, since we
1104  * release the list mutex in the loop.
1105  */
1106 
1107  list = global_no_mode.next;
1108  while (list != &global_no_mode) {
1109  list_del_init(list);
1110  entry = list_entry(list, struct vml_info, head);
1111 
1112  /*
1113  * First, try the current mode which might not be
1114  * completely validated with respect to the pixel clock.
1115  */
1116 
1117  if (!vmlfb_check_var_locked(&entry->info.var, entry)) {
1118  vmlfb_set_par_locked(entry);
1119  list_add_tail(list, &global_has_mode);
1120  } else {
1121 
1122  /*
1123  * Didn't work. Try to find another mode,
1124  * that matches this subsys.
1125  */
1126 
1127  mutex_unlock(&vml_mutex);
1128  save_activate = entry->info.var.activate;
1129  entry->info.var.bits_per_pixel = 16;
1130  vmlfb_set_pref_pixel_format(&entry->info.var);
1131  if (fb_find_mode(&entry->info.var,
1132  &entry->info,
1133  vml_default_mode, NULL, 0, NULL, 16)) {
1134  entry->info.var.activate |=
1135  FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
1136  fb_set_var(&entry->info, &entry->info.var);
1137  } else {
1138  printk(KERN_ERR MODULE_NAME
1139  ": Sorry. no mode found for this subsys.\n");
1140  }
1141  entry->info.var.activate = save_activate;
1142  mutex_lock(&vml_mutex);
1143  }
1144  vmlfb_blank_locked(entry);
1145  list = global_no_mode.next;
1146  }
1147  mutex_unlock(&vml_mutex);
1148 
1149  printk(KERN_DEBUG MODULE_NAME ": Registered %s subsystem.\n",
1150  subsys->name ? subsys->name : "unknown");
1151  return 0;
1152 }
1153 
1154 EXPORT_SYMBOL_GPL(vmlfb_register_subsys);
1155 
1156 void vmlfb_unregister_subsys(struct vml_sys *sys)
1157 {
1158  struct vml_info *entry, *next;
1159 
1160  mutex_lock(&vml_mutex);
1161  if (subsys != sys) {
1162  mutex_unlock(&vml_mutex);
1163  return;
1164  }
1165  subsys->restore(subsys);
1166  subsys = NULL;
1167  list_for_each_entry_safe(entry, next, &global_has_mode, head) {
1168  printk(KERN_DEBUG MODULE_NAME ": subsys disable pipe\n");
1169  vmlfb_disable_pipe(entry);
1170  list_move_tail(&entry->head, &global_no_mode);
1171  }
1172  mutex_unlock(&vml_mutex);
1173 }
1174 
1175 EXPORT_SYMBOL_GPL(vmlfb_unregister_subsys);
1176 
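vmlfb_register_subsys()/vmlfb_unregister_subsys() are the hooks a platform clock provider (such as the companion cr_pll module) uses to plug into this driver. A sketch of a minimal provider, assuming the struct vml_sys declared in vermilion.h carries the name, save, restore, set_clock and nearest_clock members invoked above with int-returning hook signatures; the example_* names and empty bodies are placeholders, not real hardware code:

	static int example_save(struct vml_sys *sys)
	{
		return 0;	/* capture current PLL/clock state here */
	}

	static int example_restore(struct vml_sys *sys)
	{
		return 0;	/* put the saved state back here */
	}

	static int example_set_clock(struct vml_sys *sys, int clock)
	{
		return 0;	/* program the requested dot clock (kHz) here */
	}

	static int example_nearest_clock(struct vml_sys *sys, int clock)
	{
		return clock;	/* report the closest clock the PLL can produce */
	}

	static struct vml_sys example_sys = {
		.name = "example",
		.save = example_save,
		.restore = example_restore,
		.set_clock = example_set_clock,
		.nearest_clock = example_nearest_clock,
	};

	/* From the provider's module init/exit paths: */
	/*	err = vmlfb_register_subsys(&example_sys);	*/
	/*	vmlfb_unregister_subsys(&example_sys);		*/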
1177 module_init(vmlfb_init);
1178 module_exit(vmlfb_cleanup);
1179 
1180 MODULE_AUTHOR("Tungsten Graphics");
1181 MODULE_DESCRIPTION("Initialization of the Vermilion display devices");
1182 MODULE_VERSION("1.0.0");
1183 MODULE_LICENSE("GPL");