Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
cirrus_main.c
Go to the documentation of this file.
1 /*
2  * Copyright 2012 Red Hat
3  *
4  * This file is subject to the terms and conditions of the GNU General
5  * Public License version 2. See the file COPYING in the main
6  * directory of this archive for more details.
7  *
8  * Authors: Matthew Garrett
9  * Dave Airlie
10  */
11 #include <drm/drmP.h>
12 #include <drm/drm_crtc_helper.h>
13 
14 #include "cirrus_drv.h"
15 
16 
17 static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
18 {
19  struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
20  if (cirrus_fb->obj)
21  drm_gem_object_unreference_unlocked(cirrus_fb->obj);
23  kfree(fb);
24 }
25 
/*
 * ->create_handle hook.  Not implemented for cirrus: reports success
 * without storing anything through *handle.
 * NOTE(review): a caller that trusts the returned 0 would read an
 * uninitialized handle value — confirm this stub is intentional.
 */
static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	return 0;
}
32 
/* Framebuffer vtable handed to drm_framebuffer_init() below. */
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
	.destroy = cirrus_user_framebuffer_destroy,
	.create_handle = cirrus_user_framebuffer_create_handle,
};
37 
39  struct cirrus_framebuffer *gfb,
40  struct drm_mode_fb_cmd2 *mode_cmd,
41  struct drm_gem_object *obj)
42 {
43  int ret;
44 
45  ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
46  if (ret) {
47  DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
48  return ret;
49  }
50  drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
51  gfb->obj = obj;
52  return 0;
53 }
54 
55 static struct drm_framebuffer *
56 cirrus_user_framebuffer_create(struct drm_device *dev,
57  struct drm_file *filp,
58  struct drm_mode_fb_cmd2 *mode_cmd)
59 {
60  struct drm_gem_object *obj;
61  struct cirrus_framebuffer *cirrus_fb;
62  int ret;
63  u32 bpp, depth;
64 
65  drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
66  /* cirrus can't handle > 24bpp framebuffers at all */
67  if (bpp > 24)
68  return ERR_PTR(-EINVAL);
69 
70  obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
71  if (obj == NULL)
72  return ERR_PTR(-ENOENT);
73 
74  cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
75  if (!cirrus_fb) {
76  drm_gem_object_unreference_unlocked(obj);
77  return ERR_PTR(-ENOMEM);
78  }
79 
80  ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
81  if (ret) {
82  drm_gem_object_unreference_unlocked(obj);
83  kfree(cirrus_fb);
84  return ERR_PTR(ret);
85  }
86  return &cirrus_fb->base;
87 }
88 
/* Mode-config vtable installed on dev->mode_config.funcs in
 * cirrus_driver_load(). */
static const struct drm_mode_config_funcs cirrus_mode_funcs = {
	.fb_create = cirrus_user_framebuffer_create,
};
92 
/* Unmap the framebuffer from the core and release the memory */
static void cirrus_vram_fini(struct cirrus_device *cdev)
{
	/* Drop the register mapping created in cirrus_device_init(). */
	iounmap(cdev->rmmio);
	cdev->rmmio = NULL;
	/* Skip the release when vram_base was never assigned.
	 * NOTE(review): cirrus_vram_init() assigns vram_base before
	 * reserving the region — assumes fini only runs after a
	 * successful init; confirm against callers. */
	if (cdev->mc.vram_base)
		release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
}
101 
/* Map the framebuffer from the card and configure the core */
static int cirrus_vram_init(struct cirrus_device *cdev)
{
	/* BAR 0 is VRAM */
	cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
	/* We have 4MB of VRAM */
	cdev->mc.vram_size = 4 * 1024 * 1024;

	/* NOTE(review): vram_base/vram_size stay assigned even when the
	 * reservation below fails — cirrus_vram_fini() keys off vram_base. */
	if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
				"cirrusdrmfb_vram")) {
		DRM_ERROR("can't reserve VRAM\n");
		return -ENXIO;
	}

	return 0;
}
118 
119 /*
120  * Our emulated hardware has two sets of memory. One is video RAM and can
121  * simply be used as a linear framebuffer - the other provides mmio access
122  * to the display registers. The latter can also be accessed via IO port
123  * access, but we map the range and use mmio to program them instead
124  */
125 
127  struct drm_device *ddev,
128  struct pci_dev *pdev, uint32_t flags)
129 {
130  int ret;
131 
132  cdev->dev = ddev;
133  cdev->flags = flags;
134 
135  /* Hardcode the number of CRTCs to 1 */
136  cdev->num_crtc = 1;
137 
138  /* BAR 0 is the framebuffer, BAR 1 contains registers */
139  cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
140  cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
141 
142  if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
143  "cirrusdrmfb_mmio")) {
144  DRM_ERROR("can't reserve mmio registers\n");
145  return -ENOMEM;
146  }
147 
148  cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
149 
150  if (cdev->rmmio == NULL)
151  return -ENOMEM;
152 
153  ret = cirrus_vram_init(cdev);
154  if (ret) {
156  return ret;
157  }
158 
159  return 0;
160 }
161 
163 {
165  cirrus_vram_fini(cdev);
166 }
167 
168 /*
169  * Functions here will be called by the core once it's bound the driver to
170  * a PCI device
171  */
172 
173 int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
174 {
175  struct cirrus_device *cdev;
176  int r;
177 
178  cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
179  if (cdev == NULL)
180  return -ENOMEM;
181  dev->dev_private = (void *)cdev;
182 
183  r = cirrus_device_init(cdev, dev, dev->pdev, flags);
184  if (r) {
185  dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
186  goto out;
187  }
188 
189  r = cirrus_mm_init(cdev);
190  if (r)
191  dev_err(&dev->pdev->dev, "fatal err on mm init\n");
192 
193  r = cirrus_modeset_init(cdev);
194  if (r)
195  dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
196 
197  dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
198 out:
199  if (r)
201  return r;
202 }
203 
205 {
206  struct cirrus_device *cdev = dev->dev_private;
207 
208  if (cdev == NULL)
209  return 0;
210  cirrus_modeset_fini(cdev);
211  cirrus_mm_fini(cdev);
212  cirrus_device_fini(cdev);
213  kfree(cdev);
214  dev->dev_private = NULL;
215  return 0;
216 }
217 
219  u32 size, bool iskernel,
220  struct drm_gem_object **obj)
221 {
222  struct cirrus_bo *cirrusbo;
223  int ret;
224 
225  *obj = NULL;
226 
227  size = roundup(size, PAGE_SIZE);
228  if (size == 0)
229  return -EINVAL;
230 
231  ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
232  if (ret) {
233  if (ret != -ERESTARTSYS)
234  DRM_ERROR("failed to allocate GEM object\n");
235  return ret;
236  }
237  *obj = &cirrusbo->gem;
238  return 0;
239 }
240 
241 int cirrus_dumb_create(struct drm_file *file,
242  struct drm_device *dev,
243  struct drm_mode_create_dumb *args)
244 {
245  int ret;
246  struct drm_gem_object *gobj;
247  u32 handle;
248 
249  args->pitch = args->width * ((args->bpp + 7) / 8);
250  args->size = args->pitch * args->height;
251 
252  ret = cirrus_gem_create(dev, args->size, false,
253  &gobj);
254  if (ret)
255  return ret;
256 
257  ret = drm_gem_handle_create(file, gobj, &handle);
258  drm_gem_object_unreference_unlocked(gobj);
259  if (ret)
260  return ret;
261 
262  args->handle = handle;
263  return 0;
264 }
265 
266 int cirrus_dumb_destroy(struct drm_file *file,
267  struct drm_device *dev,
269 {
270  return drm_gem_handle_delete(file, handle);
271 }
272 
/*
 * GEM init hook.  Cirrus objects are created through cirrus_bo_create(),
 * so this path must never run; BUG() makes any stray call fatal.  The
 * return is unreachable but keeps the expected int signature.
 */
int cirrus_gem_init_object(struct drm_gem_object *obj)
{
	BUG();
	return 0;
}
278 
279 void cirrus_bo_unref(struct cirrus_bo **bo)
280 {
281  struct ttm_buffer_object *tbo;
282 
283  if ((*bo) == NULL)
284  return;
285 
286  tbo = &((*bo)->bo);
287  ttm_bo_unref(&tbo);
288  if (tbo == NULL)
289  *bo = NULL;
290 
291 }
292 
/*
 * GEM ->free hook: release the cirrus buffer object backing @obj.
 */
void cirrus_gem_free_object(struct drm_gem_object *obj)
{
	struct cirrus_bo *bo = gem_to_cirrus_bo(obj);

	if (bo)
		cirrus_bo_unref(&bo);
}
301 
302 
/* Fake offset userspace passes to mmap() to map this buffer object. */
static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
{
	return bo->bo.addr_space_offset;
}
307 
308 int
309 cirrus_dumb_mmap_offset(struct drm_file *file,
310  struct drm_device *dev,
312  uint64_t *offset)
313 {
314  struct drm_gem_object *obj;
315  int ret;
316  struct cirrus_bo *bo;
317 
318  mutex_lock(&dev->struct_mutex);
319  obj = drm_gem_object_lookup(dev, file, handle);
320  if (obj == NULL) {
321  ret = -ENOENT;
322  goto out_unlock;
323  }
324 
325  bo = gem_to_cirrus_bo(obj);
326  *offset = cirrus_bo_mmap_offset(bo);
327 
328  drm_gem_object_unreference(obj);
329  ret = 0;
330 out_unlock:
331  mutex_unlock(&dev->struct_mutex);
332  return ret;
333 
334 }