Linux Kernel  3.7.1
r420.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"

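/* Build the default power-management profile table: each profile records
 * which power state index and clock mode index to use with displays off
 * (dpms_off_*) and on (dpms_on_*). Most profiles here simply fall back to
 * the board's default power state.
 */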
void r420_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

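/* Point the command-stream checker at the R420 register-safety bitmap
 * (generated into r420_reg_safe.h), limiting which registers userspace
 * command buffers may touch.
 */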
static void r420_set_reg_safe(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}

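/* Read GB_PIPE_SELECT to find out how many quad pipes the chip exposes,
 * program GB_TILE_CONFIG, SU_REG_DEST and the destination pipe/cache
 * configuration to match, and record the GB and Z pipe counts in rdev.
 */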
void r420_pipes_init(struct radeon_device *rdev)
{
	unsigned tmp;
	unsigned gb_pipe_select;
	unsigned num_pipes;

	/* GA_ENHANCE workaround TCL deadlock issue */
	WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
	       (1 << 2) | (1 << 3));
	/* add idle wait as per freedesktop.org bug 24041 */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* get max number of pipes */
	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;

	/* SE chips have 1 pipe */
	if ((rdev->pdev->device == 0x5e4c) ||
	    (rdev->pdev->device == 0x5e4f))
		num_pipes = 1;

	rdev->num_gb_pipes = num_pipes;
	tmp = 0;
	switch (num_pipes) {
	default:
		/* force to 1 pipe */
		num_pipes = 1;
	case 1:
		tmp = (0 << 1);
		break;
	case 2:
		tmp = (3 << 1);
		break;
	case 3:
		tmp = (6 << 1);
		break;
	case 4:
		tmp = (7 << 1);
		break;
	}
	WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
	WREG32(R300_GB_TILE_CONFIG, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       RREG32(R300_RB2D_DSTCACHE_MODE) |
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->family == CHIP_RV530) {
		tmp = RREG32(RV530_GB_PIPE_SELECT2);
		if ((tmp & 3) == 3)
			rdev->num_z_pipes = 2;
		else
			rdev->num_z_pipes = 1;
	} else
		rdev->num_z_pipes = 1;

	DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}

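/* Memory-controller registers are not mapped directly on R420; they are
 * read and written through the MC_IND_INDEX/MC_IND_DATA indirection pair.
 */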
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
	r = RREG32(R_0001FC_MC_IND_DATA);
	return r;
}

void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
	       S_0001F8_MC_IND_WR_EN(1));
	WREG32(R_0001FC_MC_IND_DATA, v);
}

static void r420_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	if (r420_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}
}

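/* Re-enable dynamic clock gating if the radeon_dynclks module parameter
 * asks for it, and force the SCLK on for the CP and VIP blocks (plus the
 * PX/TX blocks on R420 proper) while the chip is brought back up.
 */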
static void r420_clock_resume(struct radeon_device *rdev)
{
	u32 sclk_cntl;

	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_atom_set_clock_gating(rdev, 1);
	sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
	sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	if (rdev->family == CHIP_R420)
		sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}

static void r420_cp_errata_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	/* RV410 and R420 can lock up if CP DMA to host memory happens
	 * while the 2D engine is busy.
	 *
	 * The proper workaround is to queue a RESYNC at the beginning
	 * of the CP init, apparently.
	 */
	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
	radeon_ring_lock(rdev, ring, 8);
	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
}

static void r420_cp_errata_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	/* Catch the RESYNC we dispatched all the way back,
	 * at the very beginning of the CP init.
	 */
	radeon_ring_lock(rdev, ring, 8);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
	radeon_ring_unlock_commit(rdev, ring);
	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}

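/* Common bring-up path shared by init and resume: program the memory
 * controller and clocks, enable the GART, configure the pipes, then start
 * writeback, fences, interrupts, the CP ring and the IB pool, in that order.
 */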
static int r420_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r420_clock_resume(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}
	r420_pipes_init(rdev);

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r420_cp_errata_init(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

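/* Resume: disable the GART, reset the GPU, re-post the card through the
 * ATOM or combios tables, then run the common startup path above.
 */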
int r420_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r420_clock_resume(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (rdev->is_atom_bios) {
		atom_asic_init(rdev->mode_info.atom_context);
	} else {
		radeon_combios_asic_init(rdev->ddev);
	}
	/* Resume clock after posting */
	r420_clock_resume(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r420_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int r420_suspend(struct radeon_device *rdev)
{
	r420_cp_errata_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}

void r420_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	if (rdev->is_atom_bios) {
		radeon_atombios_fini(rdev);
	} else {
		radeon_combios_fini(rdev);
	}
	kfree(rdev->bios);
	rdev->bios = NULL;
}

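/* One-time ASIC initialization: parse the BIOS, reset and check that the
 * card is posted, set up clocks, AGP, the memory controller, fences, IRQs
 * and the memory manager, then run the common startup path. If acceleration
 * fails to start, the engines are torn down and the driver continues with
 * accel disabled.
 */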
int r420_init(struct radeon_device *rdev)
{
	int r;

	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r) {
			return r;
		}
	} else {
		r = radeon_combios_init(rdev);
		if (r) {
			return r;
		}
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	r420_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r) {
		return r;
	}
	r = radeon_irq_kms_init(rdev);
	if (r) {
		return r;
	}
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->family == CHIP_R420)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r420_set_reg_safe(rdev);

	rdev->accel_working = true;
	r = r420_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(R400_GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(R300_GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif

int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
	return 0;
#endif
}