Linux Kernel  3.7.1
rv770.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  * Alex Deucher
26  * Jerome Glisse
27  */
28 #include <linux/firmware.h>
29 #include <linux/platform_device.h>
30 #include <linux/slab.h>
31 #include <drm/drmP.h>
32 #include "radeon.h"
33 #include "radeon_asic.h"
34 #include <drm/radeon_drm.h>
35 #include "rv770d.h"
36 #include "atom.h"
37 #include "avivod.h"
38 
39 #define R700_PFP_UCODE_SIZE 848
40 #define R700_PM4_UCODE_SIZE 1360
41 
42 static void rv770_gpu_init(struct radeon_device *rdev);
43 void rv770_fini(struct radeon_device *rdev);
44 static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
45 
46 u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
47 {
48  struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
49  u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
50  int i;
51 
52  /* Lock the graphics update lock */
53  tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
54  WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
55 
56  /* update the scanout addresses */
57  if (radeon_crtc->crtc_id) {
58  WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
59  WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
60  } else {
61  WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
62  WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
63  }
64  WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
65  (u32)crtc_base);
66  WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
67  (u32)crtc_base);
68 
69  /* Wait for update_pending to go high. */
70  for (i = 0; i < rdev->usec_timeout; i++) {
71  if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
72  break;
73  udelay(1);
74  }
75  DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
76 
77  /* Unlock the lock, so double-buffering can take place inside vblank */
78  tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
79  WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
80 
81  /* Return current update_pending status: */
82  return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
83 }
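
/*
 * Editor's illustrative sketch (not part of rv770.c): the bounded-poll
 * pattern rv770_page_flip() uses while waiting for the surface-update-
 * pending bit. The register is simulated here; poll_update_pending(),
 * UPDATE_PENDING and the iteration budget are assumptions for the example,
 * standing in for RREG32(), the AVIVO pending bit and rdev->usec_timeout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UPDATE_PENDING (1u << 2)        /* assumed bit position */

static volatile uint32_t fake_grph_update;  /* stands in for the MMIO register */

static bool poll_update_pending(unsigned int usec_budget)
{
	for (unsigned int i = 0; i < usec_budget; i++) {
		if (fake_grph_update & UPDATE_PENDING)
			return true;    /* bit went high inside the budget */
		/* a real driver would udelay(1) here before re-reading */
	}
	return false;                   /* timed out; caller decides what to do */
}

int main(void)
{
	fake_grph_update = UPDATE_PENDING;  /* pretend the flip latched immediately */
	printf("update pending: %s\n", poll_update_pending(100000) ? "yes" : "no");
	return 0;
}
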
84 
85 /* get temperature in millidegrees */
86 int rv770_get_temp(struct radeon_device *rdev)
87 {
88  u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
89  ASIC_T_SHIFT;
90  int actual_temp;
91 
92  if (temp & 0x400)
93  actual_temp = -256;
94  else if (temp & 0x200)
95  actual_temp = 255;
96  else if (temp & 0x100) {
97  actual_temp = temp & 0x1ff;
98  actual_temp |= ~0x1ff;
99  } else
100  actual_temp = temp & 0xff;
101 
102  return (actual_temp * 1000) / 2;
103 }
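
/*
 * Editor's illustrative sketch (not part of rv770.c): the millidegree decode
 * above, repeated as a standalone program. decode_millideg() is a
 * hypothetical name; the bit tests and the "value * 1000 / 2" scaling
 * (0.5 degree C units) mirror the function body directly.
 */
#include <stdio.h>
#include <stdint.h>

static int decode_millideg(uint32_t temp)
{
	int actual_temp;

	if (temp & 0x400)               /* clamped low reading */
		actual_temp = -256;
	else if (temp & 0x200)          /* clamped high reading */
		actual_temp = 255;
	else if (temp & 0x100) {        /* negative: sign-extend the 9-bit field */
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else                          /* ordinary positive reading */
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;
}

int main(void)
{
	printf("%d mC\n", decode_millideg(0x080));  /* 128 * 500 = 64000 */
	printf("%d mC\n", decode_millideg(0x1f0));  /* sign-extends to -16 -> -8000 */
	return 0;
}
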
104 
105 void rv770_pm_misc(struct radeon_device *rdev)
106 {
107  int req_ps_idx = rdev->pm.requested_power_state_index;
108  int req_cm_idx = rdev->pm.requested_clock_mode_index;
109  struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
110  struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
111 
112  if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
113  /* 0xff01 is a flag rather than an actual voltage */
114  if (voltage->voltage == 0xff01)
115  return;
116  if (voltage->voltage != rdev->pm.current_vddc) {
117  radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
118  rdev->pm.current_vddc = voltage->voltage;
119  DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
120  }
121  }
122 }
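
/*
 * Editor's illustrative sketch (not part of rv770.c): the policy above --
 * only program VDDC when the requested value differs from the cached one,
 * and treat 0xff01 as a "no change" flag. apply_vddc() is a hypothetical
 * stand-in for radeon_atom_set_voltage(); units are assumed to be mV.
 */
#include <stdint.h>
#include <stdio.h>

#define VDDC_NO_CHANGE 0xff01   /* flag value, not a real voltage */

struct vddc_state {
	uint16_t current_mv;    /* last value actually programmed */
};

static void apply_vddc(uint16_t mv)     /* hypothetical hardware hook */
{
	printf("programming VDDC to %u\n", (unsigned)mv);
}

static void request_vddc(struct vddc_state *st, uint16_t requested_mv)
{
	if (!requested_mv || requested_mv == VDDC_NO_CHANGE)
		return;                         /* nothing to do, or "keep current" flag */
	if (requested_mv != st->current_mv) {
		apply_vddc(requested_mv);
		st->current_mv = requested_mv;  /* cache so repeats are skipped */
	}
}

int main(void)
{
	struct vddc_state st = { .current_mv = 0 };

	request_vddc(&st, 1100);            /* programs once */
	request_vddc(&st, 1100);            /* skipped: unchanged */
	request_vddc(&st, VDDC_NO_CHANGE);  /* skipped: flag value */
	return 0;
}
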
123 
124 /*
125  * GART
126  */
127 static int rv770_pcie_gart_enable(struct radeon_device *rdev)
128 {
129  u32 tmp;
130  int r, i;
131 
132  if (rdev->gart.robj == NULL) {
133  dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
134  return -EINVAL;
135  }
136  r = radeon_gart_table_vram_pin(rdev);
137  if (r)
138  return r;
139  radeon_gart_restore(rdev);
140  /* Setup L2 cache */
141  WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
142  ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
143  EFFECTIVE_L2_QUEUE_SIZE(7));
144  WREG32(VM_L2_CNTL2, 0);
145  WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
146  /* Setup TLB control */
147  tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
148  SYSTEM_ACCESS_MODE_NOT_IN_SYS |
149  SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
150  EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
151  WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
152  WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
153  WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
154  if (rdev->family == CHIP_RV740)
155  WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
156  WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
157  WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
158  WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
159  WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
160  WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
161  WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
162  WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
163  WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
164  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
165  WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
166  (u32)(rdev->dummy_page.addr >> 12));
167  for (i = 1; i < 7; i++)
168  WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
169 
170  r600_pcie_gart_tlb_flush(rdev);
171  DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
172  (unsigned)(rdev->mc.gtt_size >> 20),
173  (unsigned long long)rdev->gart.table_addr);
174  rdev->gart.ready = true;
175  return 0;
176 }
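
/*
 * Editor's illustrative sketch (not part of rv770.c): the ">> 12" writes in
 * rv770_pcie_gart_enable() express GART addresses in 4 KiB page units, and
 * the DRM_INFO line reports the aperture in MiB (">> 20"). The sample
 * gtt_start/gtt_size values below are made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gtt_start = 0x20000000ULL;             /* assumed GTT aperture base */
	uint64_t gtt_size  = 512ULL * 1024 * 1024;      /* assumed 512 MiB aperture */
	uint64_t gtt_end   = gtt_start + gtt_size - 1;

	printf("start page frame:   0x%llx\n", (unsigned long long)(gtt_start >> 12));
	printf("end page frame:     0x%llx\n", (unsigned long long)(gtt_end >> 12));
	printf("page table entries: %llu\n", (unsigned long long)(gtt_size >> 12));
	printf("aperture:           %lluM\n", (unsigned long long)(gtt_size >> 20));
	return 0;
}
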
177 
178 static void rv770_pcie_gart_disable(struct radeon_device *rdev)
179 {
180  u32 tmp;
181  int i;
182 
183  /* Disable all tables */
184  for (i = 0; i < 7; i++)
185  WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
186 
187  /* Setup L2 cache */
188  WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
189  EFFECTIVE_L2_QUEUE_SIZE(7));
190  WREG32(VM_L2_CNTL2, 0);
191  WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
192  /* Setup TLB control */
193  tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
194  WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
195  WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
196  WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
197  WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
198  WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
199  WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
200  WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
201  radeon_gart_table_vram_unpin(rdev);
202 }
203 
204 static void rv770_pcie_gart_fini(struct radeon_device *rdev)
205 {
206  radeon_gart_fini(rdev);
207  rv770_pcie_gart_disable(rdev);
208  radeon_gart_table_vram_free(rdev);
209 }
210 
211 
212 static void rv770_agp_enable(struct radeon_device *rdev)
213 {
214  u32 tmp;
215  int i;
216 
217  /* Setup L2 cache */
218  WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
219  ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
220  EFFECTIVE_L2_QUEUE_SIZE(7));
221  WREG32(VM_L2_CNTL2, 0);
222  WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
223  /* Setup TLB control */
224  tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
225  SYSTEM_ACCESS_MODE_NOT_IN_SYS |
226  SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
227  EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
228  WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
229  WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
230  WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
231  WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
232  WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
233  WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
234  WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
235  for (i = 0; i < 7; i++)
236  WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
237 }
238 
239 static void rv770_mc_program(struct radeon_device *rdev)
240 {
241  struct rv515_mc_save save;
242  u32 tmp;
243  int i, j;
244 
245  /* Initialize HDP */
246  for (i = 0, j = 0; i < 32; i++, j += 0x18) {
247  WREG32((0x2c14 + j), 0x00000000);
248  WREG32((0x2c18 + j), 0x00000000);
249  WREG32((0x2c1c + j), 0x00000000);
250  WREG32((0x2c20 + j), 0x00000000);
251  WREG32((0x2c24 + j), 0x00000000);
252  }
253  /* r7xx hw bug. Read from HDP_DEBUG1 rather
254  * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
255  */
256  tmp = RREG32(HDP_DEBUG1);
257 
258  rv515_mc_stop(rdev, &save);
259  if (r600_mc_wait_for_idle(rdev)) {
260  dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
261  }
262  /* Lockout access through VGA aperture*/
264  /* Update configuration */
265  if (rdev->flags & RADEON_IS_AGP) {
266  if (rdev->mc.vram_start < rdev->mc.gtt_start) {
267  /* VRAM before AGP */
268  WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
269  rdev->mc.vram_start >> 12);
270  WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
271  rdev->mc.gtt_end >> 12);
272  } else {
273  /* VRAM after AGP */
274  WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
275  rdev->mc.gtt_start >> 12);
276  WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
277  rdev->mc.vram_end >> 12);
278  }
279  } else {
280  WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
281  rdev->mc.vram_start >> 12);
282  WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
283  rdev->mc.vram_end >> 12);
284  }
286  tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
287  tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
288  WREG32(MC_VM_FB_LOCATION, tmp);
289  WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
290  WREG32(HDP_NONSURFACE_INFO, (2 << 7));
291  WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
292  if (rdev->flags & RADEON_IS_AGP) {
293  WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
294  WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
295  WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
296  } else {
297  WREG32(MC_VM_AGP_BASE, 0);
298  WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
299  WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
300  }
301  if (r600_mc_wait_for_idle(rdev)) {
302  dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
303  }
304  rv515_mc_resume(rdev, &save);
305  /* we need to own VRAM, so turn off the VGA renderer here
306  * to stop it overwriting our objects */
307  rv515_vga_render_disable(rdev);
308 }
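
/*
 * Editor's illustrative sketch (not part of rv770.c): the 16-bit/16-bit
 * packing computed at lines 286-287 above, where start and end are expressed
 * in 16 MiB units (">> 24") and packed into one 32-bit word, end in the high
 * half and start in the low half. The sample addresses are made up.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t pack_fb_location(uint64_t vram_start, uint64_t vram_end)
{
	uint32_t tmp = ((uint32_t)(vram_end >> 24) & 0xFFFF) << 16;

	tmp |= (uint32_t)(vram_start >> 24) & 0xFFFF;
	return tmp;
}

int main(void)
{
	uint64_t start = 0x0000000000ULL;       /* assumed VRAM base */
	uint64_t end   = 0x003FFFFFFFULL;       /* assumed 1 GiB - 1 */
	uint32_t fb_location = pack_fb_location(start, end);

	printf("packed location: 0x%08x\n", fb_location);
	printf("start: 0x%x (16M units), end: 0x%x (16M units)\n",
	       fb_location & 0xFFFF, fb_location >> 16);
	return 0;
}
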
309 
310 
311 /*
312  * CP.
313  */
314 void r700_cp_stop(struct radeon_device *rdev)
315 {
316  radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
317  WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
318  WREG32(SCRATCH_UMSK, 0);
319 }
320 
321 static int rv770_cp_load_microcode(struct radeon_device *rdev)
322 {
323  const __be32 *fw_data;
324  int i;
325 
326  if (!rdev->me_fw || !rdev->pfp_fw)
327  return -EINVAL;
328 
329  r700_cp_stop(rdev);
330  WREG32(CP_RB_CNTL,
331 #ifdef __BIG_ENDIAN
332  BUF_SWAP_32BIT |
333 #endif
334  RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
335 
336  /* Reset cp */
337  WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
338  RREG32(GRBM_SOFT_RESET);
339  mdelay(15);
340  WREG32(GRBM_SOFT_RESET, 0);
341 
342  fw_data = (const __be32 *)rdev->pfp_fw->data;
343  WREG32(CP_PFP_UCODE_ADDR, 0);
344  for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
345  WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
346  WREG32(CP_PFP_UCODE_ADDR, 0);
347 
348  fw_data = (const __be32 *)rdev->me_fw->data;
349  WREG32(CP_ME_RAM_WADDR, 0);
350  for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
351  WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
352 
353  WREG32(CP_PFP_UCODE_ADDR, 0);
354  WREG32(CP_ME_RAM_WADDR, 0);
355  WREG32(CP_ME_RAM_RADDR, 0);
356  return 0;
357 }
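
/*
 * Editor's illustrative sketch (not part of rv770.c): the firmware images
 * loaded above are arrays of big-endian 32-bit words (__be32), and each word
 * is converted with be32_to_cpup() before being written to the ucode
 * registers. This standalone version does the same conversion portably with
 * byte shifts; the two-word sample blob is made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t be32_to_host(const uint8_t *p)
{
	/* assemble the word MSB-first, independent of host endianness */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	static const uint8_t fw_blob[] = {      /* pretend firmware, 2 words */
		0x12, 0x34, 0x56, 0x78,
		0xde, 0xad, 0xbe, 0xef,
	};

	for (size_t i = 0; i < sizeof(fw_blob); i += 4)
		printf("word %zu: 0x%08x\n", i / 4, be32_to_host(&fw_blob[i]));
	return 0;
}
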
358 
359 void r700_cp_fini(struct radeon_device *rdev)
360 {
361  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
362  r700_cp_stop(rdev);
363  radeon_ring_fini(rdev, ring);
364  radeon_scratch_free(rdev, ring->rptr_save_reg);
365 }
366 
367 /*
368  * Core functions
369  */
370 static void rv770_gpu_init(struct radeon_device *rdev)
371 {
372  int i, j, num_qd_pipes;
373  u32 ta_aux_cntl;
374  u32 sx_debug_1;
375  u32 smx_dc_ctl0;
376  u32 db_debug3;
377  u32 num_gs_verts_per_thread;
378  u32 vgt_gs_per_es;
379  u32 gs_prim_buffer_depth = 0;
380  u32 sq_ms_fifo_sizes;
381  u32 sq_config;
382  u32 sq_thread_resource_mgmt;
383  u32 hdp_host_path_cntl;
384  u32 sq_dyn_gpr_size_simd_ab_0;
385  u32 gb_tiling_config = 0;
386  u32 cc_rb_backend_disable = 0;
387  u32 cc_gc_shader_pipe_config = 0;
388  u32 mc_arb_ramcfg;
389  u32 db_debug4, tmp;
390  u32 inactive_pipes, shader_pipe_config;
391  u32 disabled_rb_mask;
392  unsigned active_number;
393 
394  /* setup chip specs */
395  rdev->config.rv770.tiling_group_size = 256;
396  switch (rdev->family) {
397  case CHIP_RV770:
398  rdev->config.rv770.max_pipes = 4;
399  rdev->config.rv770.max_tile_pipes = 8;
400  rdev->config.rv770.max_simds = 10;
401  rdev->config.rv770.max_backends = 4;
402  rdev->config.rv770.max_gprs = 256;
403  rdev->config.rv770.max_threads = 248;
404  rdev->config.rv770.max_stack_entries = 512;
405  rdev->config.rv770.max_hw_contexts = 8;
406  rdev->config.rv770.max_gs_threads = 16 * 2;
407  rdev->config.rv770.sx_max_export_size = 128;
408  rdev->config.rv770.sx_max_export_pos_size = 16;
409  rdev->config.rv770.sx_max_export_smx_size = 112;
410  rdev->config.rv770.sq_num_cf_insts = 2;
411 
412  rdev->config.rv770.sx_num_of_sets = 7;
413  rdev->config.rv770.sc_prim_fifo_size = 0xF9;
414  rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
415  rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
416  break;
417  case CHIP_RV730:
418  rdev->config.rv770.max_pipes = 2;
419  rdev->config.rv770.max_tile_pipes = 4;
420  rdev->config.rv770.max_simds = 8;
421  rdev->config.rv770.max_backends = 2;
422  rdev->config.rv770.max_gprs = 128;
423  rdev->config.rv770.max_threads = 248;
424  rdev->config.rv770.max_stack_entries = 256;
425  rdev->config.rv770.max_hw_contexts = 8;
426  rdev->config.rv770.max_gs_threads = 16 * 2;
427  rdev->config.rv770.sx_max_export_size = 256;
428  rdev->config.rv770.sx_max_export_pos_size = 32;
429  rdev->config.rv770.sx_max_export_smx_size = 224;
430  rdev->config.rv770.sq_num_cf_insts = 2;
431 
432  rdev->config.rv770.sx_num_of_sets = 7;
433  rdev->config.rv770.sc_prim_fifo_size = 0xf9;
434  rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
435  rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
436  if (rdev->config.rv770.sx_max_export_pos_size > 16) {
437  rdev->config.rv770.sx_max_export_pos_size -= 16;
438  rdev->config.rv770.sx_max_export_smx_size += 16;
439  }
440  break;
441  case CHIP_RV710:
442  rdev->config.rv770.max_pipes = 2;
443  rdev->config.rv770.max_tile_pipes = 2;
444  rdev->config.rv770.max_simds = 2;
445  rdev->config.rv770.max_backends = 1;
446  rdev->config.rv770.max_gprs = 256;
447  rdev->config.rv770.max_threads = 192;
448  rdev->config.rv770.max_stack_entries = 256;
449  rdev->config.rv770.max_hw_contexts = 4;
450  rdev->config.rv770.max_gs_threads = 8 * 2;
451  rdev->config.rv770.sx_max_export_size = 128;
452  rdev->config.rv770.sx_max_export_pos_size = 16;
453  rdev->config.rv770.sx_max_export_smx_size = 112;
454  rdev->config.rv770.sq_num_cf_insts = 1;
455 
456  rdev->config.rv770.sx_num_of_sets = 7;
457  rdev->config.rv770.sc_prim_fifo_size = 0x40;
458  rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
459  rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
460  break;
461  case CHIP_RV740:
462  rdev->config.rv770.max_pipes = 4;
463  rdev->config.rv770.max_tile_pipes = 4;
464  rdev->config.rv770.max_simds = 8;
465  rdev->config.rv770.max_backends = 4;
466  rdev->config.rv770.max_gprs = 256;
467  rdev->config.rv770.max_threads = 248;
468  rdev->config.rv770.max_stack_entries = 512;
469  rdev->config.rv770.max_hw_contexts = 8;
470  rdev->config.rv770.max_gs_threads = 16 * 2;
471  rdev->config.rv770.sx_max_export_size = 256;
472  rdev->config.rv770.sx_max_export_pos_size = 32;
473  rdev->config.rv770.sx_max_export_smx_size = 224;
474  rdev->config.rv770.sq_num_cf_insts = 2;
475 
476  rdev->config.rv770.sx_num_of_sets = 7;
477  rdev->config.rv770.sc_prim_fifo_size = 0x100;
478  rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
479  rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
480 
481  if (rdev->config.rv770.sx_max_export_pos_size > 16) {
482  rdev->config.rv770.sx_max_export_pos_size -= 16;
483  rdev->config.rv770.sx_max_export_smx_size += 16;
484  }
485  break;
486  default:
487  break;
488  }
489 
490  /* Initialize HDP */
491  j = 0;
492  for (i = 0; i < 32; i++) {
493  WREG32((0x2c14 + j), 0x00000000);
494  WREG32((0x2c18 + j), 0x00000000);
495  WREG32((0x2c1c + j), 0x00000000);
496  WREG32((0x2c20 + j), 0x00000000);
497  WREG32((0x2c24 + j), 0x00000000);
498  j += 0x18;
499  }
500 
501  WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
502 
503  /* setup tiling, simd, pipe config */
504  mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
505 
506  shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
507  inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
508  for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
509  if (!(inactive_pipes & tmp)) {
510  active_number++;
511  }
512  tmp <<= 1;
513  }
514  if (active_number == 1) {
515  rdev->config.rv770.max_tile_pipes = 1;
516  } else {
517  rdev->config.rv770.max_tile_pipes = rdev->config.rv770.max_pipes;
518  }
519 
520  cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
521  tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
522  if (tmp < rdev->config.rv770.max_backends) {
523  rdev->config.rv770.max_backends = tmp;
524  }
525 
526  cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
527  tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
528  if (tmp < rdev->config.rv770.max_pipes) {
529  rdev->config.rv770.max_pipes = tmp;
530  }
531  tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
532  if (tmp < rdev->config.rv770.max_simds) {
533  rdev->config.rv770.max_simds = tmp;
534  }
535 
536  switch (rdev->config.rv770.max_tile_pipes) {
537  case 1:
538  default:
539  gb_tiling_config = PIPE_TILING(0);
540  break;
541  case 2:
542  gb_tiling_config = PIPE_TILING(1);
543  break;
544  case 4:
545  gb_tiling_config = PIPE_TILING(2);
546  break;
547  case 8:
548  gb_tiling_config = PIPE_TILING(3);
549  break;
550  }
551  rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
552 
553  disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
554  tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
555  tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
556  R7XX_MAX_BACKENDS, disabled_rb_mask);
557  gb_tiling_config |= tmp << 16;
558  rdev->config.rv770.backend_map = tmp;
559 
560  if (rdev->family == CHIP_RV770)
561  gb_tiling_config |= BANK_TILING(1);
562  else {
563  if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
564  gb_tiling_config |= BANK_TILING(1);
565  else
566  gb_tiling_config |= BANK_TILING(0);
567  }
568  rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
569  gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
570  if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
571  gb_tiling_config |= ROW_TILING(3);
572  gb_tiling_config |= SAMPLE_SPLIT(3);
573  } else {
574  gb_tiling_config |=
575  ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
576  gb_tiling_config |=
577  SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
578  }
579 
580  gb_tiling_config |= BANK_SWAPS(1);
581  rdev->config.rv770.tile_config = gb_tiling_config;
582 
583  WREG32(GB_TILING_CONFIG, gb_tiling_config);
584  WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
585  WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
586 
591 
592 
593  num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
594  WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
595  WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
596 
597  /* set HW defaults for 3D engine */
598  WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
599  ROQ_IB2_START(0x2b)));
600 
601  WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
602 
603  ta_aux_cntl = RREG32(TA_CNTL_AUX);
604  WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
605 
606  sx_debug_1 = RREG32(SX_DEBUG_1);
607  sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
608  WREG32(SX_DEBUG_1, sx_debug_1);
609 
610  smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
611  smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
612  smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
613  WREG32(SMX_DC_CTL0, smx_dc_ctl0);
614 
615  if (rdev->family != CHIP_RV740)
616  WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
617  GS_FLUSH_CTL(4) |
618  ACK_FLUSH_CTL(3) |
619  SYNC_FLUSH_CTL));
620 
621  if (rdev->family != CHIP_RV770)
622  WREG32(SMX_SAR_CTL0, 0x00003f3f);
623 
624  db_debug3 = RREG32(DB_DEBUG3);
625  db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
626  switch (rdev->family) {
627  case CHIP_RV770:
628  case CHIP_RV740:
629  db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
630  break;
631  case CHIP_RV710:
632  case CHIP_RV730:
633  default:
634  db_debug3 |= DB_CLK_OFF_DELAY(2);
635  break;
636  }
637  WREG32(DB_DEBUG3, db_debug3);
638 
639  if (rdev->family != CHIP_RV770) {
640  db_debug4 = RREG32(DB_DEBUG4);
641  db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
642  WREG32(DB_DEBUG4, db_debug4);
643  }
644 
645  WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
646  POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
647  SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
648 
649  WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
650  SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
651  SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
652 
654 
656 
658 
660 
661  sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
662  DONE_FIFO_HIWATER(0xe0) |
663  ALU_UPDATE_FIFO_HIWATER(0x8));
664  switch (rdev->family) {
665  case CHIP_RV770:
666  case CHIP_RV730:
667  case CHIP_RV710:
668  sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
669  break;
670  case CHIP_RV740:
671  default:
672  sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
673  break;
674  }
675  WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
676 
677  /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
678  * should be adjusted as needed by the 2D/3D drivers. This just sets default values
679  */
680  sq_config = RREG32(SQ_CONFIG);
681  sq_config &= ~(PS_PRIO(3) |
682  VS_PRIO(3) |
683  GS_PRIO(3) |
684  ES_PRIO(3));
685  sq_config |= (DX9_CONSTS |
686  VC_ENABLE |
687  EXPORT_SRC_C |
688  PS_PRIO(0) |
689  VS_PRIO(1) |
690  GS_PRIO(2) |
691  ES_PRIO(3));
692  if (rdev->family == CHIP_RV710)
693  /* no vertex cache */
694  sq_config &= ~VC_ENABLE;
695 
696  WREG32(SQ_CONFIG, sq_config);
697 
698  WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
699  NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
700  NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
701 
702  WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
703  NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
704 
705  sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
706  NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
707  NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
708  if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
709  sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
710  else
711  sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
712  WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
713 
714  WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
715  NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
716 
717  WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
718  NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
719 
720  sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
721  SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
722  SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
723  SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
724 
725  WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
726  WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
727  WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
728  WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
729  WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
730  WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
731  WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
732  WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
733 
734  WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
735  FORCE_EOV_MAX_REZ_CNT(255)));
736 
737  if (rdev->family == CHIP_RV710)
738  WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
739  AUTO_INVLD_EN(ES_AND_GS_AUTO)));
740  else
741  WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
742  AUTO_INVLD_EN(ES_AND_GS_AUTO)));
743 
744  switch (rdev->family) {
745  case CHIP_RV770:
746  case CHIP_RV730:
747  case CHIP_RV740:
748  gs_prim_buffer_depth = 384;
749  break;
750  case CHIP_RV710:
751  gs_prim_buffer_depth = 128;
752  break;
753  default:
754  break;
755  }
756 
757  num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
758  vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
759  /* Max value for this is 256 */
760  if (vgt_gs_per_es > 256)
761  vgt_gs_per_es = 256;
762 
763  WREG32(VGT_ES_PER_GS, 128);
764  WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
765  WREG32(VGT_GS_PER_VS, 2);
766 
767  /* more default values. 2D/3D driver should adjust as needed */
771  WREG32(SX_MISC, 0);
773  WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
775  WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
777  WREG32(SPI_INPUT_Z, 0);
780 
781  /* clear render buffer base addresses */
782  WREG32(CB_COLOR0_BASE, 0);
783  WREG32(CB_COLOR1_BASE, 0);
784  WREG32(CB_COLOR2_BASE, 0);
785  WREG32(CB_COLOR3_BASE, 0);
786  WREG32(CB_COLOR4_BASE, 0);
787  WREG32(CB_COLOR5_BASE, 0);
788  WREG32(CB_COLOR6_BASE, 0);
789  WREG32(CB_COLOR7_BASE, 0);
790 
791  WREG32(TCP_CNTL, 0);
792 
793  hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
794  WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
795 
797 
798  WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
799  NUM_CLIP_SEQ(3)));
800  WREG32(VC_ENHANCE, 0);
801 }
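
/*
 * Editor's illustrative sketch (not part of rv770.c): rv770_gpu_init() keeps
 * trimming the per-family maxima by counting bits in hardware disable masks
 * (via r600_count_pipe_bits()). The same idea with a portable popcount and
 * made-up numbers: units present = chip-wide ceiling minus disabled units,
 * then clamped against the per-family default.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int count_bits(uint32_t v)
{
	unsigned int n = 0;

	while (v) {             /* clear the lowest set bit each iteration */
		v &= v - 1;
		n++;
	}
	return n;
}

int main(void)
{
	const unsigned int MAX_BACKENDS_ON_DIE = 8;     /* assumed chip-wide ceiling */
	unsigned int max_backends = 4;                  /* per-family default, e.g. RV770 above */
	uint32_t disable_mask = 0x1f;                   /* assumed: five backends fused off */
	unsigned int present = MAX_BACKENDS_ON_DIE - count_bits(disable_mask);

	if (present < max_backends)
		max_backends = present;                 /* mirrors the "if (tmp < max)" clamp */
	printf("usable render backends: %u\n", max_backends);
	return 0;
}
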
802 
803 static void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
804 {
805  u64 size_bf, size_af;
806 
807  if (mc->mc_vram_size > 0xE0000000) {
808  /* leave room for at least 512M GTT */
809  dev_warn(rdev->dev, "limiting VRAM\n");
810  mc->real_vram_size = 0xE0000000;
811  mc->mc_vram_size = 0xE0000000;
812  }
813  if (rdev->flags & RADEON_IS_AGP) {
814  size_bf = mc->gtt_start;
815  size_af = 0xFFFFFFFF - mc->gtt_end;
816  if (size_bf > size_af) {
817  if (mc->mc_vram_size > size_bf) {
818  dev_warn(rdev->dev, "limiting VRAM\n");
819  mc->real_vram_size = size_bf;
820  mc->mc_vram_size = size_bf;
821  }
822  mc->vram_start = mc->gtt_start - mc->mc_vram_size;
823  } else {
824  if (mc->mc_vram_size > size_af) {
825  dev_warn(rdev->dev, "limiting VRAM\n");
826  mc->real_vram_size = size_af;
827  mc->mc_vram_size = size_af;
828  }
829  mc->vram_start = mc->gtt_end + 1;
830  }
831  mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
832  dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
833  mc->mc_vram_size >> 20, mc->vram_start,
834  mc->vram_end, mc->real_vram_size >> 20);
835  } else {
836  radeon_vram_location(rdev, &rdev->mc, 0);
837  rdev->mc.gtt_base_align = 0;
838  radeon_gtt_location(rdev, mc);
839  }
840 }
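
/*
 * Editor's illustrative sketch (not part of rv770.c): the AGP placement
 * decision in r700_vram_gtt_location() reduced to plain arithmetic. VRAM
 * goes into whichever gap around the GTT aperture is larger (before it or
 * after it, within a 32-bit MC address space), and is shrunk if it does not
 * fit. The sample aperture values are made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gtt_start = 0x80000000ULL;             /* assumed GTT aperture */
	uint64_t gtt_end   = 0x9FFFFFFFULL;
	uint64_t vram_size = 0x40000000ULL;             /* assumed 1 GiB of VRAM */

	uint64_t size_bf = gtt_start;                   /* room before the aperture */
	uint64_t size_af = 0xFFFFFFFFULL - gtt_end;     /* room after it */
	uint64_t vram_start;

	if (size_bf > size_af) {
		if (vram_size > size_bf)
			vram_size = size_bf;            /* limit VRAM to what fits */
		vram_start = gtt_start - vram_size;
	} else {
		if (vram_size > size_af)
			vram_size = size_af;
		vram_start = gtt_end + 1;
	}
	printf("VRAM: %lluM at 0x%08llX - 0x%08llX\n",
	       (unsigned long long)(vram_size >> 20),
	       (unsigned long long)vram_start,
	       (unsigned long long)(vram_start + vram_size - 1));
	return 0;
}
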
841 
842 static int rv770_mc_init(struct radeon_device *rdev)
843 {
844  u32 tmp;
845  int chansize, numchan;
846 
847  /* Get VRAM information */
848  rdev->mc.vram_is_ddr = true;
849  tmp = RREG32(MC_ARB_RAMCFG);
850  if (tmp & CHANSIZE_OVERRIDE) {
851  chansize = 16;
852  } else if (tmp & CHANSIZE_MASK) {
853  chansize = 64;
854  } else {
855  chansize = 32;
856  }
857  tmp = RREG32(MC_SHARED_CHMAP);
858  switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
859  case 0:
860  default:
861  numchan = 1;
862  break;
863  case 1:
864  numchan = 2;
865  break;
866  case 2:
867  numchan = 4;
868  break;
869  case 3:
870  numchan = 8;
871  break;
872  }
873  rdev->mc.vram_width = numchan * chansize;
874  /* Could aper size report 0 ? */
875  rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
876  rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
877  /* Setup GPU memory space */
878  rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
879  rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
880  rdev->mc.visible_vram_size = rdev->mc.aper_size;
881  r700_vram_gtt_location(rdev, &rdev->mc);
882  radeon_update_bandwidth_info(rdev);
883 
884  return 0;
885 }
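
/*
 * Editor's illustrative sketch (not part of rv770.c): how the memory bus
 * width above is derived -- decode the channel count the same way the
 * NOOFCHAN switch does and multiply by the per-channel width. decode_chans()
 * is a hypothetical helper; the field value used in main() is made up.
 */
#include <stdio.h>

static int decode_chans(unsigned int noofchan_field)
{
	switch (noofchan_field) {
	case 1:  return 2;
	case 2:  return 4;
	case 3:  return 8;
	default: return 1;      /* 0 and reserved values fall back to one channel */
	}
}

int main(void)
{
	int chansize = 32;                      /* bits per channel, per the code above */
	int numchan = decode_chans(2);          /* sample field value: four channels */

	printf("vram width: %d bits\n", numchan * chansize);    /* 4 * 32 = 128 */
	return 0;
}
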
886 
887 static int rv770_startup(struct radeon_device *rdev)
888 {
889  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
890  int r;
891 
892  /* enable pcie gen2 link */
893  rv770_pcie_gen2_enable(rdev);
894 
895  if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
896  r = r600_init_microcode(rdev);
897  if (r) {
898  DRM_ERROR("Failed to load firmware!\n");
899  return r;
900  }
901  }
902 
903  r = r600_vram_scratch_init(rdev);
904  if (r)
905  return r;
906 
907  rv770_mc_program(rdev);
908  if (rdev->flags & RADEON_IS_AGP) {
909  rv770_agp_enable(rdev);
910  } else {
911  r = rv770_pcie_gart_enable(rdev);
912  if (r)
913  return r;
914  }
915 
916  rv770_gpu_init(rdev);
917  r = r600_blit_init(rdev);
918  if (r) {
919  r600_blit_fini(rdev);
920  rdev->asic->copy.copy = NULL;
921  dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
922  }
923 
924  /* allocate wb buffer */
925  r = radeon_wb_init(rdev);
926  if (r)
927  return r;
928 
929  r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
930  if (r) {
931  dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
932  return r;
933  }
934 
935  /* Enable IRQ */
936  r = r600_irq_init(rdev);
937  if (r) {
938  DRM_ERROR("radeon: IH init failed (%d).\n", r);
939  radeon_irq_kms_fini(rdev);
940  return r;
941  }
942  r600_irq_set(rdev);
943 
944  r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
945  R600_CP_RB_RPTR, R600_CP_RB_WPTR,
946  0, 0xfffff, RADEON_CP_PACKET2);
947  if (r)
948  return r;
949  r = rv770_cp_load_microcode(rdev);
950  if (r)
951  return r;
952  r = r600_cp_resume(rdev);
953  if (r)
954  return r;
955 
956  r = radeon_ib_pool_init(rdev);
957  if (r) {
958  dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
959  return r;
960  }
961 
962  r = r600_audio_init(rdev);
963  if (r) {
964  DRM_ERROR("radeon: audio init failed\n");
965  return r;
966  }
967 
968  return 0;
969 }
970 
971 int rv770_resume(struct radeon_device *rdev)
972 {
973  int r;
974 
975  /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
976  * posting will perform necessary task to bring back GPU into good
977  * shape.
978  */
979  /* post card */
980  atom_asic_init(rdev->mode_info.atom_context);
981 
982  rdev->accel_working = true;
983  r = rv770_startup(rdev);
984  if (r) {
985  DRM_ERROR("r600 startup failed on resume\n");
986  rdev->accel_working = false;
987  return r;
988  }
989 
990  return r;
991 
992 }
993 
994 int rv770_suspend(struct radeon_device *rdev)
995 {
996  r600_audio_fini(rdev);
997  r700_cp_stop(rdev);
998  rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
999  r600_irq_suspend(rdev);
1000  radeon_wb_disable(rdev);
1001  rv770_pcie_gart_disable(rdev);
1002 
1003  return 0;
1004 }
1005 
1006 /* Plan is to move initialization into this function and use
1007  * helper functions so that radeon_device_init pretty much
1008  * does nothing more than calling asic specific functions. This
1009  * should also allow removing a bunch of callback functions
1010  * like vram_info.
1011  */
1012 int rv770_init(struct radeon_device *rdev)
1013 {
1014  int r;
1015 
1016  /* Read BIOS */
1017  if (!radeon_get_bios(rdev)) {
1018  if (ASIC_IS_AVIVO(rdev))
1019  return -EINVAL;
1020  }
1021  /* Must be an ATOMBIOS */
1022  if (!rdev->is_atom_bios) {
1023  dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
1024  return -EINVAL;
1025  }
1026  r = radeon_atombios_init(rdev);
1027  if (r)
1028  return r;
1029  /* Post card if necessary */
1030  if (!radeon_card_posted(rdev)) {
1031  if (!rdev->bios) {
1032  dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1033  return -EINVAL;
1034  }
1035  DRM_INFO("GPU not posted. posting now...\n");
1036  atom_asic_init(rdev->mode_info.atom_context);
1037  }
1038  /* Initialize scratch registers */
1039  r600_scratch_init(rdev);
1040  /* Initialize surface registers */
1041  radeon_surface_init(rdev);
1042  /* Initialize clocks */
1043  radeon_get_clock_info(rdev->ddev);
1044  /* Fence driver */
1045  r = radeon_fence_driver_init(rdev);
1046  if (r)
1047  return r;
1048  /* initialize AGP */
1049  if (rdev->flags & RADEON_IS_AGP) {
1050  r = radeon_agp_init(rdev);
1051  if (r)
1052  radeon_agp_disable(rdev);
1053  }
1054  r = rv770_mc_init(rdev);
1055  if (r)
1056  return r;
1057  /* Memory manager */
1058  r = radeon_bo_init(rdev);
1059  if (r)
1060  return r;
1061 
1062  r = radeon_irq_kms_init(rdev);
1063  if (r)
1064  return r;
1065 
1066  rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
1067  r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
1068 
1069  rdev->ih.ring_obj = NULL;
1070  r600_ih_ring_init(rdev, 64 * 1024);
1071 
1072  r = r600_pcie_gart_init(rdev);
1073  if (r)
1074  return r;
1075 
1076  rdev->accel_working = true;
1077  r = rv770_startup(rdev);
1078  if (r) {
1079  dev_err(rdev->dev, "disabling GPU acceleration\n");
1080  r700_cp_fini(rdev);
1081  r600_irq_fini(rdev);
1082  radeon_wb_fini(rdev);
1083  radeon_ib_pool_fini(rdev);
1084  radeon_irq_kms_fini(rdev);
1085  rv770_pcie_gart_fini(rdev);
1086  rdev->accel_working = false;
1087  }
1088 
1089  return 0;
1090 }
1091 
1092 void rv770_fini(struct radeon_device *rdev)
1093 {
1094  r600_blit_fini(rdev);
1095  r700_cp_fini(rdev);
1096  r600_irq_fini(rdev);
1097  radeon_wb_fini(rdev);
1098  radeon_ib_pool_fini(rdev);
1099  radeon_irq_kms_fini(rdev);
1100  rv770_pcie_gart_fini(rdev);
1101  r600_vram_scratch_fini(rdev);
1102  radeon_gem_fini(rdev);
1103  radeon_fence_driver_fini(rdev);
1104  radeon_agp_fini(rdev);
1105  radeon_bo_fini(rdev);
1106  radeon_atombios_fini(rdev);
1107  kfree(rdev->bios);
1108  rdev->bios = NULL;
1109 }
1110 
1111 static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
1112 {
1113  u32 link_width_cntl, lanes, speed_cntl, tmp;
1114  u16 link_cntl2;
1115  u32 mask;
1116  int ret;
1117 
1118  if (radeon_pcie_gen2 == 0)
1119  return;
1120 
1121  if (rdev->flags & RADEON_IS_IGP)
1122  return;
1123 
1124  if (!(rdev->flags & RADEON_IS_PCIE))
1125  return;
1126 
1127  /* x2 cards have a special sequence */
1128  if (ASIC_IS_X2(rdev))
1129  return;
1130 
1131  ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
1132  if (ret != 0)
1133  return;
1134 
1135  if (!(mask & DRM_PCIE_SPEED_50))
1136  return;
1137 
1138  DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
1139 
1140  /* advertise upconfig capability */
1141  link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
1142  link_width_cntl &= ~LC_UPCONFIGURE_DIS;
1143  WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1144  link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
1145  if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
1146  lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
1147  link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
1148  LC_RECONFIG_ARC_MISSING_ESCAPE);
1149  link_width_cntl |= lanes | LC_RECONFIG_NOW |
1150  LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
1151  WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1152  } else {
1153  link_width_cntl |= LC_UPCONFIGURE_DIS;
1154  WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1155  }
1156 
1157  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
1158  if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
1159  (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
1160 
1161  tmp = RREG32(0x541c);
1162  WREG32(0x541c, tmp | 0x8);
1163  WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
1164  link_cntl2 = RREG16(0x4088);
1165  link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
1166  link_cntl2 |= 0x2;
1167  WREG16(0x4088, link_cntl2);
1168  WREG32(MM_CFGREGS_CNTL, 0);
1169 
1170  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
1171  speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
1172  WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
1173 
1174  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
1175  speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
1176  WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
1177 
1178  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
1179  speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
1180  WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
1181 
1182  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
1183  speed_cntl |= LC_GEN2_EN_STRAP;
1184  WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
1185 
1186  } else {
1187  link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
1188  /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
1189  if (1)
1190  link_width_cntl |= LC_UPCONFIGURE_DIS;
1191  else
1192  link_width_cntl &= ~LC_UPCONFIGURE_DIS;
1193  WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1194  }
1195 }
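
/*
 * Editor's illustrative sketch (not part of rv770.c): the read-modify-write
 * idiom the gen2 sequence above repeats on PCIE_LC_SPEED_CNTL (read the
 * register, set or clear one field, write it back). reg_rmw(), the simulated
 * register and the bit positions are assumptions made for the example; the
 * real code goes through RREG32_PCIE_P()/WREG32_PCIE_P().
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_speed_cntl;        /* stands in for the PCIE port register */

static void reg_rmw(volatile uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t v = *reg;      /* read */

	v &= ~clear;            /* modify: drop the bits being cleared */
	v |= set;               /*         then raise the bits being set */
	*reg = v;               /* write back */
}

int main(void)
{
	const uint32_t CLR_FAILED_SPD_CHANGE_CNT = 1u << 21;    /* assumed bit layout */
	const uint32_t GEN2_EN_STRAP = 1u << 0;

	reg_rmw(&fake_speed_cntl, 0, CLR_FAILED_SPD_CHANGE_CNT);    /* latch the clear */
	reg_rmw(&fake_speed_cntl, CLR_FAILED_SPD_CHANGE_CNT, 0);    /* release it */
	reg_rmw(&fake_speed_cntl, 0, GEN2_EN_STRAP);                /* request gen2 */
	printf("speed_cntl = 0x%08x\n", fake_speed_cntl);
	return 0;
}
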