#include <linux/slab.h>
#include <linux/module.h>

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
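/* on-die thermal sensor: the low byte of the status register is degrees C;
 * the value is scaled to millidegrees for the hwmon interface below */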
int actual_temp = temp & 0xff;

return actual_temp * 1000;
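/*
 * r600_pm_get_dynpm_state(): pick the next power state / clock mode for the
 * planned dynpm action (minimum, downclock, upclock, default).  The power
 * state array is ordered low to high with the default state first; chips
 * with several power states step through the array, while two-state chips
 * keep one state and step through its clock modes instead.
 */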
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
int min_power_state_index = 0;

if (rdev->pm.num_power_states > 2)
	min_power_state_index = 1;
switch (rdev->pm.dynpm_planned_action) {
case DYNPM_ACTION_MINIMUM:
	rdev->pm.requested_power_state_index = min_power_state_index;
	rdev->pm.requested_clock_mode_index = 0;
	rdev->pm.dynpm_can_downclock = false;
	break;
case DYNPM_ACTION_DOWNCLOCK:
	if (rdev->pm.current_power_state_index == min_power_state_index) {
		rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
		rdev->pm.dynpm_can_downclock = false;
	} else {
		if (rdev->pm.active_crtc_count > 1) {
			for (i = 0; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if (i >= rdev->pm.current_power_state_index) {
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index;
					break;
				} else {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
		} else {
			if (rdev->pm.current_power_state_index == 0)
				rdev->pm.requested_power_state_index =
					rdev->pm.num_power_states - 1;
			else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
	}
	rdev->pm.requested_clock_mode_index = 0;
	/* don't use the power state if crtcs are active and no display flag is set */
	if ((rdev->pm.active_crtc_count > 0) &&
	    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
	     clock_info[rdev->pm.requested_clock_mode_index].flags &
	     RADEON_PM_MODE_NO_DISPLAY)) {
		rdev->pm.requested_power_state_index++;
	}
	break;
case DYNPM_ACTION_UPCLOCK:
	if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
		rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
	} else {
		if (rdev->pm.active_crtc_count > 1) {
			for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if (i <= rdev->pm.current_power_state_index) {
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index;
					break;
				} else {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
		} else
			rdev->pm.requested_power_state_index =
				rdev->pm.current_power_state_index + 1;
	}
	rdev->pm.requested_clock_mode_index = 0;
	break;
case DYNPM_ACTION_DEFAULT:
	rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.requested_clock_mode_index = 0;
	rdev->pm.dynpm_can_upclock = false;
	break;
case DYNPM_ACTION_NONE:
default:
	DRM_ERROR("Requested mode for undefined action\n");
	return;
}
} else {
	/* for now just select the first power state and switch between clock modes */
	if (rdev->pm.active_crtc_count > 1) {
		rdev->pm.requested_power_state_index = -1;
		/* start at 1 as we don't want the default mode */
		for (i = 1; i < rdev->pm.num_power_states; i++) {
			if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
				continue;
			else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
				 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
				rdev->pm.requested_power_state_index = i;
				break;
			}
		}
		/* if nothing selected, grab the default state. */
		if (rdev->pm.requested_power_state_index == -1)
			rdev->pm.requested_power_state_index = 0;
	} else
		rdev->pm.requested_power_state_index = 1;
	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_clock_mode_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
			if (rdev->pm.current_clock_mode_index == 0) {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			} else
				rdev->pm.requested_clock_mode_index =
					rdev->pm.current_clock_mode_index - 1;
		} else {
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
		     clock_info[rdev->pm.requested_clock_mode_index].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_clock_mode_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
			if (rdev->pm.current_clock_mode_index ==
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
				rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
				rdev->pm.dynpm_can_upclock = false;
			} else
				rdev->pm.requested_clock_mode_index =
					rdev->pm.current_clock_mode_index + 1;
		} else {
			rdev->pm.requested_clock_mode_index =
				rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
			rdev->pm.dynpm_can_upclock = false;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.requested_clock_mode_index = 0;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined action\n");
		return;
	}
}
DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 clock_info[rdev->pm.requested_clock_mode_index].sclk,
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 clock_info[rdev->pm.requested_clock_mode_index].mclk,
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 pcie_lanes);
if (rdev->pm.num_power_states == 2) {

} else if (rdev->pm.num_power_states == 3) {
if (rdev->pm.num_power_states < 4) {
int req_ps_idx = rdev->pm.requested_power_state_index;
int req_cm_idx = rdev->pm.requested_clock_mode_index;
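/* 0xff01 is a flag rather than an actual voltage value */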
if (voltage->voltage == 0xff01)
	return;
if (voltage->voltage != rdev->pm.current_vddc) {
	radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
	rdev->pm.current_vddc = voltage->voltage;
	DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
}
bool connected = false;
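/* hot plug detect: each connector's DC_HPDx block is programmed in a
 * switch on radeon_connector->hpd.hpd, and the pins are accumulated into
 * enable/disable bitmasks for the HPD interrupt setup below */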
switch (radeon_connector->hpd.hpd) {

switch (radeon_connector->hpd.hpd) {

enable |= 1 << radeon_connector->hpd.hpd;

switch (radeon_connector->hpd.hpd) {

switch (radeon_connector->hpd.hpd) {

disable |= 1 << radeon_connector->hpd.hpd;
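/* PCIE GART: the page table holds one 64-bit entry per GPU page
 * (table_size = num_gpu_pages * 8) and is backed by a VRAM buffer object */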
if (rdev->gart.robj) {
	WARN(1, "R600 PCIE GART already initialized\n");
	return 0;
}

rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;

dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");

for (i = 1; i < 7; i++)

DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
	 (unsigned)(rdev->mc.gtt_size >> 20),
	 (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
static void r600_pcie_gart_disable(struct radeon_device *rdev)

for (i = 0; i < 7; i++)

r600_pcie_gart_disable(rdev);
for (i = 0; i < 7; i++)
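/* Initialize HDP */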
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
	WREG32((0x2c14 + j), 0x00000000);
	WREG32((0x2c18 + j), 0x00000000);
	WREG32((0x2c1c + j), 0x00000000);
	WREG32((0x2c20 + j), 0x00000000);
	WREG32((0x2c24 + j), 0x00000000);
}
dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
if (rdev->mc.vram_start < rdev->mc.gtt_start) {
	/* VRAM before AGP */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		rdev->mc.gtt_end >> 12);
} else {
	/* VRAM after AGP */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		rdev->mc.gtt_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		rdev->mc.vram_end >> 12);
}
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
u64 size_bf, size_af;

size_af = 0xFFFFFFFF - mc->gtt_end;
if (size_bf > size_af) {

dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",

rdev->mc.gtt_base_align = 0;
int chansize, numchan;

rdev->mc.vram_is_ddr = true;

rdev->mc.vram_width = numchan * chansize;

rdev->mc.visible_vram_size = rdev->mc.aper_size;
r600_vram_gtt_location(rdev, &rdev->mc);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
	RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
	RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
	RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
	RREG32(R_008674_CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
	RREG32(R_008678_CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
	RREG32(R_00867C_CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
	RREG32(R_008680_CP_STAT));

dev_warn(rdev->dev, "Wait for MC idle timed out!\n");

dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);

dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);

dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
	RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
	RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
	RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
	RREG32(R_008674_CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
	RREG32(R_008678_CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
	RREG32(R_00867C_CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
	RREG32(R_008680_CP_STAT));

return r600_gpu_soft_reset(rdev);
static u32 r6xx_remap_render_backend(struct radeon_device *rdev,
				     u32 tiling_pipe_num,
				     u32 max_rb_num,
				     u32 total_max_rb_num,
				     u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain;

	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;

	rendering_pipe_num = 1 << tiling_pipe_num;

	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
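	/* distribute the rendering pipes across the enabled RBs: every RB
	 * gets pipe_rb_ratio slots, and one extra slot goes to each RB while
	 * pipe_rb_remain lasts */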
	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}
for (i = 0; i < 32; i++) {

u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 sq_gpr_resource_mgmt_1 = 0;
u32 sq_gpr_resource_mgmt_2 = 0;
u32 sq_thread_resource_mgmt = 0;
u32 sq_stack_resource_mgmt_1 = 0;
u32 sq_stack_resource_mgmt_2 = 0;
u32 disabled_rb_mask;
rdev->config.r600.sx_max_export_pos_size = 16;
rdev->config.r600.sx_max_export_smx_size = 128;

rdev->config.r600.sx_max_export_pos_size = 16;
rdev->config.r600.sx_max_export_smx_size = 128;

rdev->config.r600.sx_max_export_pos_size = 16;
rdev->config.r600.sx_max_export_smx_size = 128;

rdev->config.r600.sx_max_export_pos_size = 16;
rdev->config.r600.sx_max_export_smx_size = 128;
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
	WREG32((0x2c14 + j), 0x00000000);
	WREG32((0x2c18 + j), 0x00000000);
	WREG32((0x2c1c + j), 0x00000000);
	WREG32((0x2c20 + j), 0x00000000);
	WREG32((0x2c24 + j), 0x00000000);
}
if (tmp < rdev->config.r600.max_backends) {

if (tmp < rdev->config.r600.max_pipes) {

if (tmp < rdev->config.r600.max_simds) {

tiling_config |= tmp << 16;

rdev->config.r600.tile_config = tiling_config;
const char *chip_name;
const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, rlc_req_size;

pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
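/* firmware is selected per family: PFP/ME images are per chip, while the
 * RLC image is shared, R600 for all r6xx parts, R700 for r7xx parts, and
 * SUMO for the Fusion APUs; Evergreen parts ship their own RLC */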
case CHIP_R600:
	chip_name = "R600";
	rlc_chip_name = "R600";
	break;
case CHIP_RV610:
	chip_name = "RV610";
	rlc_chip_name = "R600";
	break;
case CHIP_RV630:
	chip_name = "RV630";
	rlc_chip_name = "R600";
	break;
case CHIP_RV620:
	chip_name = "RV620";
	rlc_chip_name = "R600";
	break;
case CHIP_RV635:
	chip_name = "RV635";
	rlc_chip_name = "R600";
	break;
case CHIP_RV670:
	chip_name = "RV670";
	rlc_chip_name = "R600";
	break;
case CHIP_RS780:
case CHIP_RS880:
	chip_name = "RS780";
	rlc_chip_name = "R600";
	break;
case CHIP_RV770:
	chip_name = "RV770";
	rlc_chip_name = "R700";
	break;
case CHIP_RV730:
case CHIP_RV740:
	chip_name = "RV730";
	rlc_chip_name = "R700";
	break;
case CHIP_RV710:
	chip_name = "RV710";
	rlc_chip_name = "R700";
	break;
case CHIP_CEDAR:
	chip_name = "CEDAR";
	rlc_chip_name = "CEDAR";
	break;
case CHIP_REDWOOD:
	chip_name = "REDWOOD";
	rlc_chip_name = "REDWOOD";
	break;
case CHIP_JUNIPER:
	chip_name = "JUNIPER";
	rlc_chip_name = "JUNIPER";
	break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
	chip_name = "CYPRESS";
	rlc_chip_name = "CYPRESS";
	break;
case CHIP_PALM:
	chip_name = "PALM";
	rlc_chip_name = "SUMO";
	break;
case CHIP_SUMO:
	chip_name = "SUMO";
	rlc_chip_name = "SUMO";
	break;
case CHIP_SUMO2:
	chip_name = "SUMO2";
	rlc_chip_name = "SUMO";
	break;
default: BUG();
}
DRM_INFO("Loading %s Microcode\n", chip_name);

snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
if (err)
	goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
	printk(KERN_ERR
	       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
	       rdev->pfp_fw->size, fw_name);
	err = -EINVAL;
	goto out;
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
if (err)
	goto out;
if (rdev->me_fw->size != me_req_size) {
	printk(KERN_ERR
	       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
	       rdev->me_fw->size, fw_name);
	err = -EINVAL;
	goto out;
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
if (err)
	goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
	printk(KERN_ERR
	       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
	       rdev->rlc_fw->size, fw_name);
	err = -EINVAL;
	goto out;
}

printk(KERN_ERR
       "r600_cp: Failed to load firmware \"%s\"\n",
       fw_name);
static int r600_cp_load_microcode(struct radeon_device *rdev)

DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);

if (rdev->wb.enabled)

ring->ready = false;

ring_size = (1 << (rb_bufsz + 1)) * 4;

DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);

for (i = 0; i < rdev->scratch.num_reg; i++) {

DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
WREG32(scratch, 0xCAFEDEAD);
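/* ring test: the scratch register is seeded with 0xCAFEDEAD, the CP is
 * asked to write 0xDEADBEEF to it, and we poll until the value flips */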
DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);

if (tmp == 0xDEADBEEF)
	break;

if (i < rdev->usec_timeout) {
	DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
	DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
		  ring->idx, scratch, tmp);
}
if (rdev->wb.use_event) {

unsigned num_gpu_pages,
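/* startup order matters here: program the MC and bring up the GART before
 * initializing the GPU core, then load the CP microcode and start the
 * rings and the interrupt handler */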
r600_pcie_gen2_enable(rdev);

DRM_ERROR("Failed to load firmware!\n");

r600_mc_program(rdev);

r600_agp_enable(rdev);

r = r600_pcie_gart_enable(rdev);

r600_gpu_init(rdev);

dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);

dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);

DRM_ERROR("radeon: IH init failed (%d).\n", r);

r = r600_cp_load_microcode(rdev);

dev_err(rdev->dev, "IB initialization failed (%d).\n", r);

DRM_ERROR("radeon: audio init failed\n");
if (state == false) {

r = r600_startup(rdev);

DRM_ERROR("r600 startup failed on resume\n");

r600_pcie_gart_disable(rdev);
DRM_ERROR("Failed to register debugfs file for mc!\n");

dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");

dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");

DRM_INFO("GPU not posted. posting now...\n");

r = r600_mc_init(rdev);

rdev->ih.ring_obj = NULL;
r = r600_startup(rdev);

dev_err(rdev->dev, "disabling GPU acceleration\n");

r600_pcie_gart_fini(rdev);

r600_pcie_gart_fini(rdev);
next_rptr = ring->wptr + 3 + 4;

} else if (rdev->wb.enabled) {
	next_rptr = ring->wptr + 5 + 4;
DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);

WREG32(scratch, 0xCAFEDEAD);
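/* IB test: same idea as the ring test, but the 0xDEADBEEF write is carried
 * in an indirect buffer and completion is observed through its fence */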
DRM_ERROR("radeon: failed to get ib (%d).\n", r);

ib.ptr[2] = 0xDEADBEEF;

DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

DRM_ERROR("radeon: fence wait failed (%d).\n", r);
if (tmp == 0xDEADBEEF)
	break;

if (i < rdev->usec_timeout) {
	DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
} else {
	DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
		  scratch, tmp);
}
ring_size = (1 << rb_bufsz) * 4;

rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
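/* the IH ring size is a power of two, so (size - 1) doubles as the
 * wrap-around mask for rptr/wptr arithmetic */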
if (rdev->ih.ring_obj == NULL) {
	r = radeon_bo_create(rdev, rdev->ih.ring_size,
			     PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_GTT,
			     NULL, &rdev->ih.ring_obj);
	if (r) {
		DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
		return r;
	}
	r = radeon_bo_pin(rdev->ih.ring_obj,
			  RADEON_GEM_DOMAIN_GTT,
			  &rdev->ih.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ih.ring_obj);
		DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ih.ring_obj,
			   (void **)&rdev->ih.ring);
	radeon_bo_unreserve(rdev->ih.ring_obj);
	if (r) {
		DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
		return r;
	}
}

if (rdev->ih.ring_obj) {

	radeon_bo_unreserve(rdev->ih.ring_obj);

rdev->ih.ring_obj = NULL;
r600_rlc_start(rdev);

static void r600_enable_interrupts(struct radeon_device *rdev)

rdev->ih.enabled = true;

rdev->ih.enabled = false;

static void r600_disable_interrupt_state(struct radeon_device *rdev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

ret = r600_rlc_init(rdev);

if (rdev->wb.enabled)

r600_disable_interrupt_state(rdev);

r600_enable_interrupts(rdev);
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 d1grph = 0, d2grph = 0;

if (!rdev->irq.installed) {
	WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
	return -EINVAL;
}
/* don't enable anything if the ih is disabled */
if (!rdev->ih.enabled) {
	r600_disable_interrupt_state(rdev);
DRM_DEBUG("r600_irq_set: sw int\n");

if (rdev->irq.crtc_vblank_int[0] ||
    atomic_read(&rdev->irq.pflip[0])) {
	DRM_DEBUG("r600_irq_set: vblank 0\n");
}
if (rdev->irq.crtc_vblank_int[1] ||
    atomic_read(&rdev->irq.pflip[1])) {
	DRM_DEBUG("r600_irq_set: vblank 1\n");
}
if (rdev->irq.hpd[0]) {
	DRM_DEBUG("r600_irq_set: hpd 1\n");
}
if (rdev->irq.hpd[1]) {
	DRM_DEBUG("r600_irq_set: hpd 2\n");
}
if (rdev->irq.hpd[2]) {
	DRM_DEBUG("r600_irq_set: hpd 3\n");
}
if (rdev->irq.hpd[3]) {
	DRM_DEBUG("r600_irq_set: hpd 4\n");
}
if (rdev->irq.hpd[4]) {
	DRM_DEBUG("r600_irq_set: hpd 5\n");
}
if (rdev->irq.hpd[5]) {
	DRM_DEBUG("r600_irq_set: hpd 6\n");
}
if (rdev->irq.afmt[0]) {
	DRM_DEBUG("r600_irq_set: hdmi 0\n");
}
if (rdev->irq.afmt[1]) {
	DRM_DEBUG("r600_irq_set: hdmi 1\n");
}
rdev->irq.stat_regs.r600.disp_int_cont2 = 0;

r600_disable_interrupt_state(rdev);
if (rdev->wb.enabled)
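/* on overflow the IH has wrapped past unread entries; warn and move rptr
 * just past wptr so processing resumes with the newest events */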
dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
	 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;

return (wptr & rdev->ih.ptr_mask);
u32 src_id, src_data;

bool queue_hotplug = false;
bool queue_hdmi = false;

wptr = r600_get_ih_wptr(rdev);

rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
while (rptr != wptr) {
	/* wptr/rptr are in bytes! */
	ring_index = rptr / 4;
	src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
	src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
if (rdev->irq.crtc_vblank_int[0]) {
	drm_handle_vblank(rdev->ddev, 0);
	rdev->pm.vblank_sync = true;
	wake_up(&rdev->irq.vblank_queue);
}

DRM_DEBUG("IH: D1 vblank\n");

DRM_DEBUG("IH: D1 vline\n");

DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
if (rdev->irq.crtc_vblank_int[1]) {
	drm_handle_vblank(rdev->ddev, 1);
	rdev->pm.vblank_sync = true;
	wake_up(&rdev->irq.vblank_queue);
}

DRM_DEBUG("IH: D2 vblank\n");

DRM_DEBUG("IH: D2 vline\n");

DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");

queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");

queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");

queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");

queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");

queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");

DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
DRM_DEBUG("IH: HDMI0\n");

DRM_DEBUG("IH: HDMI1\n");

DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);

DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);

DRM_DEBUG("IH: CP EOP\n");

DRM_DEBUG("IH: GUI idle\n");

DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
rptr += 16;
rptr &= rdev->ih.ptr_mask;

wptr = r600_get_ih_wptr(rdev);
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},

#if defined(CONFIG_DEBUG_FS)
u32 link_width_cntl, mask, target_reg;
link_width_cntl |= mask;
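/* wait for lane set to complete */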
link_width_cntl = RREG32(target_reg);
while (link_width_cntl == 0xffffffff)
	link_width_cntl = RREG32(target_reg);
u32 link_width_cntl;

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
if (!(mask & DRM_PCIE_SPEED_50))
	return;

DRM_INFO("PCIE gen 2 link speeds already enabled\n");

DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
link_cntl2 = RREG32(0x4088);

WREG32(0x541c, tmp | 0x8);

link_cntl2 = RREG16(0x4088);

WREG16(0x4088, link_cntl2);