Linux Kernel 3.7.1
r600.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  * Alex Deucher
26  * Jerome Glisse
27  */
28 #include <linux/slab.h>
29 #include <linux/seq_file.h>
30 #include <linux/firmware.h>
31 #include <linux/platform_device.h>
32 #include <linux/module.h>
33 #include <drm/drmP.h>
34 #include <drm/radeon_drm.h>
35 #include "radeon.h"
36 #include "radeon_asic.h"
37 #include "radeon_mode.h"
38 #include "r600d.h"
39 #include "atom.h"
40 #include "avivod.h"
41 
42 #define PFP_UCODE_SIZE 576
43 #define PM4_UCODE_SIZE 1792
44 #define RLC_UCODE_SIZE 768
45 #define R700_PFP_UCODE_SIZE 848
46 #define R700_PM4_UCODE_SIZE 1360
47 #define R700_RLC_UCODE_SIZE 1024
48 #define EVERGREEN_PFP_UCODE_SIZE 1120
49 #define EVERGREEN_PM4_UCODE_SIZE 1376
50 #define EVERGREEN_RLC_UCODE_SIZE 768
51 #define CAYMAN_RLC_UCODE_SIZE 1024
52 #define ARUBA_RLC_UCODE_SIZE 1536
53 
54 /* Firmware Names */
55 MODULE_FIRMWARE("radeon/R600_pfp.bin");
56 MODULE_FIRMWARE("radeon/R600_me.bin");
57 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
58 MODULE_FIRMWARE("radeon/RV610_me.bin");
59 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
60 MODULE_FIRMWARE("radeon/RV630_me.bin");
61 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
62 MODULE_FIRMWARE("radeon/RV620_me.bin");
63 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
64 MODULE_FIRMWARE("radeon/RV635_me.bin");
65 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
66 MODULE_FIRMWARE("radeon/RV670_me.bin");
67 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
68 MODULE_FIRMWARE("radeon/RS780_me.bin");
69 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
70 MODULE_FIRMWARE("radeon/RV770_me.bin");
71 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
72 MODULE_FIRMWARE("radeon/RV730_me.bin");
73 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
74 MODULE_FIRMWARE("radeon/RV710_me.bin");
75 MODULE_FIRMWARE("radeon/R600_rlc.bin");
76 MODULE_FIRMWARE("radeon/R700_rlc.bin");
77 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
78 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
79 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
80 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
81 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
82 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
83 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
84 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
85 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
86 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
87 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
88 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
89 MODULE_FIRMWARE("radeon/PALM_pfp.bin");
90 MODULE_FIRMWARE("radeon/PALM_me.bin");
91 MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
92 MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
93 MODULE_FIRMWARE("radeon/SUMO_me.bin");
94 MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
95 MODULE_FIRMWARE("radeon/SUMO2_me.bin");
96 
97 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
98 
99 /* r600,rv610,rv630,rv620,rv635,rv670 */
100 int r600_mc_wait_for_idle(struct radeon_device *rdev);
101 static void r600_gpu_init(struct radeon_device *rdev);
102 void r600_fini(struct radeon_device *rdev);
103 void r600_irq_disable(struct radeon_device *rdev);
104 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
105 
106 /* get temperature in millidegrees */
107 int rv6xx_get_temp(struct radeon_device *rdev)
108 {
109  u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
110  ASIC_T_SHIFT;
111  int actual_temp = temp & 0xff;
112 
113  if (temp & 0x100)
114  actual_temp -= 256;
115 
116  return actual_temp * 1000;
117 }
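
/* Illustration (not part of the kernel source): a standalone sketch of the
 * sign handling above. The thermal sensor field is treated as a 9-bit
 * two's-complement value: bit 8 is the sign, bits 0-7 the magnitude.
 */
static int decode_asic_temp(unsigned int temp /* 9-bit raw field */)
{
	int actual_temp = temp & 0xff;	/* low 8 bits */

	if (temp & 0x100)		/* sign bit set: value is negative */
		actual_temp -= 256;

	return actual_temp * 1000;	/* degrees C -> millidegrees C */
}
/* e.g. decode_asic_temp(0x02D) == 45000 (45 C),
 *      decode_asic_temp(0x1F0) == -16000 (-16 C). */
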
118 
119 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
120 {
121  int i;
122 
123  rdev->pm.dynpm_can_upclock = true;
124  rdev->pm.dynpm_can_downclock = true;
125 
126  /* power state array is low to high, default is first */
127  if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
128  int min_power_state_index = 0;
129 
130  if (rdev->pm.num_power_states > 2)
131  min_power_state_index = 1;
132 
133  switch (rdev->pm.dynpm_planned_action) {
134  case DYNPM_ACTION_MINIMUM:
135  rdev->pm.requested_power_state_index = min_power_state_index;
136  rdev->pm.requested_clock_mode_index = 0;
137  rdev->pm.dynpm_can_downclock = false;
138  break;
139  case DYNPM_ACTION_DOWNCLOCK:
140  if (rdev->pm.current_power_state_index == min_power_state_index) {
141  rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
142  rdev->pm.dynpm_can_downclock = false;
143  } else {
144  if (rdev->pm.active_crtc_count > 1) {
145  for (i = 0; i < rdev->pm.num_power_states; i++) {
146  if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
147  continue;
148  else if (i >= rdev->pm.current_power_state_index) {
149  rdev->pm.requested_power_state_index =
150  rdev->pm.current_power_state_index;
151  break;
152  } else {
153  rdev->pm.requested_power_state_index = i;
154  break;
155  }
156  }
157  } else {
158  if (rdev->pm.current_power_state_index == 0)
159  rdev->pm.requested_power_state_index =
160  rdev->pm.num_power_states - 1;
161  else
162  rdev->pm.requested_power_state_index =
163  rdev->pm.current_power_state_index - 1;
164  }
165  }
166  rdev->pm.requested_clock_mode_index = 0;
167  /* don't use the power state if crtcs are active and no display flag is set */
168  if ((rdev->pm.active_crtc_count > 0) &&
169  (rdev->pm.power_state[rdev->pm.requested_power_state_index].
170  clock_info[rdev->pm.requested_clock_mode_index].flags &
171  RADEON_PM_MODE_NO_DISPLAY)) {
172  rdev->pm.requested_power_state_index++;
173  }
174  break;
175  case DYNPM_ACTION_UPCLOCK:
176  if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
177  rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
178  rdev->pm.dynpm_can_upclock = false;
179  } else {
180  if (rdev->pm.active_crtc_count > 1) {
181  for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
182  if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
183  continue;
184  else if (i <= rdev->pm.current_power_state_index) {
185  rdev->pm.requested_power_state_index =
186  rdev->pm.current_power_state_index;
187  break;
188  } else {
189  rdev->pm.requested_power_state_index = i;
190  break;
191  }
192  }
193  } else
194  rdev->pm.requested_power_state_index =
195  rdev->pm.current_power_state_index + 1;
196  }
197  rdev->pm.requested_clock_mode_index = 0;
198  break;
199  case DYNPM_ACTION_DEFAULT:
200  rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
201  rdev->pm.requested_clock_mode_index = 0;
202  rdev->pm.dynpm_can_upclock = false;
203  break;
204  case DYNPM_ACTION_NONE:
205  default:
206  DRM_ERROR("Requested mode for not defined action\n");
207  return;
208  }
209  } else {
210  /* XXX select a power state based on AC/DC, single/dualhead, etc. */
211  /* for now just select the first power state and switch between clock modes */
212  /* power state array is low to high, default is first (0) */
213  if (rdev->pm.active_crtc_count > 1) {
214  rdev->pm.requested_power_state_index = -1;
215  /* start at 1 as we don't want the default mode */
216  for (i = 1; i < rdev->pm.num_power_states; i++) {
217  if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
218  continue;
219  else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
220  (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
221  rdev->pm.requested_power_state_index = i;
222  break;
223  }
224  }
225  /* if nothing selected, grab the default state. */
226  if (rdev->pm.requested_power_state_index == -1)
227  rdev->pm.requested_power_state_index = 0;
228  } else
229  rdev->pm.requested_power_state_index = 1;
230 
231  switch (rdev->pm.dynpm_planned_action) {
232  case DYNPM_ACTION_MINIMUM:
233  rdev->pm.requested_clock_mode_index = 0;
234  rdev->pm.dynpm_can_downclock = false;
235  break;
236  case DYNPM_ACTION_DOWNCLOCK:
237  if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
238  if (rdev->pm.current_clock_mode_index == 0) {
239  rdev->pm.requested_clock_mode_index = 0;
240  rdev->pm.dynpm_can_downclock = false;
241  } else
242  rdev->pm.requested_clock_mode_index =
243  rdev->pm.current_clock_mode_index - 1;
244  } else {
245  rdev->pm.requested_clock_mode_index = 0;
246  rdev->pm.dynpm_can_downclock = false;
247  }
248  /* don't use the power state if crtcs are active and no display flag is set */
249  if ((rdev->pm.active_crtc_count > 0) &&
250  (rdev->pm.power_state[rdev->pm.requested_power_state_index].
251  clock_info[rdev->pm.requested_clock_mode_index].flags &
252  RADEON_PM_MODE_NO_DISPLAY)) {
253  rdev->pm.requested_clock_mode_index++;
254  }
255  break;
256  case DYNPM_ACTION_UPCLOCK:
257  if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
258  if (rdev->pm.current_clock_mode_index ==
259  (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
260  rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
261  rdev->pm.dynpm_can_upclock = false;
262  } else
263  rdev->pm.requested_clock_mode_index =
264  rdev->pm.current_clock_mode_index + 1;
265  } else {
266  rdev->pm.requested_clock_mode_index =
267  rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
268  rdev->pm.dynpm_can_upclock = false;
269  }
270  break;
271  case DYNPM_ACTION_DEFAULT:
272  rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
273  rdev->pm.requested_clock_mode_index = 0;
274  rdev->pm.dynpm_can_upclock = false;
275  break;
276  case DYNPM_ACTION_NONE:
277  default:
278  DRM_ERROR("Requested mode for not defined action\n");
279  return;
280  }
281  }
282 
283  DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
284  rdev->pm.power_state[rdev->pm.requested_power_state_index].
285  clock_info[rdev->pm.requested_clock_mode_index].sclk,
286  rdev->pm.power_state[rdev->pm.requested_power_state_index].
287  clock_info[rdev->pm.requested_clock_mode_index].mclk,
288  rdev->pm.power_state[rdev->pm.requested_power_state_index].
289  pcie_lanes);
290 }
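
/* Illustration (not part of the kernel source): how the planned actions
 * above resolve on an IGP/R600 with a three-entry power state array
 * (0 = default/boot, 1 = low, 2 = high), where min_power_state_index = 1:
 *   DYNPM_ACTION_MINIMUM   -> index 1, the floor; downclocking is refused
 *   DYNPM_ACTION_DOWNCLOCK -> one state lower (a current index of 0, the
 *                             boot state, instead falls back to the top
 *                             of the list)
 *   DYNPM_ACTION_UPCLOCK   -> one state higher, capped at num_states - 1
 *   DYNPM_ACTION_DEFAULT   -> default_power_state_index
 * States flagged RADEON_PM_STATE_SINGLE_DISPLAY_ONLY are skipped whenever
 * more than one CRTC is active.
 */
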
291 
292 void rs780_pm_init_profile(struct radeon_device *rdev)
293 {
294  if (rdev->pm.num_power_states == 2) {
295  /* default */
296  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
297  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
298  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
299  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
300  /* low sh */
301  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
302  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
303  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
304  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
305  /* mid sh */
306  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
307  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
308  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
309  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
310  /* high sh */
311  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
312  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
313  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
314  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
315  /* low mh */
316  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
317  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
318  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
319  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
320  /* mid mh */
321  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
322  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
323  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
324  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
325  /* high mh */
326  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
327  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
328  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
329  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
330  } else if (rdev->pm.num_power_states == 3) {
331  /* default */
332  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
333  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
334  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
335  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
336  /* low sh */
337  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
338  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
339  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
340  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
341  /* mid sh */
342  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
343  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
344  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
345  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
346  /* high sh */
347  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
348  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
349  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
350  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
351  /* low mh */
352  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
353  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
354  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
355  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
356  /* mid mh */
357  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
358  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
359  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
360  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
361  /* high mh */
362  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
363  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
364  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
365  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
366  } else {
367  /* default */
368  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
369  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
370  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
371  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
372  /* low sh */
373  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
374  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
375  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
376  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
377  /* mid sh */
378  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
379  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
380  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
381  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
382  /* high sh */
383  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
384  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
385  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
386  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
387  /* low mh */
388  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
389  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
390  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
391  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
392  /* mid mh */
393  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
394  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
395  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
396  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
397  /* high mh */
398  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
399  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
400  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
401  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
402  }
403 }
404 
405 void r600_pm_init_profile(struct radeon_device *rdev)
406 {
407  int idx;
408 
409  if (rdev->family == CHIP_R600) {
410  /* XXX */
411  /* default */
412  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
413  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
414  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
415  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
416  /* low sh */
417  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
418  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
419  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
420  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
421  /* mid sh */
422  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
423  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
424  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
425  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
426  /* high sh */
427  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
428  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
429  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
430  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
431  /* low mh */
432  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
433  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
434  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
435  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
436  /* mid mh */
437  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
438  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
439  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
440  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
441  /* high mh */
442  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
443  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
444  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
445  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
446  } else {
447  if (rdev->pm.num_power_states < 4) {
448  /* default */
449  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
450  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
451  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
452  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
453  /* low sh */
454  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
455  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
456  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
457  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
458  /* mid sh */
459  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
460  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
461  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
462  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
463  /* high sh */
464  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
465  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
466  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
467  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
468  /* low mh */
469  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
470  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
471  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
472  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
473  /* mid mh */
474  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
475  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
476  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
477  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
478  /* high mh */
479  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
480  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
481  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
482  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
483  } else {
484  /* default */
485  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
486  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
487  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
488  rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
489  /* low sh */
490  if (rdev->flags & RADEON_IS_MOBILITY)
491  idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
492  else
493  idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
494  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
495  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
496  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
497  rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
498  /* mid sh */
499  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
500  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
501  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
502  rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
503  /* high sh */
504  idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
505  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
506  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
507  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
508  rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
509  /* low mh */
510  if (rdev->flags & RADEON_IS_MOBILITY)
511  idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
512  else
513  idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
514  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
515  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
516  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
517  rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
518  /* mid mh */
519  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
520  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
521  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
522  rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
523  /* high mh */
524  idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
525  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
526  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
527  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
528  rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
529  }
530  }
531 }
532 
533 void r600_pm_misc(struct radeon_device *rdev)
534 {
535  int req_ps_idx = rdev->pm.requested_power_state_index;
536  int req_cm_idx = rdev->pm.requested_clock_mode_index;
537  struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
538  struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
539 
540  if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
541  /* 0xff01 is a flag rather than an actual voltage */
542  if (voltage->voltage == 0xff01)
543  return;
544  if (voltage->voltage != rdev->pm.current_vddc) {
545  radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
546  rdev->pm.current_vddc = voltage->voltage;
547  DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
548  }
549  }
550 }
551 
552 bool r600_gui_idle(struct radeon_device *rdev)
553 {
554  if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
555  return false;
556  else
557  return true;
558 }
559 
560 /* hpd for digital panel detect/disconnect */
561 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
562 {
563  bool connected = false;
564 
565  if (ASIC_IS_DCE3(rdev)) {
566  switch (hpd) {
567  case RADEON_HPD_1:
568  if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
569  connected = true;
570  break;
571  case RADEON_HPD_2:
572  if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
573  connected = true;
574  break;
575  case RADEON_HPD_3:
576  if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
577  connected = true;
578  break;
579  case RADEON_HPD_4:
580  if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
581  connected = true;
582  break;
583  /* DCE 3.2 */
584  case RADEON_HPD_5:
585  if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
586  connected = true;
587  break;
588  case RADEON_HPD_6:
589  if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
590  connected = true;
591  break;
592  default:
593  break;
594  }
595  } else {
596  switch (hpd) {
597  case RADEON_HPD_1:
598  if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
599  connected = true;
600  break;
601  case RADEON_HPD_2:
602  if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
603  connected = true;
604  break;
605  case RADEON_HPD_3:
606  if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
607  connected = true;
608  break;
609  default:
610  break;
611  }
612  }
613  return connected;
614 }
615 
616 void r600_hpd_set_polarity(struct radeon_device *rdev,
617  enum radeon_hpd_id hpd)
618 {
619  u32 tmp;
620  bool connected = r600_hpd_sense(rdev, hpd);
621 
622  if (ASIC_IS_DCE3(rdev)) {
623  switch (hpd) {
624  case RADEON_HPD_1:
625  tmp = RREG32(DC_HPD1_INT_CONTROL);
626  if (connected)
627  tmp &= ~DC_HPDx_INT_POLARITY;
628  else
629  tmp |= DC_HPDx_INT_POLARITY;
630  WREG32(DC_HPD1_INT_CONTROL, tmp);
631  break;
632  case RADEON_HPD_2:
633  tmp = RREG32(DC_HPD2_INT_CONTROL);
634  if (connected)
635  tmp &= ~DC_HPDx_INT_POLARITY;
636  else
637  tmp |= DC_HPDx_INT_POLARITY;
638  WREG32(DC_HPD2_INT_CONTROL, tmp);
639  break;
640  case RADEON_HPD_3:
641  tmp = RREG32(DC_HPD3_INT_CONTROL);
642  if (connected)
643  tmp &= ~DC_HPDx_INT_POLARITY;
644  else
645  tmp |= DC_HPDx_INT_POLARITY;
646  WREG32(DC_HPD3_INT_CONTROL, tmp);
647  break;
648  case RADEON_HPD_4:
649  tmp = RREG32(DC_HPD4_INT_CONTROL);
650  if (connected)
651  tmp &= ~DC_HPDx_INT_POLARITY;
652  else
653  tmp |= DC_HPDx_INT_POLARITY;
654  WREG32(DC_HPD4_INT_CONTROL, tmp);
655  break;
656  case RADEON_HPD_5:
657  tmp = RREG32(DC_HPD5_INT_CONTROL);
658  if (connected)
659  tmp &= ~DC_HPDx_INT_POLARITY;
660  else
661  tmp |= DC_HPDx_INT_POLARITY;
662  WREG32(DC_HPD5_INT_CONTROL, tmp);
663  break;
664  /* DCE 3.2 */
665  case RADEON_HPD_6:
666  tmp = RREG32(DC_HPD6_INT_CONTROL);
667  if (connected)
668  tmp &= ~DC_HPDx_INT_POLARITY;
669  else
670  tmp |= DC_HPDx_INT_POLARITY;
671  WREG32(DC_HPD6_INT_CONTROL, tmp);
672  break;
673  default:
674  break;
675  }
676  } else {
677  switch (hpd) {
678  case RADEON_HPD_1:
679  tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
680  if (connected)
681  tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
682  else
683  tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
684  WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
685  break;
686  case RADEON_HPD_2:
687  tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
688  if (connected)
689  tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
690  else
691  tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
692  WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
693  break;
694  case RADEON_HPD_3:
695  tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
696  if (connected)
697  tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
698  else
699  tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
700  WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
701  break;
702  default:
703  break;
704  }
705  }
706 }
707 
708 void r600_hpd_init(struct radeon_device *rdev)
709 {
710  struct drm_device *dev = rdev->ddev;
711  struct drm_connector *connector;
712  unsigned enable = 0;
713 
714  list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
715  struct radeon_connector *radeon_connector = to_radeon_connector(connector);
716 
717  if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
718  connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
719  /* don't try to enable hpd on eDP or LVDS to avoid breaking the
720  * aux dp channel on imac and help (but not completely fix)
721  * https://bugzilla.redhat.com/show_bug.cgi?id=726143
722  */
723  continue;
724  }
725  if (ASIC_IS_DCE3(rdev)) {
726  u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
727  if (ASIC_IS_DCE32(rdev))
728  tmp |= DC_HPDx_EN;
729 
730  switch (radeon_connector->hpd.hpd) {
731  case RADEON_HPD_1:
732  WREG32(DC_HPD1_CONTROL, tmp);
733  break;
734  case RADEON_HPD_2:
735  WREG32(DC_HPD2_CONTROL, tmp);
736  break;
737  case RADEON_HPD_3:
738  WREG32(DC_HPD3_CONTROL, tmp);
739  break;
740  case RADEON_HPD_4:
741  WREG32(DC_HPD4_CONTROL, tmp);
742  break;
743  /* DCE 3.2 */
744  case RADEON_HPD_5:
745  WREG32(DC_HPD5_CONTROL, tmp);
746  break;
747  case RADEON_HPD_6:
748  WREG32(DC_HPD6_CONTROL, tmp);
749  break;
750  default:
751  break;
752  }
753  } else {
754  switch (radeon_connector->hpd.hpd) {
755  case RADEON_HPD_1:
756  WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
757  break;
758  case RADEON_HPD_2:
759  WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
760  break;
761  case RADEON_HPD_3:
762  WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
763  break;
764  default:
765  break;
766  }
767  }
768  enable |= 1 << radeon_connector->hpd.hpd;
769  radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
770  }
771  radeon_irq_kms_enable_hpd(rdev, enable);
772 }
773 
774 void r600_hpd_fini(struct radeon_device *rdev)
775 {
776  struct drm_device *dev = rdev->ddev;
777  struct drm_connector *connector;
778  unsigned disable = 0;
779 
780  list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
781  struct radeon_connector *radeon_connector = to_radeon_connector(connector);
782  if (ASIC_IS_DCE3(rdev)) {
783  switch (radeon_connector->hpd.hpd) {
784  case RADEON_HPD_1:
785  WREG32(DC_HPD1_CONTROL, 0);
786  break;
787  case RADEON_HPD_2:
788  WREG32(DC_HPD2_CONTROL, 0);
789  break;
790  case RADEON_HPD_3:
791  WREG32(DC_HPD3_CONTROL, 0);
792  break;
793  case RADEON_HPD_4:
794  WREG32(DC_HPD4_CONTROL, 0);
795  break;
796  /* DCE 3.2 */
797  case RADEON_HPD_5:
798  WREG32(DC_HPD5_CONTROL, 0);
799  break;
800  case RADEON_HPD_6:
801  WREG32(DC_HPD6_CONTROL, 0);
802  break;
803  default:
804  break;
805  }
806  } else {
807  switch (radeon_connector->hpd.hpd) {
808  case RADEON_HPD_1:
809  WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
810  break;
811  case RADEON_HPD_2:
812  WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
813  break;
814  case RADEON_HPD_3:
815  WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
816  break;
817  default:
818  break;
819  }
820  }
821  disable |= 1 << radeon_connector->hpd.hpd;
822  }
823  radeon_irq_kms_disable_hpd(rdev, disable);
824 }
825 
826 /*
827  * R600 PCIE GART
828  */
829 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
830 {
831  unsigned i;
832  u32 tmp;
833 
834  /* flush hdp cache so updates hit vram */
835  if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
836  !(rdev->flags & RADEON_IS_AGP)) {
837  void __iomem *ptr = (void *)rdev->gart.ptr;
838  u32 tmp;
839 
840  /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
841  * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
842  * This seems to cause problems on some AGP cards. Just use the old
843  * method for them.
844  */
845  WREG32(HDP_DEBUG1, 0);
846  tmp = readl((void __iomem *)ptr);
847  } else
848  WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
849 
850  WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
851  WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
852  WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
853  for (i = 0; i < rdev->usec_timeout; i++) {
854  /* read MC_STATUS */
855  tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
856  tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
857  if (tmp == 2) {
858  printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
859  return;
860  }
861  if (tmp) {
862  return;
863  }
864  udelay(1);
865  }
866 }
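
/* Illustration (not part of the kernel source): a sketch of the flush idiom
 * used above. Post the request, then poll a response register with a
 * bounded busy-wait instead of blocking indefinitely; the caller decides
 * how to recover on timeout.
 */
static int poll_response(struct radeon_device *rdev, u32 reg, u32 mask)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(reg) & mask)	/* hardware acknowledged */
			return 0;
		udelay(1);		/* bounded wait, ~1 usec per try */
	}
	return -ETIMEDOUT;
}
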
867 
868 static int r600_pcie_gart_init(struct radeon_device *rdev)
869 {
870  int r;
871 
872  if (rdev->gart.robj) {
873  WARN(1, "R600 PCIE GART already initialized\n");
874  return 0;
875  }
876  /* Initialize common gart structure */
877  r = radeon_gart_init(rdev);
878  if (r)
879  return r;
880  rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
881  return radeon_gart_table_vram_alloc(rdev);
882 }
883 
884 static int r600_pcie_gart_enable(struct radeon_device *rdev)
885 {
886  u32 tmp;
887  int r, i;
888 
889  if (rdev->gart.robj == NULL) {
890  dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
891  return -EINVAL;
892  }
893  r = radeon_gart_table_vram_pin(rdev);
894  if (r)
895  return r;
896  radeon_gart_restore(rdev);
897 
898  /* Setup L2 cache */
899  WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
900  ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
901  EFFECTIVE_L2_QUEUE_SIZE(7));
902  WREG32(VM_L2_CNTL2, 0);
903  WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
904  /* Setup TLB control */
905  tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
906  SYSTEM_ACCESS_MODE_NOT_IN_SYS |
907  EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
908  ENABLE_WAIT_L2_QUERY;
909  WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
910  WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
911  WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
912  WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
913  WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
914  WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
915  WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
916  WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
917  WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
918  WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
919  WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
920  WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
921  WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
922  WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
923  WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
924  WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
925  WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
926  WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
927  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
928  WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
929  (u32)(rdev->dummy_page.addr >> 12));
930  for (i = 1; i < 7; i++)
931  WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
932 
933  r600_pcie_gart_tlb_flush(rdev);
934  DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
935  (unsigned)(rdev->mc.gtt_size >> 20),
936  (unsigned long long)rdev->gart.table_addr);
937  rdev->gart.ready = true;
938  return 0;
939 }
940 
941 static void r600_pcie_gart_disable(struct radeon_device *rdev)
942 {
943  u32 tmp;
944  int i;
945 
946  /* Disable all tables */
947  for (i = 0; i < 7; i++)
948  WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
949 
950  /* Disable L2 cache */
951  WREG32(VM_L2_CNTL, ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE |
952  EFFECTIVE_L2_QUEUE_SIZE(7));
953  WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
954  /* Setup L1 TLB control */
955  tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
956  ENABLE_WAIT_L2_QUERY;
957  WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
958  WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
959  WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
960  WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
961  WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
962  WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
963  WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
964  WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
965  WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
966  WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
967  WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
968  WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
969  WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
970  WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
971  radeon_gart_table_vram_unpin(rdev);
972 }
973 
974 static void r600_pcie_gart_fini(struct radeon_device *rdev)
975 {
976  radeon_gart_fini(rdev);
977  r600_pcie_gart_disable(rdev);
978  radeon_gart_table_vram_free(rdev);
979 }
980 
981 static void r600_agp_enable(struct radeon_device *rdev)
982 {
983  u32 tmp;
984  int i;
985 
986  /* Setup L2 cache */
987  WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
988  ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
989  EFFECTIVE_L2_QUEUE_SIZE(7));
990  WREG32(VM_L2_CNTL2, 0);
991  WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
992  /* Setup TLB control */
993  tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
994  SYSTEM_ACCESS_MODE_NOT_IN_SYS |
995  EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
996  ENABLE_WAIT_L2_QUERY;
997  WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
998  WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
999  WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
1000  WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1001  WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1002  WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1003  WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1004  WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1005  WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1006  WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1007  WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1008  WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1009  WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
1010  WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
1011  for (i = 0; i < 7; i++)
1012  WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1013 }
1014 
1015 int r600_mc_wait_for_idle(struct radeon_device *rdev)
1016 {
1017  unsigned i;
1018  u32 tmp;
1019 
1020  for (i = 0; i < rdev->usec_timeout; i++) {
1021  /* read MC_STATUS */
1022  tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1023  if (!tmp)
1024  return 0;
1025  udelay(1);
1026  }
1027  return -1;
1028 }
1029 
1030 static void r600_mc_program(struct radeon_device *rdev)
1031 {
1032  struct rv515_mc_save save;
1033  u32 tmp;
1034  int i, j;
1035 
1036  /* Initialize HDP */
1037  for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1038  WREG32((0x2c14 + j), 0x00000000);
1039  WREG32((0x2c18 + j), 0x00000000);
1040  WREG32((0x2c1c + j), 0x00000000);
1041  WREG32((0x2c20 + j), 0x00000000);
1042  WREG32((0x2c24 + j), 0x00000000);
1043  }
1044  WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1045 
1046  rv515_mc_stop(rdev, &save);
1047  if (r600_mc_wait_for_idle(rdev)) {
1048  dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1049  }
1050  /* Lockout access through VGA aperture (doesn't exist before R600) */
1051  WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1052  /* Update configuration */
1053  if (rdev->flags & RADEON_IS_AGP) {
1054  if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1055  /* VRAM before AGP */
1056  WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1057  rdev->mc.vram_start >> 12);
1058  WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1059  rdev->mc.gtt_end >> 12);
1060  } else {
1061  /* VRAM after AGP */
1062  WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1063  rdev->mc.gtt_start >> 12);
1064  WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1065  rdev->mc.vram_end >> 12);
1066  }
1067  } else {
1068  WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1069  WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1070  }
1071  WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1072  tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1073  tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1074  WREG32(MC_VM_FB_LOCATION, tmp);
1075  WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1076  WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1077  WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1078  if (rdev->flags & RADEON_IS_AGP) {
1079  WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1080  WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1081  WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1082  } else {
1083  WREG32(MC_VM_AGP_BASE, 0);
1084  WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1085  WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1086  }
1087  if (r600_mc_wait_for_idle(rdev)) {
1088  dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1089  }
1090  rv515_mc_resume(rdev, &save);
1091  /* we need to own VRAM, so turn off the VGA renderer here
1092  * to stop it overwriting our objects */
1093  rv515_vga_render_disable(rdev);
1094 }
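
/* Illustration (not part of the kernel source): decoding the
 * MC_VM_FB_LOCATION packing done above. The register holds 16-bit base and
 * top fields in units of 16 MiB (address >> 24). With
 *   vram_start = 0x00000000, vram_end = 0x1FFFFFFF (512 MiB - 1):
 *   tmp = ((0x1FFFFFFF >> 24) & 0xFFFF) << 16 | ((0x0 >> 24) & 0xFFFF)
 *       = (0x1F << 16) | 0x00 = 0x001F0000
 */
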
1095 
1117 static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1118 {
1119  u64 size_bf, size_af;
1120 
1121  if (mc->mc_vram_size > 0xE0000000) {
1122  /* leave room for at least 512M GTT */
1123  dev_warn(rdev->dev, "limiting VRAM\n");
1124  mc->real_vram_size = 0xE0000000;
1125  mc->mc_vram_size = 0xE0000000;
1126  }
1127  if (rdev->flags & RADEON_IS_AGP) {
1128  size_bf = mc->gtt_start;
1129  size_af = 0xFFFFFFFF - mc->gtt_end;
1130  if (size_bf > size_af) {
1131  if (mc->mc_vram_size > size_bf) {
1132  dev_warn(rdev->dev, "limiting VRAM\n");
1133  mc->real_vram_size = size_bf;
1134  mc->mc_vram_size = size_bf;
1135  }
1136  mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1137  } else {
1138  if (mc->mc_vram_size > size_af) {
1139  dev_warn(rdev->dev, "limiting VRAM\n");
1140  mc->real_vram_size = size_af;
1141  mc->mc_vram_size = size_af;
1142  }
1143  mc->vram_start = mc->gtt_end + 1;
1144  }
1145  mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1146  dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1147  mc->mc_vram_size >> 20, mc->vram_start,
1148  mc->vram_end, mc->real_vram_size >> 20);
1149  } else {
1150  u64 base = 0;
1151  if (rdev->flags & RADEON_IS_IGP) {
1152  base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1153  base <<= 24;
1154  }
1155  radeon_vram_location(rdev, &rdev->mc, base);
1156  rdev->mc.gtt_base_align = 0;
1157  radeon_gtt_location(rdev, mc);
1158  }
1159 }
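
/* Illustration (not part of the kernel source): worked example for the AGP
 * branch above. Suppose the AGP aperture spans gtt_start = 0xD0000000,
 * gtt_end = 0xDFFFFFFF (256 MiB):
 *   size_bf (room before) = 0xD0000000 = 3328 MiB
 *   size_af (room after)  = 0xFFFFFFFF - 0xDFFFFFFF = 512 MiB
 * size_bf > size_af, so VRAM goes immediately below the aperture: a 1 GiB
 * card gets vram_start = 0xD0000000 - 0x40000000 = 0x90000000 and
 * vram_end = 0xCFFFFFFF, keeping VRAM and the GTT contiguous.
 */
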
1160 
1161 static int r600_mc_init(struct radeon_device *rdev)
1162 {
1163  u32 tmp;
1164  int chansize, numchan;
1165 
1166  /* Get VRAM information */
1167  rdev->mc.vram_is_ddr = true;
1168  tmp = RREG32(RAMCFG);
1169  if (tmp & CHANSIZE_OVERRIDE) {
1170  chansize = 16;
1171  } else if (tmp & CHANSIZE_MASK) {
1172  chansize = 64;
1173  } else {
1174  chansize = 32;
1175  }
1176  tmp = RREG32(CHMAP);
1177  switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1178  case 0:
1179  default:
1180  numchan = 1;
1181  break;
1182  case 1:
1183  numchan = 2;
1184  break;
1185  case 2:
1186  numchan = 4;
1187  break;
1188  case 3:
1189  numchan = 8;
1190  break;
1191  }
1192  rdev->mc.vram_width = numchan * chansize;
1193  /* Could aper size report 0 ? */
1194  rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1195  rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1196  /* Setup GPU memory space */
1197  rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1198  rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1199  rdev->mc.visible_vram_size = rdev->mc.aper_size;
1200  r600_vram_gtt_location(rdev, &rdev->mc);
1201 
1202  if (rdev->flags & RADEON_IS_IGP) {
1203  rs690_pm_info(rdev);
1204  rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1205  }
1206  radeon_update_bandwidth_info(rdev);
1207  return 0;
1208 }
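
/* Illustration (not part of the kernel source): reading the width
 * computation above. RAMCFG gives the per-channel width (16/32/64 bits)
 * and CHMAP the channel count (1/2/4/8), so e.g. a 32-bit channel size
 * with NOOFCHAN = 2 (four channels) yields vram_width = 4 * 32 = 128 bits.
 */
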
1209 
1210 int r600_vram_scratch_init(struct radeon_device *rdev)
1211 {
1212  int r;
1213 
1214  if (rdev->vram_scratch.robj == NULL) {
1215  r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1216  PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1217  NULL, &rdev->vram_scratch.robj);
1218  if (r) {
1219  return r;
1220  }
1221  }
1222 
1223  r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1224  if (unlikely(r != 0))
1225  return r;
1226  r = radeon_bo_pin(rdev->vram_scratch.robj,
1227  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1228  if (r) {
1229  radeon_bo_unreserve(rdev->vram_scratch.robj);
1230  return r;
1231  }
1232  r = radeon_bo_kmap(rdev->vram_scratch.robj,
1233  (void **)&rdev->vram_scratch.ptr);
1234  if (r)
1235  radeon_bo_unpin(rdev->vram_scratch.robj);
1236  radeon_bo_unreserve(rdev->vram_scratch.robj);
1237 
1238  return r;
1239 }
1240 
1241 void r600_vram_scratch_fini(struct radeon_device *rdev)
1242 {
1243  int r;
1244 
1245  if (rdev->vram_scratch.robj == NULL) {
1246  return;
1247  }
1248  r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1249  if (likely(r == 0)) {
1250  radeon_bo_kunmap(rdev->vram_scratch.robj);
1251  radeon_bo_unpin(rdev->vram_scratch.robj);
1252  radeon_bo_unreserve(rdev->vram_scratch.robj);
1253  }
1254  radeon_bo_unref(&rdev->vram_scratch.robj);
1255 }
1256 
1257 /* We don't check whether the GPU really needs a reset; we simply do the
1258  * reset, it's up to the caller to determine if the GPU needs one. We
1259  * might add a helper function to check that.
1260  */
1261 static int r600_gpu_soft_reset(struct radeon_device *rdev)
1262 {
1263  struct rv515_mc_save save;
1264  u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1265  S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1266  S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
1267  S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
1268  S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
1269  S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
1270  S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
1271  S_008010_GUI_ACTIVE(1);
1272  u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
1273  S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
1274  S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
1275  S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
1276  S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
1277  S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
1278  S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
1279  S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
1280  u32 tmp;
1281 
1282  if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1283  return 0;
1284 
1285  dev_info(rdev->dev, "GPU softreset \n");
1286  dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1287  RREG32(R_008010_GRBM_STATUS));
1288  dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
1289  RREG32(R_008014_GRBM_STATUS2));
1290  dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1291  RREG32(R_000E50_SRBM_STATUS));
1292  dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1293  RREG32(CP_STALLED_STAT1));
1294  dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1295  RREG32(CP_STALLED_STAT2));
1296  dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
1297  RREG32(CP_BUSY_STAT));
1298  dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1299  RREG32(CP_STAT));
1300  rv515_mc_stop(rdev, &save);
1301  if (r600_mc_wait_for_idle(rdev)) {
1302  dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1303  }
1304  /* Disable CP parsing/prefetching */
1305  WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1306  /* Check if any of the rendering block is busy and reset it */
1307  if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1308  (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
1309  tmp = S_008020_SOFT_RESET_CR(1) |
1310  S_008020_SOFT_RESET_DB(1) |
1311  S_008020_SOFT_RESET_CB(1) |
1312  S_008020_SOFT_RESET_PA(1) |
1313  S_008020_SOFT_RESET_SC(1) |
1314  S_008020_SOFT_RESET_SMX(1) |
1315  S_008020_SOFT_RESET_SPI(1) |
1316  S_008020_SOFT_RESET_SX(1) |
1317  S_008020_SOFT_RESET_SH(1) |
1318  S_008020_SOFT_RESET_TC(1) |
1319  S_008020_SOFT_RESET_TA(1) |
1320  S_008020_SOFT_RESET_VC(1) |
1321  S_008020_SOFT_RESET_VGT(1);
1322  dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1323  WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1324  RREG32(R_008020_GRBM_SOFT_RESET);
1325  mdelay(15);
1326  WREG32(R_008020_GRBM_SOFT_RESET, 0);
1327  }
1328  /* Reset CP (we always reset CP) */
1329  tmp = S_008020_SOFT_RESET_CP(1);
1330  dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1331  WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1332  RREG32(R_008020_GRBM_SOFT_RESET);
1333  mdelay(15);
1334  WREG32(R_008020_GRBM_SOFT_RESET, 0);
1335  /* Wait a little for things to settle down */
1336  mdelay(1);
1337  dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1338  RREG32(R_008010_GRBM_STATUS));
1339  dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
1340  RREG32(R_008014_GRBM_STATUS2));
1341  dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1342  RREG32(R_000E50_SRBM_STATUS));
1343  dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1344  RREG32(CP_STALLED_STAT1));
1345  dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1346  RREG32(CP_STALLED_STAT2));
1347  dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
1348  RREG32(CP_BUSY_STAT));
1349  dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1350  RREG32(CP_STAT));
1351  rv515_mc_resume(rdev, &save);
1352  return 0;
1353 }
1354 
1355 bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1356 {
1357  u32 srbm_status;
1358  u32 grbm_status;
1359  u32 grbm_status2;
1360 
1361  srbm_status = RREG32(R_000E50_SRBM_STATUS);
1362  grbm_status = RREG32(R_008010_GRBM_STATUS);
1363  grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1364  if (!G_008010_GUI_ACTIVE(grbm_status)) {
1365  radeon_ring_lockup_update(ring);
1366  return false;
1367  }
1368  /* force CP activities */
1369  radeon_ring_force_activity(rdev, ring);
1370  return radeon_ring_test_lockup(rdev, ring);
1371 }
1372 
1373 int r600_asic_reset(struct radeon_device *rdev)
1374 {
1375  return r600_gpu_soft_reset(rdev);
1376 }
1377 
1378 u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1379  u32 tiling_pipe_num,
1380  u32 max_rb_num,
1381  u32 total_max_rb_num,
1382  u32 disabled_rb_mask)
1383 {
1384  u32 rendering_pipe_num, rb_num_width, req_rb_num;
1385  u32 pipe_rb_ratio, pipe_rb_remain;
1386  u32 data = 0, mask = 1 << (max_rb_num - 1);
1387  unsigned i, j;
1388 
1389  /* mask out the RBs that don't exist on that asic */
1390  disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
1391 
1392  rendering_pipe_num = 1 << tiling_pipe_num;
1393  req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1394  BUG_ON(rendering_pipe_num < req_rb_num);
1395 
1396  pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1397  pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1398 
1399  if (rdev->family <= CHIP_RV740) {
1400  /* r6xx/r7xx */
1401  rb_num_width = 2;
1402  } else {
1403  /* eg+ */
1404  rb_num_width = 4;
1405  }
1406 
1407  for (i = 0; i < max_rb_num; i++) {
1408  if (!(mask & disabled_rb_mask)) {
1409  for (j = 0; j < pipe_rb_ratio; j++) {
1410  data <<= rb_num_width;
1411  data |= max_rb_num - i - 1;
1412  }
1413  if (pipe_rb_remain) {
1414  data <<= rb_num_width;
1415  data |= max_rb_num - i - 1;
1416  pipe_rb_remain--;
1417  }
1418  }
1419  mask >>= 1;
1420  }
1421 
1422  return data;
1423 }
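
/* Illustration (not part of the kernel source): worked example for
 * r6xx_remap_render_backend. For an R600 with 4 pipes (tiling field = 2),
 * 4 enabled backends out of R6XX_MAX_BACKENDS = 8 and no user-disabled RBs:
 *   disabled_rb_mask |= (0xff << 4) & 0xff  ->  0xF0 (RBs 4-7 don't exist)
 *   req_rb_num = 8 - popcount(0xF0) = 4, rendering_pipe_num = 1 << 2 = 4
 *   pipe_rb_ratio = 1, pipe_rb_remain = 0, rb_num_width = 2 (r6xx)
 * The loop then packs RB indices 3,2,1,0 into 2-bit fields, giving
 * data = 0xE4, the identity backend map written into GB_TILING_CONFIG.
 */
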
1424 
1425 int r600_count_pipe_bits(uint32_t val)
1426 {
1427  int i, ret = 0;
1428 
1429  for (i = 0; i < 32; i++) {
1430  ret += val & 1;
1431  val >>= 1;
1432  }
1433  return ret;
1434 }
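
/* Illustration (not part of the kernel source): r600_count_pipe_bits is a
 * plain 32-bit population count; an equivalent one-liner using the kernel
 * helper from <linux/bitops.h> would be:
 *
 *	return hweight32(val);
 */
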
1435 
1436 static void r600_gpu_init(struct radeon_device *rdev)
1437 {
1438  u32 tiling_config;
1439  u32 ramcfg;
1440  u32 cc_rb_backend_disable;
1441  u32 cc_gc_shader_pipe_config;
1442  u32 tmp;
1443  int i, j;
1444  u32 sq_config;
1445  u32 sq_gpr_resource_mgmt_1 = 0;
1446  u32 sq_gpr_resource_mgmt_2 = 0;
1447  u32 sq_thread_resource_mgmt = 0;
1448  u32 sq_stack_resource_mgmt_1 = 0;
1449  u32 sq_stack_resource_mgmt_2 = 0;
1450  u32 disabled_rb_mask;
1451 
1452  rdev->config.r600.tiling_group_size = 256;
1453  switch (rdev->family) {
1454  case CHIP_R600:
1455  rdev->config.r600.max_pipes = 4;
1456  rdev->config.r600.max_tile_pipes = 8;
1457  rdev->config.r600.max_simds = 4;
1458  rdev->config.r600.max_backends = 4;
1459  rdev->config.r600.max_gprs = 256;
1460  rdev->config.r600.max_threads = 192;
1461  rdev->config.r600.max_stack_entries = 256;
1462  rdev->config.r600.max_hw_contexts = 8;
1463  rdev->config.r600.max_gs_threads = 16;
1464  rdev->config.r600.sx_max_export_size = 128;
1465  rdev->config.r600.sx_max_export_pos_size = 16;
1466  rdev->config.r600.sx_max_export_smx_size = 128;
1467  rdev->config.r600.sq_num_cf_insts = 2;
1468  break;
1469  case CHIP_RV630:
1470  case CHIP_RV635:
1471  rdev->config.r600.max_pipes = 2;
1472  rdev->config.r600.max_tile_pipes = 2;
1473  rdev->config.r600.max_simds = 3;
1474  rdev->config.r600.max_backends = 1;
1475  rdev->config.r600.max_gprs = 128;
1476  rdev->config.r600.max_threads = 192;
1477  rdev->config.r600.max_stack_entries = 128;
1478  rdev->config.r600.max_hw_contexts = 8;
1479  rdev->config.r600.max_gs_threads = 4;
1480  rdev->config.r600.sx_max_export_size = 128;
1481  rdev->config.r600.sx_max_export_pos_size = 16;
1482  rdev->config.r600.sx_max_export_smx_size = 128;
1483  rdev->config.r600.sq_num_cf_insts = 2;
1484  break;
1485  case CHIP_RV610:
1486  case CHIP_RV620:
1487  case CHIP_RS780:
1488  case CHIP_RS880:
1489  rdev->config.r600.max_pipes = 1;
1490  rdev->config.r600.max_tile_pipes = 1;
1491  rdev->config.r600.max_simds = 2;
1492  rdev->config.r600.max_backends = 1;
1493  rdev->config.r600.max_gprs = 128;
1494  rdev->config.r600.max_threads = 192;
1495  rdev->config.r600.max_stack_entries = 128;
1496  rdev->config.r600.max_hw_contexts = 4;
1497  rdev->config.r600.max_gs_threads = 4;
1498  rdev->config.r600.sx_max_export_size = 128;
1499  rdev->config.r600.sx_max_export_pos_size = 16;
1500  rdev->config.r600.sx_max_export_smx_size = 128;
1501  rdev->config.r600.sq_num_cf_insts = 1;
1502  break;
1503  case CHIP_RV670:
1504  rdev->config.r600.max_pipes = 4;
1505  rdev->config.r600.max_tile_pipes = 4;
1506  rdev->config.r600.max_simds = 4;
1507  rdev->config.r600.max_backends = 4;
1508  rdev->config.r600.max_gprs = 192;
1509  rdev->config.r600.max_threads = 192;
1510  rdev->config.r600.max_stack_entries = 256;
1511  rdev->config.r600.max_hw_contexts = 8;
1512  rdev->config.r600.max_gs_threads = 16;
1513  rdev->config.r600.sx_max_export_size = 128;
1514  rdev->config.r600.sx_max_export_pos_size = 16;
1515  rdev->config.r600.sx_max_export_smx_size = 128;
1516  rdev->config.r600.sq_num_cf_insts = 2;
1517  break;
1518  default:
1519  break;
1520  }
1521 
1522  /* Initialize HDP */
1523  for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1524  WREG32((0x2c14 + j), 0x00000000);
1525  WREG32((0x2c18 + j), 0x00000000);
1526  WREG32((0x2c1c + j), 0x00000000);
1527  WREG32((0x2c20 + j), 0x00000000);
1528  WREG32((0x2c24 + j), 0x00000000);
1529  }
1530 
1530 
1531  WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1532 
1533  /* Setup tiling */
1534  tiling_config = 0;
1535  ramcfg = RREG32(RAMCFG);
1536  switch (rdev->config.r600.max_tile_pipes) {
1537  case 1:
1538  tiling_config |= PIPE_TILING(0);
1539  break;
1540  case 2:
1541  tiling_config |= PIPE_TILING(1);
1542  break;
1543  case 4:
1544  tiling_config |= PIPE_TILING(2);
1545  break;
1546  case 8:
1547  tiling_config |= PIPE_TILING(3);
1548  break;
1549  default:
1550  break;
1551  }
1552  rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1553  rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1554  tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1555  tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1556 
1557  tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1558  if (tmp > 3) {
1559  tiling_config |= ROW_TILING(3);
1560  tiling_config |= SAMPLE_SPLIT(3);
1561  } else {
1562  tiling_config |= ROW_TILING(tmp);
1563  tiling_config |= SAMPLE_SPLIT(tmp);
1564  }
1565  tiling_config |= BANK_SWAPS(1);
1566 
1567  cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1568  tmp = R6XX_MAX_BACKENDS -
1569  r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1570  if (tmp < rdev->config.r600.max_backends) {
1571  rdev->config.r600.max_backends = tmp;
1572  }
1573 
1574  cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1575  tmp = R6XX_MAX_PIPES -
1576  r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1577  if (tmp < rdev->config.r600.max_pipes) {
1578  rdev->config.r600.max_pipes = tmp;
1579  }
1580  tmp = R6XX_MAX_SIMDS -
1581  r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1582  if (tmp < rdev->config.r600.max_simds) {
1583  rdev->config.r600.max_simds = tmp;
1584  }
1585 
1586  disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1587  tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1588  tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1589  R6XX_MAX_BACKENDS, disabled_rb_mask);
1590  tiling_config |= tmp << 16;
1591  rdev->config.r600.backend_map = tmp;
1592 
1593  rdev->config.r600.tile_config = tiling_config;
1594  WREG32(GB_TILING_CONFIG, tiling_config);
1595  WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1596  WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1597 
1598  tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1599  WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1600  WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1601 
1602  /* Setup some CP states */
1603  WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1604  WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1605 
1606  WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1607  SYNC_WALKER | SYNC_ALIGNER));
1608  /* Setup various GPU states */
1609  if (rdev->family == CHIP_RV670)
1610  WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1611 
1612  tmp = RREG32(SX_DEBUG_1);
1613  tmp |= SMX_EVENT_RELEASE;
1614  if ((rdev->family > CHIP_R600))
1615  tmp |= ENABLE_NEW_SMX_ADDRESS;
1616  WREG32(SX_DEBUG_1, tmp);
1617 
1618  if (((rdev->family) == CHIP_R600) ||
1619  ((rdev->family) == CHIP_RV630) ||
1620  ((rdev->family) == CHIP_RV610) ||
1621  ((rdev->family) == CHIP_RV620) ||
1622  ((rdev->family) == CHIP_RS780) ||
1623  ((rdev->family) == CHIP_RS880)) {
1624  WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1625  } else {
1626  WREG32(DB_DEBUG, 0);
1627  }
1628  WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1629  DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1630 
1633 
1636 
1637  tmp = RREG32(SQ_MS_FIFO_SIZES);
1638  if (((rdev->family) == CHIP_RV610) ||
1639  ((rdev->family) == CHIP_RV620) ||
1640  ((rdev->family) == CHIP_RS780) ||
1641  ((rdev->family) == CHIP_RS880)) {
1642  tmp = (CACHE_FIFO_SIZE(0xa) |
1643  FETCH_FIFO_HIWATER(0xa) |
1644  DONE_FIFO_HIWATER(0xe0) |
1645  ALU_UPDATE_FIFO_HIWATER(0x8));
1646  } else if (((rdev->family) == CHIP_R600) ||
1647  ((rdev->family) == CHIP_RV630)) {
1648  tmp &= ~DONE_FIFO_HIWATER(0xff);
1649  tmp |= DONE_FIFO_HIWATER(0x4);
1650  }
1651  WREG32(SQ_MS_FIFO_SIZES, tmp);
1652 
1653  /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1654  * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1655  */
1656  sq_config = RREG32(SQ_CONFIG);
1657  sq_config &= ~(PS_PRIO(3) |
1658  VS_PRIO(3) |
1659  GS_PRIO(3) |
1660  ES_PRIO(3));
1661  sq_config |= (DX9_CONSTS |
1662  VC_ENABLE |
1663  PS_PRIO(0) |
1664  VS_PRIO(1) |
1665  GS_PRIO(2) |
1666  ES_PRIO(3));
1667 
1668  if ((rdev->family) == CHIP_R600) {
1669  sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1670  NUM_VS_GPRS(124) |
1671  NUM_CLAUSE_TEMP_GPRS(4));
1672  sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1673  NUM_ES_GPRS(0));
1674  sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1675  NUM_VS_THREADS(48) |
1676  NUM_GS_THREADS(4) |
1677  NUM_ES_THREADS(4));
1678  sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1679  NUM_VS_STACK_ENTRIES(128));
1680  sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1681  NUM_ES_STACK_ENTRIES(0));
1682  } else if (((rdev->family) == CHIP_RV610) ||
1683  ((rdev->family) == CHIP_RV620) ||
1684  ((rdev->family) == CHIP_RS780) ||
1685  ((rdev->family) == CHIP_RS880)) {
1686  /* no vertex cache */
1687  sq_config &= ~VC_ENABLE;
1688 
1689  sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1690  NUM_VS_GPRS(44) |
1691  NUM_CLAUSE_TEMP_GPRS(2));
1692  sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1693  NUM_ES_GPRS(17));
1694  sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1695  NUM_VS_THREADS(78) |
1696  NUM_GS_THREADS(4) |
1697  NUM_ES_THREADS(31));
1698  sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1699  NUM_VS_STACK_ENTRIES(40));
1700  sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1701  NUM_ES_STACK_ENTRIES(16));
1702  } else if (((rdev->family) == CHIP_RV630) ||
1703  ((rdev->family) == CHIP_RV635)) {
1704  sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1705  NUM_VS_GPRS(44) |
1706  NUM_CLAUSE_TEMP_GPRS(2));
1707  sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1708  NUM_ES_GPRS(18));
1709  sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1710  NUM_VS_THREADS(78) |
1711  NUM_GS_THREADS(4) |
1712  NUM_ES_THREADS(31));
1713  sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1714  NUM_VS_STACK_ENTRIES(40));
1715  sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1716  NUM_ES_STACK_ENTRIES(16));
1717  } else if ((rdev->family) == CHIP_RV670) {
1718  sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1719  NUM_VS_GPRS(44) |
1720  NUM_CLAUSE_TEMP_GPRS(2));
1721  sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1722  NUM_ES_GPRS(17));
1723  sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1724  NUM_VS_THREADS(78) |
1725  NUM_GS_THREADS(4) |
1726  NUM_ES_THREADS(31));
1727  sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1728  NUM_VS_STACK_ENTRIES(64));
1729  sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1730  NUM_ES_STACK_ENTRIES(64));
1731  }
1732 
1733  WREG32(SQ_CONFIG, sq_config);
1734  WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1735  WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1736  WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1737  WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1738  WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1739 
1740  if (((rdev->family) == CHIP_RV610) ||
1741  ((rdev->family) == CHIP_RV620) ||
1742  ((rdev->family) == CHIP_RS780) ||
1743  ((rdev->family) == CHIP_RS880)) {
1744  WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1745  } else {
1746  WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1747  }
1748 
1749  /* More default values. 2D/3D driver should adjust as needed */
1750  WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1751  S1_X(0x4) | S1_Y(0xc)));
1752  WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1753  S1_X(0x2) | S1_Y(0x2) |
1754  S2_X(0xa) | S2_Y(0x6) |
1755  S3_X(0x6) | S3_Y(0xa)));
1756  WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1757  S1_X(0x4) | S1_Y(0xc) |
1758  S2_X(0x1) | S2_Y(0x6) |
1759  S3_X(0xa) | S3_Y(0xe)));
1760  WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1761  S5_X(0x0) | S5_Y(0x0) |
1762  S6_X(0xb) | S6_Y(0x4) |
1763  S7_X(0x7) | S7_Y(0x8)));
1764 
1765  WREG32(VGT_STRMOUT_EN, 0);
1766  tmp = rdev->config.r600.max_pipes * 16;
1767  switch (rdev->family) {
1768  case CHIP_RV610:
1769  case CHIP_RV620:
1770  case CHIP_RS780:
1771  case CHIP_RS880:
1772  tmp += 32;
1773  break;
1774  case CHIP_RV670:
1775  tmp += 128;
1776  break;
1777  default:
1778  break;
1779  }
1780  if (tmp > 256) {
1781  tmp = 256;
1782  }
1783  WREG32(VGT_ES_PER_GS, 128);
1784  WREG32(VGT_GS_PER_ES, tmp);
1785  WREG32(VGT_GS_PER_VS, 2);
1786  WREG32(VGT_GS_VERTEX_REUSE, 16);
1787 
1788  /* more default values. 2D/3D driver should adjust as needed */
1789  WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1790  WREG32(VGT_STRMOUT_EN, 0);
1791  WREG32(SX_MISC, 0);
1792  WREG32(PA_SC_MODE_CNTL, 0);
1793  WREG32(PA_SC_AA_CONFIG, 0);
1794  WREG32(PA_SC_LINE_STIPPLE, 0);
1795  WREG32(SPI_INPUT_Z, 0);
1796  WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1797  WREG32(CB_COLOR7_FRAG, 0);
1798 
1799  /* Clear render buffer base addresses */
1800  WREG32(CB_COLOR0_BASE, 0);
1801  WREG32(CB_COLOR1_BASE, 0);
1802  WREG32(CB_COLOR2_BASE, 0);
1803  WREG32(CB_COLOR3_BASE, 0);
1804  WREG32(CB_COLOR4_BASE, 0);
1805  WREG32(CB_COLOR5_BASE, 0);
1806  WREG32(CB_COLOR6_BASE, 0);
1807  WREG32(CB_COLOR7_BASE, 0);
1808  WREG32(CB_COLOR7_FRAG, 0);
1809 
1810  switch (rdev->family) {
1811  case CHIP_RV610:
1812  case CHIP_RV620:
1813  case CHIP_RS780:
1814  case CHIP_RS880:
1815  tmp = TC_L2_SIZE(8);
1816  break;
1817  case CHIP_RV630:
1818  case CHIP_RV635:
1819  tmp = TC_L2_SIZE(4);
1820  break;
1821  case CHIP_R600:
1822  tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1823  break;
1824  default:
1825  tmp = TC_L2_SIZE(0);
1826  break;
1827  }
1828  WREG32(TC_CNTL, tmp);
1829 
1830  tmp = RREG32(HDP_HOST_PATH_CNTL);
1831  WREG32(HDP_HOST_PATH_CNTL, tmp);
1832 
1833  tmp = RREG32(ARB_POP);
1834  tmp |= ENABLE_TC128;
1835  WREG32(ARB_POP, tmp);
1836 
1837  WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1838  WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1839  NUM_CLIP_SEQ(3)));
1840  WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1841  WREG32(VC_ENHANCE, 0);
1842 }
1843 
1844 
1845 /*
1846  * Indirect registers accessor
1847  */
1848 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1849 {
1850  u32 r;
1851 
1852  WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1853  (void)RREG32(PCIE_PORT_INDEX);
1854  r = RREG32(PCIE_PORT_DATA);
1855  return r;
1856 }
1857 
1858 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1859 {
1860  WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1861  (void)RREG32(PCIE_PORT_INDEX);
1862  WREG32(PCIE_PORT_DATA, (v));
1863  (void)RREG32(PCIE_PORT_DATA);
1864 }
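
/* Illustration (not part of the kernel source): the accessors above use a
 * classic index/data window. The register number goes to PCIE_PORT_INDEX,
 * the value moves through PCIE_PORT_DATA, and the extra RREG32 after each
 * write flushes the posted write so index and data cannot be reordered.
 * A read-modify-write of some PCIE port register REG (placeholder names)
 * would look like:
 *
 *	u32 v = r600_pciep_rreg(rdev, REG);
 *	v |= SOME_BIT;
 *	r600_pciep_wreg(rdev, REG, v);
 */
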
1865 
1866 /*
1867  * CP & Ring
1868  */
1869 void r600_cp_stop(struct radeon_device *rdev)
1870 {
1871  radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1872  WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1873  WREG32(SCRATCH_UMSK, 0);
1874 }
1875 
1876 int r600_init_microcode(struct radeon_device *rdev)
1877 {
1878  struct platform_device *pdev;
1879  const char *chip_name;
1880  const char *rlc_chip_name;
1881  size_t pfp_req_size, me_req_size, rlc_req_size;
1882  char fw_name[30];
1883  int err;
1884 
1885  DRM_DEBUG("\n");
1886 
1887  pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1888  err = IS_ERR(pdev);
1889  if (err) {
1890  printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1891  return -EINVAL;
1892  }
1893 
1894  switch (rdev->family) {
1895  case CHIP_R600:
1896  chip_name = "R600";
1897  rlc_chip_name = "R600";
1898  break;
1899  case CHIP_RV610:
1900  chip_name = "RV610";
1901  rlc_chip_name = "R600";
1902  break;
1903  case CHIP_RV630:
1904  chip_name = "RV630";
1905  rlc_chip_name = "R600";
1906  break;
1907  case CHIP_RV620:
1908  chip_name = "RV620";
1909  rlc_chip_name = "R600";
1910  break;
1911  case CHIP_RV635:
1912  chip_name = "RV635";
1913  rlc_chip_name = "R600";
1914  break;
1915  case CHIP_RV670:
1916  chip_name = "RV670";
1917  rlc_chip_name = "R600";
1918  break;
1919  case CHIP_RS780:
1920  case CHIP_RS880:
1921  chip_name = "RS780";
1922  rlc_chip_name = "R600";
1923  break;
1924  case CHIP_RV770:
1925  chip_name = "RV770";
1926  rlc_chip_name = "R700";
1927  break;
1928  case CHIP_RV730:
1929  case CHIP_RV740:
1930  chip_name = "RV730";
1931  rlc_chip_name = "R700";
1932  break;
1933  case CHIP_RV710:
1934  chip_name = "RV710";
1935  rlc_chip_name = "R700";
1936  break;
1937  case CHIP_CEDAR:
1938  chip_name = "CEDAR";
1939  rlc_chip_name = "CEDAR";
1940  break;
1941  case CHIP_REDWOOD:
1942  chip_name = "REDWOOD";
1943  rlc_chip_name = "REDWOOD";
1944  break;
1945  case CHIP_JUNIPER:
1946  chip_name = "JUNIPER";
1947  rlc_chip_name = "JUNIPER";
1948  break;
1949  case CHIP_CYPRESS:
1950  case CHIP_HEMLOCK:
1951  chip_name = "CYPRESS";
1952  rlc_chip_name = "CYPRESS";
1953  break;
1954  case CHIP_PALM:
1955  chip_name = "PALM";
1956  rlc_chip_name = "SUMO";
1957  break;
1958  case CHIP_SUMO:
1959  chip_name = "SUMO";
1960  rlc_chip_name = "SUMO";
1961  break;
1962  case CHIP_SUMO2:
1963  chip_name = "SUMO2";
1964  rlc_chip_name = "SUMO";
1965  break;
1966  default: BUG();
1967  }
1968 
1969  if (rdev->family >= CHIP_CEDAR) {
1970  pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1971  me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1972  rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1973  } else if (rdev->family >= CHIP_RV770) {
1974  pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1975  me_req_size = R700_PM4_UCODE_SIZE * 4;
1976  rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1977  } else {
1978  pfp_req_size = PFP_UCODE_SIZE * 4;
1979  me_req_size = PM4_UCODE_SIZE * 12;
1980  rlc_req_size = RLC_UCODE_SIZE * 4;
1981  }
1982 
1983  DRM_INFO("Loading %s Microcode\n", chip_name);
1984 
1985  snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1986  err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
1987  if (err)
1988  goto out;
1989  if (rdev->pfp_fw->size != pfp_req_size) {
1991  "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1992  rdev->pfp_fw->size, fw_name);
1993  err = -EINVAL;
1994  goto out;
1995  }
1996 
1997  snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1998  err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
1999  if (err)
2000  goto out;
2001  if (rdev->me_fw->size != me_req_size) {
2003  "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2004  rdev->me_fw->size, fw_name);
2005  err = -EINVAL;
2006  }
2007 
2008  snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2009  err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2010  if (err)
2011  goto out;
2012  if (rdev->rlc_fw->size != rlc_req_size) {
2014  "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2015  rdev->rlc_fw->size, fw_name);
2016  err = -EINVAL;
2017  }
2018 
2019 out:
2020  platform_device_unregister(pdev);
2021 
2022  if (err) {
2023  if (err != -EINVAL)
2025  "r600_cp: Failed to load firmware \"%s\"\n",
2026  fw_name);
2027  release_firmware(rdev->pfp_fw);
2028  rdev->pfp_fw = NULL;
2029  release_firmware(rdev->me_fw);
2030  rdev->me_fw = NULL;
2031  release_firmware(rdev->rlc_fw);
2032  rdev->rlc_fw = NULL;
2033  }
2034  return err;
2035 }
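/* Illustrative sketch (hypothetical helper, not in r600.c): every load in
 * r600_init_microcode() follows the same request_firmware() pattern -
 * build the "radeon/<chip>_<block>.bin" name, fetch the blob, and reject
 * it when its size does not match the expected ucode size.
 */
static int r600_request_fw_sketch(struct device *dev, const char *chip,
				  const char *block, size_t req_size,
				  const struct firmware **fw)
{
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_%s.bin", chip, block);
	err = request_firmware(fw, fw_name, dev);
	if (err)
		return err;
	if ((*fw)->size != req_size) {	/* bogus length */
		release_firmware(*fw);
		*fw = NULL;
		return -EINVAL;
	}
	return 0;
}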
2036 
2037 static int r600_cp_load_microcode(struct radeon_device *rdev)
2038 {
2039  const __be32 *fw_data;
2040  int i;
2041 
2042  if (!rdev->me_fw || !rdev->pfp_fw)
2043  return -EINVAL;
2044 
2045  r600_cp_stop(rdev);
2046 
2047  WREG32(CP_RB_CNTL,
2048 #ifdef __BIG_ENDIAN
2049  BUF_SWAP_32BIT |
2050 #endif
2051  RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2052 
2053  /* Reset cp */
2054  WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2055  RREG32(GRBM_SOFT_RESET);
2056  mdelay(15);
2057  WREG32(GRBM_SOFT_RESET, 0);
2058 
2059  WREG32(CP_ME_RAM_WADDR, 0);
2060 
2061  fw_data = (const __be32 *)rdev->me_fw->data;
2062  WREG32(CP_ME_RAM_WADDR, 0);
2063  for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2064  WREG32(CP_ME_RAM_DATA,
2065  be32_to_cpup(fw_data++));
2066 
2067  fw_data = (const __be32 *)rdev->pfp_fw->data;
2068  WREG32(CP_PFP_UCODE_ADDR, 0);
2069  for (i = 0; i < PFP_UCODE_SIZE; i++)
2070  WREG32(CP_PFP_UCODE_DATA,
2071  be32_to_cpup(fw_data++));
2072 
2073  WREG32(CP_PFP_UCODE_ADDR, 0);
2074  WREG32(CP_ME_RAM_WADDR, 0);
2075  WREG32(CP_ME_RAM_RADDR, 0);
2076  return 0;
2077 }
2078 
2079 int r600_cp_start(struct radeon_device *rdev)
2080 {
2081  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2082  int r;
2083  uint32_t cp_me;
2084 
2085  r = radeon_ring_lock(rdev, ring, 7);
2086  if (r) {
2087  DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2088  return r;
2089  }
2090  radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2091  radeon_ring_write(ring, 0x1);
2092  if (rdev->family >= CHIP_RV770) {
2093  radeon_ring_write(ring, 0x0);
2094  radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2095  } else {
2096  radeon_ring_write(ring, 0x3);
2097  radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2098  }
2099  radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2100  radeon_ring_write(ring, 0);
2101  radeon_ring_write(ring, 0);
2102  radeon_ring_unlock_commit(rdev, ring);
2103 
2104  cp_me = 0xff;
2105  WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2106  return 0;
2107 }
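/* Illustrative count (not in the original): PACKET3(PACKET3_ME_INITIALIZE, 5)
 * above is one header dword followed by six payload dwords, since the count
 * field of a PM4 type-3 packet is "number of payload dwords minus one" -
 * which is exactly why radeon_ring_lock() reserves 7 dwords for this
 * sequence.
 */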
2108 
2109 int r600_cp_resume(struct radeon_device *rdev)
2110 {
2111  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2112  u32 tmp;
2113  u32 rb_bufsz;
2114  int r;
2115 
2116  /* Reset cp */
2117  WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2118  RREG32(GRBM_SOFT_RESET);
2119  mdelay(15);
2120  WREG32(GRBM_SOFT_RESET, 0);
2121 
2122  /* Set ring buffer size */
2123  rb_bufsz = drm_order(ring->ring_size / 8);
2124  tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2125 #ifdef __BIG_ENDIAN
2126  tmp |= BUF_SWAP_32BIT;
2127 #endif
2128  WREG32(CP_RB_CNTL, tmp);
2129  WREG32(CP_SEM_WAIT_TIMER, 0x0);
2130 
2131  /* Set the write pointer delay */
2132  WREG32(CP_RB_WPTR_DELAY, 0);
2133 
2134  /* Initialize the ring buffer's read and write pointers */
2135  WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2136  WREG32(CP_RB_RPTR_WR, 0);
2137  ring->wptr = 0;
2138  WREG32(CP_RB_WPTR, ring->wptr);
2139 
2140  /* set the wb address whether it's enabled or not */
2141  WREG32(CP_RB_RPTR_ADDR,
2142  ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2143  WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2144  WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2145 
2146  if (rdev->wb.enabled)
2147  WREG32(SCRATCH_UMSK, 0xff);
2148  else {
2149  tmp |= RB_NO_UPDATE;
2150  WREG32(SCRATCH_UMSK, 0);
2151  }
2152 
2153  mdelay(1);
2154  WREG32(CP_RB_CNTL, tmp);
2155 
2156  WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2157  WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2158 
2159  ring->rptr = RREG32(CP_RB_RPTR);
2160 
2161  r600_cp_start(rdev);
2162  ring->ready = true;
2163  r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2164  if (r) {
2165  ring->ready = false;
2166  return r;
2167  }
2168  return 0;
2169 }
2170 
2171 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2172 {
2173  u32 rb_bufsz;
2174  int r;
2175 
2176  /* Align ring size */
2177  rb_bufsz = drm_order(ring_size / 8);
2178  ring_size = (1 << (rb_bufsz + 1)) * 4;
2179  ring->ring_size = ring_size;
2180  ring->align_mask = 16 - 1;
2181 
2182  if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2183  r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2184  if (r) {
2185  DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2186  ring->rptr_save_reg = 0;
2187  }
2188  }
2189 }
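/* Illustrative arithmetic (not in the original): for the default GFX ring
 * request of 1024 * 1024 bytes, rb_bufsz = drm_order(1048576 / 8) = 17 and
 * ring_size = (1 << (17 + 1)) * 4 = 1048576, i.e. an exact power of two is
 * kept as-is while anything smaller is rounded up to one. align_mask = 15
 * pads command submissions to 16-dword boundaries.
 */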
2190 
2191 void r600_cp_fini(struct radeon_device *rdev)
2192 {
2193  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2194  r600_cp_stop(rdev);
2195  radeon_ring_fini(rdev, ring);
2196  radeon_scratch_free(rdev, ring->rptr_save_reg);
2197 }
2198 
2199 
2200 /*
2201  * GPU scratch register helper functions.
2202  */
2203 void r600_scratch_init(struct radeon_device *rdev)
2204 {
2205  int i;
2206 
2207  rdev->scratch.num_reg = 7;
2208  rdev->scratch.reg_base = SCRATCH_REG0;
2209  for (i = 0; i < rdev->scratch.num_reg; i++) {
2210  rdev->scratch.free[i] = true;
2211  rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2212  }
2213 }
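/* Illustrative sketch (hypothetical, not in this file): with the table set
 * up above, an allocator only needs to scan the free[] flags and hand out
 * the matching MMIO offset; radeon_scratch_get()/radeon_scratch_free()
 * work along these lines.
 */
static int scratch_get_sketch(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;	/* claim the slot */
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;	/* all scratch registers in use */
}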
2214 
2215 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2216 {
2217  uint32_t scratch;
2218  uint32_t tmp = 0;
2219  unsigned i;
2220  int r;
2221 
2222  r = radeon_scratch_get(rdev, &scratch);
2223  if (r) {
2224  DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2225  return r;
2226  }
2227  WREG32(scratch, 0xCAFEDEAD);
2228  r = radeon_ring_lock(rdev, ring, 3);
2229  if (r) {
2230  DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2231  radeon_scratch_free(rdev, scratch);
2232  return r;
2233  }
2234  radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2235  radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2236  radeon_ring_write(ring, 0xDEADBEEF);
2237  radeon_ring_unlock_commit(rdev, ring);
2238  for (i = 0; i < rdev->usec_timeout; i++) {
2239  tmp = RREG32(scratch);
2240  if (tmp == 0xDEADBEEF)
2241  break;
2242  DRM_UDELAY(1);
2243  }
2244  if (i < rdev->usec_timeout) {
2245  DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2246  } else {
2247  DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2248  ring->idx, scratch, tmp);
2249  r = -EINVAL;
2250  }
2251  radeon_scratch_free(rdev, scratch);
2252  return r;
2253 }
2254 
2255 void r600_fence_ring_emit(struct radeon_device *rdev,
2256  struct radeon_fence *fence)
2257 {
2258  struct radeon_ring *ring = &rdev->ring[fence->ring];
2259 
2260  if (rdev->wb.use_event) {
2261  u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2262  /* flush read cache over gart */
2263  radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2264  radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2265  PACKET3_VC_ACTION_ENA |
2266  PACKET3_SH_ACTION_ENA);
2267  radeon_ring_write(ring, 0xFFFFFFFF);
2268  radeon_ring_write(ring, 0);
2269  radeon_ring_write(ring, 10); /* poll interval */
2270  /* EVENT_WRITE_EOP - flush caches, send int */
2271  radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2272  radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2273  radeon_ring_write(ring, addr & 0xffffffff);
2274  radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2275  radeon_ring_write(ring, fence->seq);
2276  radeon_ring_write(ring, 0);
2277  } else {
2278  /* flush read cache over gart */
2279  radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2280  radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2281  PACKET3_VC_ACTION_ENA |
2282  PACKET3_SH_ACTION_ENA);
2283  radeon_ring_write(ring, 0xFFFFFFFF);
2284  radeon_ring_write(ring, 0);
2285  radeon_ring_write(ring, 10); /* poll interval */
2286  radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2287  radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2288  /* wait for 3D idle clean */
2289  radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2290  radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2291  radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2292  /* Emit fence sequence & fire IRQ */
2293  radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2294  radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2295  radeon_ring_write(ring, fence->seq);
2296  /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2297  radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2298  radeon_ring_write(ring, RB_INT_STAT);
2299  }
2300 }
2301 
2302 void r600_semaphore_ring_emit(struct radeon_device *rdev,
2303  struct radeon_ring *ring,
2304  struct radeon_semaphore *semaphore,
2305  bool emit_wait)
2306 {
2307  uint64_t addr = semaphore->gpu_addr;
2308  unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2309 
2310  if (rdev->family < CHIP_CAYMAN)
2311  sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2312 
2313  radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2314  radeon_ring_write(ring, addr & 0xffffffff);
2315  radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2316 }
2317 
2318 int r600_copy_blit(struct radeon_device *rdev,
2319  uint64_t src_offset,
2320  uint64_t dst_offset,
2321  unsigned num_gpu_pages,
2322  struct radeon_fence **fence)
2323 {
2324  struct radeon_semaphore *sem = NULL;
2325  struct radeon_sa_bo *vb = NULL;
2326  int r;
2327 
2328  r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
2329  if (r) {
2330  return r;
2331  }
2332  r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
2333  r600_blit_done_copy(rdev, fence, vb, sem);
2334  return 0;
2335 }
2336 
2337 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2338  uint32_t tiling_flags, uint32_t pitch,
2339  uint32_t offset, uint32_t obj_size)
2340 {
2341  /* FIXME: implement */
2342  return 0;
2343 }
2344 
2345 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2346 {
2347  /* FIXME: implement */
2348 }
2349 
2350 static int r600_startup(struct radeon_device *rdev)
2351 {
2352  struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2353  int r;
2354 
2355  /* enable pcie gen2 link */
2356  r600_pcie_gen2_enable(rdev);
2357 
2358  if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2359  r = r600_init_microcode(rdev);
2360  if (r) {
2361  DRM_ERROR("Failed to load firmware!\n");
2362  return r;
2363  }
2364  }
2365 
2366  r = r600_vram_scratch_init(rdev);
2367  if (r)
2368  return r;
2369 
2370  r600_mc_program(rdev);
2371  if (rdev->flags & RADEON_IS_AGP) {
2372  r600_agp_enable(rdev);
2373  } else {
2374  r = r600_pcie_gart_enable(rdev);
2375  if (r)
2376  return r;
2377  }
2378  r600_gpu_init(rdev);
2379  r = r600_blit_init(rdev);
2380  if (r) {
2381  r600_blit_fini(rdev);
2382  rdev->asic->copy.copy = NULL;
2383  dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2384  }
2385 
2386  /* allocate wb buffer */
2387  r = radeon_wb_init(rdev);
2388  if (r)
2389  return r;
2390 
2391  r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2392  if (r) {
2393  dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2394  return r;
2395  }
2396 
2397  /* Enable IRQ */
2398  r = r600_irq_init(rdev);
2399  if (r) {
2400  DRM_ERROR("radeon: IH init failed (%d).\n", r);
2401  radeon_irq_kms_fini(rdev);
2402  return r;
2403  }
2404  r600_irq_set(rdev);
2405 
2406  r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2407  R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2408  0, 0xfffff, RADEON_CP_PACKET2);
2409 
2410  if (r)
2411  return r;
2412  r = r600_cp_load_microcode(rdev);
2413  if (r)
2414  return r;
2415  r = r600_cp_resume(rdev);
2416  if (r)
2417  return r;
2418 
2419  r = radeon_ib_pool_init(rdev);
2420  if (r) {
2421  dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2422  return r;
2423  }
2424 
2425  r = r600_audio_init(rdev);
2426  if (r) {
2427  DRM_ERROR("radeon: audio init failed\n");
2428  return r;
2429  }
2430 
2431  return 0;
2432 }
2433 
2434 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2435 {
2436  uint32_t temp;
2437 
2438  temp = RREG32(CONFIG_CNTL);
2439  if (state == false) {
2440  temp &= ~(1<<0);
2441  temp |= (1<<1);
2442  } else {
2443  temp &= ~(1<<1);
2444  }
2445  WREG32(CONFIG_CNTL, temp);
2446 }
2447 
2448 int r600_resume(struct radeon_device *rdev)
2449 {
2450  int r;
2451 
2452  /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
2453  * posting performs the tasks needed to bring the GPU back into good
2454  * shape.
2455  */
2456  /* post card */
2457  atom_asic_init(rdev->mode_info.atom_context);
2458 
2459  rdev->accel_working = true;
2460  r = r600_startup(rdev);
2461  if (r) {
2462  DRM_ERROR("r600 startup failed on resume\n");
2463  rdev->accel_working = false;
2464  return r;
2465  }
2466 
2467  return r;
2468 }
2469 
2470 int r600_suspend(struct radeon_device *rdev)
2471 {
2472  r600_audio_fini(rdev);
2473  r600_cp_stop(rdev);
2474  rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2475  r600_irq_suspend(rdev);
2476  radeon_wb_disable(rdev);
2477  r600_pcie_gart_disable(rdev);
2478 
2479  return 0;
2480 }
2481 
2482 /* The plan is to move initialization into this function and use
2483  * helper functions so that radeon_device_init does pretty much
2484  * nothing more than call asic-specific functions. This should
2485  * also allow us to remove a bunch of callback functions
2486  * like vram_info.
2487  */
2488 int r600_init(struct radeon_device *rdev)
2489 {
2490  int r;
2491 
2492  if (r600_debugfs_mc_info_init(rdev)) {
2493  DRM_ERROR("Failed to register debugfs file for mc !\n");
2494  }
2495  /* Read BIOS */
2496  if (!radeon_get_bios(rdev)) {
2497  if (ASIC_IS_AVIVO(rdev))
2498  return -EINVAL;
2499  }
2500  /* Must be an ATOMBIOS */
2501  if (!rdev->is_atom_bios) {
2502  dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2503  return -EINVAL;
2504  }
2505  r = radeon_atombios_init(rdev);
2506  if (r)
2507  return r;
2508  /* Post card if necessary */
2509  if (!radeon_card_posted(rdev)) {
2510  if (!rdev->bios) {
2511  dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2512  return -EINVAL;
2513  }
2514  DRM_INFO("GPU not posted. posting now...\n");
2515  atom_asic_init(rdev->mode_info.atom_context);
2516  }
2517  /* Initialize scratch registers */
2518  r600_scratch_init(rdev);
2519  /* Initialize surface registers */
2520  radeon_surface_init(rdev);
2521  /* Initialize clocks */
2522  radeon_get_clock_info(rdev->ddev);
2523  /* Fence driver */
2524  r = radeon_fence_driver_init(rdev);
2525  if (r)
2526  return r;
2527  if (rdev->flags & RADEON_IS_AGP) {
2528  r = radeon_agp_init(rdev);
2529  if (r)
2530  radeon_agp_disable(rdev);
2531  }
2532  r = r600_mc_init(rdev);
2533  if (r)
2534  return r;
2535  /* Memory manager */
2536  r = radeon_bo_init(rdev);
2537  if (r)
2538  return r;
2539 
2540  r = radeon_irq_kms_init(rdev);
2541  if (r)
2542  return r;
2543 
2544  rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2545  r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2546 
2547  rdev->ih.ring_obj = NULL;
2548  r600_ih_ring_init(rdev, 64 * 1024);
2549 
2550  r = r600_pcie_gart_init(rdev);
2551  if (r)
2552  return r;
2553 
2554  rdev->accel_working = true;
2555  r = r600_startup(rdev);
2556  if (r) {
2557  dev_err(rdev->dev, "disabling GPU acceleration\n");
2558  r600_cp_fini(rdev);
2559  r600_irq_fini(rdev);
2560  radeon_wb_fini(rdev);
2561  radeon_ib_pool_fini(rdev);
2562  radeon_irq_kms_fini(rdev);
2563  r600_pcie_gart_fini(rdev);
2564  rdev->accel_working = false;
2565  }
2566 
2567  return 0;
2568 }
2569 
2570 void r600_fini(struct radeon_device *rdev)
2571 {
2572  r600_audio_fini(rdev);
2573  r600_blit_fini(rdev);
2574  r600_cp_fini(rdev);
2575  r600_irq_fini(rdev);
2576  radeon_wb_fini(rdev);
2577  radeon_ib_pool_fini(rdev);
2578  radeon_irq_kms_fini(rdev);
2579  r600_pcie_gart_fini(rdev);
2580  r600_vram_scratch_fini(rdev);
2581  radeon_agp_fini(rdev);
2582  radeon_gem_fini(rdev);
2583  radeon_fence_driver_fini(rdev);
2584  radeon_bo_fini(rdev);
2585  radeon_atombios_fini(rdev);
2586  kfree(rdev->bios);
2587  rdev->bios = NULL;
2588 }
2589 
2590 
2591 /*
2592  * CS stuff
2593  */
2594 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2595 {
2596  struct radeon_ring *ring = &rdev->ring[ib->ring];
2597  u32 next_rptr;
2598 
2599  if (ring->rptr_save_reg) {
2600  next_rptr = ring->wptr + 3 + 4;
2601  radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2602  radeon_ring_write(ring, ((ring->rptr_save_reg -
2603  PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2604  radeon_ring_write(ring, next_rptr);
2605  } else if (rdev->wb.enabled) {
2606  next_rptr = ring->wptr + 5 + 4;
2607  radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
2608  radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2609  radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
2610  radeon_ring_write(ring, next_rptr);
2611  radeon_ring_write(ring, 0);
2612  }
2613 
2614  radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2615  radeon_ring_write(ring,
2616 #ifdef __BIG_ENDIAN
2617  (2 << 0) |
2618 #endif
2619  (ib->gpu_addr & 0xFFFFFFFC));
2620  radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
2621  radeon_ring_write(ring, ib->length_dw);
2622 }
2623 
2624 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2625 {
2626  struct radeon_ib ib;
2627  uint32_t scratch;
2628  uint32_t tmp = 0;
2629  unsigned i;
2630  int r;
2631 
2632  r = radeon_scratch_get(rdev, &scratch);
2633  if (r) {
2634  DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2635  return r;
2636  }
2637  WREG32(scratch, 0xCAFEDEAD);
2638  r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
2639  if (r) {
2640  DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2641  goto free_scratch;
2642  }
2643  ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2644  ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2645  ib.ptr[2] = 0xDEADBEEF;
2646  ib.length_dw = 3;
2647  r = radeon_ib_schedule(rdev, &ib, NULL);
2648  if (r) {
2649  DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2650  goto free_ib;
2651  }
2652  r = radeon_fence_wait(ib.fence, false);
2653  if (r) {
2654  DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2655  goto free_ib;
2656  }
2657  for (i = 0; i < rdev->usec_timeout; i++) {
2658  tmp = RREG32(scratch);
2659  if (tmp == 0xDEADBEEF)
2660  break;
2661  DRM_UDELAY(1);
2662  }
2663  if (i < rdev->usec_timeout) {
2664  DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
2665  } else {
2666  DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2667  scratch, tmp);
2668  r = -EINVAL;
2669  }
2670 free_ib:
2671  radeon_ib_free(rdev, &ib);
2672 free_scratch:
2673  radeon_scratch_free(rdev, scratch);
2674  return r;
2675 }
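/* Illustrative count (not in the original): the test IB above is exactly
 * three dwords - the PACKET3(PACKET3_SET_CONFIG_REG, 1) header plus two
 * payload dwords (register offset, value) - which is why ib.length_dw is
 * set to 3.
 */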
2676 
2677 /*
2678  * Interrupts
2679  *
2680  * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
2681  * the same as the CP ring buffer, but in reverse. Rather than the CPU
2682  * writing to the ring and the GPU consuming, the GPU writes to the ring
2683  * and the host consumes. As the host irq handler processes interrupts, it
2684  * increments the rptr. When the rptr catches up with the wptr, all the
2685  * current interrupts have been processed.
2686  */
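/* Minimal consumer sketch of the scheme described above (illustrative, not
 * the driver's real handler - see r600_irq_process() below): the GPU
 * advances wptr as it posts vectors, the host walks rptr toward it, and
 * both wrap through the power-of-two ptr_mask.
 */
static void ih_drain_sketch(struct radeon_device *rdev, u32 wptr)
{
	u32 rptr = rdev->ih.rptr;

	while (rptr != wptr) {
		/* each IH vector is 16 bytes; consume one and wrap */
		rptr = (rptr + 16) & rdev->ih.ptr_mask;
	}
	rdev->ih.rptr = rptr;
}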
2687 
2688 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2689 {
2690  u32 rb_bufsz;
2691 
2692  /* Align ring size */
2693  rb_bufsz = drm_order(ring_size / 4);
2694  ring_size = (1 << rb_bufsz) * 4;
2695  rdev->ih.ring_size = ring_size;
2696  rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2697  rdev->ih.rptr = 0;
2698 }
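/* Illustrative arithmetic (not in the original): for the 64 * 1024 byte IH
 * ring allocated from r600_init(), rb_bufsz = drm_order(65536 / 4) = 14,
 * ring_size = (1 << 14) * 4 = 65536, and ptr_mask = 0xffff, so byte
 * offsets wrap with a single AND instead of a modulo.
 */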
2699 
2700 int r600_ih_ring_alloc(struct radeon_device *rdev)
2701 {
2702  int r;
2703 
2704  /* Allocate ring buffer */
2705  if (rdev->ih.ring_obj == NULL) {
2706  r = radeon_bo_create(rdev, rdev->ih.ring_size,
2707  PAGE_SIZE, true,
2708  RADEON_GEM_DOMAIN_GTT,
2709  NULL, &rdev->ih.ring_obj);
2710  if (r) {
2711  DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2712  return r;
2713  }
2714  r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2715  if (unlikely(r != 0))
2716  return r;
2717  r = radeon_bo_pin(rdev->ih.ring_obj,
2718  RADEON_GEM_DOMAIN_GTT,
2719  &rdev->ih.gpu_addr);
2720  if (r) {
2721  radeon_bo_unreserve(rdev->ih.ring_obj);
2722  DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2723  return r;
2724  }
2725  r = radeon_bo_kmap(rdev->ih.ring_obj,
2726  (void **)&rdev->ih.ring);
2727  radeon_bo_unreserve(rdev->ih.ring_obj);
2728  if (r) {
2729  DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2730  return r;
2731  }
2732  }
2733  return 0;
2734 }
2735 
2736 void r600_ih_ring_fini(struct radeon_device *rdev)
2737 {
2738  int r;
2739  if (rdev->ih.ring_obj) {
2740  r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2741  if (likely(r == 0)) {
2742  radeon_bo_kunmap(rdev->ih.ring_obj);
2743  radeon_bo_unpin(rdev->ih.ring_obj);
2744  radeon_bo_unreserve(rdev->ih.ring_obj);
2745  }
2746  radeon_bo_unref(&rdev->ih.ring_obj);
2747  rdev->ih.ring = NULL;
2748  rdev->ih.ring_obj = NULL;
2749  }
2750 }
2751 
2752 void r600_rlc_stop(struct radeon_device *rdev)
2753 {
2754 
2755  if ((rdev->family >= CHIP_RV770) &&
2756  (rdev->family <= CHIP_RV740)) {
2757  /* r7xx asics need to soft reset RLC before halting */
2758  WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2759  RREG32(SRBM_SOFT_RESET);
2760  mdelay(15);
2761  WREG32(SRBM_SOFT_RESET, 0);
2762  RREG32(SRBM_SOFT_RESET);
2763  }
2764 
2765  WREG32(RLC_CNTL, 0);
2766 }
2767 
2768 static void r600_rlc_start(struct radeon_device *rdev)
2769 {
2770  WREG32(RLC_CNTL, RLC_ENABLE);
2771 }
2772 
2773 static int r600_rlc_init(struct radeon_device *rdev)
2774 {
2775  u32 i;
2776  const __be32 *fw_data;
2777 
2778  if (!rdev->rlc_fw)
2779  return -EINVAL;
2780 
2781  r600_rlc_stop(rdev);
2782 
2783  WREG32(RLC_HB_CNTL, 0);
2784 
2785  if (rdev->family == CHIP_ARUBA) {
2786  WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
2787  WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
2788  }
2789  if (rdev->family <= CHIP_CAYMAN) {
2790  WREG32(RLC_HB_BASE, 0);
2791  WREG32(RLC_HB_RPTR, 0);
2792  WREG32(RLC_HB_WPTR, 0);
2793  }
2794  if (rdev->family <= CHIP_CAICOS) {
2795  WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2796  WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2797  }
2798  WREG32(RLC_MC_CNTL, 0);
2799  WREG32(RLC_UCODE_CNTL, 0);
2800 
2801  fw_data = (const __be32 *)rdev->rlc_fw->data;
2802  if (rdev->family >= CHIP_ARUBA) {
2803  for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
2804  WREG32(RLC_UCODE_ADDR, i);
2805  WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2806  }
2807  } else if (rdev->family >= CHIP_CAYMAN) {
2808  for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
2809  WREG32(RLC_UCODE_ADDR, i);
2810  WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2811  }
2812  } else if (rdev->family >= CHIP_CEDAR) {
2813  for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2814  WREG32(RLC_UCODE_ADDR, i);
2815  WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2816  }
2817  } else if (rdev->family >= CHIP_RV770) {
2818  for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2819  WREG32(RLC_UCODE_ADDR, i);
2820  WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2821  }
2822  } else {
2823  for (i = 0; i < RLC_UCODE_SIZE; i++) {
2824  WREG32(RLC_UCODE_ADDR, i);
2825  WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2826  }
2827  }
2828  WREG32(RLC_UCODE_ADDR, 0);
2829 
2830  r600_rlc_start(rdev);
2831 
2832  return 0;
2833 }
2834 
2835 static void r600_enable_interrupts(struct radeon_device *rdev)
2836 {
2837  u32 ih_cntl = RREG32(IH_CNTL);
2838  u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2839 
2840  ih_cntl |= ENABLE_INTR;
2841  ih_rb_cntl |= IH_RB_ENABLE;
2842  WREG32(IH_CNTL, ih_cntl);
2843  WREG32(IH_RB_CNTL, ih_rb_cntl);
2844  rdev->ih.enabled = true;
2845 }
2846 
2847 static void r600_disable_interrupts(struct radeon_device *rdev)
2848 {
2849  u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2850  u32 ih_cntl = RREG32(IH_CNTL);
2851 
2852  ih_rb_cntl &= ~IH_RB_ENABLE;
2853  ih_cntl &= ~ENABLE_INTR;
2854  WREG32(IH_RB_CNTL, ih_rb_cntl);
2855  WREG32(IH_CNTL, ih_cntl);
2856  /* set rptr, wptr to 0 */
2857  WREG32(IH_RB_RPTR, 0);
2858  WREG32(IH_RB_WPTR, 0);
2859  rdev->ih.enabled = false;
2860  rdev->ih.rptr = 0;
2861 }
2862 
2863 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2864 {
2865  u32 tmp;
2866 
2867  WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2868  WREG32(GRBM_INT_CNTL, 0);
2869  WREG32(DxMODE_INT_MASK, 0);
2870  WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
2871  WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
2872  if (ASIC_IS_DCE3(rdev)) {
2873  WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2874  WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2875  tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2876  WREG32(DC_HPD1_INT_CONTROL, tmp);
2877  tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2878  WREG32(DC_HPD2_INT_CONTROL, tmp);
2879  tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2880  WREG32(DC_HPD3_INT_CONTROL, tmp);
2881  tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2882  WREG32(DC_HPD4_INT_CONTROL, tmp);
2883  if (ASIC_IS_DCE32(rdev)) {
2884  tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2885  WREG32(DC_HPD5_INT_CONTROL, tmp);
2886  tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2887  WREG32(DC_HPD6_INT_CONTROL, tmp);
2888  tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2889  WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
2890  tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2891  WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
2892  } else {
2893  tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2894  WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
2895  tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2896  WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
2897  }
2898  } else {
2899  WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2900  WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2901  tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2902  WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2903  tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2904  WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2905  tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2906  WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2907  tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2908  WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
2909  tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2910  WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
2911  }
2912 }
2913 
2914 int r600_irq_init(struct radeon_device *rdev)
2915 {
2916  int ret = 0;
2917  int rb_bufsz;
2918  u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2919 
2920  /* allocate ring */
2921  ret = r600_ih_ring_alloc(rdev);
2922  if (ret)
2923  return ret;
2924 
2925  /* disable irqs */
2926  r600_disable_interrupts(rdev);
2927 
2928  /* init rlc */
2929  ret = r600_rlc_init(rdev);
2930  if (ret) {
2931  r600_ih_ring_fini(rdev);
2932  return ret;
2933  }
2934 
2935  /* setup interrupt control */
2936  /* set dummy read address to ring address */
2937  WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2938  interrupt_cntl = RREG32(INTERRUPT_CNTL);
2939  /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2940  * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2941  */
2942  interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2943  /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2944  interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2945  WREG32(INTERRUPT_CNTL, interrupt_cntl);
2946 
2947  WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2948  rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2949 
2950  ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2951  IH_WPTR_OVERFLOW_CLEAR |
2952  (rb_bufsz << 1));
2953 
2954  if (rdev->wb.enabled)
2955  ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
2956 
2957  /* set the writeback address whether it's enabled or not */
2958  WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
2959  WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
2960 
2961  WREG32(IH_RB_CNTL, ih_rb_cntl);
2962 
2963  /* set rptr, wptr to 0 */
2964  WREG32(IH_RB_RPTR, 0);
2965  WREG32(IH_RB_WPTR, 0);
2966 
2967  /* Default settings for IH_CNTL (disabled at first) */
2968  ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2969  /* RPTR_REARM only works if msi's are enabled */
2970  if (rdev->msi_enabled)
2971  ih_cntl |= RPTR_REARM;
2972  WREG32(IH_CNTL, ih_cntl);
2973 
2974  /* force the active interrupt state to all disabled */
2975  if (rdev->family >= CHIP_CEDAR)
2976  evergreen_disable_interrupt_state(rdev);
2977  else
2978  r600_disable_interrupt_state(rdev);
2979 
2980  /* at this point everything should be setup correctly to enable master */
2981  pci_set_master(rdev->pdev);
2982 
2983  /* enable irqs */
2984  r600_enable_interrupts(rdev);
2985 
2986  return ret;
2987 }
2988 
2989 void r600_irq_suspend(struct radeon_device *rdev)
2990 {
2991  r600_irq_disable(rdev);
2992  r600_rlc_stop(rdev);
2993 }
2994 
2995 void r600_irq_fini(struct radeon_device *rdev)
2996 {
2997  r600_irq_suspend(rdev);
2998  r600_ih_ring_fini(rdev);
2999 }
3000 
3001 int r600_irq_set(struct radeon_device *rdev)
3002 {
3003  u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3004  u32 mode_int = 0;
3005  u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3006  u32 grbm_int_cntl = 0;
3007  u32 hdmi0, hdmi1;
3008  u32 d1grph = 0, d2grph = 0;
3009 
3010  if (!rdev->irq.installed) {
3011  WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3012  return -EINVAL;
3013  }
3014  /* don't enable anything if the ih is disabled */
3015  if (!rdev->ih.enabled) {
3016  r600_disable_interrupts(rdev);
3017  /* force the active interrupt state to all disabled */
3018  r600_disable_interrupt_state(rdev);
3019  return 0;
3020  }
3021 
3022  if (ASIC_IS_DCE3(rdev)) {
3023  hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3024  hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3025  hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3026  hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3027  if (ASIC_IS_DCE32(rdev)) {
3028  hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3029  hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3030  hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3031  hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3032  } else {
3033  hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3034  hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3035  }
3036  } else {
3037  hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3038  hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3039  hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3040  hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3041  hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3042  }
3043 
3044  if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3045  DRM_DEBUG("r600_irq_set: sw int\n");
3046  cp_int_cntl |= RB_INT_ENABLE;
3047  cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3048  }
3049  if (rdev->irq.crtc_vblank_int[0] ||
3050  atomic_read(&rdev->irq.pflip[0])) {
3051  DRM_DEBUG("r600_irq_set: vblank 0\n");
3052  mode_int |= D1MODE_VBLANK_INT_MASK;
3053  }
3054  if (rdev->irq.crtc_vblank_int[1] ||
3055  atomic_read(&rdev->irq.pflip[1])) {
3056  DRM_DEBUG("r600_irq_set: vblank 1\n");
3057  mode_int |= D2MODE_VBLANK_INT_MASK;
3058  }
3059  if (rdev->irq.hpd[0]) {
3060  DRM_DEBUG("r600_irq_set: hpd 1\n");
3061  hpd1 |= DC_HPDx_INT_EN;
3062  }
3063  if (rdev->irq.hpd[1]) {
3064  DRM_DEBUG("r600_irq_set: hpd 2\n");
3065  hpd2 |= DC_HPDx_INT_EN;
3066  }
3067  if (rdev->irq.hpd[2]) {
3068  DRM_DEBUG("r600_irq_set: hpd 3\n");
3069  hpd3 |= DC_HPDx_INT_EN;
3070  }
3071  if (rdev->irq.hpd[3]) {
3072  DRM_DEBUG("r600_irq_set: hpd 4\n");
3073  hpd4 |= DC_HPDx_INT_EN;
3074  }
3075  if (rdev->irq.hpd[4]) {
3076  DRM_DEBUG("r600_irq_set: hpd 5\n");
3077  hpd5 |= DC_HPDx_INT_EN;
3078  }
3079  if (rdev->irq.hpd[5]) {
3080  DRM_DEBUG("r600_irq_set: hpd 6\n");
3081  hpd6 |= DC_HPDx_INT_EN;
3082  }
3083  if (rdev->irq.afmt[0]) {
3084  DRM_DEBUG("r600_irq_set: hdmi 0\n");
3085  hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3086  }
3087  if (rdev->irq.afmt[1]) {
3088  DRM_DEBUG("r600_irq_set: hdmi 1\n");
3089  hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3090  }
3091 
3092  WREG32(CP_INT_CNTL, cp_int_cntl);
3093  WREG32(DxMODE_INT_MASK, mode_int);
3094  WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3095  WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3096  WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3097  if (ASIC_IS_DCE3(rdev)) {
3098  WREG32(DC_HPD1_INT_CONTROL, hpd1);
3099  WREG32(DC_HPD2_INT_CONTROL, hpd2);
3100  WREG32(DC_HPD3_INT_CONTROL, hpd3);
3101  WREG32(DC_HPD4_INT_CONTROL, hpd4);
3102  if (ASIC_IS_DCE32(rdev)) {
3103  WREG32(DC_HPD5_INT_CONTROL, hpd5);
3104  WREG32(DC_HPD6_INT_CONTROL, hpd6);
3105  WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3106  WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3107  } else {
3108  WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3109  WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3110  }
3111  } else {
3112  WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3113  WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3114  WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3115  WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3116  WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3117  }
3118 
3119  return 0;
3120 }
3121 
3122 static void r600_irq_ack(struct radeon_device *rdev)
3123 {
3124  u32 tmp;
3125 
3126  if (ASIC_IS_DCE3(rdev)) {
3127  rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3128  rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3129  rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3130  if (ASIC_IS_DCE32(rdev)) {
3131  rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3132  rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3133  } else {
3134  rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3135  rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3136  }
3137  } else {
3138  rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3139  rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3140  rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3141  rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3142  rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3143  }
3144  rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3145  rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3146 
3147  if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3148  WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3149  if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3150  WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3151  if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3152  WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3153  if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3154  WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3155  if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3156  WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3157  if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3158  WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3159  if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3160  if (ASIC_IS_DCE3(rdev)) {
3161  tmp = RREG32(DC_HPD1_INT_CONTROL);
3162  tmp |= DC_HPDx_INT_ACK;
3163  WREG32(DC_HPD1_INT_CONTROL, tmp);
3164  } else {
3165  tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3166  tmp |= DC_HPDx_INT_ACK;
3167  WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3168  }
3169  }
3170  if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3171  if (ASIC_IS_DCE3(rdev)) {
3172  tmp = RREG32(DC_HPD2_INT_CONTROL);
3173  tmp |= DC_HPDx_INT_ACK;
3174  WREG32(DC_HPD2_INT_CONTROL, tmp);
3175  } else {
3176  tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3177  tmp |= DC_HPDx_INT_ACK;
3178  WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3179  }
3180  }
3181  if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3182  if (ASIC_IS_DCE3(rdev)) {
3183  tmp = RREG32(DC_HPD3_INT_CONTROL);
3184  tmp |= DC_HPDx_INT_ACK;
3185  WREG32(DC_HPD3_INT_CONTROL, tmp);
3186  } else {
3187  tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3188  tmp |= DC_HPDx_INT_ACK;
3189  WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3190  }
3191  }
3192  if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3193  tmp = RREG32(DC_HPD4_INT_CONTROL);
3194  tmp |= DC_HPDx_INT_ACK;
3195  WREG32(DC_HPD4_INT_CONTROL, tmp);
3196  }
3197  if (ASIC_IS_DCE32(rdev)) {
3198  if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3199  tmp = RREG32(DC_HPD5_INT_CONTROL);
3200  tmp |= DC_HPDx_INT_ACK;
3201  WREG32(DC_HPD5_INT_CONTROL, tmp);
3202  }
3203  if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3204  tmp = RREG32(DC_HPD6_INT_CONTROL);
3205  tmp |= DC_HPDx_INT_ACK;
3206  WREG32(DC_HPD6_INT_CONTROL, tmp);
3207  }
3208  if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3209  tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3210  tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3211  WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3212  }
3213  if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3214  tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3215  tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3216  WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3217  }
3218  } else {
3219  if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3220  tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3221  tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3222  WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3223  }
3224  if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3225  if (ASIC_IS_DCE3(rdev)) {
3226  tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3227  tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3228  WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3229  } else {
3230  tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3231  tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3232  WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3233  }
3234  }
3235  }
3236 }
3237 
3238 static void r600_irq_disable(struct radeon_device *rdev)
3239 {
3240  r600_disable_interrupts(rdev);
3241  /* Wait and acknowledge irq */
3242  mdelay(1);
3243  r600_irq_ack(rdev);
3244  r600_disable_interrupt_state(rdev);
3245 }
3246 
3247 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3248 {
3249  u32 wptr, tmp;
3250 
3251  if (rdev->wb.enabled)
3252  wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3253  else
3254  wptr = RREG32(IH_RB_WPTR);
3255 
3256  if (wptr & RB_OVERFLOW) {
3257  /* When a ring buffer overflow happens, start parsing interrupts
3258  * from the last non-overwritten vector (wptr + 16). Hopefully
3259  * this should allow us to catch up.
3260  */
3261  dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3262  wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3263  rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3264  tmp = RREG32(IH_RB_CNTL);
3265  tmp |= IH_WPTR_OVERFLOW_CLEAR;
3266  WREG32(IH_RB_CNTL, tmp);
3267  }
3268  return (wptr & rdev->ih.ptr_mask);
3269 }
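/* Illustrative example (not in the original) of the resync above: with the
 * default 64 KB ring, ptr_mask = 0xffff. If an overflow is flagged with a
 * wptr of 0x0020, the oldest vector still intact starts one slot later, so
 * rptr is forced to (0x0020 + 16) & 0xffff = 0x0030 and processing resumes
 * from there toward the returned wptr.
 */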
3270 
3271 /* r600 IV Ring
3272  * Each IV ring entry is 128 bits:
3273  * [7:0] - interrupt source id
3274  * [31:8] - reserved
3275  * [59:32] - interrupt source data
3276  * [127:60] - reserved
3277  *
3278  * The basic interrupt vector entries
3279  * are decoded as follows:
3280  * src_id src_data description
3281  * 1 0 D1 Vblank
3282  * 1 1 D1 Vline
3283  * 5 0 D2 Vblank
3284  * 5 1 D2 Vline
3285  * 19 0 FP Hot plug detection A
3286  * 19 1 FP Hot plug detection B
3287  * 19 2 DAC A auto-detection
3288  * 19 3 DAC B auto-detection
3289  * 21 4 HDMI block A
3290  * 21 5 HDMI block B
3291  * 176 - CP_INT RB
3292  * 177 - CP_INT IB1
3293  * 178 - CP_INT IB2
3294  * 181 - EOP Interrupt
3295  * 233 - GUI Idle
3296  *
3297  * Note, these are based on r600 and may need to be
3298  * adjusted or added to on newer asics
3299  */
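/* Decoding sketch for the 128-bit vectors described above (illustrative;
 * it mirrors what r600_irq_process() does below): each entry is four
 * little-endian dwords, of which only the first two carry data.
 */
static void iv_decode_sketch(struct radeon_device *rdev, u32 rptr,
			     u32 *src_id, u32 *src_data)
{
	u32 ring_index = rptr / 4;	/* rptr counts bytes, ring[] dwords */

	*src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
	*src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
}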
3300 
3301 int r600_irq_process(struct radeon_device *rdev)
3302 {
3303  u32 wptr;
3304  u32 rptr;
3305  u32 src_id, src_data;
3306  u32 ring_index;
3307  bool queue_hotplug = false;
3308  bool queue_hdmi = false;
3309 
3310  if (!rdev->ih.enabled || rdev->shutdown)
3311  return IRQ_NONE;
3312 
3313  /* No MSIs, need a dummy read to flush PCI DMAs */
3314  if (!rdev->msi_enabled)
3315  RREG32(IH_RB_WPTR);
3316 
3317  wptr = r600_get_ih_wptr(rdev);
3318 
3319 restart_ih:
3320  /* is somebody else already processing irqs? */
3321  if (atomic_xchg(&rdev->ih.lock, 1))
3322  return IRQ_NONE;
3323 
3324  rptr = rdev->ih.rptr;
3325  DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3326 
3327  /* Order reading of wptr vs. reading of IH ring data */
3328  rmb();
3329 
3330  /* display interrupts */
3331  r600_irq_ack(rdev);
3332 
3333  while (rptr != wptr) {
3334  /* wptr/rptr are in bytes! */
3335  ring_index = rptr / 4;
3336  src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3337  src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3338 
3339  switch (src_id) {
3340  case 1: /* D1 vblank/vline */
3341  switch (src_data) {
3342  case 0: /* D1 vblank */
3343  if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3344  if (rdev->irq.crtc_vblank_int[0]) {
3345  drm_handle_vblank(rdev->ddev, 0);
3346  rdev->pm.vblank_sync = true;
3347  wake_up(&rdev->irq.vblank_queue);
3348  }
3349  if (atomic_read(&rdev->irq.pflip[0]))
3350  radeon_crtc_handle_flip(rdev, 0);
3351  rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3352  DRM_DEBUG("IH: D1 vblank\n");
3353  }
3354  break;
3355  case 1: /* D1 vline */
3356  if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3357  rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3358  DRM_DEBUG("IH: D1 vline\n");
3359  }
3360  break;
3361  default:
3362  DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3363  break;
3364  }
3365  break;
3366  case 5: /* D2 vblank/vline */
3367  switch (src_data) {
3368  case 0: /* D2 vblank */
3369  if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3370  if (rdev->irq.crtc_vblank_int[1]) {
3371  drm_handle_vblank(rdev->ddev, 1);
3372  rdev->pm.vblank_sync = true;
3373  wake_up(&rdev->irq.vblank_queue);
3374  }
3375  if (atomic_read(&rdev->irq.pflip[1]))
3376  radeon_crtc_handle_flip(rdev, 1);
3377  rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3378  DRM_DEBUG("IH: D2 vblank\n");
3379  }
3380  break;
3381  case 1: /* D2 vline */
3382  if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3383  rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3384  DRM_DEBUG("IH: D2 vline\n");
3385  }
3386  break;
3387  default:
3388  DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3389  break;
3390  }
3391  break;
3392  case 19: /* HPD/DAC hotplug */
3393  switch (src_data) {
3394  case 0:
3395  if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3396  rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3397  queue_hotplug = true;
3398  DRM_DEBUG("IH: HPD1\n");
3399  }
3400  break;
3401  case 1:
3402  if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3403  rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3404  queue_hotplug = true;
3405  DRM_DEBUG("IH: HPD2\n");
3406  }
3407  break;
3408  case 4:
3409  if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3410  rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3411  queue_hotplug = true;
3412  DRM_DEBUG("IH: HPD3\n");
3413  }
3414  break;
3415  case 5:
3416  if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3417  rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3418  queue_hotplug = true;
3419  DRM_DEBUG("IH: HPD4\n");
3420  }
3421  break;
3422  case 10:
3423  if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3424  rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3425  queue_hotplug = true;
3426  DRM_DEBUG("IH: HPD5\n");
3427  }
3428  break;
3429  case 12:
3430  if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3431  rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3432  queue_hotplug = true;
3433  DRM_DEBUG("IH: HPD6\n");
3434  }
3435  break;
3436  default:
3437  DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3438  break;
3439  }
3440  break;
3441  case 21: /* hdmi */
3442  switch (src_data) {
3443  case 4:
3444  if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3445  rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3446  queue_hdmi = true;
3447  DRM_DEBUG("IH: HDMI0\n");
3448  }
3449  break;
3450  case 5:
3451  if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3452  rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3453  queue_hdmi = true;
3454  DRM_DEBUG("IH: HDMI1\n");
3455  }
3456  break;
3457  default:
3458  DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3459  break;
3460  }
3461  break;
3462  case 176: /* CP_INT in ring buffer */
3463  case 177: /* CP_INT in IB1 */
3464  case 178: /* CP_INT in IB2 */
3465  DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3466  radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3467  break;
3468  case 181: /* CP EOP event */
3469  DRM_DEBUG("IH: CP EOP\n");
3470  radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3471  break;
3472  case 233: /* GUI IDLE */
3473  DRM_DEBUG("IH: GUI idle\n");
3474  break;
3475  default:
3476  DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3477  break;
3478  }
3479 
3480  /* wptr/rptr are in bytes! */
3481  rptr += 16;
3482  rptr &= rdev->ih.ptr_mask;
3483  }
3484  if (queue_hotplug)
3485  schedule_work(&rdev->hotplug_work);
3486  if (queue_hdmi)
3487  schedule_work(&rdev->audio_work);
3488  rdev->ih.rptr = rptr;
3489  WREG32(IH_RB_RPTR, rdev->ih.rptr);
3490  atomic_set(&rdev->ih.lock, 0);
3491 
3492  /* make sure wptr hasn't changed while processing */
3493  wptr = r600_get_ih_wptr(rdev);
3494  if (wptr != rptr)
3495  goto restart_ih;
3496 
3497  return IRQ_HANDLED;
3498 }
3499 
3500 /*
3501  * Debugfs info
3502  */
3503 #if defined(CONFIG_DEBUG_FS)
3504 
3505 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3506 {
3507  struct drm_info_node *node = (struct drm_info_node *) m->private;
3508  struct drm_device *dev = node->minor->dev;
3509  struct radeon_device *rdev = dev->dev_private;
3510 
3511  DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3512  DREG32_SYS(m, rdev, VM_L2_STATUS);
3513  return 0;
3514 }
3515 
3516 static struct drm_info_list r600_mc_info_list[] = {
3517  {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3518 };
3519 #endif
3520 
3521 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3522 {
3523 #if defined(CONFIG_DEBUG_FS)
3524  return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3525 #else
3526  return 0;
3527 #endif
3528 }
3529 
3530 /**
3531  * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3532  * @rdev: radeon device structure
3533  * @bo: buffer object struct which userspace is waiting for idle
3534  *
3535  * Some R6XX/R7XX hardware does not seem to take into account an HDP
3536  * flush performed through the ring buffer, which leads to corruption
3537  * in rendering; see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
3538  * To avoid this we need to flush the HDP cache here explicitly.
3539  */
3540 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3541 {
3542  /* r7xx hw bug: use a write to HDP_DEBUG1 followed by an fb read
3543  * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
3544  * This seems to cause problems on some AGP cards, so just use the old
3545  * method for them.
3546  */
3547  if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3548  rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
3549  void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3550  u32 tmp;
3551 
3552  WREG32(HDP_DEBUG1, 0);
3553  tmp = readl((void __iomem *)ptr);
3554  } else
3555  WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0x1);
3556 }
3557 
3558 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
3559 {
3560  u32 link_width_cntl, mask, target_reg;
3561 
3562  if (rdev->flags & RADEON_IS_IGP)
3563  return;
3564 
3565  if (!(rdev->flags & RADEON_IS_PCIE))
3566  return;
3567 
3568  /* x2 cards have a special sequence */
3569  if (ASIC_IS_X2(rdev))
3570  return;
3571 
3572  /* FIXME wait for idle */
3573 
3574  switch (lanes) {
3575  case 0:
3576  mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
3577  break;
3578  case 1:
3579  mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
3580  break;
3581  case 2:
3582  mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
3583  break;
3584  case 4:
3585  mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
3586  break;
3587  case 8:
3588  mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
3589  break;
3590  case 12:
3591  mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
3592  break;
3593  case 16:
3594  default:
3595  mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
3596  break;
3597  }
3598 
3599  link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3600 
3601  if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
3602  (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
3603  return;
3604 
3605  if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
3606  return;
3607 
3608  link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
3609  RADEON_PCIE_LC_RECONFIG_NOW |
3610  R600_PCIE_LC_RENEGOTIATE_EN |
3611  R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
3612  link_width_cntl |= mask;
3613 
3614  WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3615 
3616  /* some northbridges can renegotiate the link rather than requiring
3617  * a complete re-config.
3618  * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
3619  */
3620  if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
3621  link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN;
3622  else
3623  link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
3624 
3625  WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
3626  RADEON_PCIE_LC_RECONFIG_NOW));
3627 
3628  if (rdev->family >= CHIP_RV770)
3629  target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
3630  else
3631  target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
3632 
3633  /* wait for lane set to complete */
3634  link_width_cntl = RREG32(target_reg);
3635  while (link_width_cntl == 0xffffffff)
3636  link_width_cntl = RREG32(target_reg);
3637 
3638 }
3639 
3640 int r600_get_pcie_lanes(struct radeon_device *rdev)
3641 {
3642  u32 link_width_cntl;
3643 
3644  if (rdev->flags & RADEON_IS_IGP)
3645  return 0;
3646 
3647  if (!(rdev->flags & RADEON_IS_PCIE))
3648  return 0;
3649 
3650  /* x2 cards have a special sequence */
3651  if (ASIC_IS_X2(rdev))
3652  return 0;
3653 
3654  /* FIXME wait for idle */
3655 
3656  link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3657 
3658  switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
3659  case RADEON_PCIE_LC_LINK_WIDTH_X0:
3660  return 0;
3661  case RADEON_PCIE_LC_LINK_WIDTH_X1:
3662  return 1;
3663  case RADEON_PCIE_LC_LINK_WIDTH_X2:
3664  return 2;
3665  case RADEON_PCIE_LC_LINK_WIDTH_X4:
3666  return 4;
3667  case RADEON_PCIE_LC_LINK_WIDTH_X8:
3668  return 8;
3669  case RADEON_PCIE_LC_LINK_WIDTH_X16:
3670  default:
3671  return 16;
3672  }
3673 }
3674 
3675 static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3676 {
3677  u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3678  u16 link_cntl2;
3679  u32 mask;
3680  int ret;
3681 
3682  if (radeon_pcie_gen2 == 0)
3683  return;
3684 
3685  if (rdev->flags & RADEON_IS_IGP)
3686  return;
3687 
3688  if (!(rdev->flags & RADEON_IS_PCIE))
3689  return;
3690 
3691  /* x2 cards have a special sequence */
3692  if (ASIC_IS_X2(rdev))
3693  return;
3694 
3695  /* only RV6xx+ chips are supported */
3696  if (rdev->family <= CHIP_R600)
3697  return;
3698 
3699  ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
3700  if (ret != 0)
3701  return;
3702 
3703  if (!(mask & DRM_PCIE_SPEED_50))
3704  return;
3705 
3706  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3707  if (speed_cntl & LC_CURRENT_DATA_RATE) {
3708  DRM_INFO("PCIE gen 2 link speeds already enabled\n");
3709  return;
3710  }
3711 
3712  DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
3713 
3714  /* 55 nm r6xx asics */
3715  if ((rdev->family == CHIP_RV670) ||
3716  (rdev->family == CHIP_RV620) ||
3717  (rdev->family == CHIP_RV635)) {
3718  /* advertise upconfig capability */
3719  link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3720  link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3721  WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3722  link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3723  if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
3724  lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
3725  link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
3726  LC_RECONFIG_ARC_MISSING_ESCAPE);
3727  link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
3728  WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3729  } else {
3730  link_width_cntl |= LC_UPCONFIGURE_DIS;
3731  WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3732  }
3733  }
3734 
3735  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3736  if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3737  (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3738 
3739  /* 55 nm r6xx asics */
3740  if ((rdev->family == CHIP_RV670) ||
3741  (rdev->family == CHIP_RV620) ||
3742  (rdev->family == CHIP_RV635)) {
3743  WREG32(MM_CFGREGS_CNTL, 0x8);
3744  link_cntl2 = RREG32(0x4088);
3745  WREG32(MM_CFGREGS_CNTL, 0);
3746  /* not supported yet */
3747  if (link_cntl2 & SELECTABLE_DEEMPHASIS)
3748  return;
3749  }
3750 
3751  speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
3752  speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
3753  speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
3754  speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
3755  speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
3756  WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3757 
3758  tmp = RREG32(0x541c);
3759  WREG32(0x541c, tmp | 0x8);
3760  WREG32(MM_CFGREGS_CNTL, 0x8);
3761  link_cntl2 = RREG16(0x4088);
3762  link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
3763  link_cntl2 |= 0x2;
3764  WREG16(0x4088, link_cntl2);
3765  WREG32(MM_CFGREGS_CNTL, 0);
3766 
3767  if ((rdev->family == CHIP_RV670) ||
3768  (rdev->family == CHIP_RV620) ||
3769  (rdev->family == CHIP_RV635)) {
3770  training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
3771  training_cntl &= ~LC_POINT_7_PLUS_EN;
3772  WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
3773  } else {
3774  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3775  speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3776  WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3777  }
3778 
3779  speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3780  speed_cntl |= LC_GEN2_EN_STRAP;
3781  WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3782 
3783  } else {
3784  link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3785  /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3786  if (1)
3787  link_width_cntl |= LC_UPCONFIGURE_DIS;
3788  else
3789  link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3790  WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3791  }
3792 }
3793 
3794 /**
3795  * r600_get_gpu_clock - return GPU clock counter snapshot
3796  *
3797  * @rdev: radeon_device pointer
3798  *
3799  * Fetches a GPU clock counter snapshot (R6xx-cayman).
3800  * Returns the 64 bit clock counter snapshot.
3801  */
3802 uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
3803 {
3804  uint64_t clock;
3805 
3806  mutex_lock(&rdev->gpu_clock_mutex);
3807  WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3808  clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
3809  ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3810  mutex_unlock(&rdev->gpu_clock_mutex);
3811  return clock;
3812 }
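/* Illustrative sketch (hypothetical; this hardware instead latches a
 * coherent snapshot via RLC_CAPTURE_GPU_CLOCK_COUNT, which is why the mutex
 * above serializes whole capture-and-read sequences): reading a free-running
 * 64-bit counter in two 32-bit halves would otherwise need a carry guard
 * such as:
 */
static uint64_t wide_counter_read_sketch(struct radeon_device *rdev)
{
	uint32_t hi, lo, hi2;

	do {
		hi = RREG32(RLC_GPU_CLOCK_COUNT_MSB);
		lo = RREG32(RLC_GPU_CLOCK_COUNT_LSB);
		hi2 = RREG32(RLC_GPU_CLOCK_COUNT_MSB);
	} while (hi != hi2);	/* retry if the low half wrapped mid-read */

	return ((uint64_t)hi << 32) | lo;
}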