nouveau_mem.c

/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Skeggs <[email protected]>
 *    Roy Spliet <[email protected]>
 */

#include "nouveau_drm.h"
#include "nouveau_pm.h"

#include <subdev/fb.h>

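/* Build the NV40-family memory timing registers (the 0x100220 block, going
 * by the NV_DEBUG output below) from a BIOS timing-table entry.  Everything
 * comes straight from the table fields; only tCWL is taken from the boot
 * configuration by the caller.  The +/-1 adjustments mirror the NV50 path
 * and, as the XXX comment notes, their exact origin is unclear.
 */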
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        /* XXX: I don't trust the -1's and +1's... they must come
         *      from somewhere! */
        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    1 << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
                    (e->tCL + 2 - (t->tCWL - 1));

        t->reg[2] = 0x20200000 |
                    ((t->tCWL - 1) << 24 |
                     e->tRRD << 16 |
                     e->tRCDWR << 8 |
                     e->tRCDRD);

        NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2]);
        return 0;
}

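/* NV50-family equivalent: nine timing registers (printed as the 220/230/240
 * ranges below).  The register layout differs between BIT 'P' table version
 * 1 and 2, and longer timing-table entries carry extra fields (tUNK_18/20/21,
 * tCWL) that are only consumed when present.
 */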
static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct bit_entry P;
        uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

        if (bit_table(dev, 'P', &P))
                return -EINVAL;

        switch (min(len, (u8) 22)) {
        case 22:
                unk21 = e->tUNK_21;
        case 21:
                unk20 = e->tUNK_20;
        case 20:
                if (e->tCWL > 0)
                        t->tCWL = e->tCWL;
        case 19:
                unk18 = e->tUNK_18;
                break;
        }

        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    max(unk18, (u8) 1) << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

        t->reg[2] = ((t->tCWL - 1) << 24 |
                     e->tRRD << 16 |
                     e->tRCDWR << 8 |
                     e->tRCDRD);

        t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

        t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

        t->reg[8] = boot->reg[8] & 0xffffff00;

        if (P.version == 1) {
                t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

                t->reg[3] = (0x14 + e->tCL) << 24 |
                            0x16 << 16 |
                            (e->tCL - 1) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= boot->reg[4] & 0xffff0000;

                t->reg[6] = (0x33 - t->tCWL) << 16 |
                            t->tCWL << 8 |
                            (0x2e + e->tCL - t->tCWL);

                t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

                /* XXX: P.version == 1 only has DDR2 and GDDR3? */
                if (pfb->ram.type == NV_MEM_TYPE_DDR2) {
                        t->reg[5] |= (e->tCL + 3) << 8;
                        t->reg[6] |= (t->tCWL - 2) << 8;
                        t->reg[8] |= (e->tCL - 4);
                } else {
                        t->reg[5] |= (e->tCL + 2) << 8;
                        t->reg[6] |= t->tCWL << 8;
                        t->reg[8] |= (e->tCL - 2);
                }
        } else {
                t->reg[1] |= (5 + e->tCL - (t->tCWL));

                /* XXX: 0xb? 0x30? */
                t->reg[3] = (0x30 + e->tCL) << 24 |
                            (boot->reg[3] & 0x00ff0000) |
                            (0xb + e->tCL) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= (unk20 << 24 | unk21 << 16);

                /* XXX: +6? */
                t->reg[5] |= (t->tCWL + 6) << 8;

                t->reg[6] = (0x5a + e->tCL) << 16 |
                            (6 - e->tCL + t->tCWL) << 8 |
                            (0x50 + e->tCL - t->tCWL);

                tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
                t->reg[7] = (tmp7_3 << 24) |
                            ((tmp7_3 - 6 + e->tCL) << 16) |
                            0x202;
        }

        NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(drm, "          230: %08x %08x %08x %08x\n",
                 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
        NV_DEBUG(drm, "          240: %08x\n", t->reg[8]);
        return 0;
}

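/* NVC0/NVD0 path: five timing registers (printed as the 290/2a0 ranges
 * below).  Several fields are merged with the boot register values so that
 * bits this code does not understand are preserved rather than zeroed.
 */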
static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (e->tCWL > 0)
                t->tCWL = e->tCWL;

        t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
                     e->tRFC << 8 | e->tRC);

        t->reg[1] = (boot->reg[1] & 0xff000000) |
                    (e->tRCDWR & 0x0f) << 20 |
                    (e->tRCDRD & 0x0f) << 14 |
                    (t->tCWL << 7) |
                    (e->tCL & 0x0f);

        t->reg[2] = (boot->reg[2] & 0xff0000ff) |
                    e->tWR << 16 | e->tWTR << 8;

        t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
                    (e->tUNK_21 & 0xf) << 5 |
                    (e->tUNK_13 & 0x1f);

        t->reg[4] = (boot->reg[4] & 0xfff00fff) |
                    (e->tRRD & 0x1f) << 15;

        NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(drm, "          2a0: %08x\n", t->reg[4]);
        return 0;
}

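/* Mode-register (MR) value generation.  Each helper below validates the
 * timing entry against the limits of its memory type and then splices CAS
 * latency, write recovery, drive strength and ODT settings into the boot MR
 * values, leaving all other bits untouched.
 */

/* DDR2: MR0 carries CL and tWR, (E)MR1 carries the two ODT bits. */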
static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
                    struct nouveau_pm_tbl_entry *e, u8 len,
                    struct nouveau_pm_memtiming *boot,
                    struct nouveau_pm_memtiming *t)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        t->drive_strength = 0;
        if (len < 15) {
                t->odt = boot->odt;
        } else {
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
                NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
                NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x100f) |
                   (e->tCL) << 4 |
                   (e->tWR - 1) << 9;
        t->mr[1] = (boot->mr[1] & 0x101fbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5;

        NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
        return 0;
}

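/* DDR3: tWR (in clocks) does not map linearly onto the MR0 write-recovery
 * field, so a lookup table translates it to the MR0 encoding (matching the
 * JEDEC DDR3 table).  CL is encoded relative to CL=4 and split across MR0
 * bits below; tCWL-5 lands in MR2.
 */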
static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
        0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
                    struct nouveau_pm_tbl_entry *e, u8 len,
                    struct nouveau_pm_memtiming *boot,
                    struct nouveau_pm_memtiming *t)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        u8 cl = e->tCL - 4;

        t->drive_strength = 0;
        if (len < 15) {
                t->odt = boot->odt;
        } else {
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
                NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
                NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (e->tCWL < 5) {
                NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
                return -ERANGE;
        }

        t->mr[0] = (boot->mr[0] & 0x180b) |
                   /* CAS */
                   (cl & 0x7) << 4 |
                   (cl & 0x8) >> 1 |
                   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
        t->mr[1] = (boot->mr[1] & 0x101dbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5 |
                   (t->odt & 0x4) << 7;
        t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

        NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
        return 0;
}

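/* GDDR3: both CL and tWR use non-linear MR encodings, hence the two lookup
 * tables.  Drive strength and ODT come from the RAM_FT1 flags when the table
 * entry is long enough, otherwise they are carried over from the boot
 * configuration.
 */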
static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
        0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
        0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
                NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
                NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0xe0b) |
                   /* CAS */
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
        t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
                   (t->odt << 2) |
                   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
        t->mr[2] = boot->mr[2];

        NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
                 t->mr[0], t->mr[1], t->mr[2]);
        return 0;
}

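/* GDDR5: CL and tWR are stored directly in MR0 (offset by 5 and 4
 * respectively), so no lookup tables are needed; drive strength and ODT
 * land in MR1.
 */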
static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x03;
        }

        if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
                NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
                NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x007) |
                   ((e->tCL - 5) << 3) |
                   ((e->tWR - 4) << 8);
        t->mr[1] = (boot->mr[1] & 0x1007f0) |
                   t->drive_strength |
                   (t->odt << 2);

        NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
        return 0;
}

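/* Top-level entry point for building a timing set for a target memory
 * frequency: look the frequency up in the BIOS perf/timing tables, run the
 * per-generation register calculator, then the per-memory-type MR helper,
 * and finally fold in the DLL-off flag from the ramcfg table.  When no
 * usable table entry exists, the boot timings are copied instead.
 */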
int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
                        struct nouveau_pm_memtiming *t)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        struct nouveau_pm *pm = nouveau_pm(dev);
        struct nouveau_pm_memtiming *boot = &pm->boot.timing;
        struct nouveau_pm_tbl_entry *e;
        u8 ver, len, *ptr, *ramcfg;
        int ret;

        ptr = nouveau_perf_timing(dev, freq, &ver, &len);
        if (!ptr || ptr[0] == 0x00) {
                *t = *boot;
                return 0;
        }
        e = (struct nouveau_pm_tbl_entry *)ptr;

        t->tCWL = boot->tCWL;

        switch (device->card_type) {
        case NV_40:
                ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        case NV_50:
                ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        case NV_C0:
        case NV_D0:
                ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        default:
                ret = -ENODEV;
                break;
        }

        switch (pfb->ram.type * !ret) {
        case NV_MEM_TYPE_GDDR3:
                ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_GDDR5:
                ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_DDR2:
                ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_DDR3:
                ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
        if (ramcfg) {
                int dll_off;

                if (ver == 0x00)
                        dll_off = !!(ramcfg[3] & 0x04);
                else
                        dll_off = !!(ramcfg[2] & 0x40);

                switch (pfb->ram.type) {
                case NV_MEM_TYPE_GDDR3:
                        t->mr[1] &= ~0x00000040;
                        t->mr[1] |=  0x00000040 * dll_off;
                        break;
                default:
                        t->mr[1] &= ~0x00000001;
                        t->mr[1] |=  0x00000001 * dll_off;
                        break;
                }
        }

        return ret;
}

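/* Read back the currently programmed timings and mode registers so they can
 * serve as the "boot" reference used above.  The register bases and the
 * number of timing registers depend on the card generation; ODT and drive
 * strength are then decoded from MR1 according to the memory type.
 */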
void
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
{
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        u32 timing_base, timing_regs, mr_base;
        int i;

        if (device->card_type >= 0xC0) {
                timing_base = 0x10f290;
                mr_base = 0x10f300;
        } else {
                timing_base = 0x100220;
                mr_base = 0x1002c0;
        }

        t->id = -1;

        switch (device->card_type) {
        case NV_50:
                timing_regs = 9;
                break;
        case NV_C0:
        case NV_D0:
                timing_regs = 5;
                break;
        case NV_30:
        case NV_40:
                timing_regs = 3;
                break;
        default:
                timing_regs = 0;
                return;
        }
        for (i = 0; i < timing_regs; i++)
                t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));

        t->tCWL = 0;
        if (device->card_type < NV_C0) {
                t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
        } else if (device->card_type <= NV_D0) {
                t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
        }

        t->mr[0] = nv_rd32(device, mr_base);
        t->mr[1] = nv_rd32(device, mr_base + 0x04);
        t->mr[2] = nv_rd32(device, mr_base + 0x20);
        t->mr[3] = nv_rd32(device, mr_base + 0x24);

        t->odt = 0;
        t->drive_strength = 0;

        switch (pfb->ram.type) {
        case NV_MEM_TYPE_DDR3:
                t->odt |= (t->mr[1] & 0x200) >> 7;
        case NV_MEM_TYPE_DDR2:
                t->odt |= (t->mr[1] & 0x04) >> 2 |
                          (t->mr[1] & 0x40) >> 5;
                break;
        case NV_MEM_TYPE_GDDR3:
        case NV_MEM_TYPE_GDDR5:
                t->drive_strength = t->mr[1] & 0x03;
                t->odt = (t->mr[1] & 0x0c) >> 2;
                break;
        default:
                break;
        }
}

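/* Execute a memory reclock through the callbacks in nouveau_mem_exec_func:
 * disable the DLL first if the target configuration requires it, put the
 * DRAM into self-refresh, switch the memory clock, leave self-refresh,
 * write the new mode registers and PFB timings, and finally pulse the
 * MR0 DLL-reset bit to relock the DLL.  The tMRD/tCKSRE/tXS/tDLLK values
 * are fixed, conservative delays passed to exec->wait().
 */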
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
                 struct nouveau_pm_level *perflvl)
{
        struct nouveau_drm *drm = nouveau_drm(exec->dev);
        struct nouveau_device *device = nouveau_dev(exec->dev);
        struct nouveau_fb *pfb = nouveau_fb(device);
        struct nouveau_pm_memtiming *info = &perflvl->timing;
        u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
        u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
        u32 mr1_dlloff;

        switch (pfb->ram.type) {
        case NV_MEM_TYPE_DDR2:
                tDLLK = 2000;
                mr1_dlloff = 0x00000001;
                break;
        case NV_MEM_TYPE_DDR3:
                tDLLK = 12000;
                tCKSRE = 2000;
                tXS = 1000;
                mr1_dlloff = 0x00000001;
                break;
        case NV_MEM_TYPE_GDDR3:
                tDLLK = 40000;
                mr1_dlloff = 0x00000040;
                break;
        default:
                NV_ERROR(drm, "cannot reclock unsupported memtype\n");
                return -ENODEV;
        }

        /* fetch current MRs */
        switch (pfb->ram.type) {
        case NV_MEM_TYPE_GDDR3:
        case NV_MEM_TYPE_DDR3:
                mr[2] = exec->mrg(exec, 2);
        default:
                mr[1] = exec->mrg(exec, 1);
                mr[0] = exec->mrg(exec, 0);
                break;
        }

        /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
        if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
                exec->precharge(exec);
                exec->mrs (exec, 1, mr[1] | mr1_dlloff);
                exec->wait(exec, tMRD);
        }

        /* enter self-refresh mode */
        exec->precharge(exec);
        exec->refresh(exec);
        exec->refresh(exec);
        exec->refresh_auto(exec, false);
        exec->refresh_self(exec, true);
        exec->wait(exec, tCKSRE);

        /* modify input clock frequency */
        exec->clock_set(exec);

        /* exit self-refresh mode */
        exec->wait(exec, tCKSRX);
        exec->precharge(exec);
        exec->refresh_self(exec, false);
        exec->refresh_auto(exec, true);
        exec->wait(exec, tXS);
        exec->wait(exec, tXS);

        /* update MRs */
        if (mr[2] != info->mr[2]) {
                exec->mrs (exec, 2, info->mr[2]);
                exec->wait(exec, tMRD);
        }

        if (mr[1] != info->mr[1]) {
                /* need to keep DLL off until later, at least on GDDR3 */
                exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
                exec->wait(exec, tMRD);
        }

        if (mr[0] != info->mr[0]) {
                exec->mrs (exec, 0, info->mr[0]);
                exec->wait(exec, tMRD);
        }

        /* update PFB timing registers */
        exec->timing_set(exec);

        /* DLL (enable + ) reset */
        if (!(info->mr[1] & mr1_dlloff)) {
                if (mr[1] & mr1_dlloff) {
                        exec->mrs (exec, 1, info->mr[1]);
                        exec->wait(exec, tMRD);
                }
                exec->mrs (exec, 0, info->mr[0] | 0x00000100);
                exec->wait(exec, tMRD);
                exec->mrs (exec, 0, info->mr[0] | 0x00000000);
                exec->wait(exec, tMRD);
                exec->wait(exec, tDLLK);
                if (pfb->ram.type == NV_MEM_TYPE_GDDR3)
                        exec->precharge(exec);
        }

        return 0;
}