Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
qla_nx.c
Go to the documentation of this file.
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c) 2003-2012 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10 #include <linux/ratelimit.h>
11 #include <linux/vmalloc.h>
12 #include <scsi/scsi_tcq.h>
13 
14 #define MASK(n) ((1ULL<<(n))-1)
15 #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
16  ((addr >> 25) & 0x3ff))
17 #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
18  ((addr >> 25) & 0x3ff))
19 #define MS_WIN(addr) (addr & 0x0ffc0000)
20 #define QLA82XX_PCI_MN_2M (0)
21 #define QLA82XX_PCI_MS_2M (0x80000)
22 #define QLA82XX_PCI_OCM0_2M (0xc0000)
23 #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
24 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
25 #define BLOCK_PROTECT_BITS 0x0F
26 
27 /* CRB window related */
28 #define CRB_BLK(off) ((off >> 20) & 0x3f)
29 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
30 #define CRB_WINDOW_2M (0x130060)
31 #define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
32 #define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
33  ((off) & 0xf0000))
34 #define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
35 #define CRB_INDIRECT_2M (0x1e0000UL)
36 
37 #define MAX_CRB_XFORM 60
38 static unsigned long crb_addr_xform[MAX_CRB_XFORM];
40 
41 #define qla82xx_crb_addr_transform(name) \
42  (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
43  QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
44 
/*
 * Populate the crb_addr_xform[] lookup table consumed by
 * qla82xx_decode_crb_addr().
 *
 * NOTE(review): the qla82xx_crb_addr_transform() invocations that fill the
 * table appear to be elided in this view of the file - verify against the
 * original source before relying on this body.
 */
static void qla82xx_crb_addr_transform_setup(void)
{
	/*
	 * Used only in P3 just define it for P2 also.
	 */

}
104 
106  {{{0, 0, 0, 0} } },
107  {{{1, 0x0100000, 0x0102000, 0x120000},
108  {1, 0x0110000, 0x0120000, 0x130000},
109  {1, 0x0120000, 0x0122000, 0x124000},
110  {1, 0x0130000, 0x0132000, 0x126000},
111  {1, 0x0140000, 0x0142000, 0x128000},
112  {1, 0x0150000, 0x0152000, 0x12a000},
113  {1, 0x0160000, 0x0170000, 0x110000},
114  {1, 0x0170000, 0x0172000, 0x12e000},
115  {0, 0x0000000, 0x0000000, 0x000000},
116  {0, 0x0000000, 0x0000000, 0x000000},
117  {0, 0x0000000, 0x0000000, 0x000000},
118  {0, 0x0000000, 0x0000000, 0x000000},
119  {0, 0x0000000, 0x0000000, 0x000000},
120  {0, 0x0000000, 0x0000000, 0x000000},
121  {1, 0x01e0000, 0x01e0800, 0x122000},
122  {0, 0x0000000, 0x0000000, 0x000000} } } ,
123  {{{1, 0x0200000, 0x0210000, 0x180000} } },
124  {{{0, 0, 0, 0} } },
125  {{{1, 0x0400000, 0x0401000, 0x169000} } },
126  {{{1, 0x0500000, 0x0510000, 0x140000} } },
127  {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
128  {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
129  {{{1, 0x0800000, 0x0802000, 0x170000},
130  {0, 0x0000000, 0x0000000, 0x000000},
131  {0, 0x0000000, 0x0000000, 0x000000},
132  {0, 0x0000000, 0x0000000, 0x000000},
133  {0, 0x0000000, 0x0000000, 0x000000},
134  {0, 0x0000000, 0x0000000, 0x000000},
135  {0, 0x0000000, 0x0000000, 0x000000},
136  {0, 0x0000000, 0x0000000, 0x000000},
137  {0, 0x0000000, 0x0000000, 0x000000},
138  {0, 0x0000000, 0x0000000, 0x000000},
139  {0, 0x0000000, 0x0000000, 0x000000},
140  {0, 0x0000000, 0x0000000, 0x000000},
141  {0, 0x0000000, 0x0000000, 0x000000},
142  {0, 0x0000000, 0x0000000, 0x000000},
143  {0, 0x0000000, 0x0000000, 0x000000},
144  {1, 0x08f0000, 0x08f2000, 0x172000} } },
145  {{{1, 0x0900000, 0x0902000, 0x174000},
146  {0, 0x0000000, 0x0000000, 0x000000},
147  {0, 0x0000000, 0x0000000, 0x000000},
148  {0, 0x0000000, 0x0000000, 0x000000},
149  {0, 0x0000000, 0x0000000, 0x000000},
150  {0, 0x0000000, 0x0000000, 0x000000},
151  {0, 0x0000000, 0x0000000, 0x000000},
152  {0, 0x0000000, 0x0000000, 0x000000},
153  {0, 0x0000000, 0x0000000, 0x000000},
154  {0, 0x0000000, 0x0000000, 0x000000},
155  {0, 0x0000000, 0x0000000, 0x000000},
156  {0, 0x0000000, 0x0000000, 0x000000},
157  {0, 0x0000000, 0x0000000, 0x000000},
158  {0, 0x0000000, 0x0000000, 0x000000},
159  {0, 0x0000000, 0x0000000, 0x000000},
160  {1, 0x09f0000, 0x09f2000, 0x176000} } },
161  {{{0, 0x0a00000, 0x0a02000, 0x178000},
162  {0, 0x0000000, 0x0000000, 0x000000},
163  {0, 0x0000000, 0x0000000, 0x000000},
164  {0, 0x0000000, 0x0000000, 0x000000},
165  {0, 0x0000000, 0x0000000, 0x000000},
166  {0, 0x0000000, 0x0000000, 0x000000},
167  {0, 0x0000000, 0x0000000, 0x000000},
168  {0, 0x0000000, 0x0000000, 0x000000},
169  {0, 0x0000000, 0x0000000, 0x000000},
170  {0, 0x0000000, 0x0000000, 0x000000},
171  {0, 0x0000000, 0x0000000, 0x000000},
172  {0, 0x0000000, 0x0000000, 0x000000},
173  {0, 0x0000000, 0x0000000, 0x000000},
174  {0, 0x0000000, 0x0000000, 0x000000},
175  {0, 0x0000000, 0x0000000, 0x000000},
176  {1, 0x0af0000, 0x0af2000, 0x17a000} } },
177  {{{0, 0x0b00000, 0x0b02000, 0x17c000},
178  {0, 0x0000000, 0x0000000, 0x000000},
179  {0, 0x0000000, 0x0000000, 0x000000},
180  {0, 0x0000000, 0x0000000, 0x000000},
181  {0, 0x0000000, 0x0000000, 0x000000},
182  {0, 0x0000000, 0x0000000, 0x000000},
183  {0, 0x0000000, 0x0000000, 0x000000},
184  {0, 0x0000000, 0x0000000, 0x000000},
185  {0, 0x0000000, 0x0000000, 0x000000},
186  {0, 0x0000000, 0x0000000, 0x000000},
187  {0, 0x0000000, 0x0000000, 0x000000},
188  {0, 0x0000000, 0x0000000, 0x000000},
189  {0, 0x0000000, 0x0000000, 0x000000},
190  {0, 0x0000000, 0x0000000, 0x000000},
191  {0, 0x0000000, 0x0000000, 0x000000},
192  {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
193  {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
194  {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
195  {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
196  {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
197  {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
198  {{{1, 0x1100000, 0x1101000, 0x160000} } },
199  {{{1, 0x1200000, 0x1201000, 0x161000} } },
200  {{{1, 0x1300000, 0x1301000, 0x162000} } },
201  {{{1, 0x1400000, 0x1401000, 0x163000} } },
202  {{{1, 0x1500000, 0x1501000, 0x165000} } },
203  {{{1, 0x1600000, 0x1601000, 0x166000} } },
204  {{{0, 0, 0, 0} } },
205  {{{0, 0, 0, 0} } },
206  {{{0, 0, 0, 0} } },
207  {{{0, 0, 0, 0} } },
208  {{{0, 0, 0, 0} } },
209  {{{0, 0, 0, 0} } },
210  {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
211  {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
212  {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
213  {{{0} } },
214  {{{1, 0x2100000, 0x2102000, 0x120000},
215  {1, 0x2110000, 0x2120000, 0x130000},
216  {1, 0x2120000, 0x2122000, 0x124000},
217  {1, 0x2130000, 0x2132000, 0x126000},
218  {1, 0x2140000, 0x2142000, 0x128000},
219  {1, 0x2150000, 0x2152000, 0x12a000},
220  {1, 0x2160000, 0x2170000, 0x110000},
221  {1, 0x2170000, 0x2172000, 0x12e000},
222  {0, 0x0000000, 0x0000000, 0x000000},
223  {0, 0x0000000, 0x0000000, 0x000000},
224  {0, 0x0000000, 0x0000000, 0x000000},
225  {0, 0x0000000, 0x0000000, 0x000000},
226  {0, 0x0000000, 0x0000000, 0x000000},
227  {0, 0x0000000, 0x0000000, 0x000000},
228  {0, 0x0000000, 0x0000000, 0x000000},
229  {0, 0x0000000, 0x0000000, 0x000000} } },
230  {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
231  {{{0} } },
232  {{{0} } },
233  {{{0} } },
234  {{{0} } },
235  {{{0} } },
236  {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
237  {{{1, 0x2900000, 0x2901000, 0x16b000} } },
238  {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
239  {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
240  {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
241  {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
242  {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
243  {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
244  {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
245  {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
246  {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
247  {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
248  {{{0} } },
249  {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
250  {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
251  {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
252  {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
253  {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
254  {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
255  {{{0} } },
256  {{{0} } },
257  {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
258  {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
259  {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
260 };
261 
/*
 * top 12 bits of crb internal address (hub, agent), indexed by CRB block
 * number (see CRB_BLK()/CRB_HI() above).
 *
 * NOTE(review): only zero placeholder entries are visible here; the
 * QLA82XX_HW_CRB_HUB_AGT_ADR_* initializers appear to be elided in this
 * view of the file - verify against the original source.
 */
unsigned qla82xx_crb_hub_agt[64] = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
};
331 
/*
 * Device states: printable names for the QLA82xx device-state values,
 * indexed by the numeric state code read from the IDC registers.
 */
char *q_dev_state[] = {
	"Unknown",
	"Cold",
	"Initializing",
	"Ready",
	"Need Reset",
	"Need Quiescent",
	"Failed",
	"Quiescent",
};
343 
345 {
346  return q_dev_state[dev_state];
347 }
348 
349 /*
350  * In: 'off' is offset from CRB space in 128M pci map
351  * Out: 'off' is 2M pci map addr
352  * side effect: lock crb window
353  */
354 static void
355 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
356 {
357  u32 win_read;
358  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
359 
360  ha->crb_win = CRB_HI(*off);
361  writel(ha->crb_win,
362  (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
363 
364  /* Read back value to make sure write has gone through before trying
365  * to use it.
366  */
367  win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
368  if (win_read != ha->crb_win) {
369  ql_dbg(ql_dbg_p3p, vha, 0xb000,
370  "%s: Written crbwin (0x%x) "
371  "!= Read crbwin (0x%x), off=0x%lx.\n",
372  __func__, ha->crb_win, win_read, *off);
373  }
374  *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
375 }
376 
/*
 * Map a 128M-map CRB offset onto the currently selected CRB window.
 * Returns @off unchanged when it already falls in the active window (or in
 * the QM/direct-access register region); an address outside every known
 * range is logged as a warning and returned as-is.
 */
static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	/* See if we are currently pointing to the region we want to use next */
	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
		/* No need to change window. PCIX and PCIE regs are in both
		 * windows.
		 */
		return off;
	}

	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
		/* We are in first CRB window */
		if (ha->curr_window != 0)
			WARN_ON(1);
		return off;
	}

	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
		/* We are in second CRB window */
		/* NOTE(review): this view of the file appears to elide the
		 * rebase of @off into the window-1 address range here -
		 * verify against the original source.
		 */

		if (ha->curr_window != 1)
			return off;

		/* We are in the QM or direct access
		 * register region - do nothing
		 */
		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
		    (off < QLA82XX_PCI_CAMQM_MAX))
			return off;
	}
	/* strange address given */
	ql_dbg(ql_dbg_p3p, vha, 0xb001,
	    "%s: Warning: unm_nic_pci_set_crbwindow "
	    "called with an unknown address(%llx).\n",
	    QLA2XXX_DRIVER_NAME, off);
	return off;
}
417 
418 static int
419 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
420 {
422 
423  if (*off >= QLA82XX_CRB_MAX)
424  return -1;
425 
426  if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
427  *off = (*off - QLA82XX_PCI_CAMQM) +
429  return 0;
430  }
431 
432  if (*off < QLA82XX_PCI_CRBSPACE)
433  return -1;
434 
435  *off -= QLA82XX_PCI_CRBSPACE;
436 
437  /* Try direct map */
438  m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
439 
440  if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
441  *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
442  return 0;
443  }
444  /* Not in direct map, use crb window */
445  return 1;
446 }
447 
448 #define CRB_WIN_LOCK_TIMEOUT 100000000
449 static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
450 {
451  int done = 0, timeout = 0;
452 
453  while (!done) {
454  /* acquire semaphore3 from PCI HW block */
456  if (done == 1)
457  break;
459  return -1;
460  timeout++;
461  }
463  return 0;
464 }
465 
466 int
468 {
469  unsigned long flags = 0;
470  int rv;
471 
472  rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
473 
474  BUG_ON(rv == -1);
475 
476  if (rv == 1) {
477  write_lock_irqsave(&ha->hw_lock, flags);
478  qla82xx_crb_win_lock(ha);
479  qla82xx_pci_set_crbwindow_2M(ha, &off);
480  }
481 
482  writel(data, (void __iomem *)off);
483 
484  if (rv == 1) {
486  write_unlock_irqrestore(&ha->hw_lock, flags);
487  }
488  return 0;
489 }
490 
491 int
493 {
494  unsigned long flags = 0;
495  int rv;
496  u32 data;
497 
498  rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
499 
500  BUG_ON(rv == -1);
501 
502  if (rv == 1) {
503  write_lock_irqsave(&ha->hw_lock, flags);
504  qla82xx_crb_win_lock(ha);
505  qla82xx_pci_set_crbwindow_2M(ha, &off);
506  }
507  data = RD_REG_DWORD((void __iomem *)off);
508 
509  if (rv == 1) {
511  write_unlock_irqrestore(&ha->hw_lock, flags);
512  }
513  return data;
514 }
515 
516 #define IDC_LOCK_TIMEOUT 100000000
518 {
519  int i;
520  int done = 0, timeout = 0;
521 
522  while (!done) {
523  /* acquire semaphore5 from PCI HW block */
525  if (done == 1)
526  break;
527  if (timeout >= IDC_LOCK_TIMEOUT)
528  return -1;
529 
530  timeout++;
531 
532  /* Yield CPU */
533  if (!in_interrupt())
534  schedule();
535  else {
536  for (i = 0; i < 20; i++)
537  cpu_relax();
538  }
539  }
540 
541  return 0;
542 }
543 
545 {
547 }
548 
549 /* PCI Windowing for DDR regions. */
550 #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
551  (((addr) <= (high)) && ((addr) >= (low)))
552 /*
553  * check memory access boundary.
554  * used by test agent. support ddr access only for now
555  */
556 static unsigned long
557 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
558  unsigned long long addr, int size)
559 {
564  ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
565  return 0;
566  else
567  return 1;
568 }
569 
571 
572 static unsigned long
573 qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
574 {
575  int window;
576  u32 win_read;
577  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
578 
581  /* DDR network side */
582  window = MN_WIN(addr);
583  ha->ddr_mn_window = window;
584  qla82xx_wr_32(ha,
585  ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
586  win_read = qla82xx_rd_32(ha,
588  if ((win_read << 17) != window) {
589  ql_dbg(ql_dbg_p3p, vha, 0xb003,
590  "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
591  __func__, window, win_read);
592  }
593  addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
594  } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
596  unsigned int temp1;
597  if ((addr & 0x00ff800) == 0xff800) {
598  ql_log(ql_log_warn, vha, 0xb004,
599  "%s: QM access not handled.\n", __func__);
600  addr = -1UL;
601  }
602  window = OCM_WIN(addr);
603  ha->ddr_mn_window = window;
604  qla82xx_wr_32(ha,
605  ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
606  win_read = qla82xx_rd_32(ha,
608  temp1 = ((window & 0x1FF) << 7) |
609  ((window & 0x0FFFE0000) >> 17);
610  if (win_read != temp1) {
611  ql_log(ql_log_warn, vha, 0xb005,
612  "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
613  __func__, temp1, win_read);
614  }
615  addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
616 
619  /* QDR network side */
620  window = MS_WIN(addr);
621  ha->qdr_sn_window = window;
622  qla82xx_wr_32(ha,
623  ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
624  win_read = qla82xx_rd_32(ha,
626  if (win_read != window) {
627  ql_log(ql_log_warn, vha, 0xb006,
628  "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
629  __func__, window, win_read);
630  }
631  addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
632  } else {
633  /*
634  * peg gdb frequently accesses memory that doesn't exist,
635  * this limits the chit chat so debugging isn't slowed down.
636  */
637  if ((qla82xx_pci_set_window_warning_count++ < 8) ||
638  (qla82xx_pci_set_window_warning_count%64 == 0)) {
639  ql_log(ql_log_warn, vha, 0xb007,
640  "%s: Warning:%s Unknown address range!.\n",
641  __func__, QLA2XXX_DRIVER_NAME);
642  }
643  addr = -1UL;
644  }
645  return addr;
646 }
647 
648 /* check if address is in the same windows as the previous access */
649 static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
650  unsigned long long addr)
651 {
652  int window;
653  unsigned long long qdr_max;
654 
655  qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
656 
657  /* DDR network side */
660  BUG();
663  return 1;
666  return 1;
667  else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
668  /* QDR network side */
669  window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
670  if (ha->qdr_sn_window == window)
671  return 1;
672  }
673  return 0;
674 }
675 
676 static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
677  u64 off, void *data, int size)
678 {
679  unsigned long flags;
680  void *addr = NULL;
681  int ret = 0;
682  u64 start;
683  uint8_t *mem_ptr = NULL;
684  unsigned long mem_base;
685  unsigned long mem_page;
686  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
687 
688  write_lock_irqsave(&ha->hw_lock, flags);
689 
690  /*
691  * If attempting to access unknown address or straddle hw windows,
692  * do not access.
693  */
694  start = qla82xx_pci_set_window(ha, off);
695  if ((start == -1UL) ||
696  (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
697  write_unlock_irqrestore(&ha->hw_lock, flags);
698  ql_log(ql_log_fatal, vha, 0xb008,
699  "%s out of bound pci memory "
700  "access, offset is 0x%llx.\n",
701  QLA2XXX_DRIVER_NAME, off);
702  return -1;
703  }
704 
705  write_unlock_irqrestore(&ha->hw_lock, flags);
706  mem_base = pci_resource_start(ha->pdev, 0);
707  mem_page = start & PAGE_MASK;
708  /* Map two pages whenever user tries to access addresses in two
709  * consecutive pages.
710  */
711  if (mem_page != ((start + size - 1) & PAGE_MASK))
712  mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
713  else
714  mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
715  if (mem_ptr == 0UL) {
716  *(u8 *)data = 0;
717  return -1;
718  }
719  addr = mem_ptr;
720  addr += start & (PAGE_SIZE - 1);
721  write_lock_irqsave(&ha->hw_lock, flags);
722 
723  switch (size) {
724  case 1:
725  *(u8 *)data = readb(addr);
726  break;
727  case 2:
728  *(u16 *)data = readw(addr);
729  break;
730  case 4:
731  *(u32 *)data = readl(addr);
732  break;
733  case 8:
734  *(u64 *)data = readq(addr);
735  break;
736  default:
737  ret = -1;
738  break;
739  }
740  write_unlock_irqrestore(&ha->hw_lock, flags);
741 
742  if (mem_ptr)
743  iounmap(mem_ptr);
744  return ret;
745 }
746 
747 static int
748 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
749  u64 off, void *data, int size)
750 {
751  unsigned long flags;
752  void *addr = NULL;
753  int ret = 0;
754  u64 start;
755  uint8_t *mem_ptr = NULL;
756  unsigned long mem_base;
757  unsigned long mem_page;
758  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
759 
760  write_lock_irqsave(&ha->hw_lock, flags);
761 
762  /*
763  * If attempting to access unknown address or straddle hw windows,
764  * do not access.
765  */
766  start = qla82xx_pci_set_window(ha, off);
767  if ((start == -1UL) ||
768  (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
769  write_unlock_irqrestore(&ha->hw_lock, flags);
770  ql_log(ql_log_fatal, vha, 0xb009,
771  "%s out of bount memory "
772  "access, offset is 0x%llx.\n",
773  QLA2XXX_DRIVER_NAME, off);
774  return -1;
775  }
776 
777  write_unlock_irqrestore(&ha->hw_lock, flags);
778  mem_base = pci_resource_start(ha->pdev, 0);
779  mem_page = start & PAGE_MASK;
780  /* Map two pages whenever user tries to access addresses in two
781  * consecutive pages.
782  */
783  if (mem_page != ((start + size - 1) & PAGE_MASK))
784  mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
785  else
786  mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
787  if (mem_ptr == 0UL)
788  return -1;
789 
790  addr = mem_ptr;
791  addr += start & (PAGE_SIZE - 1);
792  write_lock_irqsave(&ha->hw_lock, flags);
793 
794  switch (size) {
795  case 1:
796  writeb(*(u8 *)data, addr);
797  break;
798  case 2:
799  writew(*(u16 *)data, addr);
800  break;
801  case 4:
802  writel(*(u32 *)data, addr);
803  break;
804  case 8:
805  writeq(*(u64 *)data, addr);
806  break;
807  default:
808  ret = -1;
809  break;
810  }
811  write_unlock_irqrestore(&ha->hw_lock, flags);
812  if (mem_ptr)
813  iounmap(mem_ptr);
814  return ret;
815 }
816 
817 #define MTU_FUDGE_FACTOR 100
818 static unsigned long
819 qla82xx_decode_crb_addr(unsigned long addr)
820 {
821  int i;
822  unsigned long base_addr, offset, pci_base;
823 
825  qla82xx_crb_addr_transform_setup();
826 
827  pci_base = ADDR_ERROR;
828  base_addr = addr & 0xfff00000;
829  offset = addr & 0x000fffff;
830 
831  for (i = 0; i < MAX_CRB_XFORM; i++) {
832  if (crb_addr_xform[i] == base_addr) {
833  pci_base = i << 20;
834  break;
835  }
836  }
837  if (pci_base == ADDR_ERROR)
838  return pci_base;
839  return pci_base + offset;
840 }
841 
842 static long rom_max_timeout = 100;
843 static long qla82xx_rom_lock_timeout = 100;
844 
845 static int
846 qla82xx_rom_lock(struct qla_hw_data *ha)
847 {
848  int done = 0, timeout = 0;
849 
850  while (!done) {
851  /* acquire semaphore2 from PCI HW block */
853  if (done == 1)
854  break;
855  if (timeout >= qla82xx_rom_lock_timeout)
856  return -1;
857  timeout++;
858  }
860  return 0;
861 }
862 
863 static void
864 qla82xx_rom_unlock(struct qla_hw_data *ha)
865 {
867 }
868 
869 static int
870 qla82xx_wait_rom_busy(struct qla_hw_data *ha)
871 {
872  long timeout = 0;
873  long done = 0 ;
874  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
875 
876  while (done == 0) {
878  done &= 4;
879  timeout++;
880  if (timeout >= rom_max_timeout) {
881  ql_dbg(ql_dbg_p3p, vha, 0xb00a,
882  "%s: Timeout reached waiting for rom busy.\n",
884  return -1;
885  }
886  }
887  return 0;
888 }
889 
890 static int
891 qla82xx_wait_rom_done(struct qla_hw_data *ha)
892 {
893  long timeout = 0;
894  long done = 0 ;
895  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
896 
897  while (done == 0) {
899  done &= 2;
900  timeout++;
901  if (timeout >= rom_max_timeout) {
902  ql_dbg(ql_dbg_p3p, vha, 0xb00b,
903  "%s: Timeout reached waiting for rom done.\n",
905  return -1;
906  }
907  }
908  return 0;
909 }
910 
911 int
913 {
914  uint32_t off_value, rval = 0;
915 
916  WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
917  (off & 0xFFFF0000));
918 
919  /* Read back value to make sure write has gone through */
920  RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
921  off_value = (off & 0x0000FFFF);
922 
923  if (flag)
924  WRT_REG_DWORD((void *)
925  (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
926  data);
927  else
928  rval = RD_REG_DWORD((void *)
929  (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
930 
931  return rval;
932 }
933 
934 static int
935 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
936 {
937  /* Dword reads to flash. */
938  qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
940  (addr & 0x0000FFFF), 0, 0);
941 
942  return 0;
943 }
944 
945 static int
946 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
947 {
948  int ret, loops = 0;
949  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
950 
951  while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
952  udelay(100);
953  schedule();
954  loops++;
955  }
956  if (loops >= 50000) {
957  ql_log(ql_log_fatal, vha, 0x00b9,
958  "Failed to aquire SEM2 lock.\n");
959  return -1;
960  }
961  ret = qla82xx_do_rom_fast_read(ha, addr, valp);
962  qla82xx_rom_unlock(ha);
963  return ret;
964 }
965 
966 static int
967 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
968 {
969  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
971  qla82xx_wait_rom_busy(ha);
972  if (qla82xx_wait_rom_done(ha)) {
973  ql_log(ql_log_warn, vha, 0xb00c,
974  "Error waiting for rom done.\n");
975  return -1;
976  }
978  return 0;
979 }
980 
/*
 * Poll the flash status register until the write-in-progress bit (bit 0)
 * clears, giving up after 50000 polls spaced 10us apart.
 * Returns 0 once the part reports idle, -1 on timeout, or the non-zero
 * error propagated from qla82xx_read_status_reg().
 */
static int
qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
{
	long timeout = 0;
	uint32_t done = 1 ;
	uint32_t val;
	int ret = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	while ((done != 0) && (ret == 0)) {
		ret = qla82xx_read_status_reg(ha, &val);
		done = val & 1; /* bit 0 = write in progress */
		timeout++;
		udelay(10);
		cond_resched();
		if (timeout >= 50000) {
			ql_log(ql_log_warn, vha, 0xb00d,
			    "Timeout reached waiting for write finish.\n");
			return -1;
		}
	}
	return ret;
}
1005 
1006 static int
1007 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
1008 {
1009  uint32_t val;
1010  qla82xx_wait_rom_busy(ha);
1013  qla82xx_wait_rom_busy(ha);
1014  if (qla82xx_wait_rom_done(ha))
1015  return -1;
1016  if (qla82xx_read_status_reg(ha, &val) != 0)
1017  return -1;
1018  if ((val & 2) != 2)
1019  return -1;
1020  return 0;
1021 }
1022 
1023 static int
1024 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1025 {
1026  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1027  if (qla82xx_flash_set_write_enable(ha))
1028  return -1;
1031  if (qla82xx_wait_rom_done(ha)) {
1032  ql_log(ql_log_warn, vha, 0xb00e,
1033  "Error waiting for rom done.\n");
1034  return -1;
1035  }
1036  return qla82xx_flash_wait_write_finish(ha);
1037 }
1038 
1039 static int
1040 qla82xx_write_disable_flash(struct qla_hw_data *ha)
1041 {
1042  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1044  if (qla82xx_wait_rom_done(ha)) {
1045  ql_log(ql_log_warn, vha, 0xb00f,
1046  "Error waiting for rom done.\n");
1047  return -1;
1048  }
1049  return 0;
1050 }
1051 
1052 static int
1053 ql82xx_rom_lock_d(struct qla_hw_data *ha)
1054 {
1055  int loops = 0;
1056  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1057 
1058  while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1059  udelay(100);
1060  cond_resched();
1061  loops++;
1062  }
1063  if (loops >= 50000) {
1064  ql_log(ql_log_warn, vha, 0xb010,
1065  "ROM lock failed.\n");
1066  return -1;
1067  }
1068  return 0;
1069 }
1070 
1071 static int
1072 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1073  uint32_t data)
1074 {
1075  int ret = 0;
1076  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1077 
1078  ret = ql82xx_rom_lock_d(ha);
1079  if (ret < 0) {
1080  ql_log(ql_log_warn, vha, 0xb011,
1081  "ROM lock failed.\n");
1082  return ret;
1083  }
1084 
1085  if (qla82xx_flash_set_write_enable(ha))
1086  goto done_write;
1087 
1089  qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1092  qla82xx_wait_rom_busy(ha);
1093  if (qla82xx_wait_rom_done(ha)) {
1094  ql_log(ql_log_warn, vha, 0xb012,
1095  "Error waiting for rom done.\n");
1096  ret = -1;
1097  goto done_write;
1098  }
1099 
1100  ret = qla82xx_flash_wait_write_finish(ha);
1101 
1102 done_write:
1103  qla82xx_rom_unlock(ha);
1104  return ret;
1105 }
1106 
1107 /* This routine does CRB initialize sequence
1108  * to put the ISP into operational state
1109  */
1110 static int
1111 qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1112 {
1113  int addr, val;
1114  int i ;
1115  struct crb_addr_pair *buf;
1116  unsigned long off;
1117  unsigned offset, n;
1118  struct qla_hw_data *ha = vha->hw;
1119 
1120  struct crb_addr_pair {
1121  long addr;
1122  long data;
1123  };
1124 
1125  /* Halt all the indiviual PEGs and other blocks of the ISP */
1126  qla82xx_rom_lock(ha);
1127 
1128  /* disable all I2Q */
1129  qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
1130  qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
1131  qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
1132  qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
1133  qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
1134  qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
1135 
1136  /* disable all niu interrupts */
1137  qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
1138  /* disable xge rx/tx */
1139  qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
1140  /* disable xg1 rx/tx */
1141  qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
1142  /* disable sideband mac */
1143  qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
1144  /* disable ap0 mac */
1145  qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
1146  /* disable ap1 mac */
1147  qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
1148 
1149  /* halt sre */
1150  val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
1151  qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
1152 
1153  /* halt epg */
1154  qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
1155 
1156  /* halt timers */
1157  qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
1158  qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
1159  qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1160  qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1161  qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1162  qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1163 
1164  /* halt pegs */
1165  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
1166  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
1167  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1168  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1169  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1170  msleep(20);
1171 
1172  /* big hammer */
1173  if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1174  /* don't reset CAM block on reset */
1175  qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1176  else
1177  qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1178  qla82xx_rom_unlock(ha);
1179 
1180  /* Read the signature value from the flash.
1181  * Offset 0: Contain signature (0xcafecafe)
1182  * Offset 4: Offset and number of addr/value pairs
1183  * that present in CRB initialize sequence
1184  */
1185  if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1186  qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1187  ql_log(ql_log_fatal, vha, 0x006e,
1188  "Error Reading crb_init area: n: %08x.\n", n);
1189  return -1;
1190  }
1191 
1192  /* Offset in flash = lower 16 bits
1193  * Number of entries = upper 16 bits
1194  */
1195  offset = n & 0xffffU;
1196  n = (n >> 16) & 0xffffU;
1197 
1198  /* number of addr/value pair should not exceed 1024 entries */
1199  if (n >= 1024) {
1200  ql_log(ql_log_fatal, vha, 0x0071,
1201  "Card flash not initialized:n=0x%x.\n", n);
1202  return -1;
1203  }
1204 
1205  ql_log(ql_log_info, vha, 0x0072,
1206  "%d CRB init values found in ROM.\n", n);
1207 
1208  buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1209  if (buf == NULL) {
1210  ql_log(ql_log_fatal, vha, 0x010c,
1211  "Unable to allocate memory.\n");
1212  return -1;
1213  }
1214 
1215  for (i = 0; i < n; i++) {
1216  if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1217  qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
1218  kfree(buf);
1219  return -1;
1220  }
1221 
1222  buf[i].addr = addr;
1223  buf[i].data = val;
1224  }
1225 
1226  for (i = 0; i < n; i++) {
1227  /* Translate internal CRB initialization
1228  * address to PCI bus address
1229  */
1230  off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1232  /* Not all CRB addr/value pair to be written,
1233  * some of them are skipped
1234  */
1235 
1236  /* skipping cold reboot MAGIC */
1237  if (off == QLA82XX_CAM_RAM(0x1fc))
1238  continue;
1239 
1240  /* do not reset PCI */
1241  if (off == (ROMUSB_GLB + 0xbc))
1242  continue;
1243 
1244  /* skip core clock, so that firmware can increase the clock */
1245  if (off == (ROMUSB_GLB + 0xc8))
1246  continue;
1247 
1248  /* skip the function enable register */
1250  continue;
1251 
1253  continue;
1254 
1255  if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
1256  continue;
1257 
1258  if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
1259  continue;
1260 
1261  if (off == ADDR_ERROR) {
1262  ql_log(ql_log_fatal, vha, 0x0116,
1263  "Unknow addr: 0x%08lx.\n", buf[i].addr);
1264  continue;
1265  }
1266 
1267  qla82xx_wr_32(ha, off, buf[i].data);
1268 
1269  /* ISP requires much bigger delay to settle down,
1270  * else crb_window returns 0xffffffff
1271  */
1272  if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
1273  msleep(1000);
1274 
1275  /* ISP requires millisec delay between
1276  * successive CRB register updation
1277  */
1278  msleep(1);
1279  }
1280 
1281  kfree(buf);
1282 
1283  /* Resetting the data and instruction cache */
1284  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1285  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1286  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1287 
1288  /* Clear all protocol processing engines */
1297  return 0;
1298 }
1299 
/*
 * qla82xx_pci_mem_write_2M - Write a 1/2/4/8 byte datum into ISP82xx
 * DDR (MN) or QDR (MS) memory through the MIU test agent, doing a
 * read-modify-write of the surrounding 16-byte window.
 * NOTE(review): this listing is incomplete — the extraction dropped
 * the QDR address-range check before the first assignment and the
 * MIU_TEST_AGT_WRDATA_UPPER_* / MIU_TA_CTL start lines; do not
 * compile as-is.
 */
1300 static int
1301 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1302  u64 off, void *data, int size)
1303 {
1304  int i, j, ret = 0, loop, sz[2], off0;
1305  int scale, shift_amount, startword;
1306  uint32_t temp;
1307  uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1308 
1309  /*
1310  * If not MN, go check for MS or invalid.
1311  */
/* NOTE(review): the if () condition selecting the QDR window is missing here. */
1313  mem_crb = QLA82XX_CRB_QDR_NET;
1314  else {
1315  mem_crb = QLA82XX_CRB_DDR_NET;
1316  if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1317  return qla82xx_pci_mem_write_direct(ha,
1318  off, data, size);
1319  }
1320 
/* Split the write into the part inside the first 8-byte word and the rest. */
1321  off0 = off & 0x7;
1322  sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1323  sz[1] = size - sz[0];
1324 
1325  off8 = off & 0xfffffff0;
1326  loop = (((off & 0xf) + size - 1) >> 4) + 1;
1327  shift_amount = 4;
1328  scale = 2;
1329  startword = (off & 0xf)/8;
1330 
/* Read back the enclosing 16-byte window(s) so unmodified bytes survive. */
1331  for (i = 0; i < loop; i++) {
1332  if (qla82xx_pci_mem_read_2M(ha, off8 +
1333  (i << shift_amount), &word[i * scale], 8))
1334  return -1;
1335  }
1336 
1337  switch (size) {
1338  case 1:
1339  tmpw = *((uint8_t *)data);
1340  break;
1341  case 2:
1342  tmpw = *((uint16_t *)data);
1343  break;
1344  case 4:
1345  tmpw = *((uint32_t *)data);
1346  break;
1347  case 8:
1348  default:
1349  tmpw = *((uint64_t *)data);
1350  break;
1351  }
1352 
/* Merge the new datum into the cached words at the right byte offset. */
1353  if (sz[0] == 8) {
1354  word[startword] = tmpw;
1355  } else {
1356  word[startword] &=
1357  ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1358  word[startword] |= tmpw << (off0 * 8);
1359  }
1360  if (sz[1] != 0) {
1361  word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1362  word[startword+1] |= tmpw >> (sz[0] * 8);
1363  }
1364 
/* Push the merged words back out through the MIU test agent. */
1365  for (i = 0; i < loop; i++) {
1366  temp = off8 + (i << shift_amount);
1367  qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1368  temp = 0;
1369  qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1370  temp = word[i * scale] & 0xffffffff;
1371  qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1372  temp = (word[i * scale] >> 32) & 0xffffffff;
1373  qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1374  temp = word[i*scale + 1] & 0xffffffff;
1375  qla82xx_wr_32(ha, mem_crb +
/* NOTE(review): the upper-WRDATA register names and the MIU_TA_CTL
 * enable/start assignments are missing from this listing. */
1377  temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1378  qla82xx_wr_32(ha, mem_crb +
1380 
1382  qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1384  qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1385 
/* Poll until the agent clears BUSY, bounded by MAX_CTL_CHECK. */
1386  for (j = 0; j < MAX_CTL_CHECK; j++) {
1387  temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1388  if ((temp & MIU_TA_CTL_BUSY) == 0)
1389  break;
1390  }
1391 
1392  if (j >= MAX_CTL_CHECK) {
1393  if (printk_ratelimit())
1394  dev_err(&ha->pdev->dev,
1395  "failed to write through agent.\n");
1396  ret = -1;
1397  break;
1398  }
1399  }
1400 
1401  return ret;
1402 }
1403 
/*
 * qla82xx_fw_load_from_flash - Copy the bootloader region from flash
 * into adapter memory, 8 bytes at a time, then kick PEG_NET_0.
 * Returns 0 on success, -1 on a flash read failure.
 */
1404 static int
1405 qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1406 {
1407  int i;
1408  long size = 0;
1409  long flashaddr = ha->flt_region_bootload << 2;
1410  long memaddr = BOOTLD_START;
1411  u64 data;
1412  u32 high, low;
1413  size = (IMAGE_START - BOOTLD_START) / 8;
1414 
1415  for (i = 0; i < size; i++) {
1416  if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1417  (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1418  return -1;
1419  }
1420  data = ((u64)high << 32) | low ;
1421  qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1422  flashaddr += 8;
1423  memaddr += 8;
1424 
/* Yield periodically so long copies don't hog the CPU. */
1425  if (i % 0x1000 == 0)
1426  msleep(1);
1427  }
1428  udelay(100);
1429  read_lock(&ha->hw_lock);
1430  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
/* NOTE(review): a second qla82xx_wr_32() (SW reset release) is missing here. */
1432  read_unlock(&ha->hw_lock);
1433  return 0;
1434 }
1435 
/*
 * qla82xx_pci_mem_read_2M - Read a 1/2/4/8 byte datum from ISP82xx
 * DDR/QDR memory through the MIU test agent.
 * NOTE(review): the function-name line and the QDR range check were
 * dropped by the extraction.
 */
1436 int
1438  u64 off, void *data, int size)
1439 {
1440  int i, j = 0, k, start, end, loop, sz[2], off0[2];
1441  int shift_amount;
1442  uint32_t temp;
1443  uint64_t off8, val, mem_crb, word[2] = {0, 0};
1444 
1445  /*
1446  * If not MN, go check for MS or invalid.
1447  */
1448 
1450  mem_crb = QLA82XX_CRB_QDR_NET;
1451  else {
1452  mem_crb = QLA82XX_CRB_DDR_NET;
1453  if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1454  return qla82xx_pci_mem_read_direct(ha,
1455  off, data, size);
1456  }
1457 
1458  off8 = off & 0xfffffff0;
1459  off0[0] = off & 0xf;
1460  sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1461  shift_amount = 4;
1462  loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1463  off0[1] = 0;
1464  sz[1] = size - sz[0];
1465 
1466  for (i = 0; i < loop; i++) {
1467  temp = off8 + (i << shift_amount);
1468  qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1469  temp = 0;
1470  qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1471  temp = MIU_TA_CTL_ENABLE;
1472  qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
/* NOTE(review): the ENABLE|START control value line is missing here. */
1474  qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1475 
1476  for (j = 0; j < MAX_CTL_CHECK; j++) {
1477  temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1478  if ((temp & MIU_TA_CTL_BUSY) == 0)
1479  break;
1480  }
1481 
1482  if (j >= MAX_CTL_CHECK) {
1483  if (printk_ratelimit())
1484  dev_err(&ha->pdev->dev,
1485  "failed to read through agent.\n");
1486  break;
1487  }
1488 
/* Gather the dwords covering the requested bytes into word[i]. */
1489  start = off0[i] >> 2;
1490  end = (off0[i] + sz[i] - 1) >> 2;
1491  for (k = start; k <= end; k++) {
1492  temp = qla82xx_rd_32(ha,
1493  mem_crb + MIU_TEST_AGT_RDDATA(k));
1494  word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1495  }
1496  }
1497 
1498  if (j >= MAX_CTL_CHECK)
1499  return -1;
1500 
/* Re-align the value when the read straddled an 8-byte boundary. */
1501  if ((off0[0] & 7) == 0) {
1502  val = word[0];
1503  } else {
1504  val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1505  ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1506  }
1507 
1508  switch (size) {
1509  case 1:
1510  *(uint8_t *)data = val;
1511  break;
1512  case 2:
1513  *(uint16_t *)data = val;
1514  break;
1515  case 4:
1516  *(uint32_t *)data = val;
1517  break;
1518  case 8:
1519  *(uint64_t *)data = val;
1520  break;
1521  }
1522  return 0;
1523 }
1524 
1525 
1526 static struct qla82xx_uri_table_desc *
1527 qla82xx_get_table_desc(const u8 *unirom, int section)
1528 {
1529  uint32_t i;
1530  struct qla82xx_uri_table_desc *directory =
1531  (struct qla82xx_uri_table_desc *)&unirom[0];
1532  __le32 offset;
1533  __le32 tab_type;
1534  __le32 entries = cpu_to_le32(directory->num_entries);
1535 
1536  for (i = 0; i < entries; i++) {
1537  offset = cpu_to_le32(directory->findex) +
1538  (i * cpu_to_le32(directory->entry_size));
1539  tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
1540 
1541  if (tab_type == section)
1542  return (struct qla82xx_uri_table_desc *)&unirom[offset];
1543  }
1544 
1545  return NULL;
1546 }
1547 
1548 static struct qla82xx_uri_data_desc *
1549 qla82xx_get_data_desc(struct qla_hw_data *ha,
1550  u32 section, u32 idx_offset)
1551 {
1552  const u8 *unirom = ha->hablob->fw->data;
1553  int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
1554  struct qla82xx_uri_table_desc *tab_desc = NULL;
1555  __le32 offset;
1556 
1557  tab_desc = qla82xx_get_table_desc(unirom, section);
1558  if (!tab_desc)
1559  return NULL;
1560 
1561  offset = cpu_to_le32(tab_desc->findex) +
1562  (cpu_to_le32(tab_desc->entry_size) * idx);
1563 
1564  return (struct qla82xx_uri_data_desc *)&unirom[offset];
1565 }
1566 
/*
 * qla82xx_get_bootld_offset - Return a pointer to the bootloader bytes
 * within the firmware blob (URI images carry their own offset).
 * NOTE(review): the second argument line of the get_data_desc() call
 * was dropped by the extraction.
 */
1567 static u8 *
1568 qla82xx_get_bootld_offset(struct qla_hw_data *ha)
1569 {
1570  u32 offset = BOOTLD_START;
1571  struct qla82xx_uri_data_desc *uri_desc = NULL;
1572 
1573  if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1574  uri_desc = qla82xx_get_data_desc(ha,
1576  if (uri_desc)
1577  offset = cpu_to_le32(uri_desc->findex);
1578  }
1579 
1580  return (u8 *)&ha->hablob->fw->data[offset];
1581 }
1582 
/*
 * qla82xx_get_fw_size - Return the firmware image size in bytes,
 * preferring the URI descriptor when present.
 */
1583 static __le32
1584 qla82xx_get_fw_size(struct qla_hw_data *ha)
1585 {
1586  struct qla82xx_uri_data_desc *uri_desc = NULL;
1587 
1588  if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1589  uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1591  if (uri_desc)
1592  return cpu_to_le32(uri_desc->size);
1593  }
1594 
1595  return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
1596 }
1597 
/*
 * qla82xx_get_fw_offs - Return a pointer to the firmware bytes within
 * the blob, preferring the URI descriptor's findex when present.
 */
1598 static u8 *
1599 qla82xx_get_fw_offs(struct qla_hw_data *ha)
1600 {
1601  u32 offset = IMAGE_START;
1602  struct qla82xx_uri_data_desc *uri_desc = NULL;
1603 
1604  if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1605  uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
1607  if (uri_desc)
1608  offset = cpu_to_le32(uri_desc->findex);
1609  }
1610 
1611  return (u8 *)&ha->hablob->fw->data[offset];
1612 }
1613 
1614 /* PCI related functions */
/*
 * Maps a BAR region index to its byte offset: region 0 starts at 0,
 * region 1 starts past the MSI-X table.
 * NOTE(review): the function signature line is missing from this
 * listing — presumably qla82xx_pci_region_offset(pdev, region).
 */
1616 {
1617  unsigned long val = 0;
1618  u32 control;
1619 
1620  switch (region) {
1621  case 0:
1622  val = 0;
1623  break;
1624  case 1:
1625  pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1626  val = control + QLA82XX_MSIX_TBL_SPACE;
1627  break;
1628  }
1629  return val;
1630 }
1631 
1632 
/*
 * qla82xx_iospace_config - Reserve PCI regions and map the ISP82xx
 * register window and doorbell pointers.  Returns 0 or -ENOMEM.
 * NOTE(review): this listing is missing the signature line, the
 * pci_request_regions() call, two error-path cleanup lines and the
 * else-branch doorbell constants.
 */
1633 int
1635 {
1636  uint32_t len = 0;
1637 
1639  ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
1640  "Failed to reserver selected regions.\n");
1641  goto iospace_error_exit;
1642  }
1643 
1644  /* Use MMIO operations for all accesses. */
1645  if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1646  ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
1647  "Region #0 not an MMIO resource, aborting.\n");
1648  goto iospace_error_exit;
1649  }
1650 
1651  len = pci_resource_len(ha->pdev, 0);
1652  ha->nx_pcibase =
1653  (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1654  if (!ha->nx_pcibase) {
1655  ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
1656  "Cannot remap pcibase MMIO, aborting.\n");
1658  goto iospace_error_exit;
1659  }
1660 
1661  /* Mapping of IO base pointer */
1662  ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1663  0xbc000 + (ha->pdev->devfn << 11));
1664 
1665  if (!ql2xdbwr) {
1666  ha->nxdb_wr_ptr =
1667  (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1668  (ha->pdev->devfn << 12)), 4);
1669  if (!ha->nxdb_wr_ptr) {
1670  ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
1671  "Cannot remap MMIO, aborting.\n");
1673  goto iospace_error_exit;
1674  }
1675 
1676  /* Mapping of IO base pointer,
1677  * door bell read and write pointer
1678  */
1679  ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1680  (ha->pdev->devfn * 8);
1681  } else {
1682  ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
1685  }
1686 
1687  ha->max_req_queues = ha->max_rsp_queues = 1;
1688  ha->msix_count = ha->max_rsp_queues + 1;
1689  ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1690  "nx_pci_base=%p iobase=%p "
1691  "max_req_queues=%d msix_count=%d.\n",
1692  (void *)ha->nx_pcibase, ha->iobase,
1693  ha->max_req_queues, ha->msix_count);
1694  ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1695  "nx_pci_base=%p iobase=%p "
1696  "max_req_queues=%d msix_count=%d.\n",
1697  (void *)ha->nx_pcibase, ha->iobase,
1698  ha->max_req_queues, ha->msix_count);
1699  return 0;
1700 
1701 iospace_error_exit:
1702  return -ENOMEM;
1703 }
1704 
1705 /* GS related functions */
1706 
1707 /* Initialization related functions */
1708 
/*
 * PCI configuration for ISP82xx: enable bus mastering and MWI, then
 * record the chip revision.  Always returns 0; the pci_set_mwi()
 * result is intentionally ignored (MWI is best-effort).
 * NOTE(review): the signature line is missing from this listing.
 */
1715 int
1717 {
1718  struct qla_hw_data *ha = vha->hw;
1719  int ret;
1720 
1721  pci_set_master(ha->pdev);
1722  ret = pci_set_mwi(ha->pdev);
1723  ha->chip_revision = ha->pdev->revision;
1724  ql_dbg(ql_dbg_init, vha, 0x0043,
1725  "Chip revision:%d.\n",
1726  ha->chip_revision);
1727  return 0;
1728 }
1729 
/*
 * Chip "reset" for ISP82xx reduces to masking interrupts; the real
 * reset is driven by the IDC state machine elsewhere.
 * NOTE(review): the signature line is missing from this listing.
 */
1736 void
1738 {
1739  struct qla_hw_data *ha = vha->hw;
1740  ha->isp_ops->disable_intrs(ha);
1741 }
1742 
/*
 * Ring configuration: publish request/response queue geometry and DMA
 * addresses in the init control block and zero the ring pointers.
 * NOTE(review): the signature line and two icb version/length setup
 * lines are missing from this listing.
 */
1744 {
1745  struct qla_hw_data *ha = vha->hw;
1746  struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1747  struct init_cb_81xx *icb;
1748  struct req_que *req = ha->req_q_map[0];
1749  struct rsp_que *rsp = ha->rsp_q_map[0];
1750 
1751  /* Setup ring parameters in initialization control block. */
1752  icb = (struct init_cb_81xx *)ha->init_cb;
1755  icb->request_q_length = cpu_to_le16(req->length);
1756  icb->response_q_length = cpu_to_le16(rsp->length);
1757  icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1758  icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1759  icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1760  icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1761 
1762  WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
1763  WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
1764  WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
1765 }
1766 
/*
 * Marks the host offline and disables interrupts.
 * NOTE(review): the signature line and one intermediate call are
 * missing from this listing.
 */
1768 {
1769  struct qla_hw_data *ha = vha->hw;
1770  vha->flags.online = 0;
1772  ha->isp_ops->disable_intrs(ha);
1773 }
1774 
/*
 * qla82xx_fw_load_from_blob - Copy bootloader and firmware from the
 * request_firmware() blob into adapter memory, then signal readiness.
 * Returns 0 on success, -EIO on a memory-write failure.
 * NOTE(review): the CAM RAM magic write and one PEG write were
 * dropped by the extraction.
 */
1775 static int
1776 qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1777 {
1778  u64 *ptr64;
1779  u32 i, flashaddr, size;
1780  __le64 data;
1781 
1782  size = (IMAGE_START - BOOTLD_START) / 8;
1783 
1784  ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
1785  flashaddr = BOOTLD_START;
1786 
1787  for (i = 0; i < size; i++) {
1788  data = cpu_to_le64(ptr64[i]);
1789  if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1790  return -EIO;
1791  flashaddr += 8;
1792  }
1793 
1794  flashaddr = FLASH_ADDR_START;
1795  size = (__force u32)qla82xx_get_fw_size(ha) / 8;
1796  ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
1797 
1798  for (i = 0; i < size; i++) {
1799  data = cpu_to_le64(ptr64[i]);
1800 
1801  if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1802  return -EIO;
1803  flashaddr += 8;
1804  }
1805  udelay(100);
1806 
1807  /* Write a magic value to CAMRAM register
1808  * at a specified offset to indicate
1809  * that all data is written and
1810  * ready for firmware to initialize.
1811  */
1813 
1814  read_lock(&ha->hw_lock);
1815  qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1817  read_unlock(&ha->hw_lock);
1818  return 0;
1819 }
1820 
/*
 * qla82xx_set_product_offset - Find the product-table entry matching
 * this chip revision (and MN flag) in a unified ROM image and cache
 * its offset in ha->file_prd_off.  Returns 0 on match, -1 otherwise.
 * NOTE(review): the table-section argument and two field-offset
 * argument lines are missing from this listing.
 */
1821 static int
1822 qla82xx_set_product_offset(struct qla_hw_data *ha)
1823 {
1824  struct qla82xx_uri_table_desc *ptab_desc = NULL;
1825  const uint8_t *unirom = ha->hablob->fw->data;
1826  uint32_t i;
1827  __le32 entries;
1828  __le32 flags, file_chiprev, offset;
1829  uint8_t chiprev = ha->chip_revision;
1830  /* Hardcoding mn_present flag for P3P */
1831  int mn_present = 0;
1832  uint32_t flagbit;
1833 
1834  ptab_desc = qla82xx_get_table_desc(unirom,
1836  if (!ptab_desc)
1837  return -1;
1838 
1839  entries = cpu_to_le32(ptab_desc->num_entries);
1840 
1841  for (i = 0; i < entries; i++) {
1842  offset = cpu_to_le32(ptab_desc->findex) +
1843  (i * cpu_to_le32(ptab_desc->entry_size));
1844  flags = cpu_to_le32(*((int *)&unirom[offset] +
1846  file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
1848 
1849  flagbit = mn_present ? 1 : 2;
1850 
1851  if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
1852  ha->file_prd_off = offset;
1853  return 0;
1854  }
1855  }
1856  return -1;
1857 }
1858 
/*
 * Validates a loaded firmware image: URI images must yield a product
 * offset, flat images must carry the BDINFO magic; either way the
 * blob must meet the minimum size.  Returns 0 or -EINVAL.
 * NOTE(review): the signature line (taking vha and fw_type) is
 * missing from this listing.
 */
1859 int
1861 {
1862  __le32 val;
1863  uint32_t min_size;
1864  struct qla_hw_data *ha = vha->hw;
1865  const struct firmware *fw = ha->hablob->fw;
1866 
1867  ha->fw_type = fw_type;
1868 
1869  if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
1870  if (qla82xx_set_product_offset(ha))
1871  return -EINVAL;
1872 
1873  min_size = QLA82XX_URI_FW_MIN_SIZE;
1874  } else {
1875  val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
1876  if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
1877  return -EINVAL;
1878 
1879  min_size = QLA82XX_FW_MIN_SIZE;
1880  }
1881 
1882  if (fw->size < min_size)
1883  return -EINVAL;
1884  return 0;
1885 }
1886 
/*
 * qla82xx_check_cmdpeg_state - Poll CRB_CMDPEG_STATE (up to 60 x
 * 500 ms) until the command PEG reports initialized; on timeout log
 * and fail the handshake.
 * NOTE(review): the success/failed case labels and the final failure
 * register writes were dropped by the extraction.
 */
1887 static int
1888 qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1889 {
1890  u32 val = 0;
1891  int retries = 60;
1892  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1893 
1894  do {
1895  read_lock(&ha->hw_lock);
1896  val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1897  read_unlock(&ha->hw_lock);
1898 
1899  switch (val) {
1901  case PHAN_INITIALIZE_ACK:
1902  return QLA_SUCCESS;
1904  break;
1905  default:
1906  break;
1907  }
1908  ql_log(ql_log_info, vha, 0x00a8,
1909  "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
1910  val, retries);
1911 
1912  msleep(500);
1913 
1914  } while (--retries);
1915 
1916  ql_log(ql_log_fatal, vha, 0x00a9,
1917  "Cmd Peg initialization failed: 0x%x.\n", val);
1918 
1920  read_lock(&ha->hw_lock);
1922  read_unlock(&ha->hw_lock);
1923  return QLA_FUNCTION_FAILED;
1924 }
1925 
/*
 * qla82xx_check_rcvpeg_state - Same handshake as the command PEG but
 * against CRB_RCVPEG_STATE.
 * NOTE(review): case labels and the failure write are missing here
 * too; the "initializatin" typo in the log string is in the original.
 */
1926 static int
1927 qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1928 {
1929  u32 val = 0;
1930  int retries = 60;
1931  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1932 
1933  do {
1934  read_lock(&ha->hw_lock);
1935  val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1936  read_unlock(&ha->hw_lock);
1937 
1938  switch (val) {
1940  case PHAN_INITIALIZE_ACK:
1941  return QLA_SUCCESS;
1943  break;
1944  default:
1945  break;
1946  }
1947  ql_log(ql_log_info, vha, 0x00ab,
1948  "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
1949  val, retries);
1950 
1951  msleep(500);
1952 
1953  } while (--retries);
1954 
1955  ql_log(ql_log_fatal, vha, 0x00ac,
1956  "Rcv Peg initializatin failed: 0x%x.\n", val);
1957  read_lock(&ha->hw_lock);
1959  read_unlock(&ha->hw_lock);
1960  return QLA_FUNCTION_FAILED;
1961 }
1962 
1963 /* ISR related functions */
/*
 * Per-function legacy interrupt target mask/status register tables.
 * NOTE(review): the array declarations and entries for functions 0-6
 * were dropped by the extraction; only the F7 entries survive.
 */
1968  ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1969 };
1970 
1975  ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1976 };
1977 
1978 static struct qla82xx_legacy_intr_set legacy_intr[] = \
1979  QLA82XX_LEGACY_INTR_CONFIG;
1980 
1981 /*
1982  * qla82xx_mbx_completion() - Process mailbox command completions.
1983  * @ha: SCSI driver HA context
1984  * @mb0: Mailbox0 register
1985  */
1986 static void
1987 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1988 {
1989  uint16_t cnt;
/* NOTE(review): the wptr declaration line is missing from this listing. */
1991  struct qla_hw_data *ha = vha->hw;
1992  struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1993  wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
1994 
1995  /* Load return mailbox registers. */
1996  ha->flags.mbox_int = 1;
1997  ha->mailbox_out[0] = mb0;
1998 
1999  for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2000  ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2001  wptr++;
2002  }
2003 
2004  if (!ha->mcp)
2005  ql_dbg(ql_dbg_async, vha, 0x5053,
2006  "MBX pointer ERROR.\n");
2007 }
2008 
2009 /*
2010  * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
2011  * @irq:
2012  * @dev_id: SCSI driver HA context
2013  * @regs:
2014  *
2015  * Called by system whenever the host adapter generates an interrupt.
2016  *
2017  * Returns handled flag.
2018  */
/* NOTE(review): the irqreturn_t signature and the vha declaration
 * lines were dropped by the extraction, as were the legacy-interrupt
 * ownership check and the status-flush reads. */
2021 {
2023  struct qla_hw_data *ha;
2024  struct rsp_que *rsp;
2025  struct device_reg_82xx __iomem *reg;
2026  int status = 0, status1 = 0;
2027  unsigned long flags;
2028  unsigned long iter;
2029  uint32_t stat = 0;
2030  uint16_t mb[4];
2031 
2032  rsp = (struct rsp_que *) dev_id;
2033  if (!rsp) {
2034  ql_log(ql_log_info, NULL, 0xb053,
2035  "%s: NULL response queue pointer.\n", __func__);
2036  return IRQ_NONE;
2037  }
2038  ha = rsp->hw;
2039 
2040  if (!ha->flags.msi_enabled) {
2041  status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2042  if (!(status & ha->nx_legacy_intr.int_vec_bit))
2043  return IRQ_NONE;
2044 
2047  return IRQ_NONE;
2048  }
2049 
2050  /* clear the interrupt */
2051  qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2052 
2053  /* read twice to ensure write is flushed */
2056 
2057  reg = &ha->iobase->isp82;
2058 
2059  spin_lock_irqsave(&ha->hardware_lock, flags);
2060  vha = pci_get_drvdata(ha->pdev);
2061  for (iter = 1; iter--; ) {
2062 
2063  if (RD_REG_DWORD(&reg->host_int)) {
2064  stat = RD_REG_DWORD(&reg->host_status);
2065 
/* Low byte of host_status selects the event type. */
2066  switch (stat & 0xff) {
2067  case 0x1:
2068  case 0x2:
2069  case 0x10:
2070  case 0x11:
2071  qla82xx_mbx_completion(vha, MSW(stat));
2072  status |= MBX_INTERRUPT;
2073  break;
2074  case 0x12:
2075  mb[0] = MSW(stat);
2076  mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2077  mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2078  mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2079  qla2x00_async_event(vha, rsp, mb);
2080  break;
2081  case 0x13:
/* NOTE(review): the response-queue processing call is missing here. */
2083  break;
2084  default:
2085  ql_dbg(ql_dbg_async, vha, 0x5054,
2086  "Unrecognized interrupt type (%d).\n",
2087  stat & 0xff);
2088  break;
2089  }
2090  }
2091  WRT_REG_DWORD(&reg->host_int, 0);
2092  }
2093  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2094  if (!ha->flags.msi_enabled)
2095  qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2096 
2097 #ifdef QL_DEBUG_LEVEL_17
2098  if (!irq && ha->flags.eeh_busy)
2099  ql_log(ql_log_warn, vha, 0x503d,
2100  "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2101  status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2102 #endif
2103 
/* Wake any mailbox-command waiter once its completion interrupt lands. */
2104  if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2105  (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2106  set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2107  complete(&ha->mbx_intr_comp);
2108  }
2109  return IRQ_HANDLED;
2110 }
2111 
/*
 * Default MSI-X vector handler: same mailbox/async-event dispatch as
 * the legacy handler, without the legacy-interrupt ownership checks.
 * NOTE(review): the signature and vha declaration lines are missing
 * from this listing.
 */
2114 {
2116  struct qla_hw_data *ha;
2117  struct rsp_que *rsp;
2118  struct device_reg_82xx __iomem *reg;
2119  int status = 0;
2120  unsigned long flags;
2121  uint32_t stat = 0;
2122  uint16_t mb[4];
2123 
2124  rsp = (struct rsp_que *) dev_id;
2125  if (!rsp) {
2127  "%s(): NULL response queue pointer.\n", __func__);
2128  return IRQ_NONE;
2129  }
2130  ha = rsp->hw;
2131 
2132  reg = &ha->iobase->isp82;
2133 
2134  spin_lock_irqsave(&ha->hardware_lock, flags);
2135  vha = pci_get_drvdata(ha->pdev);
2136  do {
2137  if (RD_REG_DWORD(&reg->host_int)) {
2138  stat = RD_REG_DWORD(&reg->host_status);
2139 
2140  switch (stat & 0xff) {
2141  case 0x1:
2142  case 0x2:
2143  case 0x10:
2144  case 0x11:
2145  qla82xx_mbx_completion(vha, MSW(stat));
2146  status |= MBX_INTERRUPT;
2147  break;
2148  case 0x12:
2149  mb[0] = MSW(stat);
2150  mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2151  mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2152  mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2153  qla2x00_async_event(vha, rsp, mb);
2154  break;
2155  case 0x13:
2157  break;
2158  default:
2159  ql_dbg(ql_dbg_async, vha, 0x5041,
2160  "Unrecognized interrupt type (%d).\n",
2161  stat & 0xff);
2162  break;
2163  }
2164  }
2165  WRT_REG_DWORD(&reg->host_int, 0);
2166  } while (0);
2167 
2168  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2169 
2170 #ifdef QL_DEBUG_LEVEL_17
2171  if (!irq && ha->flags.eeh_busy)
2172  ql_log(ql_log_warn, vha, 0x5044,
2173  "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2174  status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2175 #endif
2176 
2177  if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2178  (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2179  set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2180  complete(&ha->mbx_intr_comp);
2181  }
2182  return IRQ_HANDLED;
2183 }
2184 
/*
 * MSI-X response-queue vector handler: processes the response queue
 * under the hardware lock and acks host_int.
 * NOTE(review): the signature, vha declaration, log call and the
 * response-queue processing call are missing from this listing.
 */
2187 {
2189  struct qla_hw_data *ha;
2190  struct rsp_que *rsp;
2191  struct device_reg_82xx __iomem *reg;
2192  unsigned long flags;
2193 
2194  rsp = (struct rsp_que *) dev_id;
2195  if (!rsp) {
2197  "%s(): NULL response queue pointer.\n", __func__);
2198  return IRQ_NONE;
2199  }
2200 
2201  ha = rsp->hw;
2202  reg = &ha->iobase->isp82;
2203  spin_lock_irqsave(&ha->hardware_lock, flags);
2204  vha = pci_get_drvdata(ha->pdev);
2206  WRT_REG_DWORD(&reg->host_int, 0);
2207  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2208  return IRQ_HANDLED;
2209 }
2210 
/*
 * qla82xx_poll - Polled-mode interrupt service: one pass of the same
 * dispatch done by the interrupt handlers, used when IRQs are not
 * available.
 * NOTE(review): the vha declaration, the NULL-rsp log call and the
 * case-0x13 body were dropped by the extraction.
 */
2211 void
2212 qla82xx_poll(int irq, void *dev_id)
2213 {
2215  struct qla_hw_data *ha;
2216  struct rsp_que *rsp;
2217  struct device_reg_82xx __iomem *reg;
2218  int status = 0;
2219  uint32_t stat;
2220  uint16_t mb[4];
2221  unsigned long flags;
2222 
2223  rsp = (struct rsp_que *) dev_id;
2224  if (!rsp) {
2226  "%s(): NULL response queue pointer.\n", __func__);
2227  return;
2228  }
2229  ha = rsp->hw;
2230 
2231  reg = &ha->iobase->isp82;
2232  spin_lock_irqsave(&ha->hardware_lock, flags);
2233  vha = pci_get_drvdata(ha->pdev);
2234 
2235  if (RD_REG_DWORD(&reg->host_int)) {
2236  stat = RD_REG_DWORD(&reg->host_status);
2237  switch (stat & 0xff) {
2238  case 0x1:
2239  case 0x2:
2240  case 0x10:
2241  case 0x11:
2242  qla82xx_mbx_completion(vha, MSW(stat));
2243  status |= MBX_INTERRUPT;
2244  break;
2245  case 0x12:
2246  mb[0] = MSW(stat);
2247  mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2248  mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2249  mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2250  qla2x00_async_event(vha, rsp, mb);
2251  break;
2252  case 0x13:
2254  break;
2255  default:
2256  ql_dbg(ql_dbg_p3p, vha, 0xb013,
/* NOTE(review): "stat * 0xff" below looks like a typo for
 * "stat & 0xff" (the mask used by every sibling handler) — the
 * debug message prints a bogus value. Fix when the file is
 * restored to a compilable state. */
2257  "Unrecognized interrupt type (%d).\n",
2258  stat * 0xff);
2259  break;
2260  }
2261  }
2262  WRT_REG_DWORD(&reg->host_int, 0);
2263  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2264 }
2265 
/*
 * Enables adapter interrupts: unmask the legacy target-mask register
 * and record the state in ha->interrupts_on.
 * NOTE(review): the signature line and the mailbox intr-enable call
 * are missing from this listing.
 */
2266 void
2268 {
2269  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2271  spin_lock_irq(&ha->hardware_lock);
2272  qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2273  spin_unlock_irq(&ha->hardware_lock);
2274  ha->interrupts_on = 1;
2275 }
2276 
/*
 * Disables adapter interrupts (masks the legacy target-mask register).
 * NOTE(review): the signature line and the mailbox intr-disable call
 * are missing from this listing.
 */
2277 void
2279 {
2280  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2282  spin_lock_irq(&ha->hardware_lock);
2283  qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2284  spin_unlock_irq(&ha->hardware_lock);
2285  ha->interrupts_on = 0;
2286 }
2287 
/*
 * ISP8021 bring-up bookkeeping: initialize the hw lock, window state
 * and this function's legacy interrupt register set.
 * NOTE(review): the signature line is missing from this listing.
 */
2289 {
2290  struct qla82xx_legacy_intr_set *nx_legacy_intr;
2291 
2292  /* ISP 8021 initializations */
2293  rwlock_init(&ha->hw_lock);
2294  ha->qdr_sn_window = -1;
2295  ha->ddr_mn_window = -1;
2296  ha->curr_window = 255;
2297  ha->portnum = PCI_FUNC(ha->pdev->devfn);
2298  nx_legacy_intr = &legacy_intr[ha->portnum];
2299  ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2300  ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2301  ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2302  ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2303 }
2304 
/*
 * IDC version negotiation: the first active driver publishes
 * QLA82XX_IDC_VERSION; later drivers only warn on a mismatch.
 * NOTE(review): the signature line and the IDC-version read/write
 * calls are missing from this listing.
 */
2305 inline void
2307 {
2308  int idc_ver;
2309  uint32_t drv_active;
2310  struct qla_hw_data *ha = vha->hw;
2311 
2312  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2313  if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) {
2316  ql_log(ql_log_info, vha, 0xb082,
2317  "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
2318  } else {
2320  if (idc_ver != QLA82XX_IDC_VERSION)
2321  ql_log(ql_log_info, vha, 0xb083,
2322  "qla2xxx driver IDC version %d is not compatible "
2323  "with IDC version %d of the other drivers\n",
2324  QLA82XX_IDC_VERSION, idc_ver);
2325  }
2326 }
2327 
/*
 * Marks this function active in the shared DRV_ACTIVE register,
 * initializing the register first if it reads back as all-ones.
 * NOTE(review): the signature line and the initializing write are
 * missing from this listing.
 */
2328 inline void
2330 {
2331  uint32_t drv_active;
2332  struct qla_hw_data *ha = vha->hw;
2333 
2334  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2335 
2336  /* If reset value is all FF's, initialize DRV_ACTIVE */
2337  if (drv_active == 0xffffffff) {
2340  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2341  }
2342  drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2343  qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2344 }
2345 
/*
 * Clears this function's bit in the shared DRV_ACTIVE register.
 * NOTE(review): the signature line is missing from this listing.
 */
2346 inline void
2348 {
2349  uint32_t drv_active;
2350 
2351  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2352  drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
2353  qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2354 }
2355 
2356 static inline int
2357 qla82xx_need_reset(struct qla_hw_data *ha)
2358 {
2359  uint32_t drv_state;
2360  int rval;
2361 
2362  if (ha->flags.nic_core_reset_owner)
2363  return 1;
2364  else {
2365  drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2366  rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2367  return rval;
2368  }
2369 }
2370 
/*
 * qla82xx_set_rst_ready - Advertise this function as reset-ready in
 * the shared DRV_STATE register.
 * NOTE(review): the write that initializes an all-ones DRV_STATE is
 * missing from this listing.
 */
2371 static inline void
2372 qla82xx_set_rst_ready(struct qla_hw_data *ha)
2373 {
2374  uint32_t drv_state;
2375  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2376 
2377  drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2378 
2379  /* If reset value is all FF's, initialize DRV_STATE */
2380  if (drv_state == 0xffffffff) {
2382  drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2383  }
2384  drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2385  ql_dbg(ql_dbg_init, vha, 0x00bb,
2386  "drv_state = 0x%08x.\n", drv_state);
2387  qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2388 }
2389 
2390 static inline void
2391 qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2392 {
2393  uint32_t drv_state;
2394 
2395  drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2396  drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2397  qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2398 }
2399 
2400 static inline void
2401 qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2402 {
2403  uint32_t qsnt_state;
2404 
2405  qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2406  qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2407  qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2408 }
2409 
/*
 * Withdraws this function's quiescent-ready advertisement from the
 * shared DRV_STATE register.
 * NOTE(review): the signature line (taking vha) is missing from this
 * listing.
 */
2410 void
2412 {
2413  struct qla_hw_data *ha = vha->hw;
2414  uint32_t qsnt_state;
2415 
2416  qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2417  qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2418  qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2419 }
2420 
/*
 * qla82xx_load_fw - Initialize CRB from ROM, release QM/CAMRAM from
 * reset, then load firmware from flash (preferred) or from the
 * request_firmware() blob.
 * NOTE(review): the ROMUSB SW-reset read/write pair and the blob
 * validation calls were dropped by the extraction; the trailing
 * "return QLA_SUCCESS;" before fw_load_failed is unreachable in the
 * original as well.
 */
2421 static int
2422 qla82xx_load_fw(scsi_qla_host_t *vha)
2423 {
2424  int rst;
2425  struct fw_blob *blob;
2426  struct qla_hw_data *ha = vha->hw;
2427 
2428  if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2429  ql_log(ql_log_fatal, vha, 0x009f,
2430  "Error during CRB initialization.\n");
2431  return QLA_FUNCTION_FAILED;
2432  }
2433  udelay(500);
2434 
2435  /* Bring QM and CAMRAM out of reset */
2437  rst &= ~((1 << 28) | (1 << 24));
2439 
2440  /*
2441  * FW Load priority:
2442  * 1) Operational firmware residing in flash.
2443  * 2) Firmware via request-firmware interface (.bin file).
2444  */
2445  if (ql2xfwloadbin == 2)
2446  goto try_blob_fw;
2447 
2448  ql_log(ql_log_info, vha, 0x00a0,
2449  "Attempting to load firmware from flash.\n");
2450 
2451  if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2452  ql_log(ql_log_info, vha, 0x00a1,
2453  "Firmware loaded successfully from flash.\n");
2454  return QLA_SUCCESS;
2455  } else {
2456  ql_log(ql_log_warn, vha, 0x0108,
2457  "Firmware load from flash failed.\n");
2458  }
2459 
2460 try_blob_fw:
2461  ql_log(ql_log_info, vha, 0x00a2,
2462  "Attempting to load firmware from blob.\n");
2463 
2464  /* Load firmware blob. */
2465  blob = ha->hablob = qla2x00_request_firmware(vha);
2466  if (!blob) {
2467  ql_log(ql_log_fatal, vha, 0x00a3,
2468  "Firmware image not present.\n");
2469  goto fw_load_failed;
2470  }
2471 
2472  /* Validating firmware blob */
/* NOTE(review): the validate-blob call and its URI retry are missing here. */
2475  /* Fallback to URI format */
2478  ql_log(ql_log_fatal, vha, 0x00a4,
2479  "No valid firmware image found.\n");
2480  return QLA_FUNCTION_FAILED;
2481  }
2482  }
2483 
2484  if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2485  ql_log(ql_log_info, vha, 0x00a5,
2486  "Firmware loaded successfully from binary blob.\n");
2487  return QLA_SUCCESS;
2488  } else {
2489  ql_log(ql_log_fatal, vha, 0x00a6,
2490  "Firmware load failed for binary blob.\n");
2491  blob->fw = NULL;
2492  blob = NULL;
2493  goto fw_load_failed;
2494  }
2495  return QLA_SUCCESS;
2496 
2497 fw_load_failed:
2498  return QLA_FUNCTION_FAILED;
2499 }
2500 
/*
 * Starts firmware: scrub stale registers, load the image, handshake
 * with the command PEG, record the PCIe link width, then wait for the
 * receive PEG.
 * NOTE(review): the signature line, the register-scrub writes and the
 * link-width config read are missing from this listing.
 */
2501 int
2503 {
2504  uint16_t lnk;
2505  struct qla_hw_data *ha = vha->hw;
2506 
2507  /* scrub dma mask expansion register */
2509 
2510  /* Put both the PEG CMD and RCV PEG to default state
2511  * of 0 before resetting the hardware
2512  */
2515 
2516  /* Overwrite stale initialization register values */
2519 
2520  if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2521  ql_log(ql_log_fatal, vha, 0x00a7,
2522  "Error trying to start fw.\n");
2523  return QLA_FUNCTION_FAILED;
2524  }
2525 
2526  /* Handshake with the card before we register the devices. */
2527  if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2528  ql_log(ql_log_fatal, vha, 0x00aa,
2529  "Error during card handshake.\n");
2530  return QLA_FUNCTION_FAILED;
2531  }
2532 
2533  /* Negotiated Link width */
2535  ha->link_width = (lnk >> 4) & 0x3f;
2536 
2537  /* Synchronize with Receive peg */
2538  return qla82xx_check_rcvpeg_state(ha);
2539 }
2540 
2541 static uint32_t *
2542 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2543  uint32_t length)
2544 {
2545  uint32_t i;
2546  uint32_t val;
2547  struct qla_hw_data *ha = vha->hw;
2548 
2549  /* Dword reads to flash. */
2550  for (i = 0; i < length/4; i++, faddr += 4) {
2551  if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2552  ql_log(ql_log_warn, vha, 0x0106,
2553  "Do ROM fast read failed.\n");
2554  goto done_read;
2555  }
2556  dwptr[i] = __constant_cpu_to_le32(val);
2557  }
2558 done_read:
2559  return dwptr;
2560 }
2561 
2562 static int
2563 qla82xx_unprotect_flash(struct qla_hw_data *ha)
2564 {
2565  int ret;
2566  uint32_t val;
2567  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2568 
2569  ret = ql82xx_rom_lock_d(ha);
2570  if (ret < 0) {
2571  ql_log(ql_log_warn, vha, 0xb014,
2572  "ROM Lock failed.\n");
2573  return ret;
2574  }
2575 
2576  ret = qla82xx_read_status_reg(ha, &val);
2577  if (ret < 0)
2578  goto done_unprotect;
2579 
2580  val &= ~(BLOCK_PROTECT_BITS << 2);
2581  ret = qla82xx_write_status_reg(ha, val);
2582  if (ret < 0) {
2583  val |= (BLOCK_PROTECT_BITS << 2);
2584  qla82xx_write_status_reg(ha, val);
2585  }
2586 
2587  if (qla82xx_write_disable_flash(ha) != 0)
2588  ql_log(ql_log_warn, vha, 0xb015,
2589  "Write disable failed.\n");
2590 
2591 done_unprotect:
2592  qla82xx_rom_unlock(ha);
2593  return ret;
2594 }
2595 
2596 static int
2597 qla82xx_protect_flash(struct qla_hw_data *ha)
2598 {
2599  int ret;
2600  uint32_t val;
2601  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2602 
2603  ret = ql82xx_rom_lock_d(ha);
2604  if (ret < 0) {
2605  ql_log(ql_log_warn, vha, 0xb016,
2606  "ROM Lock failed.\n");
2607  return ret;
2608  }
2609 
2610  ret = qla82xx_read_status_reg(ha, &val);
2611  if (ret < 0)
2612  goto done_protect;
2613 
2614  val |= (BLOCK_PROTECT_BITS << 2);
2615  /* LOCK all sectors */
2616  ret = qla82xx_write_status_reg(ha, val);
2617  if (ret < 0)
2618  ql_log(ql_log_warn, vha, 0xb017,
2619  "Write status register failed.\n");
2620 
2621  if (qla82xx_write_disable_flash(ha) != 0)
2622  ql_log(ql_log_warn, vha, 0xb018,
2623  "Write disable failed.\n");
2624 done_protect:
2625  qla82xx_rom_unlock(ha);
2626  return ret;
2627 }
2628 
/*
 * qla82xx_erase_sector - erase the flash sector containing byte address
 * 'addr'.  Serialized via the ROM hardware lock.
 *
 * NOTE(review): source lines 2643-2645 were dropped by the extraction --
 * they presumably wrote the sector-erase instruction and address to the
 * ROM interface registers between write-enable and wait-rom-done; verify
 * against the full qla_nx.c.
 */
2629 static int
2630 qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
2631 {
2632  int ret = 0;
2633  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2634 
2635  ret = ql82xx_rom_lock_d(ha);
2636  if (ret < 0) {
2637  ql_log(ql_log_warn, vha, 0xb019,
2638  "ROM Lock failed.\n");
2639  return ret;
2640  }
2641 
2642  qla82xx_flash_set_write_enable(ha);
2646 
2647  if (qla82xx_wait_rom_done(ha)) {
2648  ql_log(ql_log_warn, vha, 0xb01a,
2649  "Error waiting for rom done.\n");
2650  ret = -1;
2651  goto done;
2652  }
2653  ret = qla82xx_flash_wait_write_finish(ha);
2654 done:
2655  qla82xx_rom_unlock(ha);
2656  return ret;
2657 }
2658 
2659 /*
2660  * Address and length are byte address
2661  */
/*
 * NOTE(review): line 2663 (the signature -- presumably
 * qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, ...))
 * and line 2668 (presumably scsi_unblock_requests(vha->host)) were
 * dropped by the extraction.  Blocks SCSI requests, reads the option ROM
 * region into 'buf', and returns 'buf'.
 */
2662 uint8_t *
2664  uint32_t offset, uint32_t length)
2665 {
2666  scsi_block_requests(vha->host);
2667  qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
2669  return buf;
2670 }
2671 
/*
 * qla82xx_write_flash_data - program 'dwords' dwords at flash byte
 * address 'faddr', erasing each sector as it is first touched.
 *
 * Unprotects the flash, erases on sector boundaries, uses a DMA burst
 * write (qla2x00_load_ram) when a coherent buffer could be allocated,
 * falling back to slow dword-at-a-time writes, then re-protects.
 *
 * NOTE(review): line 2729 (the burst-size argument to qla2x00_load_ram)
 * was dropped by the extraction.  Also note 'page_mode' is initialized
 * to 0 and never set, so the burst path is dead code as written.
 */
2672 static int
2673 qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
2674  uint32_t faddr, uint32_t dwords)
2675 {
2676  int ret;
2677  uint32_t liter;
2678  uint32_t sec_mask, rest_addr;
2679  dma_addr_t optrom_dma;
2680  void *optrom = NULL;
2681  int page_mode = 0;
2682  struct qla_hw_data *ha = vha->hw;
2683 
2684  ret = -1;
2685 
2686  /* Prepare burst-capable write on supported ISPs. */
2687  if (page_mode && !(faddr & 0xfff) &&
2688  dwords > OPTROM_BURST_DWORDS) {
2689  optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
2690  &optrom_dma, GFP_KERNEL);
2691  if (!optrom) {
2692  ql_log(ql_log_warn, vha, 0xb01b,
2693  "Unable to allocate memory "
2694  "for optrom burst write (%x KB).\n",
2695  OPTROM_BURST_SIZE / 1024);
2696  }
2697  }
2698 
2699  rest_addr = ha->fdt_block_size - 1;
2700  sec_mask = ~rest_addr;
2701 
2702  ret = qla82xx_unprotect_flash(ha);
2703  if (ret) {
2704  ql_log(ql_log_warn, vha, 0xb01c,
2705  "Unable to unprotect flash for update.\n");
2706  goto write_done;
2707  }
2708 
2709  for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
2710  /* Are we at the beginning of a sector? */
2711  if ((faddr & rest_addr) == 0) {
2712 
2713  ret = qla82xx_erase_sector(ha, faddr);
2714  if (ret) {
2715  ql_log(ql_log_warn, vha, 0xb01d,
2716  "Unable to erase sector: address=%x.\n",
2717  faddr);
2718  break;
2719  }
2720  }
2721 
2722  /* Go with burst-write. */
2723  if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
2724  /* Copy data to DMA'ble buffer. */
2725  memcpy(optrom, dwptr, OPTROM_BURST_SIZE)
2726 
2727  ret = qla2x00_load_ram(vha, optrom_dma,
2728  (ha->flash_data_off | faddr),
2730  if (ret != QLA_SUCCESS) {
2731  ql_log(ql_log_warn, vha, 0xb01e,
2732  "Unable to burst-write optrom segment "
2733  "(%x/%x/%llx).\n", ret,
2734  (ha->flash_data_off | faddr),
2735  (unsigned long long)optrom_dma);
2736  ql_log(ql_log_warn, vha, 0xb01f,
2737  "Reverting to slow-write.\n");
2738 
2739  dma_free_coherent(&ha->pdev->dev,
2740  OPTROM_BURST_SIZE, optrom, optrom_dma);
2741  optrom = NULL;
2742  } else {
2743  liter += OPTROM_BURST_DWORDS - 1;
2744  faddr += OPTROM_BURST_DWORDS - 1;
2745  dwptr += OPTROM_BURST_DWORDS - 1;
2746  continue;
2747  }
2748  }
2749 
2750  ret = qla82xx_write_flash_dword(ha, faddr,
2751  cpu_to_le32(*dwptr));
2752  if (ret) {
2753  ql_dbg(ql_dbg_p3p, vha, 0xb020,
2754  "Unable to program flash address=%x data=%x.\n",
2755  faddr, *dwptr);
2756  break;
2757  }
2758  }
2759 
2760  ret = qla82xx_protect_flash(ha);
2761  if (ret)
2762  ql_log(ql_log_warn, vha, 0xb021,
2763  "Unable to protect flash after update.\n");
2764 write_done:
2765  if (optrom)
2766  dma_free_coherent(&ha->pdev->dev,
2767  OPTROM_BURST_SIZE, optrom, optrom_dma);
2768  return ret;
2769 }
2770 
/*
 * NOTE(review): line 2772 (the signature -- presumably
 * qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, ...))
 * and line 2781 (presumably scsi_unblock_requests(vha->host)) were
 * dropped by the extraction.  Blocks SCSI requests, writes 'length'
 * bytes from 'buf' to flash, and maps the driver-level result onto
 * QLA_SUCCESS / QLA_FUNCTION_FAILED.
 */
2771 int
2773 {
2774  int rval;
2775 
2776  /* Suspend HBA. */
2777  scsi_block_requests(vha->host);
2778  rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
2779  length >> 2);
2781 
2782  /* Convert return ISP82xx to generic */
2783  if (rval)
2784  rval = QLA_FUNCTION_FAILED;
2785  else
2786  rval = QLA_SUCCESS;
2787  return rval;
2788 }
2790 
/*
 * NOTE(review): line 2792 (the signature -- presumably
 * qla82xx_start_iocbs(scsi_qla_host_t *vha)) was dropped by the
 * extraction.  Advances the request-queue ring index (wrapping at
 * req->length) and rings the ISP82xx doorbell; when ql2xdbwr is not set
 * it spins re-writing the doorbell until the chip's read pointer echoes
 * the value back.  Note 'reg' is computed but never used here.
 */
2791 void
2793 {
2794  struct qla_hw_data *ha = vha->hw;
2795  struct req_que *req = ha->req_q_map[0];
2796  struct device_reg_82xx __iomem *reg;
2797  uint32_t dbval;
2798 
2799  /* Adjust ring index. */
2800  req->ring_index++;
2801  if (req->ring_index == req->length) {
2802  req->ring_index = 0;
2803  req->ring_ptr = req->ring;
2804  } else
2805  req->ring_ptr++;
2806 
2807  reg = &ha->iobase->isp82;
2808  dbval = 0x04 | (ha->portnum << 5);
2809 
2810  dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2811  if (ql2xdbwr)
2812  qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2813  else {
2814  WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
2815  wmb();
2816  while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2817  WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
2818  dbval);
2819  wmb();
2820  }
2821  }
2822 }
2823 
/*
 * NOTE(review): line 2824 (the signature -- presumably
 * qla82xx_rom_lock_recovery(struct qla_hw_data *ha)) was dropped by the
 * extraction.  Force-releases the ROM hardware semaphore: attempt to
 * acquire it (logging if someone else appears to hold it), then
 * unconditionally unlock.
 */
2825 {
2826  scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2827 
2828  if (qla82xx_rom_lock(ha))
2829  /* Someone else is holding the lock. */
2830  ql_log(ql_log_info, vha, 0xb022,
2831  "Resetting rom_lock.\n");
2832 
2833  /*
2834  * Either we got the lock, or someone
2835  * else died while holding it.
2836  * In either case, unlock.
2837  */
2838  qla82xx_rom_unlock(ha);
2839 }
2840 
2841 /*
2842  * qla82xx_device_bootstrap
2843  * Initialize device, set DEV_READY, start fw
2844  *
2845  * Note:
2846  * IDC lock must be held upon entry
2847  *
2848  * Return:
2849  * Success : 0
2850  * Failed : 1
2851  */
/*
 * Samples the PEG alive counter ten times (200ms apart) to decide
 * whether firmware is already running; if stuck (or a reset is needed),
 * moves IDC state to INITIALIZING, starts firmware, and finally marks
 * DEV_READY.
 *
 * NOTE(review): the extraction dropped lines 2868-2869, 2873 (the
 * counter re-read into 'count'), 2881, 2887, 2900, 2909-2910, 2917 --
 * the ROM-lock recovery and IDC DEV_STATE writes; verify against the
 * full qla_nx.c.
 */
2852 static int
2853 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
2854 {
2855  int rval = QLA_SUCCESS;
2856  int i, timeout;
2857  uint32_t old_count, count;
2858  struct qla_hw_data *ha = vha->hw;
2859  int need_reset = 0, peg_stuck = 1;
2860 
2861  need_reset = qla82xx_need_reset(ha);
2862 
2863  old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2864 
2865  for (i = 0; i < 10; i++) {
2866  timeout = msleep_interruptible(200);
2867  if (timeout) {
2870  return QLA_FUNCTION_FAILED;
2871  }
2872 
2874  if (count != old_count)
2875  peg_stuck = 0;
2876  }
2877 
2878  if (need_reset) {
2879  /* We are trying to perform a recovery here. */
2880  if (peg_stuck)
2882  goto dev_initialize;
2883  } else {
2884  /* Start of day for this ha context. */
2885  if (peg_stuck) {
2886  /* Either we are the first or recovery in progress. */
2888  goto dev_initialize;
2889  } else
2890  /* Firmware already running. */
2891  goto dev_ready;
2892  }
2893 
2894  return rval;
2895 
2896 dev_initialize:
2897  /* set to DEV_INITIALIZING */
2898  ql_log(ql_log_info, vha, 0x009e,
2899  "HW State: INITIALIZING.\n");
2901 
2902  qla82xx_idc_unlock(ha);
2903  rval = qla82xx_start_firmware(vha);
2904  qla82xx_idc_lock(ha);
2905 
2906  if (rval != QLA_SUCCESS) {
2907  ql_log(ql_log_fatal, vha, 0x00ad,
2908  "HW State: FAILED.\n");
2911  return rval;
2912  }
2913 
2914 dev_ready:
2915  ql_log(ql_log_info, vha, 0x00ae,
2916  "HW State: READY.\n");
2918 
2919  return QLA_SUCCESS;
2920 }
2921 
2922 /*
2923 * qla82xx_need_qsnt_handler
2924 * Code to start quiescence sequence
2925 *
2926 * Note:
2927 * IDC lock must be held upon entry
2928 *
2929 * Return: void
2930 */
2931 
/*
 * Quiesce this function's I/O, set the quiescence-ready bit, then wait
 * up to 30s for every active function to ack (DRV_STATE must match
 * DRV_ACTIVE shifted left one bit).  On timeout, reverts to DEV_READY;
 * otherwise the last acker moves the IDC state to DEV_QUIESCENT.
 *
 * NOTE(review): the extraction dropped lines 2965-2966, 2970, 2973 and
 * 2990 -- the DEV_STATE write-backs and qsnt-ready clear on the timeout
 * path; verify against the full qla_nx.c.
 */
2932 static void
2933 qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2934 {
2935  struct qla_hw_data *ha = vha->hw;
2936  uint32_t dev_state, drv_state, drv_active;
2937  unsigned long reset_timeout;
2938 
2939  if (vha->flags.online) {
2940  /*Block any further I/O and wait for pending cmnds to complete*/
2941  qla2x00_quiesce_io(vha);
2942  }
2943 
2944  /* Set the quiescence ready bit */
2945  qla82xx_set_qsnt_ready(ha);
2946 
2947  /*wait for 30 secs for other functions to ack */
2948  reset_timeout = jiffies + (30 * HZ);
2949 
2950  drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2951  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2952  /* Its 2 that is written when qsnt is acked, moving one bit */
2953  drv_active = drv_active << 0x01;
2954 
2955  while (drv_state != drv_active) {
2956 
2957  if (time_after_eq(jiffies, reset_timeout)) {
2958  /* quiescence timeout, other functions didn't ack
2959  * changing the state to DEV_READY
2960  */
2961  ql_log(ql_log_info, vha, 0xb023,
2962  "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
2963  "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
2964  drv_active, drv_state);
2967  ql_log(ql_log_info, vha, 0xb025,
2968  "HW State: DEV_READY.\n");
2969  qla82xx_idc_unlock(ha);
2971  qla82xx_idc_lock(ha);
2972 
2974  return;
2975  }
2976 
2977  qla82xx_idc_unlock(ha);
2978  msleep(1000);
2979  qla82xx_idc_lock(ha);
2980 
2981  drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2982  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2983  drv_active = drv_active << 0x01;
2984  }
2985  dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2986  /* everyone acked so set the state to DEV_QUIESCENCE */
2987  if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
2988  ql_log(ql_log_info, vha, 0xb026,
2989  "HW State: DEV_QUIESCENT.\n");
2991  }
2992 }
2993 
2994 /*
2995 * qla82xx_wait_for_state_change
2996 * Wait for device state to change from given current state
2997 *
2998 * Note:
2999 * IDC lock must not be held upon entry
3000 *
3001 * Return:
3002 * Changed device state.
3003 */
/*
 * NOTE(review): line 3005 (the signature -- presumably
 * qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state))
 * and line 3008 (the 'dev_state' declaration) were dropped by the
 * extraction.  Polls CRB_DEV_STATE once per second, under the IDC lock,
 * until it differs from 'curr_state', and returns the new state.
 * No timeout: blocks indefinitely if the state never changes.
 */
3004 uint32_t
3006 {
3007  struct qla_hw_data *ha = vha->hw;
3009 
3010  do {
3011  msleep(1000);
3012  qla82xx_idc_lock(ha);
3013  dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3014  qla82xx_idc_unlock(ha);
3015  } while (dev_state == curr_state);
3016 
3017  return dev_state;
3018 }
3019 
/*
 * NOTE(review): line 3021 (the signature -- presumably
 * qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)), line 3030 (the IDC
 * write inside the IS_QLA82XX branch) and lines 3036-3037 were dropped
 * by the extraction.  Marks the board failed: logs, clears the driver's
 * IDC presence (82xx only), sets DFLG_DEV_FAILED so the timer stops,
 * and takes the host offline.
 */
3020 void
3022 {
3023  struct qla_hw_data *ha = vha->hw;
3024 
3025  /* Disable the board */
3026  ql_log(ql_log_fatal, vha, 0x00b8,
3027  "Disabling the board.\n");
3028 
3029  if (IS_QLA82XX(ha)) {
3031  qla82xx_idc_unlock(ha);
3032  }
3033 
3034  /* Set DEV_FAILED flag to disable timer */
3035  vha->device_flags |= DFLG_DEV_FAILED;
3038  vha->flags.online = 0;
3039  vha->flags.init_done = 0;
3040 }
3041 
3042 /*
3043  * qla82xx_need_reset_handler
3044  * Code to start reset sequence
3045  *
3046  * Note:
3047  * IDC lock must be held upon entry
3048  *
3049  * Return:
3050  * Success : 0
3051  * Failed : 1
3052  */
/*
 * Drives the NEED_RESET IDC phase: non-owners simply set their
 * rst-ready bit; the reset owner masks itself out of DRV_ACTIVE and
 * waits (fcoe_reset_timeout seconds) for all other functions to ack,
 * then forces the device to COLD and optionally collects a minidump.
 *
 * NOTE(review): the extraction dropped lines 3064 (presumably the
 * performance-halt of outstanding I/O while online) and 3126 (the
 * CRB_DEV_STATE write to QLA8XXX_DEV_COLD); verify against the full
 * qla_nx.c.
 */
3053 static void
3054 qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3055 {
3056  uint32_t dev_state, drv_state, drv_active;
3057  uint32_t active_mask = 0;
3058  unsigned long reset_timeout;
3059  struct qla_hw_data *ha = vha->hw;
3060  struct req_que *req = ha->req_q_map[0];
3061 
3062  if (vha->flags.online) {
3063  qla82xx_idc_unlock(ha);
3065  ha->isp_ops->get_flash_version(vha, req->ring);
3066  ha->isp_ops->nvram_config(vha);
3067  qla82xx_idc_lock(ha);
3068  }
3069 
3070  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3071  if (!ha->flags.nic_core_reset_owner) {
3072  ql_dbg(ql_dbg_p3p, vha, 0xb028,
3073  "reset_acknowledged by 0x%x\n", ha->portnum);
3074  qla82xx_set_rst_ready(ha);
3075  } else {
3076  active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
3077  drv_active &= active_mask;
3078  ql_dbg(ql_dbg_p3p, vha, 0xb029,
3079  "active_mask: 0x%08x\n", active_mask);
3080  }
3081 
3082  /* wait for 10 seconds for reset ack from all functions */
3083  reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
3084 
3085  drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3086  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3087  dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3088 
3089  ql_dbg(ql_dbg_p3p, vha, 0xb02a,
3090  "drv_state: 0x%08x, drv_active: 0x%08x, "
3091  "dev_state: 0x%08x, active_mask: 0x%08x\n",
3092  drv_state, drv_active, dev_state, active_mask);
3093 
3094  while (drv_state != drv_active &&
3095  dev_state != QLA8XXX_DEV_INITIALIZING) {
3096  if (time_after_eq(jiffies, reset_timeout)) {
3097  ql_log(ql_log_warn, vha, 0x00b5,
3098  "Reset timeout.\n");
3099  break;
3100  }
3101  qla82xx_idc_unlock(ha);
3102  msleep(1000);
3103  qla82xx_idc_lock(ha);
3104  drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3105  drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3106  if (ha->flags.nic_core_reset_owner)
3107  drv_active &= active_mask;
3108  dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3109  }
3110 
3111  ql_dbg(ql_dbg_p3p, vha, 0xb02b,
3112  "drv_state: 0x%08x, drv_active: 0x%08x, "
3113  "dev_state: 0x%08x, active_mask: 0x%08x\n",
3114  drv_state, drv_active, dev_state, active_mask);
3115 
3116  ql_log(ql_log_info, vha, 0x00b6,
3117  "Device state is 0x%x = %s.\n",
3118  dev_state,
3119  dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3120 
3121  /* Force to DEV_COLD unless someone else is starting a reset */
3122  if (dev_state != QLA8XXX_DEV_INITIALIZING &&
3123  dev_state != QLA8XXX_DEV_COLD) {
3124  ql_log(ql_log_info, vha, 0x00b7,
3125  "HW State: COLD/RE-INIT.\n");
3127  qla82xx_set_rst_ready(ha);
3128  if (ql2xmdenable) {
3129  if (qla82xx_md_collect(vha))
3130  ql_log(ql_log_warn, vha, 0xb02c,
3131  "Minidump not collected.\n");
3132  } else
3133  ql_log(ql_log_warn, vha, 0xb04f,
3134  "Minidump disabled.\n");
3135  }
3136 }
3137 
/*
 * NOTE(review): line 3139 (the signature -- presumably
 * qla82xx_check_md_needed(scsi_qla_host_t *vha)) and line 3142 (the
 * declarations of fw_major/minor/subminor_version) were dropped by the
 * extraction.  Re-reads the firmware version and, if minidump is
 * enabled, no dump is pending, and the version changed, reallocates the
 * minidump resources to match the new firmware.
 */
3138 int
3140 {
3141  struct qla_hw_data *ha = vha->hw;
3143  int rval = QLA_SUCCESS;
3144 
3145  fw_major_version = ha->fw_major_version;
3146  fw_minor_version = ha->fw_minor_version;
3147  fw_subminor_version = ha->fw_subminor_version;
3148 
3149  rval = qla2x00_get_fw_version(vha);
3150  if (rval != QLA_SUCCESS)
3151  return rval;
3152 
3153  if (ql2xmdenable) {
3154  if (!ha->fw_dumped) {
3155  if (fw_major_version != ha->fw_major_version ||
3156  fw_minor_version != ha->fw_minor_version ||
3157  fw_subminor_version != ha->fw_subminor_version) {
3158  ql_log(ql_log_info, vha, 0xb02d,
3159  "Firmware version differs "
3160  "Previous version: %d:%d:%d - "
3161  "New version: %d:%d:%d\n",
3162  fw_major_version, fw_minor_version,
3163  fw_subminor_version,
3164  ha->fw_major_version,
3165  ha->fw_minor_version,
3166  ha->fw_subminor_version);
3167  /* Release MiniDump resources */
3168  qla82xx_md_free(vha);
3169  /* ALlocate MiniDump resources */
3170  qla82xx_md_prep(vha);
3171  }
3172  } else
3173  ql_log(ql_log_info, vha, 0xb02e,
3174  "Firmware dump available to retrieve\n");
3175  }
3176  return rval;
3177 }
3178 
3179 
/*
 * NOTE(review): line 3181 (the signature -- presumably
 * qla82xx_check_fw_alive(scsi_qla_host_t *vha)), line 3187 (the
 * heartbeat register argument), 3196, 3199 and 3203 were dropped by the
 * extraction.  Returns 1 when the firmware heartbeat counter has not
 * advanced for 2 consecutive checks (firmware hung), 0 otherwise; a
 * 0xffffffff read is treated as AER/EEH in progress and ignored.
 */
3180 int
3182 {
3183  uint32_t fw_heartbeat_counter;
3184  int status = 0;
3185 
3186  fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3188  /* all 0xff, assume AER/EEH in progress, ignore */
3189  if (fw_heartbeat_counter == 0xffffffff) {
3190  ql_dbg(ql_dbg_timer, vha, 0x6003,
3191  "FW heartbeat counter is 0xffffffff, "
3192  "returning status=%d.\n", status);
3193  return status;
3194  }
3195  if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3197  /* FW not alive after 2 seconds */
3198  if (vha->seconds_since_last_heartbeat == 2) {
3200  status = 1;
3201  }
3202  } else
3204  vha->fw_heartbeat_counter = fw_heartbeat_counter;
3205  if (status)
3206  ql_dbg(ql_dbg_timer, vha, 0x6004,
3207  "Returning status=%d.\n", status);
3208  return status;
3209 }
3210 
3211 /*
3212  * qla82xx_device_state_handler
3213  * Main state handler
3214  *
3215  * Note:
3216  * IDC lock must be held upon entry
3217  *
3218  * Return:
3219  * Success : 0
3220  * Failed : 1
3221  */
/*
 * Main IDC state machine: loops (under the IDC lock) until the device
 * reaches READY, fails, or the fcoe_dev_init_timeout expires,
 * dispatching per-state handlers (bootstrap on COLD, reset/quiescence
 * handlers, 1s polling otherwise).
 *
 * NOTE(review): the extraction dropped line 3223 (the signature --
 * presumably qla82xx_device_state_handler(scsi_qla_host_t *vha)), 3225
 * (the 'dev_state' declaration), 3234-3235 (first-time IDC init), and
 * the case labels at 3276 (DEV_INITIALIZING), 3281 (DEV_NEED_RESET),
 * 3292 (DEV_NEED_QUIESCENT) and 3314; verify against the full qla_nx.c.
 */
3222 int
3224 {
3226  uint32_t old_dev_state;
3227  int rval = QLA_SUCCESS;
3228  unsigned long dev_init_timeout;
3229  struct qla_hw_data *ha = vha->hw;
3230  int loopcount = 0;
3231 
3232  qla82xx_idc_lock(ha);
3233  if (!vha->flags.init_done) {
3236  }
3237 
3238  dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3239  old_dev_state = dev_state;
3240  ql_log(ql_log_info, vha, 0x009b,
3241  "Device state is 0x%x = %s.\n",
3242  dev_state,
3243  dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3244 
3245  /* wait for 30 seconds for device to go ready */
3246  dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
3247 
3248  while (1) {
3249 
3250  if (time_after_eq(jiffies, dev_init_timeout)) {
3251  ql_log(ql_log_fatal, vha, 0x009c,
3252  "Device init failed.\n");
3253  rval = QLA_FUNCTION_FAILED;
3254  break;
3255  }
3256  dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3257  if (old_dev_state != dev_state) {
3258  loopcount = 0;
3259  old_dev_state = dev_state;
3260  }
3261  if (loopcount < 5) {
3262  ql_log(ql_log_info, vha, 0x009d,
3263  "Device state is 0x%x = %s.\n",
3264  dev_state,
3265  dev_state < MAX_STATES ? qdev_state(dev_state) :
3266  "Unknown");
3267  }
3268 
3269  switch (dev_state) {
3270  case QLA8XXX_DEV_READY:
3271  ha->flags.nic_core_reset_owner = 0;
3272  goto rel_lock;
3273  case QLA8XXX_DEV_COLD:
3274  rval = qla82xx_device_bootstrap(vha);
3275  break;
3277  qla82xx_idc_unlock(ha);
3278  msleep(1000);
3279  qla82xx_idc_lock(ha);
3280  break;
3282  if (!ql2xdontresethba)
3283  qla82xx_need_reset_handler(vha);
3284  else {
3285  qla82xx_idc_unlock(ha);
3286  msleep(1000);
3287  qla82xx_idc_lock(ha);
3288  }
3289  dev_init_timeout = jiffies +
3290  (ha->fcoe_dev_init_timeout * HZ);
3291  break;
3293  qla82xx_need_qsnt_handler(vha);
3294  /* Reset timeout value after quiescence handler */
3295  dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
3296  * HZ);
3297  break;
3298  case QLA8XXX_DEV_QUIESCENT:
3299  /* Owner will exit and other will wait for the state
3300  * to get changed
3301  */
3302  if (ha->flags.quiesce_owner)
3303  goto rel_lock;
3304 
3305  qla82xx_idc_unlock(ha);
3306  msleep(1000);
3307  qla82xx_idc_lock(ha);
3308 
3309  /* Reset timeout value after quiescence handler */
3310  dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
3311  * HZ);
3312  break;
3313  case QLA8XXX_DEV_FAILED:
3315  rval = QLA_FUNCTION_FAILED;
3316  goto exit;
3317  default:
3318  qla82xx_idc_unlock(ha);
3319  msleep(1000);
3320  qla82xx_idc_lock(ha);
3321  }
3322  loopcount++;
3323  }
3324 rel_lock:
3325  qla82xx_idc_unlock(ha);
3326 exit:
3327  return rval;
3328 }
3329 
3330 static int qla82xx_check_temp(scsi_qla_host_t *vha)
3331 {
3332  uint32_t temp, temp_state, temp_val;
3333  struct qla_hw_data *ha = vha->hw;
3334 
3335  temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
3336  temp_state = qla82xx_get_temp_state(temp);
3337  temp_val = qla82xx_get_temp_val(temp);
3338 
3339  if (temp_state == QLA82XX_TEMP_PANIC) {
3340  ql_log(ql_log_warn, vha, 0x600e,
3341  "Device temperature %d degrees C exceeds "
3342  " maximum allowed. Hardware has been shut down.\n",
3343  temp_val);
3344  return 1;
3345  } else if (temp_state == QLA82XX_TEMP_WARN) {
3346  ql_log(ql_log_warn, vha, 0x600f,
3347  "Device temperature %d degrees C exceeds "
3348  "operating range. Immediate action needed.\n",
3349  temp_val);
3350  }
3351  return 0;
3352 }
3353 
/*
 * NOTE(review): line 3354 (the signature -- presumably
 * qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)) and line 3363 were
 * dropped by the extraction.  Prematurely completes an in-flight
 * mailbox command so its waiter is released (used when firmware is hung
 * and no mailbox completion will ever arrive).
 */
3355 {
3356  struct qla_hw_data *ha = vha->hw;
3357 
3358  if (ha->flags.mbox_busy) {
3359  ha->flags.mbox_int = 1;
3360  ha->flags.mbox_busy = 0;
3361  ql_log(ql_log_warn, vha, 0x6010,
3362  "Doing premature completion of mbx command.\n");
3364  complete(&ha->mbx_intr_comp);
3365  }
3366 }
3367 
/*
 * Periodic watchdog (runs from the driver timer): checks temperature,
 * IDC state transitions (NEED_RESET / NEED_QUIESCENT / FAILED) and the
 * firmware heartbeat; on a hung firmware it disables pause frames,
 * dumps PEG halt/PC registers and schedules an ISP abort.
 *
 * NOTE(review): the extraction dropped line 3368 (the signature --
 * presumably qla82xx_watchdog(scsi_qla_host_t *vha)) plus many
 * statement lines (3377, 3379, 3384, 3386, 3389, 3391, 3395, 3397,
 * 3403, 3405, 3412, 3429, 3434, 3439) -- the set_bit/dpc-wakeup calls
 * and some register arguments; verify against the full qla_nx.c.
 */
3369 {
3370  uint32_t dev_state, halt_status;
3371  struct qla_hw_data *ha = vha->hw;
3372 
3373  /* don't poll if reset is going on */
3374  if (!ha->flags.nic_core_reset_hdlr_active) {
3375  dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3376  if (qla82xx_check_temp(vha)) {
3378  ha->flags.isp82xx_fw_hung = 1;
3380  } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
3381  !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3382  ql_log(ql_log_warn, vha, 0x6001,
3383  "Adapter reset needed.\n");
3385  } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
3387  ql_log(ql_log_warn, vha, 0x6002,
3388  "Quiescent needed.\n");
3390  } else if (dev_state == QLA8XXX_DEV_FAILED &&
3392  vha->flags.online == 1) {
3393  ql_log(ql_log_warn, vha, 0xb055,
3394  "Adapter state is failed. Offlining.\n");
3396  ha->flags.isp82xx_fw_hung = 1;
3398  } else {
3399  if (qla82xx_check_fw_alive(vha)) {
3400  ql_dbg(ql_dbg_timer, vha, 0x6011,
3401  "disabling pause transmit on port 0 & 1.\n");
3402  qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
3404  halt_status = qla82xx_rd_32(ha,
3406  ql_log(ql_log_info, vha, 0x6005,
3407  "dumping hw/fw registers:.\n "
3408  " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
3409  " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
3410  " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
3411  " PEG_NET_4_PC: 0x%x.\n", halt_status,
3413  qla82xx_rd_32(ha,
3414  QLA82XX_CRB_PEG_NET_0 + 0x3c),
3415  qla82xx_rd_32(ha,
3416  QLA82XX_CRB_PEG_NET_1 + 0x3c),
3417  qla82xx_rd_32(ha,
3418  QLA82XX_CRB_PEG_NET_2 + 0x3c),
3419  qla82xx_rd_32(ha,
3420  QLA82XX_CRB_PEG_NET_3 + 0x3c),
3421  qla82xx_rd_32(ha,
3422  QLA82XX_CRB_PEG_NET_4 + 0x3c));
3423  if (((halt_status & 0x1fffff00) >> 8) == 0x67)
3424  ql_log(ql_log_warn, vha, 0xb052,
3425  "Firmware aborted with "
3426  "error code 0x00006700. Device is "
3427  "being reset.\n");
3428  if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3430  &vha->dpc_flags);
3431  } else {
3432  ql_log(ql_log_info, vha, 0x6006,
3433  "Detect abort needed.\n");
3435  &vha->dpc_flags);
3436  }
3437  ha->flags.isp82xx_fw_hung = 1;
3438  ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
3440  }
3441  }
3442  }
3443 }
3444 
/*
 * NOTE(review): line 3445 (the signature -- presumably
 * qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)) was
 * dropped by the extraction.  Thin wrapper: firmware load on ISP82xx is
 * driven entirely by the IDC state machine.
 */
3446 {
3447  int rval;
3448  rval = qla82xx_device_state_handler(vha);
3449  return rval;
3450 }
3451 
/*
 * NOTE(review): the extraction dropped line 3453 (the signature --
 * presumably qla82xx_set_reset_owner(scsi_qla_host_t *vha)), line 3456
 * (the 'dev_state' declaration) and lines 3462-3463 (the CRB_DEV_STATE
 * write to NEED_RESET).  If the device is READY, this function claims
 * reset ownership for this port; otherwise it only logs the state.
 */
3452 void
3454 {
3455  struct qla_hw_data *ha = vha->hw;
3457 
3458  dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3459  if (dev_state == QLA8XXX_DEV_READY) {
3460  ql_log(ql_log_info, vha, 0xb02f,
3461  "HW State: NEED RESET\n");
3464  ha->flags.nic_core_reset_owner = 1;
3465  ql_dbg(ql_dbg_p3p, vha, 0xb030,
3466  "reset_owner is 0x%x\n", ha->portnum);
3467  } else
3468  ql_log(ql_log_info, vha, 0xb031,
3469  "Device state is 0x%x = %s.\n",
3470  dev_state,
3471  dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3472 }
3473 
3474 /*
3475  * qla82xx_abort_isp
3476  * Resets ISP and aborts all outstanding commands.
3477  *
3478  * Input:
3479  * ha = adapter block pointer.
3480  *
3481  * Returns:
3482  * 0 = success
3483  */
/*
 * Resets the ISP82xx and aborts all outstanding commands by running the
 * IDC state machine, then restarts the ISP on success; on failure it
 * retries (isp_abort_cnt times) or disables the board.
 *
 * NOTE(review): the extraction dropped line 3485 (the signature --
 * presumably qla82xx_abort_isp(scsi_qla_host_t *vha)), line 3498 (the
 * qla82xx_set_reset_owner call under the IDC lock), 3526 (the
 * clear_bit argument line), 3537 and 3541 (isp_abort_cnt reset logic);
 * verify against the full qla_nx.c.
 */
3484 int
3486 {
3487  int rval;
3488  struct qla_hw_data *ha = vha->hw;
3489 
3490  if (vha->device_flags & DFLG_DEV_FAILED) {
3491  ql_log(ql_log_warn, vha, 0x8024,
3492  "Device in failed state, exiting.\n");
3493  return QLA_SUCCESS;
3494  }
3495  ha->flags.nic_core_reset_hdlr_active = 1;
3496 
3497  qla82xx_idc_lock(ha);
3499  qla82xx_idc_unlock(ha);
3500 
3501  rval = qla82xx_device_state_handler(vha);
3502 
3503  qla82xx_idc_lock(ha);
3504  qla82xx_clear_rst_ready(ha);
3505  qla82xx_idc_unlock(ha);
3506 
3507  if (rval == QLA_SUCCESS) {
3508  ha->flags.isp82xx_fw_hung = 0;
3509  ha->flags.nic_core_reset_hdlr_active = 0;
3510  qla82xx_restart_isp(vha);
3511  }
3512 
3513  if (rval) {
3514  vha->flags.online = 1;
3515  if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3516  if (ha->isp_abort_cnt == 0) {
3517  ql_log(ql_log_warn, vha, 0x8027,
3518  "ISP error recover failed - board "
3519  "disabled.\n");
3520  /*
3521  * The next call disables the board
3522  * completely.
3523  */
3524  ha->isp_ops->reset_adapter(vha);
3525  vha->flags.online = 0;
3527  &vha->dpc_flags);
3528  rval = QLA_SUCCESS;
3529  } else { /* schedule another ISP abort */
3530  ha->isp_abort_cnt--;
3531  ql_log(ql_log_warn, vha, 0x8036,
3532  "ISP abort - retry remaining %d.\n",
3533  ha->isp_abort_cnt);
3534  rval = QLA_FUNCTION_FAILED;
3535  }
3536  } else {
3538  ql_dbg(ql_dbg_taskm, vha, 0x8029,
3539  "ISP error recovery - retrying (%d) more times.\n",
3540  ha->isp_abort_cnt);
3542  rval = QLA_FUNCTION_FAILED;
3543  }
3544  }
3545  return rval;
3546 }
3547 
3548 /*
3549  * qla82xx_fcoe_ctx_reset
3550  * Perform a quick reset and aborts all outstanding commands.
3551  * This will only perform an FCoE context reset and avoids a full blown
3552  * chip reset.
3553  *
3554  * Input:
3555  * ha = adapter block pointer.
3556  * is_reset_path = flag for identifying the reset path.
3557  *
3558  * Returns:
3559  * 0 = success
3560  */
/*
 * NOTE(review): the extraction dropped line 3561 (the signature --
 * presumably qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)), line 3567
 * (the abort-all-commands call) and line 3573 (the stop-firmware call).
 * Performs an FCoE context reset without a full chip reset: abort
 * outstanding commands, stop firmware, then restart the ISP to create a
 * fresh FCoE context.
 */
3562 {
3563  int rval = QLA_FUNCTION_FAILED;
3564 
3565  if (vha->flags.online) {
3566  /* Abort all outstanding commands, so as to be requeued later */
3568  }
3569 
3570  /* Stop currently executing firmware.
3571  * This will destroy existing FCoE context at the F/W end.
3572  */
3574 
3575  /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
3576  rval = qla82xx_restart_isp(vha);
3577 
3578  return rval;
3579 }
3580 
3581 /*
3582  * qla2x00_wait_for_fcoe_ctx_reset
3583  * Wait till the FCoE context is reset.
3584  *
3585  * Note:
3586  * Does context switching here.
3587  * Release SPIN_LOCK (if any) before calling this routine.
3588  *
3589  * Return:
3590  * Success (fcoe_ctx reset is done) : 0
3591  * Failed (fcoe_ctx reset not completed within max loop timout ) : 1
3592  */
/*
 * NOTE(review): the extraction dropped line 3593 (the signature --
 * presumably qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)),
 * line 3595 (the 'status' declaration/init), line 3600 (the second
 * dpc-flag test in the while condition) and lines 3603-3604 (the sleep
 * inside the loop).  Waits up to MAX_LOOP_TIMEOUT seconds for the
 * FCOE_CTX_RESET_NEEDED / ABORT_ISP_ACTIVE flags to clear; returns
 * QLA_SUCCESS when both cleared in time.
 */
3594 {
3596  unsigned long wait_reset;
3597 
3598  wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
3599  while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3601  && time_before(jiffies, wait_reset)) {
3602 
3605 
3606  if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
3607  !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3608  status = QLA_SUCCESS;
3609  break;
3610  }
3611  }
3612  ql_dbg(ql_dbg_p3p, vha, 0xb027,
3613  "%s: status=%d.\n", __func__, status);
3614 
3615  return status;
3616 }
3617 
/*
 * Pre-chip-reset cleanup: re-checks whether firmware is really alive,
 * and if it is, aborts every outstanding command via mailbox (dropping
 * and retaking the hardware lock around each abort) and waits for
 * pending commands to drain.
 *
 * NOTE(review): the extraction dropped line 3619 (the signature --
 * presumably qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)) and line
 * 3634 (presumably qla82xx_clear_pending_mbx() when a hang is found).
 */
3618 void
3620 {
3621  int i;
3622  unsigned long flags;
3623  struct qla_hw_data *ha = vha->hw;
3624 
3625  /* Check if 82XX firmware is alive or not
3626  * We may have arrived here from NEED_RESET
3627  * detection only
3628  */
3629  if (!ha->flags.isp82xx_fw_hung) {
3630  for (i = 0; i < 2; i++) {
3631  msleep(1000);
3632  if (qla82xx_check_fw_alive(vha)) {
3633  ha->flags.isp82xx_fw_hung = 1;
3635  break;
3636  }
3637  }
3638  }
3639  ql_dbg(ql_dbg_init, vha, 0x00b0,
3640  "Entered %s fw_hung=%d.\n",
3641  __func__, ha->flags.isp82xx_fw_hung);
3642 
3643  /* Abort all commands gracefully if fw NOT hung */
3644  if (!ha->flags.isp82xx_fw_hung) {
3645  int cnt, que;
3646  srb_t *sp;
3647  struct req_que *req;
3648 
3649  spin_lock_irqsave(&ha->hardware_lock, flags);
3650  for (que = 0; que < ha->max_req_queues; que++) {
3651  req = ha->req_q_map[que];
3652  if (!req)
3653  continue;
3654  for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3655  sp = req->outstanding_cmds[cnt];
3656  if (sp) {
3657  if (!sp->u.scmd.ctx ||
3658  (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
3659  spin_unlock_irqrestore(
3660  &ha->hardware_lock, flags);
3661  if (ha->isp_ops->abort_command(sp)) {
3662  ql_log(ql_log_info, vha,
3663  0x00b1,
3664  "mbx abort failed.\n");
3665  } else {
3666  ql_log(ql_log_info, vha,
3667  0x00b2,
3668  "mbx abort success.\n");
3669  }
3670  spin_lock_irqsave(&ha->hardware_lock, flags);
3671  }
3672  }
3673  }
3674  }
3675  spin_unlock_irqrestore(&ha->hardware_lock, flags);
3676 
3677  /* Wait for pending cmds (physical and virtual) to complete */
      /*
       * NOTE(review): operator-precedence bug -- '!' binds to the call
       * before '==', so this reads '(!call) == QLA_SUCCESS', not
       * '!(call == QLA_SUCCESS)'.  The "Done wait" message therefore
       * fires on the opposite condition from the one intended.
       */
3678  if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3679  WAIT_HOST) == QLA_SUCCESS) {
3680  ql_dbg(ql_dbg_init, vha, 0x00b3,
3681  "Done wait for "
3682  "pending commands.\n");
3683  }
3684  }
3685 }
3686 
3687 /* Minidump related functions */
/*
 * Minidump CRB "control" entry interpreter: walks crb_entry->op_count
 * opcodes, each a bitmask of WR / RW / AND / OR / POLL / RDSTATE /
 * WRSTATE / MDSTATE operations applied at crb_addr (advancing by
 * addr_stride per iteration), using tmplt_hdr->saved_state_array as
 * scratch state.  Opcode bits are consumed (cleared) in a fixed order,
 * so combinations such as AND|OR are modify-then-write sequences.
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED on a POLL timeout.
 * Code left byte-identical: the exact opcode ordering and register
 * side effects make a restyle too risky to verify in isolation.
 */
3688 static int
3689 qla82xx_minidump_process_control(scsi_qla_host_t *vha,
3690  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
3691 {
3692  struct qla_hw_data *ha = vha->hw;
3693  struct qla82xx_md_entry_crb *crb_entry;
3694  uint32_t read_value, opcode, poll_time;
3695  uint32_t addr, index, crb_addr;
3696  unsigned long wtime;
3697  struct qla82xx_md_template_hdr *tmplt_hdr;
3698  uint32_t rval = QLA_SUCCESS;
3699  int i;
3700 
3701  tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
3702  crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
3703  crb_addr = crb_entry->addr;
3704 
3705  for (i = 0; i < crb_entry->op_count; i++) {
3706  opcode = crb_entry->crb_ctrl.opcode;
3707  if (opcode & QLA82XX_DBG_OPCODE_WR) {
3708  qla82xx_md_rw_32(ha, crb_addr,
3709  crb_entry->value_1, 1);
3710  opcode &= ~QLA82XX_DBG_OPCODE_WR;
3711  }
3712 
3713  if (opcode & QLA82XX_DBG_OPCODE_RW) {
3714  read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
3715  qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
3716  opcode &= ~QLA82XX_DBG_OPCODE_RW;
3717  }
3718 
3719  if (opcode & QLA82XX_DBG_OPCODE_AND) {
3720  read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
3721  read_value &= crb_entry->value_2;
3722  opcode &= ~QLA82XX_DBG_OPCODE_AND;
3723  if (opcode & QLA82XX_DBG_OPCODE_OR) {
3724  read_value |= crb_entry->value_3;
3725  opcode &= ~QLA82XX_DBG_OPCODE_OR;
3726  }
3727  qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
3728  }
3729 
3730  if (opcode & QLA82XX_DBG_OPCODE_OR) {
3731  read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
3732  read_value |= crb_entry->value_3;
3733  qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
3734  opcode &= ~QLA82XX_DBG_OPCODE_OR;
3735  }
3736 
3737  if (opcode & QLA82XX_DBG_OPCODE_POLL) {
3738  poll_time = crb_entry->crb_strd.poll_timeout;
3739  wtime = jiffies + poll_time;
3740  read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
3741 
3742  do {
3743  if ((read_value & crb_entry->value_2)
3744  == crb_entry->value_1)
3745  break;
3746  else if (time_after_eq(jiffies, wtime)) {
3747  /* capturing dump failed */
3748  rval = QLA_FUNCTION_FAILED;
3749  break;
3750  } else
3751  read_value = qla82xx_md_rw_32(ha,
3752  crb_addr, 0, 0);
3753  } while (1);
3754  opcode &= ~QLA82XX_DBG_OPCODE_POLL;
3755  }
3756 
3757  if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
3758  if (crb_entry->crb_strd.state_index_a) {
3759  index = crb_entry->crb_strd.state_index_a;
3760  addr = tmplt_hdr->saved_state_array[index];
3761  } else
3762  addr = crb_addr;
3763 
3764  read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
3765  index = crb_entry->crb_ctrl.state_index_v;
3766  tmplt_hdr->saved_state_array[index] = read_value;
3767  opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
3768  }
3769 
3770  if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
3771  if (crb_entry->crb_strd.state_index_a) {
3772  index = crb_entry->crb_strd.state_index_a;
3773  addr = tmplt_hdr->saved_state_array[index];
3774  } else
3775  addr = crb_addr;
3776 
3777  if (crb_entry->crb_ctrl.state_index_v) {
3778  index = crb_entry->crb_ctrl.state_index_v;
3779  read_value =
3780  tmplt_hdr->saved_state_array[index];
3781  } else
3782  read_value = crb_entry->value_1;
3783 
3784  qla82xx_md_rw_32(ha, addr, read_value, 1);
3785  opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
3786  }
3787 
3788  if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
3789  index = crb_entry->crb_ctrl.state_index_v;
3790  read_value = tmplt_hdr->saved_state_array[index];
3791  read_value <<= crb_entry->crb_ctrl.shl;
3792  read_value >>= crb_entry->crb_ctrl.shr;
3793  if (crb_entry->value_2)
3794  read_value &= crb_entry->value_2;
3795  read_value |= crb_entry->value_3;
3796  read_value += crb_entry->value_1;
3797  tmplt_hdr->saved_state_array[index] = read_value;
3798  opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
3799  }
3800  crb_addr += crb_entry->crb_strd.addr_stride;
3801  }
3802  return rval;
3803 }
3804 
3805 static void
3806 qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
3807  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
3808 {
3809  struct qla_hw_data *ha = vha->hw;
3810  uint32_t r_addr, r_stride, loop_cnt, i, r_value;
3811  struct qla82xx_md_entry_rdocm *ocm_hdr;
3812  uint32_t *data_ptr = *d_ptr;
3813 
3814  ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
3815  r_addr = ocm_hdr->read_addr;
3816  r_stride = ocm_hdr->read_addr_stride;
3817  loop_cnt = ocm_hdr->op_count;
3818 
3819  for (i = 0; i < loop_cnt; i++) {
3820  r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
3821  *data_ptr++ = cpu_to_le32(r_value);
3822  r_addr += r_stride;
3823  }
3824  *d_ptr = data_ptr;
3825 }
3826 
3827 static void
3828 qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
3829  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
3830 {
3831  struct qla_hw_data *ha = vha->hw;
3832  uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
3833  struct qla82xx_md_entry_mux *mux_hdr;
3834  uint32_t *data_ptr = *d_ptr;
3835 
3836  mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
3837  r_addr = mux_hdr->read_addr;
3838  s_addr = mux_hdr->select_addr;
3839  s_stride = mux_hdr->select_value_stride;
3840  s_value = mux_hdr->select_value;
3841  loop_cnt = mux_hdr->op_count;
3842 
3843  for (i = 0; i < loop_cnt; i++) {
3844  qla82xx_md_rw_32(ha, s_addr, s_value, 1);
3845  r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
3846  *data_ptr++ = cpu_to_le32(s_value);
3847  *data_ptr++ = cpu_to_le32(r_value);
3848  s_value += s_stride;
3849  }
3850  *d_ptr = data_ptr;
3851 }
3852 
3853 static void
3854 qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
3855  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
3856 {
3857  struct qla_hw_data *ha = vha->hw;
3858  uint32_t r_addr, r_stride, loop_cnt, i, r_value;
3859  struct qla82xx_md_entry_crb *crb_hdr;
3860  uint32_t *data_ptr = *d_ptr;
3861 
3862  crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
3863  r_addr = crb_hdr->addr;
3864  r_stride = crb_hdr->crb_strd.addr_stride;
3865  loop_cnt = crb_hdr->op_count;
3866 
3867  for (i = 0; i < loop_cnt; i++) {
3868  r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
3869  *data_ptr++ = cpu_to_le32(r_addr);
3870  *data_ptr++ = cpu_to_le32(r_value);
3871  r_addr += r_stride;
3872  }
3873  *d_ptr = data_ptr;
3874 }
3875 
3876 static int
3877 qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
3878  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
3879 {
3880  struct qla_hw_data *ha = vha->hw;
3881  uint32_t addr, r_addr, c_addr, t_r_addr;
3882  uint32_t i, k, loop_count, t_value, r_cnt, r_value;
3883  unsigned long p_wait, w_time, p_mask;
3884  uint32_t c_value_w, c_value_r;
3885  struct qla82xx_md_entry_cache *cache_hdr;
3886  int rval = QLA_FUNCTION_FAILED;
3887  uint32_t *data_ptr = *d_ptr;
3888 
3889  cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
3890  loop_count = cache_hdr->op_count;
3891  r_addr = cache_hdr->read_addr;
3892  c_addr = cache_hdr->control_addr;
3893  c_value_w = cache_hdr->cache_ctrl.write_value;
3894 
3895  t_r_addr = cache_hdr->tag_reg_addr;
3896  t_value = cache_hdr->addr_ctrl.init_tag_value;
3897  r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
3898  p_wait = cache_hdr->cache_ctrl.poll_wait;
3899  p_mask = cache_hdr->cache_ctrl.poll_mask;
3900 
3901  for (i = 0; i < loop_count; i++) {
3902  qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
3903  if (c_value_w)
3904  qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
3905 
3906  if (p_mask) {
3907  w_time = jiffies + p_wait;
3908  do {
3909  c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
3910  if ((c_value_r & p_mask) == 0)
3911  break;
3912  else if (time_after_eq(jiffies, w_time)) {
3913  /* capturing dump failed */
3914  ql_dbg(ql_dbg_p3p, vha, 0xb032,
3915  "c_value_r: 0x%x, poll_mask: 0x%lx, "
3916  "w_time: 0x%lx\n",
3917  c_value_r, p_mask, w_time);
3918  return rval;
3919  }
3920  } while (1);
3921  }
3922 
3923  addr = r_addr;
3924  for (k = 0; k < r_cnt; k++) {
3925  r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
3926  *data_ptr++ = cpu_to_le32(r_value);
3927  addr += cache_hdr->read_ctrl.read_addr_stride;
3928  }
3929  t_value += cache_hdr->addr_ctrl.tag_value_stride;
3930  }
3931  *d_ptr = data_ptr;
3932  return QLA_SUCCESS;
3933 }
3934 
3935 static void
3936 qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
3937  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
3938 {
3939  struct qla_hw_data *ha = vha->hw;
3940  uint32_t addr, r_addr, c_addr, t_r_addr;
3941  uint32_t i, k, loop_count, t_value, r_cnt, r_value;
3942  uint32_t c_value_w;
3943  struct qla82xx_md_entry_cache *cache_hdr;
3944  uint32_t *data_ptr = *d_ptr;
3945 
3946  cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
3947  loop_count = cache_hdr->op_count;
3948  r_addr = cache_hdr->read_addr;
3949  c_addr = cache_hdr->control_addr;
3950  c_value_w = cache_hdr->cache_ctrl.write_value;
3951 
3952  t_r_addr = cache_hdr->tag_reg_addr;
3953  t_value = cache_hdr->addr_ctrl.init_tag_value;
3954  r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
3955 
3956  for (i = 0; i < loop_count; i++) {
3957  qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
3958  qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
3959  addr = r_addr;
3960  for (k = 0; k < r_cnt; k++) {
3961  r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
3962  *data_ptr++ = cpu_to_le32(r_value);
3963  addr += cache_hdr->read_ctrl.read_addr_stride;
3964  }
3965  t_value += cache_hdr->addr_ctrl.tag_value_stride;
3966  }
3967  *d_ptr = data_ptr;
3968 }
3969 
3970 static void
3971 qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
3972  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
3973 {
3974  struct qla_hw_data *ha = vha->hw;
3975  uint32_t s_addr, r_addr;
3976  uint32_t r_stride, r_value, r_cnt, qid = 0;
3977  uint32_t i, k, loop_cnt;
3978  struct qla82xx_md_entry_queue *q_hdr;
3979  uint32_t *data_ptr = *d_ptr;
3980 
3981  q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
3982  s_addr = q_hdr->select_addr;
3983  r_cnt = q_hdr->rd_strd.read_addr_cnt;
3984  r_stride = q_hdr->rd_strd.read_addr_stride;
3985  loop_cnt = q_hdr->op_count;
3986 
3987  for (i = 0; i < loop_cnt; i++) {
3988  qla82xx_md_rw_32(ha, s_addr, qid, 1);
3989  r_addr = q_hdr->read_addr;
3990  for (k = 0; k < r_cnt; k++) {
3991  r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
3992  *data_ptr++ = cpu_to_le32(r_value);
3993  r_addr += r_stride;
3994  }
3995  qid += q_hdr->q_strd.queue_id_stride;
3996  }
3997  *d_ptr = data_ptr;
3998 }
3999 
4000 static void
4001 qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
4002  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4003 {
4004  struct qla_hw_data *ha = vha->hw;
4005  uint32_t r_addr, r_value;
4006  uint32_t i, loop_cnt;
4008  uint32_t *data_ptr = *d_ptr;
4009 
4010  rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
4011  r_addr = rom_hdr->read_addr;
4012  loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
4013 
4014  for (i = 0; i < loop_cnt; i++) {
4016  (r_addr & 0xFFFF0000), 1);
4017  r_value = qla82xx_md_rw_32(ha,
4019  (r_addr & 0x0000FFFF), 0, 0);
4020  *data_ptr++ = cpu_to_le32(r_value);
4021  r_addr += sizeof(uint32_t);
4022  }
4023  *d_ptr = data_ptr;
4024 }
4025 
4026 static int
4027 qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
4028  qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4029 {
4030  struct qla_hw_data *ha = vha->hw;
4031  uint32_t r_addr, r_value, r_data;
4032  uint32_t i, j, loop_cnt;
4033  struct qla82xx_md_entry_rdmem *m_hdr;
4034  unsigned long flags;
4035  int rval = QLA_FUNCTION_FAILED;
4036  uint32_t *data_ptr = *d_ptr;
4037 
4038  m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
4039  r_addr = m_hdr->read_addr;
4040  loop_cnt = m_hdr->read_data_size/16;
4041 
4042  if (r_addr & 0xf) {
4043  ql_log(ql_log_warn, vha, 0xb033,
4044  "Read addr 0x%x not 16 bytes aligned\n", r_addr);
4045  return rval;
4046  }
4047 
4048  if (m_hdr->read_data_size % 16) {
4049  ql_log(ql_log_warn, vha, 0xb034,
4050  "Read data[0x%x] not multiple of 16 bytes\n",
4051  m_hdr->read_data_size);
4052  return rval;
4053  }
4054 
4055  ql_dbg(ql_dbg_p3p, vha, 0xb035,
4056  "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
4057  __func__, r_addr, m_hdr->read_data_size, loop_cnt);
4058 
4059  write_lock_irqsave(&ha->hw_lock, flags);
4060  for (i = 0; i < loop_cnt; i++) {
4061  qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
4062  r_value = 0;
4063  qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
4064  r_value = MIU_TA_CTL_ENABLE;
4065  qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4066  r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
4067  qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4068 
4069  for (j = 0; j < MAX_CTL_CHECK; j++) {
4070  r_value = qla82xx_md_rw_32(ha,
4071  MD_MIU_TEST_AGT_CTRL, 0, 0);
4072  if ((r_value & MIU_TA_CTL_BUSY) == 0)
4073  break;
4074  }
4075 
4076  if (j >= MAX_CTL_CHECK) {
4078  "failed to read through agent\n");
4079  write_unlock_irqrestore(&ha->hw_lock, flags);
4080  return rval;
4081  }
4082 
4083  for (j = 0; j < 4; j++) {
4084  r_data = qla82xx_md_rw_32(ha,
4085  MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
4086  *data_ptr++ = cpu_to_le32(r_data);
4087  }
4088  r_addr += 16;
4089  }
4090  write_unlock_irqrestore(&ha->hw_lock, flags);
4091  *d_ptr = data_ptr;
4092  return QLA_SUCCESS;
4093 }
4094 
4095 static int
4096 qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
4097 {
4098  struct qla_hw_data *ha = vha->hw;
4099  uint64_t chksum = 0;
4100  uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
4101  int count = ha->md_template_size/sizeof(uint32_t);
4102 
4103  while (count-- > 0)
4104  chksum += *d_ptr++;
4105  while (chksum >> 32)
4106  chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
4107  return ~chksum;
4108 }
4109 
4110 static void
4111 qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
4112  qla82xx_md_entry_hdr_t *entry_hdr, int index)
4113 {
4114  entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
4115  ql_dbg(ql_dbg_p3p, vha, 0xb036,
4116  "Skipping entry[%d]: "
4117  "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4118  index, entry_hdr->entry_type,
4119  entry_hdr->d_ctrl.entry_capture_mask);
4120 }
4121 
4122 int
4124 {
4125  struct qla_hw_data *ha = vha->hw;
4126  int no_entry_hdr = 0;
4127  qla82xx_md_entry_hdr_t *entry_hdr;
4128  struct qla82xx_md_template_hdr *tmplt_hdr;
4129  uint32_t *data_ptr;
4130  uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
4131  int i = 0, rval = QLA_FUNCTION_FAILED;
4132 
4133  tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4134  data_ptr = (uint32_t *)ha->md_dump;
4135 
4136  if (ha->fw_dumped) {
4137  ql_log(ql_log_warn, vha, 0xb037,
4138  "Firmware has been previously dumped (%p) "
4139  "-- ignoring request.\n", ha->fw_dump);
4140  goto md_failed;
4141  }
4142 
4143  ha->fw_dumped = 0;
4144 
4145  if (!ha->md_tmplt_hdr || !ha->md_dump) {
4146  ql_log(ql_log_warn, vha, 0xb038,
4147  "Memory not allocated for minidump capture\n");
4148  goto md_failed;
4149  }
4150 
4151  if (ha->flags.isp82xx_no_md_cap) {
4152  ql_log(ql_log_warn, vha, 0xb054,
4153  "Forced reset from application, "
4154  "ignore minidump capture\n");
4155  ha->flags.isp82xx_no_md_cap = 0;
4156  goto md_failed;
4157  }
4158 
4159  if (qla82xx_validate_template_chksum(vha)) {
4160  ql_log(ql_log_info, vha, 0xb039,
4161  "Template checksum validation error\n");
4162  goto md_failed;
4163  }
4164 
4165  no_entry_hdr = tmplt_hdr->num_of_entries;
4166  ql_dbg(ql_dbg_p3p, vha, 0xb03a,
4167  "No of entry headers in Template: 0x%x\n", no_entry_hdr);
4168 
4169  ql_dbg(ql_dbg_p3p, vha, 0xb03b,
4170  "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
4171 
4172  f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
4173 
4174  /* Validate whether required debug level is set */
4175  if ((f_capture_mask & 0x3) != 0x3) {
4176  ql_log(ql_log_warn, vha, 0xb03c,
4177  "Minimum required capture mask[0x%x] level not set\n",
4178  f_capture_mask);
4179  goto md_failed;
4180  }
4181  tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
4182 
4183  tmplt_hdr->driver_info[0] = vha->host_no;
4184  tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
4185  (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
4187 
4188  total_data_size = ha->md_dump_size;
4189 
4190  ql_dbg(ql_dbg_p3p, vha, 0xb03d,
4191  "Total minidump data_size 0x%x to be captured\n", total_data_size);
4192 
4193  /* Check whether template obtained is valid */
4194  if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
4195  ql_log(ql_log_warn, vha, 0xb04e,
4196  "Bad template header entry type: 0x%x obtained\n",
4197  tmplt_hdr->entry_type);
4198  goto md_failed;
4199  }
4200 
4201  entry_hdr = (qla82xx_md_entry_hdr_t *) \
4202  (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
4203 
4204  /* Walk through the entry headers */
4205  for (i = 0; i < no_entry_hdr; i++) {
4206 
4207  if (data_collected > total_data_size) {
4208  ql_log(ql_log_warn, vha, 0xb03e,
4209  "More MiniDump data collected: [0x%x]\n",
4210  data_collected);
4211  goto md_failed;
4212  }
4213 
4214  if (!(entry_hdr->d_ctrl.entry_capture_mask &
4215  ql2xmdcapmask)) {
4216  entry_hdr->d_ctrl.driver_flags |=
4218  ql_dbg(ql_dbg_p3p, vha, 0xb03f,
4219  "Skipping entry[%d]: "
4220  "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4221  i, entry_hdr->entry_type,
4222  entry_hdr->d_ctrl.entry_capture_mask);
4223  goto skip_nxt_entry;
4224  }
4225 
4226  ql_dbg(ql_dbg_p3p, vha, 0xb040,
4227  "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
4228  "entry_type: 0x%x, captrue_mask: 0x%x\n",
4229  __func__, i, data_ptr, entry_hdr,
4230  entry_hdr->entry_type,
4231  entry_hdr->d_ctrl.entry_capture_mask);
4232 
4233  ql_dbg(ql_dbg_p3p, vha, 0xb041,
4234  "Data collected: [0x%x], Dump size left:[0x%x]\n",
4235  data_collected, (ha->md_dump_size - data_collected));
4236 
4237  /* Decode the entry type and take
4238  * required action to capture debug data */
4239  switch (entry_hdr->entry_type) {
4240  case QLA82XX_RDEND:
4241  qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4242  break;
4243  case QLA82XX_CNTRL:
4244  rval = qla82xx_minidump_process_control(vha,
4245  entry_hdr, &data_ptr);
4246  if (rval != QLA_SUCCESS) {
4247  qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4248  goto md_failed;
4249  }
4250  break;
4251  case QLA82XX_RDCRB:
4252  qla82xx_minidump_process_rdcrb(vha,
4253  entry_hdr, &data_ptr);
4254  break;
4255  case QLA82XX_RDMEM:
4256  rval = qla82xx_minidump_process_rdmem(vha,
4257  entry_hdr, &data_ptr);
4258  if (rval != QLA_SUCCESS) {
4259  qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4260  goto md_failed;
4261  }
4262  break;
4263  case QLA82XX_BOARD:
4264  case QLA82XX_RDROM:
4265  qla82xx_minidump_process_rdrom(vha,
4266  entry_hdr, &data_ptr);
4267  break;
4268  case QLA82XX_L2DTG:
4269  case QLA82XX_L2ITG:
4270  case QLA82XX_L2DAT:
4271  case QLA82XX_L2INS:
4272  rval = qla82xx_minidump_process_l2tag(vha,
4273  entry_hdr, &data_ptr);
4274  if (rval != QLA_SUCCESS) {
4275  qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4276  goto md_failed;
4277  }
4278  break;
4279  case QLA82XX_L1DAT:
4280  case QLA82XX_L1INS:
4281  qla82xx_minidump_process_l1cache(vha,
4282  entry_hdr, &data_ptr);
4283  break;
4284  case QLA82XX_RDOCM:
4285  qla82xx_minidump_process_rdocm(vha,
4286  entry_hdr, &data_ptr);
4287  break;
4288  case QLA82XX_RDMUX:
4289  qla82xx_minidump_process_rdmux(vha,
4290  entry_hdr, &data_ptr);
4291  break;
4292  case QLA82XX_QUEUE:
4293  qla82xx_minidump_process_queue(vha,
4294  entry_hdr, &data_ptr);
4295  break;
4296  case QLA82XX_RDNOP:
4297  default:
4298  qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4299  break;
4300  }
4301 
4302  ql_dbg(ql_dbg_p3p, vha, 0xb042,
4303  "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
4304 
4305  data_collected = (uint8_t *)data_ptr -
4306  (uint8_t *)ha->md_dump;
4307 skip_nxt_entry:
4308  entry_hdr = (qla82xx_md_entry_hdr_t *) \
4309  (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
4310  }
4311 
4312  if (data_collected != total_data_size) {
4313  ql_dbg(ql_dbg_p3p, vha, 0xb043,
4314  "MiniDump data mismatch: Data collected: [0x%x],"
4315  "total_data_size:[0x%x]\n",
4316  data_collected, total_data_size);
4317  goto md_failed;
4318  }
4319 
4320  ql_log(ql_log_info, vha, 0xb044,
4321  "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
4322  vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
4323  ha->fw_dumped = 1;
4325 
4326 md_failed:
4327  return rval;
4328 }
4329 
4330 int
4332 {
4333  struct qla_hw_data *ha = vha->hw;
4334  int i, k;
4335  struct qla82xx_md_template_hdr *tmplt_hdr;
4336 
4337  tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4338 
4339  if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
4340  ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
4341  ql_log(ql_log_info, vha, 0xb045,
4342  "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
4343  ql2xmdcapmask);
4344  }
4345 
4346  for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
4347  if (i & ql2xmdcapmask)
4348  ha->md_dump_size += tmplt_hdr->capture_size_array[k];
4349  }
4350 
4351  if (ha->md_dump) {
4352  ql_log(ql_log_warn, vha, 0xb046,
4353  "Firmware dump previously allocated.\n");
4354  return 1;
4355  }
4356 
4357  ha->md_dump = vmalloc(ha->md_dump_size);
4358  if (ha->md_dump == NULL) {
4359  ql_log(ql_log_warn, vha, 0xb047,
4360  "Unable to allocate memory for Minidump size "
4361  "(0x%x).\n", ha->md_dump_size);
4362  return 1;
4363  }
4364  return 0;
4365 }
4366 
4367 void
4369 {
4370  struct qla_hw_data *ha = vha->hw;
4371 
4372  /* Release the template header allocated */
4373  if (ha->md_tmplt_hdr) {
4374  ql_log(ql_log_info, vha, 0xb048,
4375  "Free MiniDump template: %p, size (%d KB)\n",
4376  ha->md_tmplt_hdr, ha->md_template_size / 1024);
4377  dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
4378  ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4379  ha->md_tmplt_hdr = 0;
4380  }
4381 
4382  /* Release the template data buffer allocated */
4383  if (ha->md_dump) {
4384  ql_log(ql_log_info, vha, 0xb049,
4385  "Free MiniDump memory: %p, size (%d KB)\n",
4386  ha->md_dump, ha->md_dump_size / 1024);
4387  vfree(ha->md_dump);
4388  ha->md_dump_size = 0;
4389  ha->md_dump = 0;
4390  }
4391 }
4392 
4393 void
4395 {
4396  struct qla_hw_data *ha = vha->hw;
4397  int rval;
4398 
4399  /* Get Minidump template size */
4400  rval = qla82xx_md_get_template_size(vha);
4401  if (rval == QLA_SUCCESS) {
4402  ql_log(ql_log_info, vha, 0xb04a,
4403  "MiniDump Template size obtained (%d KB)\n",
4404  ha->md_template_size / 1024);
4405 
4406  /* Get Minidump template */
4407  rval = qla82xx_md_get_template(vha);
4408  if (rval == QLA_SUCCESS) {
4409  ql_dbg(ql_dbg_p3p, vha, 0xb04b,
4410  "MiniDump Template obtained\n");
4411 
4412  /* Allocate memory for minidump */
4413  rval = qla82xx_md_alloc(vha);
4414  if (rval == QLA_SUCCESS)
4415  ql_log(ql_log_info, vha, 0xb04c,
4416  "MiniDump memory allocated (%d KB)\n",
4417  ha->md_dump_size / 1024);
4418  else {
4419  ql_log(ql_log_info, vha, 0xb04d,
4420  "Free MiniDump template: %p, size: (%d KB)\n",
4421  ha->md_tmplt_hdr,
4422  ha->md_template_size / 1024);
4423  dma_free_coherent(&ha->pdev->dev,
4424  ha->md_template_size,
4425  ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4426  ha->md_tmplt_hdr = 0;
4427  }
4428 
4429  }
4430  }
4431 }
4432 
4433 int
4435 {
4436 
4437  int rval;
4438  struct qla_hw_data *ha = vha->hw;
4439  qla82xx_idc_lock(ha);
4440  rval = qla82xx_mbx_beacon_ctl(vha, 1);
4441 
4442  if (rval) {
4443  ql_log(ql_log_warn, vha, 0xb050,
4444  "mbx set led config failed in %s\n", __func__);
4445  goto exit;
4446  }
4447  ha->beacon_blink_led = 1;
4448 exit:
4449  qla82xx_idc_unlock(ha);
4450  return rval;
4451 }
4452 
4453 int
4455 {
4456 
4457  int rval;
4458  struct qla_hw_data *ha = vha->hw;
4459  qla82xx_idc_lock(ha);
4460  rval = qla82xx_mbx_beacon_ctl(vha, 0);
4461 
4462  if (rval) {
4463  ql_log(ql_log_warn, vha, 0xb051,
4464  "mbx set led config failed in %s\n", __func__);
4465  goto exit;
4466  }
4467  ha->beacon_blink_led = 0;
4468 exit:
4469  qla82xx_idc_unlock(ha);
4470  return rval;
4471 }