t3_hw.c (Linux kernel 3.7.1)
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

static void t3_port_intr_clear(struct adapter *adapter, int idx);

int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
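
/*
 * Illustrative note (not in the original file): most callers go through the
 * t3_wait_op_done() wrapper from common.h, which simply passes a NULL value
 * pointer.  For example,
 *
 *	ret = t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, NULL);
 *
 * polls A_SF_OP until the F_BUSY bit reads as 0, retrying up to 5 times
 * with a 10 us delay between attempts.
 */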

void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
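
/*
 * Illustrative sketch (not part of the original source): a typical
 * read-modify-write through this helper clears a multi-bit field and
 * installs a new value in a single call, e.g.
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_CLKDIV(M_CLKDIV),
 *			 V_CLKDIV(new_div));
 *
 * where new_div is a hypothetical divider value; only the bits covered by
 * the mask argument are modified, and the trailing read flushes the write.
 */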

static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20
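
/*
 * Worked example (illustrative, values assumed): with a 200 MHz core clock
 * (cclk) and a target MDIO clock (mdc) of 2 MHz, mi1_init() above programs
 * a divider of 200 / (2 * 2) - 1 = 49, consistent with the hardware running
 * MDC at cclk / (2 * (clkdiv + 1)).
 */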

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};

/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
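
/*
 * Illustrative sketch (not in the original file): a clause 45 access adds an
 * address cycle before the data cycle, so reading register 1 of the PMA/PMD
 * MMD on PHY address 0 decomposes roughly as
 *
 *	mi1_wr_addr(adapter, 0, MDIO_MMD_PMAPMD, 1);	/- address cycle -/
 *	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));	/- read cycle -/
 *	val = t3_read_reg(adapter, A_MI1_DATA);
 *
 * which is exactly the sequence mi1_ext_read() performs under mdio_lock.
 */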

int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
				  MDIO_CTRL1_RESET);
	if (err || !wait)
		return err;

	do {
		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
		if (err)
			return err;
		ctl &= MDIO_CTRL1_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
			       &status);

	if (err)
		return err;
	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}

static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL},
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
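
/*
 * Illustrative note (not in the original file): each VPD_ENTRY() above is a
 * mechanical macro expansion; for example VPD_ENTRY(cclk, 6) produces
 *
 *	u8 cclk_kword[2]; u8 cclk_len; u8 cclk_data[6];
 *
 * which is why get_vpd_params() below can parse fields such as
 * vpd.cclk_data directly.
 */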

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}

int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
				 hex_to_bin(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};

static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

static int t3_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
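
/*
 * Illustrative note (not in the original file): SF_PROG_PAGE can only
 * program within a single 256-byte flash page, which is what the
 * "offset + n > 256" check above enforces.  A caller wanting to write,
 * say, 300 bytes starting at 0x100 must split the write at the 0x200 page
 * boundary, as t3_load_fw() below does by chunking into 256-byte pieces.
 */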

int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
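
/*
 * Worked example (illustrative): images are valid when the sum of all
 * big-endian 32-bit words, including the trailing checksum word, equals
 * 0xffffffff.  So for a payload whose words sum to 0x12345678, the stored
 * checksum word must be 0xffffffff - 0x12345678 = 0xedcba987.
 */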

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
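
/*
 * Illustrative note (not in the original file): given the constants above,
 * the firmware occupies the single 64 KB sector at 0x70000-0x7ffff
 * (fw_sector = 0x70000 >> 16 = 7), and the 4-byte version word is written
 * last at 0x7fffc, so a partially flashed image is left without a valid
 * version.
 */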

#define CIM_CTL_BASE 0x2000

int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
1163 
1164 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1165  u32 *rx_hash_high, u32 *rx_hash_low)
1166 {
1167  /* stop Rx unicast traffic */
1169 
1170  /* stop broadcast, multicast, promiscuous mode traffic */
1171  *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1174  F_DISBCAST);
1175 
1176  *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1177  t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1178 
1179  *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1180  t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1181 
1182  /* Leave time to drain max RX fifo */
1183  msleep(1);
1184 }
1185 
1186 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1187  u32 rx_hash_high, u32 rx_hash_low)
1188 {
1192  rx_cfg);
1193  t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1194  t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1195 }

void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
			   speed, duplex, fc);
}

void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}

int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
			status &= ~acts->mask;
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
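
/*
 * Illustrative sketch (not in the original file): a minimal table for this
 * dispatcher pairs a cause bit with a message, a statistics index (-1 for
 * none) and a fatal flag, terminated by an all-zero entry:
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_SOME_PARITY_ERR, "parity error", -1, 1},
 *		{0}
 *	};
 *
 * F_SOME_PARITY_ERR is a hypothetical bit name; the real tables follow
 * below.
 */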

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
		       F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1, 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1, 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static const struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}

#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	/*
	 * We mask out interrupt causes for which we're not taking interrupts.
	 * This allows us to use polling logic to monitor some of the other
	 * conditions when taking interrupts would impose too much load on the
	 * system.
	 */
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
		    ~F_RXFIFO_OVERFLOW;

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
	if (cause & F_XGM_INT) {
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, 0);
		mac->stats.link_faults++;

		t3_os_link_fault_handler(adap, idx);
	}

	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	return cause != 0;
}

/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		if (!(p->phy.caps & SUPPORTED_IRQ))
			continue;

		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
			if (phy_cause & cphy_cause_module_change)
				t3_os_phymod_changed(adapter, i);
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}

/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}

static unsigned int calc_gpio_intr(struct adapter *adap)
{
	unsigned int i, gpi_intr = 0;

	for_each_port(adap, i)
		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
		    adapter_info(adap)->gpio_intr[i])
			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
	return gpi_intr;
}

void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}

void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}

void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}

void t3_xgm_intr_enable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
		     XGM_EXTRA_INTR_MASK);
}

void t3_xgm_intr_disable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
		     0x7ff);
}

void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}

void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}

static void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}

#define SG_CONTEXT_CMD_ATTEMPTS 100

static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	if (type == F_RESPONSEQ) {
		/*
		 * Can't write the Response Queue Context bits for
		 * Interrupt Armed or the Reserve bits after the chip
		 * has been initialized out of reset.  Writing to these
		 * bits can confuse the hardware.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	} else {
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	}
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
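
/*
 * Illustrative note (not in the original file): the A_SG_CONTEXT_MASK0-3
 * writes above select which bits of the 128-bit context the command may
 * modify; the 0x17ffffff mask used for response queues leaves the
 * interrupt-armed and reserved bits in that word untouched, per the
 * comment above.
 */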

static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
2233 
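/* --- Editor's note (standalone illustration, not part of t3_hw.c) ---
 * How the 4K-aligned base address is split across the context words
 * above: after dropping the 12 alignment bits, the low 16 bits land in
 * DATA1, the next 32 bits in DATA2, and the top 4 bits in DATA3.  The
 * address below is a hypothetical example value.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0xa000012345678000ull;	/* must be 4K aligned */

	base >>= 12;
	printf("DATA1 EC_BASE_LO = 0x%04x\n", (unsigned int)(base & 0xffff));
	base >>= 16;
	/* the 32-bit register write keeps only the low 32 bits */
	printf("DATA2            = 0x%08x\n", (unsigned int)base);
	base >>= 32;
	printf("DATA3 EC_BASE_HI = 0x%x\n", (unsigned int)(base & 0xf));
	return 0;
}
/* --- end editor's note --- */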
2250 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2251  int gts_enable, u64 base_addr, unsigned int size,
2252  unsigned int bsize, unsigned int cong_thres, int gen,
2253  unsigned int cidx)
2254 {
2255  if (base_addr & 0xfff) /* must be 4K aligned */
2256  return -EINVAL;
2257  if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2258  return -EBUSY;
2259 
2260  base_addr >>= 12;
2261  t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2262  base_addr >>= 32;
2263  t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2264  V_FL_BASE_HI((u32) base_addr) |
2265  V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2266  t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2267  V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2268  V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2269  t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2270  V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2271  V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2272  return t3_sge_write_context(adapter, id, F_FREELIST);
2273 }
2274 
2290 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2291  int irq_vec_idx, u64 base_addr, unsigned int size,
2292  unsigned int fl_thres, int gen, unsigned int cidx)
2293 {
2294  unsigned int intr = 0;
2295 
2296  if (base_addr & 0xfff) /* must be 4K aligned */
2297  return -EINVAL;
2298  if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2299  return -EBUSY;
2300 
2301  base_addr >>= 12;
2302  t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2303  V_CQ_INDEX(cidx));
2304  t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2305  base_addr >>= 32;
2306  if (irq_vec_idx >= 0)
2307  intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2308  t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2309  V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2310  t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2311  return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2312 }
2313 
2329 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2330  unsigned int size, int rspq, int ovfl_mode,
2331  unsigned int credits, unsigned int credit_thres)
2332 {
2333  if (base_addr & 0xfff) /* must be 4K aligned */
2334  return -EINVAL;
2335  if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2336  return -EBUSY;
2337 
2338  base_addr >>= 12;
2339  t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2340  t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2341  base_addr >>= 32;
2342  t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2343  V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2344  V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2345  V_CQ_ERR(ovfl_mode));
2346  t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2347  V_CQ_CREDIT_THRES(credit_thres));
2348  return t3_sge_write_context(adapter, id, F_CQ);
2349 }
2350 
2360 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2361 {
2362  if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2363  return -EBUSY;
2364 
2365  t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2366  t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2367  t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2368  t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2369  t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2370  t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2371  V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2372  return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2373  0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2374 }
2375 
2384 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2385 {
2386  if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2387  return -EBUSY;
2388 
2389  t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2390  t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2391  t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2392  t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2393  t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2394  t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2395  V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2396  return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2397  0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2398 }
2399 
2408 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2409 {
2410  if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2411  return -EBUSY;
2412 
2413  t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2414  t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2415  t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2416  t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2417  t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2418  t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2419  V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2420  return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2421  0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2422 }
2423 
2432 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2433 {
2434  if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2435  return -EBUSY;
2436 
2437  t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2438  t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2439  t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2440  t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2441  t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2442  t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2443  V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2444  return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2445  0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2446 }
2447 
2458 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2459  unsigned int credits)
2460 {
2461  u32 val;
2462 
2463  if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2464  return -EBUSY;
2465 
2466  t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2467  t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2468  V_CONTEXT(id) | F_CQ);
2469  if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2470  0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2471  return -EIO;
2472 
2473  if (op >= 2 && op < 7) {
2474  if (adapter->params.rev > 0)
2475  return G_CQ_INDEX(val);
2476 
2477  t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2478  V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2479  if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2480  F_CONTEXT_CMD_BUSY, 0,
2481  SG_CONTEXT_CMD_ATTEMPTS, 1))
2482  return -EIO;
2483  return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2484  }
2485  return 0;
2486 }
2487 
2500 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2501  const u8 * cpus, const u16 *rspq)
2502 {
2503  int i, j, cpu_idx = 0, q_idx = 0;
2504 
2505  if (cpus)
2506  for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2507  u32 val = i << 16;
2508 
2509  for (j = 0; j < 2; ++j) {
2510  val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2511  if (cpus[cpu_idx] == 0xff)
2512  cpu_idx = 0;
2513  }
2514  t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2515  }
2516 
2517  if (rspq)
2518  for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2519  t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2520  (i << 16) | rspq[q_idx++]);
2521  if (rspq[q_idx] == 0xffff)
2522  q_idx = 0;
2523  }
2524 
2525  t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2526 }
2527 
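/* --- Editor's note (standalone illustration, not part of t3_hw.c) ---
 * Each RSS lookup-table entry written above packs two 6-bit CPU indices,
 * and a 0xff byte in cpus[] acts as a terminator that wraps cpu_idx back
 * to 0, so a short list is replicated across all RSS_TABLE_SIZE entries.
 * Hypothetical 4-queue example:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t cpus[] = { 0, 1, 2, 3, 0xff };	/* 4 queues, then wrap */
	unsigned int i, j, cpu_idx = 0;

	for (i = 0; i < 8; ++i) {	/* first 8 of the 64 table entries */
		uint32_t val = i << 16;

		for (j = 0; j < 2; ++j) {
			val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
			if (cpus[cpu_idx] == 0xff)
				cpu_idx = 0;
		}
		printf("entry %u -> 0x%06x\n", i, val);
	}
	return 0;
}
/* --- end editor's note --- */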
2535 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2536 {
2537  if (is_offload(adap) || !enable)
2538  t3_set_reg_field(adap, A_TP_IN_CONFIG, V_NICMODE(1),
2539  V_NICMODE(!enable));
2540 }
2541 
2551 static inline unsigned int pm_num_pages(unsigned int mem_size,
2552  unsigned int pg_size)
2553 {
2554  unsigned int n = mem_size / pg_size;
2555 
2556  return n - n % 24;
2557 }
2558 
2559 #define mem_region(adap, start, size, reg) \
2560  t3_write_reg((adap), A_ ## reg, (start)); \
2561  start += size
2562 
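/* --- Editor's note (illustration, not part of t3_hw.c) ---
 * mem_region() expands to two statements and also increments its "start"
 * argument, so it is only safe at statement level, which is how
 * partition_mem() below uses it; as the unbraced body of an if () it
 * would break.  A more defensive (hypothetical) spelling would use the
 * usual do { ... } while (0) idiom:
 */
#define mem_region_safe(adap, start, size, reg) do { \
	t3_write_reg((adap), A_ ## reg, (start)); \
	(start) += (size); \
} while (0)
/* --- end editor's note --- */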
2571 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2572 {
2573  unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2574  unsigned int timers = 0, timers_shift = 22;
2575 
2576  if (adap->params.rev > 0) {
2577  if (tids <= 16 * 1024) {
2578  timers = 1;
2579  timers_shift = 16;
2580  } else if (tids <= 64 * 1024) {
2581  timers = 2;
2582  timers_shift = 18;
2583  } else if (tids <= 256 * 1024) {
2584  timers = 3;
2585  timers_shift = 20;
2586  }
2587  }
2588 
2589  t3_write_reg(adap, A_TP_PMM_SIZE,
2590  p->chan_rx_size | (p->chan_tx_size >> 16));
2591 
2592  t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2593  t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2594  t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2595  t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2596  V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2597 
2598  t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2599  t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2600  t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2601 
2602  pstructs = p->rx_num_pgs + p->tx_num_pgs;
2603  /* Add a bit of headroom and make multiple of 24 */
2604  pstructs += 48;
2605  pstructs -= pstructs % 24;
2606  t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2607 
2608  m = tids * TCB_SIZE;
2609  mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2610  mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2611  t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2612  m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2613  mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2614  mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2615  mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2616  mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2617 
2618  m = (m + 4095) & ~0xfff;
2619  t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2620  t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2621 
2622  tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2623  m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2624  adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2625  if (tids < m)
2626  adap->params.mc5.nservers += m - tids;
2627 }
2628 
2629 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2630  u32 val)
2631 {
2632  t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2633  t3_write_reg(adap, A_TP_PIO_DATA, val);
2634 }
2635 
2636 static void tp_config(struct adapter *adap, const struct tp_params *p)
2637 {
2638  t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2639  F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2640  F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2641  t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2642  F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2643  V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2644  t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2645  V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2646  V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2647  F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2648  t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2649  F_IPV6ENABLE | F_NICMODE);
2650  t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2651  t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2652  t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2653  adap->params.rev > 0 ? F_ENABLEESND :
2654  F_T3A_ENABLEESND);
2655 
2656  t3_set_reg_field(adap, A_TP_PC_CONFIG,
2657  F_ENABLEEPCMDAFULL,
2658  F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2659  F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2660  t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2661  F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2662  F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2663  t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2664  t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2665 
2666  if (adap->params.rev > 0) {
2667  tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2668  t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2669  F_TXPACEAUTO);
2670  t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2671  t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2672  } else
2673  t3_write_reg(adap, A_TP_PARA_REG3, F_TXPACEFIXED);
2674 
2675  if (adap->params.rev == T3_REV_C)
2676  t3_set_reg_field(adap, A_TP_PC_CONFIG,
2677  V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2678  V_TABLELATENCYDELTA(4));
2679 
2680  t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2681  t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2682  t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2683  t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2684 }
2685 
2686 /* Desired TP timer resolution in usec */
2687 #define TP_TMR_RES 50
2688 
2689 /* TCP timer values in ms */
2690 #define TP_DACK_TIMER 50
2691 #define TP_RTO_MIN 250
2692 
2701 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2702 {
2703  unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2704  unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2705  unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2706  unsigned int tps = core_clk >> tre;
2707 
2708  t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2709  V_DELAYEDACKRESOLUTION(dack_re) |
2710  V_TIMESTAMPRESOLUTION(tstamp_re));
2711  t3_write_reg(adap, A_TP_DACK_TIMER,
2712  (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2713  t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2714  t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2715  t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2716  t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2717  t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2718  V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2719  V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2720  V_KEEPALIVEMAX(9));
2721 
2722 #define SECONDS * tps
2723 
2724  t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2725  t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2726  t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2727  t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2728  t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2729  t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2730  t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2731  t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2732  t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2733 
2734 #undef SECONDS
2735 }
2736 
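/* --- Editor's note (standalone illustration, not part of t3_hw.c) ---
 * The SECONDS macro above simply expands to "* tps", so "7200 SECONDS"
 * reads as "7200 * tps", where tps is the core clock divided down by the
 * programmed timer resolution.  With a hypothetical 200 MHz core clock
 * and the 50 us TP_TMR_RES used above:
 */
#include <stdio.h>

int main(void)
{
	unsigned int core_clk = 200000000;		/* 200 MHz, assumed */
	unsigned int clk = core_clk / (1000000 / 50);	/* = 10000 */
	unsigned int tre = 0, tps;

	while (clk >>= 1)	/* open-coded fls(clk) - 1 */
		tre++;
	tps = core_clk >> tre;	/* timer ticks per second */
	printf("tre = %u, tps = %u, 7200 s = %u ticks\n",
	       tre, tps, 7200 * tps);	/* tre = 13, tps = 24414 */
	return 0;
}
/* --- end editor's note --- */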
2745 static int t3_tp_set_coalescing_size(struct adapter *adap,
2746  unsigned int size, int psh)
2747 {
2748  u32 val;
2749 
2750  if (size > MAX_RX_COALESCING_LEN)
2751  return -EINVAL;
2752 
2753  val = t3_read_reg(adap, A_TP_PARA_REG3);
2754  val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2755 
2756  if (size) {
2757  val |= F_RXCOALESCEENABLE;
2758  if (psh)
2759  val |= F_RXCOALESCEPSHEN;
2760  size = min(MAX_RX_COALESCING_LEN, size);
2761  t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2762  V_MAXRXDATA(16384));
2763  }
2764  t3_write_reg(adap, A_TP_PARA_REG3, val);
2765  return 0;
2766 }
2767 
2776 static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2777 {
2778  t3_write_reg(adap, A_TP_PARA_REG7,
2779  V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2780 }
2781 
2782 static void init_mtus(unsigned short mtus[])
2783 {
2784  /*
2785  * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2786  * it can accommodate max size TCP/IP headers when SACK and timestamps
2787  * are enabled and still have at least 8 bytes of payload.
2788  */
2789  mtus[0] = 88;
2790  mtus[1] = 88;
2791  mtus[2] = 256;
2792  mtus[3] = 512;
2793  mtus[4] = 576;
2794  mtus[5] = 1024;
2795  mtus[6] = 1280;
2796  mtus[7] = 1492;
2797  mtus[8] = 1500;
2798  mtus[9] = 2002;
2799  mtus[10] = 2048;
2800  mtus[11] = 4096;
2801  mtus[12] = 4352;
2802  mtus[13] = 8192;
2803  mtus[14] = 9000;
2804  mtus[15] = 9600;
2805 }
2806 
2807 /*
2808  * Initial congestion control parameters.
2809  */
2810 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2811 {
2812  a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2813  a[9] = 2;
2814  a[10] = 3;
2815  a[11] = 4;
2816  a[12] = 5;
2817  a[13] = 6;
2818  a[14] = 7;
2819  a[15] = 8;
2820  a[16] = 9;
2821  a[17] = 10;
2822  a[18] = 14;
2823  a[19] = 17;
2824  a[20] = 21;
2825  a[21] = 25;
2826  a[22] = 30;
2827  a[23] = 35;
2828  a[24] = 45;
2829  a[25] = 60;
2830  a[26] = 80;
2831  a[27] = 100;
2832  a[28] = 200;
2833  a[29] = 300;
2834  a[30] = 400;
2835  a[31] = 500;
2836 
2837  b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2838  b[9] = b[10] = 1;
2839  b[11] = b[12] = 2;
2840  b[13] = b[14] = b[15] = b[16] = 3;
2841  b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2842  b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2843  b[28] = b[29] = 6;
2844  b[30] = b[31] = 7;
2845 }
2846 
2847 /* The minimum additive increment value for the congestion control table */
2848 #define CC_MIN_INCR 2U
2849 
2862 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2863  unsigned short alpha[NCCTRL_WIN],
2864  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2865 {
2866  static const unsigned int avg_pkts[NCCTRL_WIN] = {
2867  2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2868  896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2869  28672, 40960, 57344, 81920, 114688, 163840, 229376
2870  };
2871 
2872  unsigned int i, w;
2873 
2874  for (i = 0; i < NMTUS; ++i) {
2875  unsigned int mtu = min(mtus[i], mtu_cap);
2876  unsigned int log2 = fls(mtu);
2877 
2878  if (!(mtu & ((1 << log2) >> 2))) /* round */
2879  log2--;
2880  t3_write_reg(adap, A_TP_MTU_TABLE,
2881  (i << 24) | (log2 << 16) | mtu);
2882 
2883  for (w = 0; w < NCCTRL_WIN; ++w) {
2884  unsigned int inc;
2885 
2886  inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2887  CC_MIN_INCR);
2888 
2889  t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2890  (w << 16) | (beta[w] << 13) | inc);
2891  }
2892  }
2893 }
2894 
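/* --- Editor's note (standalone illustration, not part of t3_hw.c) ---
 * The additive increment written to A_TP_CCTRL_TABLE above is alpha
 * scaled by the MTU payload (mtu - 40 bytes of TCP/IP header) and
 * divided by the window's average packet count, floored at CC_MIN_INCR.
 * With hypothetical inputs mtu = 1500, alpha = 2 and avg_pkts = 160:
 */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, alpha = 2, avg_pkts = 160;
	unsigned int inc = ((mtu - 40) * alpha) / avg_pkts;

	if (inc < 2)		/* CC_MIN_INCR */
		inc = 2;
	printf("inc = %u\n", inc);	/* (1460 * 2) / 160 = 18 */
	return 0;
}
/* --- end editor's note --- */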
2902 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2903 {
2904  t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2905  sizeof(*tps) / sizeof(u32), 0);
2906 }
2907 
2908 #define ulp_region(adap, name, start, len) \
2909  t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2910  t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2911  (start) + (len) - 1); \
2912  start += len
2913 
2914 #define ulptx_region(adap, name, start, len) \
2915  t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2916  t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2917  (start) + (len) - 1)
2918 
2919 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2920 {
2921  unsigned int m = p->chan_rx_size;
2922 
2923  ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2924  ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2925  ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2926  ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2927  ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2928  ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2929  ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2930  t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2931 }
2932 
2940 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2941 {
2942  int i;
2943  const __be32 *buf = (const __be32 *)data;
2944 
2945  for (i = 0; i < PROTO_SRAM_LINES; i++) {
2946  t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2947  t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2948  t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2949  t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2950  t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2951 
2952  t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2953  if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2954  return -EIO;
2955  }
2956  t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2957 
2958  return 0;
2959 }
2960 
2961 void t3_config_trace_filter(struct adapter *adapter,
2962  const struct trace_params *tp, int filter_index,
2963  int invert, int enable)
2964 {
2965  u32 addr, key[4], mask[4];
2966 
2967  key[0] = tp->sport | (tp->sip << 16);
2968  key[1] = (tp->sip >> 16) | (tp->dport << 16);
2969  key[2] = tp->dip;
2970  key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2971 
2972  mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2973  mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2974  mask[2] = tp->dip_mask;
2975  mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2976 
2977  if (invert)
2978  key[3] |= (1 << 29);
2979  if (enable)
2980  key[3] |= (1 << 28);
2981 
2982  addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2983  tp_wr_indirect(adapter, addr++, key[0]);
2984  tp_wr_indirect(adapter, addr++, mask[0]);
2985  tp_wr_indirect(adapter, addr++, key[1]);
2986  tp_wr_indirect(adapter, addr++, mask[1]);
2987  tp_wr_indirect(adapter, addr++, key[2]);
2988  tp_wr_indirect(adapter, addr++, mask[2]);
2989  tp_wr_indirect(adapter, addr++, key[3]);
2990  tp_wr_indirect(adapter, addr, mask[3]);
2991  t3_read_reg(adapter, A_TP_PIO_DATA);
2992 }
2993 
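/* --- Editor's note (standalone illustration, not part of t3_hw.c) ---
 * The trace-filter tuple is packed little-end first across the key[]
 * words above, so the 32-bit source IP straddles key[0] and key[1].
 * Hypothetical values:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sip = 0x0a000001;	/* 10.0.0.1 */
	uint32_t sport = 80, dport = 12345;
	uint32_t key0 = sport | (sip << 16);		/* sport, sip[15:0] */
	uint32_t key1 = (sip >> 16) | (dport << 16);	/* sip[31:16], dport */

	printf("key[0] = 0x%08x, key[1] = 0x%08x\n", key0, key1);
	return 0;
}
/* --- end editor's note --- */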
3002 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3003 {
3004  unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3005  unsigned int clk = adap->params.vpd.cclk * 1000;
3006  unsigned int selected_cpt = 0, selected_bpt = 0;
3007 
3008  if (kbps > 0) {
3009  kbps *= 125; /* -> bytes */
3010  for (cpt = 1; cpt <= 255; cpt++) {
3011  tps = clk / cpt;
3012  bpt = (kbps + tps / 2) / tps;
3013  if (bpt > 0 && bpt <= 255) {
3014  v = bpt * tps;
3015  delta = v >= kbps ? v - kbps : kbps - v;
3016  if (delta <= mindelta) {
3017  mindelta = delta;
3018  selected_cpt = cpt;
3019  selected_bpt = bpt;
3020  }
3021  } else if (selected_cpt)
3022  break;
3023  }
3024  if (!selected_cpt)
3025  return -EINVAL;
3026  }
3027  t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3028  A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3029  v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3030  if (sched & 1)
3031  v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3032  else
3033  v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3034  t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3035  return 0;
3036 }
3037 
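/* --- Editor's note (standalone illustration, not part of t3_hw.c) ---
 * The scheduler search above picks a cycles-per-tick (cpt) and
 * bytes-per-tick (bpt) pair whose product best approximates the
 * requested rate.  A hypothetical 375 MHz core clock and a 1 Gbps
 * (1000000 kbps) request:
 */
#include <stdio.h>

int main(void)
{
	unsigned int clk = 375000000;
	unsigned int kbps = 1000000 * 125;	/* kbps -> bytes/s */
	unsigned int cpt, best_cpt = 0, best_bpt = 0, mindelta = ~0u;

	for (cpt = 1; cpt <= 255; cpt++) {
		unsigned int tps = clk / cpt;		/* ticks per second */
		unsigned int bpt = (kbps + tps / 2) / tps;

		if (bpt > 0 && bpt <= 255) {
			unsigned int v = bpt * tps;
			unsigned int delta = v >= kbps ? v - kbps : kbps - v;

			if (delta <= mindelta) {
				mindelta = delta;
				best_cpt = cpt;
				best_bpt = bpt;
			}
		} else if (best_cpt)
			break;
	}
	printf("cpt = %u, bpt = %u, error = %u bytes/s\n",
	       best_cpt, best_bpt, mindelta);
	return 0;
}
/* --- end editor's note --- */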
3038 static int tp_init(struct adapter *adap, const struct tp_params *p)
3039 {
3040  int busy = 0;
3041 
3042  tp_config(adap, p);
3043  t3_set_vlan_accel(adap, 3, 0);
3044 
3045  if (is_offload(adap)) {
3046  tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3047  t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3048  busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3049  0, 1000, 5);
3050  if (busy)
3051  CH_ERR(adap, "TP initialization timed out\n");
3052  }
3053 
3054  if (!busy)
3055  t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3056  return busy;
3057 }
3058 
3059 /*
3060  * Perform the bits of HW initialization that are dependent on the Tx
3061  * channels being used.
3062  */
3063 static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3064 {
3065  int i;
3066 
3067  if (chan_map != 3) { /* one channel */
3068  t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3069  t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3070  t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3071  (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3072  F_TPTXPORT1EN | F_PORT1ACTIVE));
3073  t3_write_reg(adap, A_PM1_TX_CFG,
3074  chan_map == 1 ? 0xffffffff : 0);
3075  } else { /* two channels */
3076  t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3077  t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3078  t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3079  V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3080  t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3081  F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3082  F_ENFORCEPKT);
3083  t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3084  t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3085  t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3086  V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3087  for (i = 0; i < 16; i++)
3088  t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3089  (i << 16) | 0x1010);
3090  }
3091 }
3092 
3093 static int calibrate_xgm(struct adapter *adapter)
3094 {
3095  if (uses_xaui(adapter)) {
3096  unsigned int v, i;
3097 
3098  for (i = 0; i < 5; ++i) {
3099  t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3100  t3_read_reg(adapter, A_XGM_XAUI_IMP);
3101  msleep(1);
3102  v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3103  if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3104  t3_write_reg(adapter, A_XGM_XAUI_IMP,
3105  V_XAUIIMP(G_CALIMP(v) >> 2));
3106  return 0;
3107  }
3108  }
3109  CH_ERR(adapter, "MAC calibration failed\n");
3110  return -1;
3111  } else {
3112  t3_write_reg(adapter, A_XGM_RGMII_IMP,
3113  V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3114  t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3115  F_XGM_IMPSETUPDATE);
3116  }
3117  return 0;
3118 }
3119 
3120 static void calibrate_xgm_t3b(struct adapter *adapter)
3121 {
3122  if (!uses_xaui(adapter)) {
3123  t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3124  F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3125  t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3126  t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3129  0);
3130  t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3131  t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3132  }
3133 }
3134 
3135 struct mc7_timing_params {
3136  unsigned char ActToPreDly;
3137  unsigned char ActToRdWrDly;
3138  unsigned char PreCyc;
3139  unsigned char RefCyc[5];
3140  unsigned char BkCyc;
3141  unsigned char WrToRdDly;
3142  unsigned char RdToWrDly;
3143 };
3144 
3145 /*
3146  * Write a value to a register and check that the write completed. These
3147  * writes normally complete in a cycle or two, so one read should suffice.
3148  * The very first read exists to flush the posted write to the device.
3149  */
3150 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3151 {
3152  t3_write_reg(adapter, addr, val);
3153  t3_read_reg(adapter, addr); /* flush */
3154  if (!(t3_read_reg(adapter, addr) & F_BUSY))
3155  return 0;
3156  CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3157  return -EIO;
3158 }
3159 
3160 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3161 {
3162  static const unsigned int mc7_mode[] = {
3163  0x632, 0x642, 0x652, 0x432, 0x442
3164  };
3165  static const struct mc7_timing_params mc7_timings[] = {
3166  {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3167  {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3168  {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3169  {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3170  {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3171  };
3172 
3173  u32 val;
3174  unsigned int width, density, slow, attempts;
3175  struct adapter *adapter = mc7->adapter;
3176  const struct mc7_timing_params *p = &mc7_timings[mem_type];
3177 
3178  if (!mc7->size)
3179  return 0;
3180 
3181  val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3182  slow = val & F_SLOW;
3183  width = G_WIDTH(val);
3184  density = G_DEN(val);
3185 
3186  t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3187  val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3188  msleep(1);
3189 
3190  if (!slow) {
3191  t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3192  t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3193  msleep(1);
3194  if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3195  (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3196  CH_ERR(adapter, "%s MC7 calibration timed out\n",
3197  mc7->name);
3198  goto out_fail;
3199  }
3200  }
3201 
3202  t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3203  V_ACTTOPREDLY(p->ActToPreDly) |
3204  V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3205  V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3206  V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3207 
3208  t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3209  val | F_CLKEN | F_TERM150);
3210  t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3211 
3212  if (!slow)
3213  t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3214  F_DLLENB);
3215  udelay(1);
3216 
3217  val = slow ? 3 : 6;
3218  if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3219  wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3220  wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3221  wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3222  goto out_fail;
3223 
3224  if (!slow) {
3225  t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3226  t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3227  udelay(5);
3228  }
3229 
3230  if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3231  wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3232  wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3233  wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3234  mc7_mode[mem_type]) ||
3235  wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3236  wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3237  goto out_fail;
3238 
3239  /* clock value is in KHz */
3240  mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3241  mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3242 
3243  t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3244  F_PERREFEN | V_PREREFDIV(mc7_clock));
3245  t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3246 
3247  t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3248  t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3249  t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3250  t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3251  (mc7->size << width) - 1);
3252  t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3253  t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3254 
3255  attempts = 50;
3256  do {
3257  msleep(250);
3258  val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3259  } while ((val & F_BUSY) && --attempts);
3260  if (val & F_BUSY) {
3261  CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3262  goto out_fail;
3263  }
3264 
3265  /* Enable normal memory accesses. */
3266  t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3267  return 0;
3268 
3269 out_fail:
3270  return -1;
3271 }
3272 
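/* --- Editor's note (standalone illustration, not part of t3_hw.c) ---
 * The refresh arithmetic in mc7_init() above converts the memory clock
 * (given in kHz) into clock cycles per 7812.5 ns, which matches the
 * common 64 ms / 8192-row DRAM refresh period:
 * cycles = clk_khz * 7812.5 / 1e6.  For a hypothetical 200000 kHz clock:
 */
#include <stdio.h>

int main(void)
{
	unsigned int clk_khz = 200000;	/* 200 MHz memory clock, assumed */
	unsigned int cycles = (clk_khz * 7812 + clk_khz / 2) / 1000000;

	printf("%u cycles per refresh interval\n", cycles);	/* 1562 */
	return 0;
}
/* --- end editor's note --- */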
3273 static void config_pcie(struct adapter *adap)
3274 {
3275  static const u16 ack_lat[4][6] = {
3276  {237, 416, 559, 1071, 2095, 4143},
3277  {128, 217, 289, 545, 1057, 2081},
3278  {73, 118, 154, 282, 538, 1050},
3279  {67, 107, 86, 150, 278, 534}
3280  };
3281  static const u16 rpl_tmr[4][6] = {
3282  {711, 1248, 1677, 3213, 6285, 12429},
3283  {384, 651, 867, 1635, 3171, 6243},
3284  {219, 354, 462, 846, 1614, 3150},
3285  {201, 321, 258, 450, 834, 1602}
3286  };
3287 
3288  u16 val, devid;
3289  unsigned int log2_width, pldsize;
3290  unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3291 
3292  pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
3293  pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3294 
3295  pci_read_config_word(adap->pdev, 0x2, &devid);
3296  if (devid == 0x37) {
3297  pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
3298  val & ~PCI_EXP_DEVCTL_READRQ &
3299  ~PCI_EXP_DEVCTL_PAYLOAD);
3300  pldsize = 0;
3301  }
3302 
3303  pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
3304 
3305  fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3306  fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3307  G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3308  log2_width = fls(adap->params.pci.width) - 1;
3309  acklat = ack_lat[log2_width][pldsize];
3310  if (val & 1) /* check LOsEnable */
3311  acklat += fst_trn_tx * 4;
3312  rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3313 
3314  if (adap->params.rev == 0)
3315  t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3316  V_T3A_ACKLAT(M_T3A_ACKLAT),
3317  V_T3A_ACKLAT(acklat));
3318  else
3319  t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3320  V_ACKLAT(acklat));
3321 
3322  t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3323  V_REPLAYLMT(rpllmt));
3324 
3325  t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3326  t3_set_reg_field(adap, A_PCIE_CFG, 0,
3327  F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3328  F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3329 }
3330 
3331 /*
3332  * Initialize and configure T3 HW modules. This performs the
3333  * initialization steps that need to be done once after a card is reset.
3334  * MAC and PHY initialization is handled separately whenever a port is enabled.
3335  *
3336  * fw_params are passed to FW and their value is platform dependent. Only the
3337  * top 8 bits are available for use, the rest must be 0.
3338  */
3339 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3340 {
3341  int err = -EIO, attempts, i;
3342  const struct vpd_params *vpd = &adapter->params.vpd;
3343 
3344  if (adapter->params.rev > 0)
3345  calibrate_xgm_t3b(adapter);
3346  else if (calibrate_xgm(adapter))
3347  goto out_err;
3348 
3349  if (vpd->mclk) {
3350  partition_mem(adapter, &adapter->params.tp);
3351 
3352  if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3353  mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3354  mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3355  t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3356  adapter->params.mc5.nfilters,
3357  adapter->params.mc5.nroutes))
3358  goto out_err;
3359 
3360  for (i = 0; i < 32; i++)
3361  if (clear_sge_ctxt(adapter, i, F_CQ))
3362  goto out_err;
3363  }
3364 
3365  if (tp_init(adapter, &adapter->params.tp))
3366  goto out_err;
3367 
3368  t3_tp_set_coalescing_size(adapter,
3369  min(adapter->params.sge.max_pkt_size,
3370  MAX_RX_COALESCING_LEN), 1);
3371  t3_tp_set_max_rxsize(adapter,
3372  min(adapter->params.sge.max_pkt_size, 16384U));
3373  ulp_config(adapter, &adapter->params.tp);
3374 
3375  if (is_pcie(adapter))
3376  config_pcie(adapter);
3377  else
3378  t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3379  F_DMASTOPEN | F_CLIDECEN);
3380 
3381  if (adapter->params.rev == T3_REV_C)
3382  t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3383  F_CFG_CQE_SOP_MASK);
3384 
3385  t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3386  t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3387  t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3388  chan_init_hw(adapter, adapter->params.chan_map);
3389  t3_sge_init(adapter, &adapter->params.sge);
3390  t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3391 
3392  t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3393 
3394  t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3395  t3_write_reg(adapter, A_CIM_BOOT_CFG,
3396  V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3397  t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3398 
3399  attempts = 100;
3400  do { /* wait for uP to initialize */
3401  msleep(20);
3402  } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3403  if (!attempts) {
3404  CH_ERR(adapter, "uP initialization timed out\n");
3405  goto out_err;
3406  }
3407 
3408  err = 0;
3409 out_err:
3410  return err;
3411 }
3412 
3421 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3422 {
3423  static unsigned short speed_map[] = { 33, 66, 100, 133 };
3424  u32 pci_mode;
3425 
3426  if (pci_is_pcie(adapter->pdev)) {
3427  u16 val;
3428 
3429  p->variant = PCI_VARIANT_PCIE;
3430  pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3431  p->width = (val >> 4) & 0x3f;
3432  return;
3433  }
3434 
3435  pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3436  p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3437  p->width = (pci_mode & F_64BIT) ? 64 : 32;
3438  pci_mode = G_PCIXINITPAT(pci_mode);
3439  if (pci_mode == 0)
3440  p->variant = PCI_VARIANT_PCI;
3441  else if (pci_mode < 4)
3442  p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3443  else if (pci_mode < 8)
3444  p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3445  else
3446  p->variant = PCI_VARIANT_PCIX_266_MODE2;
3447 }
3448 
3458 static void init_link_config(struct link_config *lc, unsigned int caps)
3459 {
3460  lc->supported = caps;
3461  lc->requested_speed = lc->speed = SPEED_INVALID;
3462  lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3463  lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3464  if (lc->supported & SUPPORTED_Autoneg) {
3465  lc->advertising = lc->supported;
3466  lc->autoneg = AUTONEG_ENABLE;
3467  lc->requested_fc |= PAUSE_AUTONEG;
3468  } else {
3469  lc->advertising = 0;
3470  lc->autoneg = AUTONEG_DISABLE;
3471  }
3472 }
3473 
3481 static unsigned int mc7_calc_size(u32 cfg)
3482 {
3483  unsigned int width = G_WIDTH(cfg);
3484  unsigned int banks = !!(cfg & F_BKS) + 1;
3485  unsigned int org = !!(cfg & F_ORG) + 1;
3486  unsigned int density = G_DEN(cfg);
3487  unsigned int MBs = ((256 << density) * banks) / (org << width);
3488 
3489  return MBs << 20;
3490 }
3491 
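/* --- Editor's note (standalone illustration, not part of t3_hw.c) ---
 * mc7_calc_size() above decodes the MC7 configuration register into a
 * part size: (256 MB << density) * banks, divided by (org << width).
 * With hypothetical field values density = 1, two banks, org = 1 and a
 * width code of 1:
 */
#include <stdio.h>

int main(void)
{
	unsigned int width = 1, banks = 2, org = 1, density = 1;
	unsigned int mbs = ((256 << density) * banks) / (org << width);

	printf("%u MB = %u bytes\n", mbs, mbs << 20);	/* 512 MB */
	return 0;
}
/* --- end editor's note --- */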
3492 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3493  unsigned int base_addr, const char *name)
3494 {
3495  u32 cfg;
3496 
3497  mc7->adapter = adapter;
3498  mc7->name = name;
3499  mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3500  cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3501  mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3502  mc7->width = G_WIDTH(cfg);
3503 }
3504 
3505 static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3506 {
3507  u16 devid;
3508 
3509  mac->adapter = adapter;
3510  pci_read_config_word(adapter->pdev, 0x2, &devid);
3511 
3512  if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
3513  index = 0;
3514  mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3515  mac->nucast = 1;
3516 
3517  if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3518  t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3519  is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3520  t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3521  F_ENRGMII, 0);
3522  }
3523 }
3524 
3525 static void early_hw_init(struct adapter *adapter,
3526  const struct adapter_info *ai)
3527 {
3528  u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3529 
3530  mi1_init(adapter, ai);
3531  t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3532  V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3533  t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3534  ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3535  t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3536  t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3537 
3538  if (adapter->params.rev == 0 || !uses_xaui(adapter))
3539  val |= F_ENRGMII;
3540 
3541  /* Enable MAC clocks so we can access the registers */
3542  t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3543  t3_read_reg(adapter, A_XGM_PORT_CFG);
3544 
3545  val |= F_CLKDIVRESET_;
3546  t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3547  t3_read_reg(adapter, A_XGM_PORT_CFG);
3548  t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3549  t3_read_reg(adapter, A_XGM_PORT_CFG);
3550 }
3551 
3552 /*
3553  * Reset the adapter.
3554  * Older PCIe cards lose their config space during reset, PCI-X
3555  * ones don't.
3556  */
3557 int t3_reset_adapter(struct adapter *adapter)
3558 {
3559  int i, save_and_restore_pcie =
3560  adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3561  uint16_t devid = 0;
3562 
3563  if (save_and_restore_pcie)
3564  pci_save_state(adapter->pdev);
3565  t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3566 
3567  /*
3568  * Delay. Give the device some time to come fully out of reset.
3569  * XXX The delay time should be modified.
3570  */
3571  for (i = 0; i < 10; i++) {
3572  msleep(50);
3573  pci_read_config_word(adapter->pdev, 0x00, &devid);
3574  if (devid == 0x1425)
3575  break;
3576  }
3577 
3578  if (devid != 0x1425)
3579  return -1;
3580 
3581  if (save_and_restore_pcie)
3582  pci_restore_state(adapter->pdev);
3583  return 0;
3584 }
3585 
3586 static int init_parity(struct adapter *adap)
3587 {
3588  int i, err, addr;
3589 
3590  if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3591  return -EBUSY;
3592 
3593  for (err = i = 0; !err && i < 16; i++)
3594  err = clear_sge_ctxt(adap, i, F_EGRESS);
3595  for (i = 0xfff0; !err && i <= 0xffff; i++)
3596  err = clear_sge_ctxt(adap, i, F_EGRESS);
3597  for (i = 0; !err && i < SGE_QSETS; i++)
3598  err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3599  if (err)
3600  return err;
3601 
3602  t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3603  for (i = 0; i < 4; i++)
3604  for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3605  t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3606  F_IBQDBGWR | V_IBQDBGQID(i) |
3607  V_IBQDBGADDR(addr));
3608  err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3609  F_IBQDBGBUSY, 0, 2, 1);
3610  if (err)
3611  return err;
3612  }
3613  return 0;
3614 }
3615 
3616 /*
3617  * Initialize adapter SW state for the various HW modules, set initial values
3618  * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3619  * interface.
3620  */
3621 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3622  int reset)
3623 {
3624  int ret;
3625  unsigned int i, j = -1;
3626 
3627  get_pci_mode(adapter, &adapter->params.pci);
3628 
3629  adapter->params.info = ai;
3630  adapter->params.nports = ai->nports0 + ai->nports1;
3631  adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3632  adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3633  /*
3634  * We used to only run the "adapter check task" once a second if
3635  * we had PHYs which didn't support interrupts (we would check
3636  * their link status once a second). Now we check other conditions
3637  * in that routine which could potentially impose a very high
3638  * interrupt load on the system. As such, we now always scan the
3639  * adapter state once a second ...
3640  */
3641  adapter->params.linkpoll_period = 10;
3642  adapter->params.stats_update_period = is_10G(adapter) ?
3643  MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3644  adapter->params.pci.vpd_cap_addr =
3645  pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3646  ret = get_vpd_params(adapter, &adapter->params.vpd);
3647  if (ret < 0)
3648  return ret;
3649 
3650  if (reset && t3_reset_adapter(adapter))
3651  return -1;
3652 
3653  t3_sge_prep(adapter, &adapter->params.sge);
3654 
3655  if (adapter->params.vpd.mclk) {
3656  struct tp_params *p = &adapter->params.tp;
3657 
3658  mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3659  mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3660  mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3661 
3662  p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3663  p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3664  p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3665  p->cm_size = t3_mc7_size(&adapter->cm);
3666  p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3667  p->chan_tx_size = p->pmtx_size / p->nchan;
3668  p->rx_pg_size = 64 * 1024;
3669  p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3670  p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3671  p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3672  p->ntimer_qs = p->cm_size >= (128 << 20) ||
3673  adapter->params.rev > 0 ? 12 : 6;
3674  }
3675 
3676  adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3677  t3_mc7_size(&adapter->pmtx) &&
3678  t3_mc7_size(&adapter->cm);
3679 
3680  if (is_offload(adapter)) {
3681  adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3682  adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3683  DEFAULT_NFILTERS : 0;
3684  adapter->params.mc5.nroutes = 0;
3685  t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3686 
3687  init_mtus(adapter->params.mtus);
3688  init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3689  }
3690 
3691  early_hw_init(adapter, ai);
3692  ret = init_parity(adapter);
3693  if (ret)
3694  return ret;
3695 
3696  for_each_port(adapter, i) {
3697  u8 hw_addr[6];
3698  const struct port_type_info *pti;
3699  struct port_info *p = adap2pinfo(adapter, i);
3700 
3701  while (!adapter->params.vpd.port_type[++j])
3702  ;
3703 
3704  pti = &port_types[adapter->params.vpd.port_type[j]];
3705  if (!pti->phy_prep) {
3706  CH_ALERT(adapter, "Invalid port type index %d\n",
3707  adapter->params.vpd.port_type[j]);
3708  return -EINVAL;
3709  }
3710 
3711  p->phy.mdio.dev = adapter->port[i];
3712  ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3713  ai->mdio_ops);
3714  if (ret)
3715  return ret;
3716  mac_prep(&p->mac, adapter, j);
3717 
3718  /*
3719  * The VPD EEPROM stores the base Ethernet address for the
3720  * card. A port's address is derived from the base by adding
3721  * the port's index to the base's low octet.
3722  */
3723  memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3724  hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3725 
3726  memcpy(adapter->port[i]->dev_addr, hw_addr,
3727  ETH_ALEN);
3728  memcpy(adapter->port[i]->perm_addr, hw_addr,
3729  ETH_ALEN);
3730  init_link_config(&p->link_config, p->phy.caps);
3731  p->phy.ops->power_down(&p->phy, 1);
3732 
3733  /*
3734  * If the PHY doesn't support interrupts for link status
3735  * changes, schedule a scan of the adapter links at least
3736  * once a second.
3737  */
3738  if (!(p->phy.caps & SUPPORTED_IRQ) &&
3739  adapter->params.linkpoll_period > 10)
3740  adapter->params.linkpoll_period = 10;
3741  }
3742 
3743  return 0;
3744 }
3745 
3746 void t3_led_ready(struct adapter *adapter)
3747 {
3748  t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3749  F_GPIO0_OUT_VAL);
3750 }
3751 
3752 int t3_replay_prep_adapter(struct adapter *adapter)
3753 {
3754  const struct adapter_info *ai = adapter->params.info;
3755  unsigned int i, j = -1;
3756  int ret;
3757 
3758  early_hw_init(adapter, ai);
3759  ret = init_parity(adapter);
3760  if (ret)
3761  return ret;
3762 
3763  for_each_port(adapter, i) {
3764  const struct port_type_info *pti;
3765  struct port_info *p = adap2pinfo(adapter, i);
3766 
3767  while (!adapter->params.vpd.port_type[++j])
3768  ;
3769 
3770  pti = &port_types[adapter->params.vpd.port_type[j]];
3771  ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3772  if (ret)
3773  return ret;
3774  p->phy.ops->power_down(&p->phy, 1);
3775  }
3776 
3777  return 0;
3778 }
3779