Linux Kernel 3.7.1
t4_hw.c
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses. You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  * Redistribution and use in source and binary forms, with or
13  * without modification, are permitted provided that the following
14  * conditions are met:
15  *
16  * - Redistributions of source code must retain the above
17  * copyright notice, this list of conditions and the following
18  * disclaimer.
19  *
20  * - Redistributions in binary form must reproduce the above
21  * copyright notice, this list of conditions and the following
22  * disclaimer in the documentation and/or other materials
23  * provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/init.h>
36 #include <linux/delay.h>
37 #include "cxgb4.h"
38 #include "t4_regs.h"
39 #include "t4fw_api.h"
40 
56 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57  int polarity, int attempts, int delay, u32 *valp)
58 {
59  while (1) {
60  u32 val = t4_read_reg(adapter, reg);
61 
62  if (!!(val & mask) == polarity) {
63  if (valp)
64  *valp = val;
65  return 0;
66  }
67  if (--attempts == 0)
68  return -EAGAIN;
69  if (delay)
70  udelay(delay);
71  }
72 }
73 
74 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75  int polarity, int attempts, int delay)
76 {
77  return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78  delay, NULL);
79 }
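
A typical caller polls a self-clearing busy bit with these helpers. A minimal sketch (illustrative, not part of t4_hw.c; SF_OP and BUSY are the serial-flash register and bit used later in this file):

static inline int example_wait_sf_idle(struct adapter *adapter)
{
	/* wait for BUSY to read 0, retrying up to 10 times, 5 us apart */
	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, 10, 5);
}
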
80 
91 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92  u32 val)
93 {
94  u32 v = t4_read_reg(adapter, addr) & ~mask;
95 
96  t4_write_reg(adapter, addr, v | val);
97  (void) t4_read_reg(adapter, addr); /* flush */
98 }
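
For example (an illustrative sketch mirroring t4_intr_enable() later in this file), a PF's slot in PL_INT_MAP0 can be set without disturbing the other PFs:

static inline void example_map_pf_int(struct adapter *adapter, unsigned int pf)
{
	/* mask 0: clear nothing; set only this PF's bit */
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}
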
99 
112 static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113  unsigned int data_reg, u32 *vals,
114  unsigned int nregs, unsigned int start_idx)
115 {
116  while (nregs--) {
117  t4_write_reg(adap, addr_reg, start_idx);
118  *vals++ = t4_read_reg(adap, data_reg);
119  start_idx++;
120  }
121 }
122 
135 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136  unsigned int data_reg, const u32 *vals,
137  unsigned int nregs, unsigned int start_idx)
138 {
139  while (nregs--) {
140  t4_write_reg(adap, addr_reg, start_idx++);
141  t4_write_reg(adap, data_reg, *vals++);
142  }
143 }
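
Usage sketch (illustrative only): t4_tp_get_tcp_stats() below reads a run of TP MIB counters through exactly this address/data register pair:

static inline void example_read_tp_mib(struct adapter *adap, u32 *buf,
				       unsigned int first_reg, unsigned int n)
{
	/* latch each index into TP_MIB_INDEX, then read TP_MIB_DATA */
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, buf, n, first_reg);
}
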
144 
145 /*
146  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
147  */
148 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
149  u32 mbox_addr)
150 {
151  for ( ; nflit; nflit--, mbox_addr += 8)
152  *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
153 }
154 
155 /*
156  * Handle a FW assertion reported in a mailbox.
157  */
158 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
159 {
160  struct fw_debug_cmd asrt;
161 
162  get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
163  dev_alert(adap->pdev_dev,
164  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
165  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
166  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
167 }
168 
169 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
170 {
171  dev_err(adap->pdev_dev,
172  "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
173  (unsigned long long)t4_read_reg64(adap, data_reg),
174  (unsigned long long)t4_read_reg64(adap, data_reg + 8),
175  (unsigned long long)t4_read_reg64(adap, data_reg + 16),
176  (unsigned long long)t4_read_reg64(adap, data_reg + 24),
177  (unsigned long long)t4_read_reg64(adap, data_reg + 32),
178  (unsigned long long)t4_read_reg64(adap, data_reg + 40),
179  (unsigned long long)t4_read_reg64(adap, data_reg + 48),
180  (unsigned long long)t4_read_reg64(adap, data_reg + 56));
181 }
182 
205 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
206  void *rpl, bool sleep_ok)
207 {
208  static const int delay[] = {
209  1, 1, 3, 5, 10, 10, 20, 50, 100, 200
210  };
211 
212  u32 v;
213  u64 res;
214  int i, ms, delay_idx;
215  const __be64 *p = cmd;
216  u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
217  u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
218 
219  if ((size & 15) || size > MBOX_LEN)
220  return -EINVAL;
221 
222  /*
223  * If the device is off-line, as in EEH, commands will time out.
224  * Fail them early so we don't waste time waiting.
225  */
226  if (adap->pdev->error_state != pci_channel_io_normal)
227  return -EIO;
228 
229  v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
230  for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
231  v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
232 
233  if (v != MBOX_OWNER_DRV)
234  return v ? -EBUSY : -ETIMEDOUT;
235 
236  for (i = 0; i < size; i += 8)
237  t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
238 
239  t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
240  t4_read_reg(adap, ctl_reg); /* flush write */
241 
242  delay_idx = 0;
243  ms = delay[0];
244 
245  for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
246  if (sleep_ok) {
247  ms = delay[delay_idx]; /* last element may repeat */
248  if (delay_idx < ARRAY_SIZE(delay) - 1)
249  delay_idx++;
250  msleep(ms);
251  } else
252  mdelay(ms);
253 
254  v = t4_read_reg(adap, ctl_reg);
255  if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
256  if (!(v & MBMSGVALID)) {
257  t4_write_reg(adap, ctl_reg, 0);
258  continue;
259  }
260 
261  res = t4_read_reg64(adap, data_reg);
262  if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
263  fw_asrt(adap, data_reg);
264  res = FW_CMD_RETVAL(EIO);
265  } else if (rpl)
266  get_mbox_rpl(adap, rpl, size / 8, data_reg);
267 
268  if (FW_CMD_RETVAL_GET((int)res))
269  dump_mbox(adap, mbox, data_reg);
270  t4_write_reg(adap, ctl_reg, 0);
271  return -FW_CMD_RETVAL_GET((int)res);
272  }
273  }
274 
275  dump_mbox(adap, mbox, data_reg);
276  dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
277  *(const u8 *)cmd, mbox);
278  return -ETIMEDOUT;
279 }
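
A sketch of a typical caller (illustrative, assuming the fw_reset_cmd layout from t4fw_api.h and the PIORST/PIORSTMODE flags from t4_regs.h; t4_wr_mbox() is the sleeping wrapper around t4_wr_mbox_meat() declared in cxgb4.h):

static int example_fw_reset(struct adapter *adap, unsigned int mbox)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
			      FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	c.val = htonl(PIORST | PIORSTMODE);	/* request a full chip reset */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
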
280 
292 int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
293 {
294  int i;
295 
296  if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
297  return -EBUSY;
298  t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
299  t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
300  t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
301  t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
302  BIST_CMD_GAP(1));
303  i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
304  if (i)
305  return i;
306 
307 #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
308 
309  for (i = 15; i >= 0; i--)
310  *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
311  if (ecc)
312  *ecc = t4_read_reg64(adap, MC_DATA(16));
313 #undef MC_DATA
314  return 0;
315 }
316 
329 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330 {
331  int i;
332 
333  idx *= EDC_STRIDE;
334  if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
335  return -EBUSY;
336  t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
337  t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
338  t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
339  t4_write_reg(adap, EDC_BIST_CMD + idx,
340  BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
341  i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
342  if (i)
343  return i;
344 
345 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
346 
347  for (i = 15; i >= 0; i--)
348  *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
349  if (ecc)
350  *ecc = t4_read_reg64(adap, EDC_DATA(16));
351 #undef EDC_DATA
352  return 0;
353 }
354 
355 /*
356  * t4_mem_win_rw - read/write memory through PCIE memory window
357  * @adap: the adapter
358  * @addr: address of first byte requested
359  * @data: MEMWIN0_APERTURE bytes of data containing the requested address
360  * @dir: direction of transfer 1 => read, 0 => write
361  *
362  * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
363  * MEMWIN0_APERTURE-byte-aligned address that covers the requested
364  * address @addr.
365  */
366 static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367 {
368  int i;
369 
370  /*
371  * Setup offset into PCIE memory window. Address must be a
372  * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
373  * ensure that changes propagate before we attempt to use the new
374  * values.)
375  */
376  t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377  addr & ~(MEMWIN0_APERTURE - 1));
378  t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379 
380  /* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
381  for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
382  if (dir)
383  *data++ = (__force __be32) t4_read_reg(adap,
384  (MEMWIN0_BASE + i));
385  else
386  t4_write_reg(adap, (MEMWIN0_BASE + i),
387  (__force u32) *data++);
388  }
389 
390  return 0;
391 }
392 
409 static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
410  __be32 *buf, int dir)
411 {
412  u32 pos, start, end, offset, memoffset;
413  int ret = 0;
414  __be32 *data;
415 
416  /*
417  * Argument sanity checks ...
418  */
419  if ((addr & 0x3) || (len & 0x3))
420  return -EINVAL;
421 
422  data = vmalloc(MEMWIN0_APERTURE);
423  if (!data)
424  return -ENOMEM;
425 
426  /*
427  * Offset into the region of memory which is being accessed
428  * MEM_EDC0 = 0
429  * MEM_EDC1 = 1
430  * MEM_MC = 2
431  */
432  memoffset = (mtype * (5 * 1024 * 1024));
433 
434  /* Determine the PCIE_MEM_ACCESS_OFFSET */
435  addr = addr + memoffset;
436 
437  /*
438  * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
439  * at a time so we need to round down the start and round up the end.
440  * We'll start copying out of the first line at (addr - start) a word
441  * at a time.
442  */
443  start = addr & ~(MEMWIN0_APERTURE-1);
444  end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
445  offset = (addr - start)/sizeof(__be32);
446 
447  for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
448 
449  /*
450  * If we're writing, copy the data from the caller's memory
451  * buffer
452  */
453  if (!dir) {
454  /*
455  * If we're doing a partial write, then we need to do
456  * a read-modify-write ...
457  */
458  if (offset || len < MEMWIN0_APERTURE) {
459  ret = t4_mem_win_rw(adap, pos, data, 1);
460  if (ret)
461  break;
462  }
463  while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
464  len > 0) {
465  data[offset++] = *buf++;
466  len -= sizeof(__be32);
467  }
468  }
469 
470  /*
471  * Transfer a block of memory and bail if there's an error.
472  */
473  ret = t4_mem_win_rw(adap, pos, data, dir);
474  if (ret)
475  break;
476 
477  /*
478  * If we're reading, copy the data into the caller's memory
479  * buffer.
480  */
481  if (dir)
482  while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
483  len > 0) {
484  *buf++ = data[offset++];
485  len -= sizeof(__be32);
486  }
487  }
488 
489  vfree(data);
490  return ret;
491 }
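
For instance (an illustrative sketch), a word-aligned read from EDC0 just passes MEM_EDC0 and dir = 1; the aperture rounding and partial-window copying above are handled internally:

static inline int example_read_edc0_word(struct adapter *adap, u32 addr,
					 __be32 *word)
{
	/* addr must be 4-byte aligned; dir = 1 selects a read */
	return t4_memory_rw(adap, MEM_EDC0, addr, sizeof(*word), word, 1);
}
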
492 
493 int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
494  __be32 *buf)
495 {
496  return t4_memory_rw(adap, mtype, addr, len, buf, 0);
497 }
498 
499 #define EEPROM_STAT_ADDR 0x7bfc
500 #define VPD_BASE 0
501 #define VPD_LEN 512
502 
510 int t4_seeprom_wp(struct adapter *adapter, bool enable)
511 {
512  unsigned int v = enable ? 0xc : 0;
513  int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
514  return ret < 0 ? ret : 0;
515 }
516 
524 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
525 {
526  u32 cclk_param, cclk_val;
527  int i, ret;
528  int ec, sn;
529  u8 *vpd, csum;
530  unsigned int vpdr_len, kw_offset, id_len;
531 
532  vpd = vmalloc(VPD_LEN);
533  if (!vpd)
534  return -ENOMEM;
535 
536  ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd);
537  if (ret < 0)
538  goto out;
539 
540  if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
541  dev_err(adapter->pdev_dev, "missing VPD ID string\n");
542  ret = -EINVAL;
543  goto out;
544  }
545 
546  id_len = pci_vpd_lrdt_size(vpd);
547  if (id_len > ID_LEN)
548  id_len = ID_LEN;
549 
550  i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
551  if (i < 0) {
552  dev_err(adapter->pdev_dev, "missing VPD-R section\n");
553  ret = -EINVAL;
554  goto out;
555  }
556 
557  vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
558  kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
559  if (vpdr_len + kw_offset > VPD_LEN) {
560  dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
561  ret = -EINVAL;
562  goto out;
563  }
564 
565 #define FIND_VPD_KW(var, name) do { \
566  var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
567  if (var < 0) { \
568  dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
569  ret = -EINVAL; \
570  goto out; \
571  } \
572  var += PCI_VPD_INFO_FLD_HDR_SIZE; \
573 } while (0)
574 
575  FIND_VPD_KW(i, "RV");
576  for (csum = 0; i >= 0; i--)
577  csum += vpd[i];
578 
579  if (csum) {
580  dev_err(adapter->pdev_dev,
581  "corrupted VPD EEPROM, actual csum %u\n", csum);
582  ret = -EINVAL;
583  goto out;
584  }
585 
586  FIND_VPD_KW(ec, "EC");
587  FIND_VPD_KW(sn, "SN");
588 #undef FIND_VPD_KW
589 
590  memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
591  strim(p->id);
592  memcpy(p->ec, vpd + ec, EC_LEN);
593  strim(p->ec);
594  i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
595  memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
596  strim(p->sn);
597 
598  /*
599  * Ask firmware for the Core Clock since it knows how to translate the
600  * Reference Clock ('V2') VPD field into a Core Clock value ...
601  */
602  cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
603  FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
604  ret = t4_query_params(adapter, adapter->mbox, 0, 0,
605  1, &cclk_param, &cclk_val);
606 
607 out:
608  vfree(vpd);
609  if (ret)
610  return ret;
611  p->cclk = cclk_val;
612 
613  return 0;
614 }
615 
616 /* serial flash and firmware constants */
617 enum {
618  SF_ATTEMPTS = 10, /* max retries for SF operations */
619 
620  /* flash command opcodes */
621  SF_PROG_PAGE = 2, /* program page */
622  SF_WR_DISABLE = 4, /* disable writes */
623  SF_RD_STATUS = 5, /* read status register */
624  SF_WR_ENABLE = 6, /* enable writes */
625  SF_RD_DATA_FAST = 0xb, /* read flash */
626  SF_RD_ID = 0x9f, /* read ID */
627  SF_ERASE_SECTOR = 0xd8, /* erase sector */
628 
629  FW_MAX_SIZE = 512 * 1024,
630 };
631 
644 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
645  int lock, u32 *valp)
646 {
647  int ret;
648 
649  if (!byte_cnt || byte_cnt > 4)
650  return -EINVAL;
651  if (t4_read_reg(adapter, SF_OP) & BUSY)
652  return -EBUSY;
653  cont = cont ? SF_CONT : 0;
654  lock = lock ? SF_LOCK : 0;
655  t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
656  ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
657  if (!ret)
658  *valp = t4_read_reg(adapter, SF_DATA);
659  return ret;
660 }
661 
674 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
675  int lock, u32 val)
676 {
677  if (!byte_cnt || byte_cnt > 4)
678  return -EINVAL;
679  if (t4_read_reg(adapter, SF_OP) & BUSY)
680  return -EBUSY;
681  cont = cont ? SF_CONT : 0;
682  lock = lock ? SF_LOCK : 0;
683  t4_write_reg(adapter, SF_DATA, val);
684  t4_write_reg(adapter, SF_OP, lock |
685  cont | BYTECNT(byte_cnt - 1) | OP_WR);
686  return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
687 }
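
A sketch of how the two primitives combine (illustrative; the flag usage follows the pattern of t4_read_flash() below): send an opcode with cont set to keep the burst open, read the reply with cont clear to end it, then clear SF_OP to release the lock.

static int example_read_flash_id(struct adapter *adapter, u32 *id)
{
	int ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);  /* send opcode */

	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, id);     /* 3 JEDEC ID bytes */
	t4_write_reg(adapter, SF_OP, 0);                  /* unlock SF */
	return ret;
}
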
688 
697 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
698 {
699  int ret;
700  u32 status;
701 
702  while (1) {
703  if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
704  (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
705  return ret;
706  if (!(status & 1))
707  return 0;
708  if (--attempts == 0)
709  return -EAGAIN;
710  if (delay)
711  msleep(delay);
712  }
713 }
714 
728 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
729  unsigned int nwords, u32 *data, int byte_oriented)
730 {
731  int ret;
732 
733  if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
734  return -EINVAL;
735 
736  addr = swab32(addr) | SF_RD_DATA_FAST;
737 
738  if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
739  (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
740  return ret;
741 
742  for ( ; nwords; nwords--, data++) {
743  ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
744  if (nwords == 1)
745  t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
746  if (ret)
747  return ret;
748  if (byte_oriented)
749  *data = (__force __u32) (htonl(*data));
750  }
751  return 0;
752 }
753 
764 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
765  unsigned int n, const u8 *data)
766 {
767  int ret;
768  u32 buf[64];
769  unsigned int i, c, left, val, offset = addr & 0xff;
770 
771  if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
772  return -EINVAL;
773 
774  val = swab32(addr) | SF_PROG_PAGE;
775 
776  if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
777  (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
778  goto unlock;
779 
780  for (left = n; left; left -= c) {
781  c = min(left, 4U);
782  for (val = 0, i = 0; i < c; ++i)
783  val = (val << 8) + *data++;
784 
785  ret = sf1_write(adapter, c, c != left, 1, val);
786  if (ret)
787  goto unlock;
788  }
789  ret = flash_wait_op(adapter, 8, 1);
790  if (ret)
791  goto unlock;
792 
793  t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
794 
795  /* Read the page to verify the write succeeded */
796  ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
797  if (ret)
798  return ret;
799 
800  if (memcmp(data - n, (u8 *)buf + offset, n)) {
801  dev_err(adapter->pdev_dev,
802  "failed to correctly write the flash page at %#x\n",
803  addr);
804  return -EIO;
805  }
806  return 0;
807 
808 unlock:
809  t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
810  return ret;
811 }
812 
820 static int get_fw_version(struct adapter *adapter, u32 *vers)
821 {
822  return t4_read_flash(adapter, adapter->params.sf_fw_start +
823  offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
824 }
825 
833 static int get_tp_version(struct adapter *adapter, u32 *vers)
834 {
835  return t4_read_flash(adapter, adapter->params.sf_fw_start +
836  offsetof(struct fw_hdr, tp_microcode_ver),
837  1, vers, 0);
838 }
839 
849 int t4_check_fw_version(struct adapter *adapter)
850 {
851  u32 api_vers[2];
852  int ret, major, minor, micro;
853 
854  ret = get_fw_version(adapter, &adapter->params.fw_vers);
855  if (!ret)
856  ret = get_tp_version(adapter, &adapter->params.tp_vers);
857  if (!ret)
858  ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
859  offsetof(struct fw_hdr, intfver_nic),
860  2, api_vers, 1);
861  if (ret)
862  return ret;
863 
864  major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
865  minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
866  micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
867  memcpy(adapter->params.api_vers, api_vers,
868  sizeof(adapter->params.api_vers));
869 
870  if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
871  dev_err(adapter->pdev_dev,
872  "card FW has major version %u, driver wants %u\n",
873  major, FW_VERSION_MAJOR);
874  return -EINVAL;
875  }
876 
877  if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
878  return 0; /* perfect match */
879 
880  /* Minor/micro version mismatch. Report it but often it's OK. */
881  return 1;
882 }
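
The three-way return value is easy to misread; a sketch of the intended caller-side handling (illustrative only):

static int example_check_fw(struct adapter *adap)
{
	int ret = t4_check_fw_version(adap);

	if (ret < 0)		/* flash read failed or major mismatch */
		return ret;
	if (ret == 1)		/* minor/micro mismatch: usually tolerable */
		dev_warn(adap->pdev_dev, "FW version mismatch, continuing\n");
	return 0;
}
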
883 
892 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
893 {
894  int ret = 0;
895 
896  while (start <= end) {
897  if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
898  (ret = sf1_write(adapter, 4, 0, 1,
899  SF_ERASE_SECTOR | (start << 8))) != 0 ||
900  (ret = flash_wait_op(adapter, 14, 500)) != 0) {
901  dev_err(adapter->pdev_dev,
902  "erase of flash sector %d failed, error %d\n",
903  start, ret);
904  break;
905  }
906  start++;
907  }
908  t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
909  return ret;
910 }
911 
919 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
920 {
921  if (adapter->params.sf_size == 0x100000)
922  return FLASH_FPGA_CFG_START;
923  else
924  return FLASH_CFG_START;
925 }
926 
935 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
936 {
937  int ret, i, n;
938  unsigned int addr;
939  unsigned int flash_cfg_start_sec;
940  unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
941 
942  addr = t4_flash_cfg_addr(adap);
943  flash_cfg_start_sec = addr / SF_SEC_SIZE;
944 
945  if (size > FLASH_CFG_MAX_SIZE) {
946  dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
947  FLASH_CFG_MAX_SIZE);
948  return -EFBIG;
949  }
950 
951  i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
952  sf_sec_size);
953  ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
954  flash_cfg_start_sec + i - 1);
955  /*
956  * If size == 0 then we're simply erasing the FLASH sectors associated
957  * with the on-adapter Firmware Configuration File.
958  */
959  if (ret || size == 0)
960  goto out;
961 
962  /* this will write to the flash up to SF_PAGE_SIZE at a time */
963  for (i = 0; i < size; i += SF_PAGE_SIZE) {
964  if ((size - i) < SF_PAGE_SIZE)
965  n = size - i;
966  else
967  n = SF_PAGE_SIZE;
968  ret = t4_write_flash(adap, addr, n, cfg_data);
969  if (ret)
970  goto out;
971 
972  addr += SF_PAGE_SIZE;
973  cfg_data += SF_PAGE_SIZE;
974  }
975 
976 out:
977  if (ret)
978  dev_err(adap->pdev_dev, "config file %s failed %d\n",
979  (size == 0 ? "clear" : "download"), ret);
980  return ret;
981 }
982 
991 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
992 {
993  u32 csum;
994  int ret, addr;
995  unsigned int i;
996  u8 first_page[SF_PAGE_SIZE];
997  const __be32 *p = (const __be32 *)fw_data;
998  const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
999  unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1000  unsigned int fw_img_start = adap->params.sf_fw_start;
1001  unsigned int fw_start_sec = fw_img_start / sf_sec_size;
1002 
1003  if (!size) {
1004  dev_err(adap->pdev_dev, "FW image has no data\n");
1005  return -EINVAL;
1006  }
1007  if (size & 511) {
1008  dev_err(adap->pdev_dev,
1009  "FW image size not multiple of 512 bytes\n");
1010  return -EINVAL;
1011  }
1012  if (ntohs(hdr->len512) * 512 != size) {
1013  dev_err(adap->pdev_dev,
1014  "FW image size differs from size in FW header\n");
1015  return -EINVAL;
1016  }
1017  if (size > FW_MAX_SIZE) {
1018  dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1019  FW_MAX_SIZE);
1020  return -EFBIG;
1021  }
1022 
1023  for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1024  csum += ntohl(p[i]);
1025 
1026  if (csum != 0xffffffff) {
1027  dev_err(adap->pdev_dev,
1028  "corrupted firmware image, checksum %#x\n", csum);
1029  return -EINVAL;
1030  }
1031 
1032  i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1033  ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1034  if (ret)
1035  goto out;
1036 
1037  /*
1038  * We write the correct version at the end so the driver can see a bad
1039  * version if the FW write fails. Start by writing a copy of the
1040  * first page with a bad version.
1041  */
1042  memcpy(first_page, fw_data, SF_PAGE_SIZE);
1043  ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1044  ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
1045  if (ret)
1046  goto out;
1047 
1048  addr = fw_img_start;
1049  for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1050  addr += SF_PAGE_SIZE;
1051  fw_data += SF_PAGE_SIZE;
1052  ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1053  if (ret)
1054  goto out;
1055  }
1056 
1057  ret = t4_write_flash(adap,
1058  fw_img_start + offsetof(struct fw_hdr, fw_ver),
1059  sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1060 out:
1061  if (ret)
1062  dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1063  ret);
1064  return ret;
1065 }
1066 
1067 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1068  FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1069 
1083 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1084  struct link_config *lc)
1085 {
1086  struct fw_port_cmd c;
1087  unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
1088 
1089  lc->link_ok = 0;
1090  if (lc->requested_fc & PAUSE_RX)
1091  fc |= FW_PORT_CAP_FC_RX;
1092  if (lc->requested_fc & PAUSE_TX)
1093  fc |= FW_PORT_CAP_FC_TX;
1094 
1095  memset(&c, 0, sizeof(c));
1096  c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1097  FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1098  c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1099  FW_LEN16(c));
1100 
1101  if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1102  c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1103  lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1104  } else if (lc->autoneg == AUTONEG_DISABLE) {
1105  c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1106  lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1107  } else
1108  c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1109 
1110  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1111 }
1112 
1121 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1122 {
1123  struct fw_port_cmd c;
1124 
1125  memset(&c, 0, sizeof(c));
1126  c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
1127  FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
1128  c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1129  FW_LEN16(c));
1130  c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1131  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1132 }
1133 
1134 typedef void (*int_handler_t)(struct adapter *adap);
1135 
1136 struct intr_info {
1137  unsigned int mask; /* bits to check in interrupt status */
1138  const char *msg; /* message to print or NULL */
1139  short stat_idx; /* stat counter to increment or -1 */
1140  unsigned short fatal; /* whether the condition reported is fatal */
1141  int_handler_t int_handler; /* platform-specific int handler */
1142 };
1143 
1157 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1158  const struct intr_info *acts)
1159 {
1160  int fatal = 0;
1161  unsigned int mask = 0;
1162  unsigned int status = t4_read_reg(adapter, reg);
1163 
1164  for ( ; acts->mask; ++acts) {
1165  if (!(status & acts->mask))
1166  continue;
1167  if (acts->fatal) {
1168  fatal++;
1169  dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1170  status & acts->mask);
1171  } else if (acts->msg && printk_ratelimit())
1172  dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1173  status & acts->mask);
1174  if (acts->int_handler)
1175  acts->int_handler(adapter);
1176  mask |= acts->mask;
1177  }
1178  status &= mask;
1179  if (status) /* clear processed interrupts */
1180  t4_write_reg(adapter, reg, status);
1181  return fatal;
1182 }
1183 
1184 /*
1185  * Interrupt handler for the PCIE module.
1186  */
1187 static void pcie_intr_handler(struct adapter *adapter)
1188 {
1189  static const struct intr_info sysbus_intr_info[] = {
1190  { RNPP, "RXNP array parity error", -1, 1 },
1191  { RPCP, "RXPC array parity error", -1, 1 },
1192  { RCIP, "RXCIF array parity error", -1, 1 },
1193  { RCCP, "Rx completions control array parity error", -1, 1 },
1194  { RFTP, "RXFT array parity error", -1, 1 },
1195  { 0 }
1196  };
1197  static const struct intr_info pcie_port_intr_info[] = {
1198  { TPCP, "TXPC array parity error", -1, 1 },
1199  { TNPP, "TXNP array parity error", -1, 1 },
1200  { TFTP, "TXFT array parity error", -1, 1 },
1201  { TCAP, "TXCA array parity error", -1, 1 },
1202  { TCIP, "TXCIF array parity error", -1, 1 },
1203  { RCAP, "RXCA array parity error", -1, 1 },
1204  { OTDD, "outbound request TLP discarded", -1, 1 },
1205  { RDPE, "Rx data parity error", -1, 1 },
1206  { TDUE, "Tx uncorrectable data error", -1, 1 },
1207  { 0 }
1208  };
1209  static const struct intr_info pcie_intr_info[] = {
1210  { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1211  { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1212  { MSIDATAPERR, "MSI data parity error", -1, 1 },
1213  { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1214  { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1215  { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1216  { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1217  { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1218  { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1219  { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1220  { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1221  { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1222  { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1223  { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1224  { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1225  { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1226  { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1227  { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1228  { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1229  { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1230  { FIDPERR, "PCI FID parity error", -1, 1 },
1231  { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1232  { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1233  { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1234  { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1235  { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1236  { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1237  { PCIESINT, "PCI core secondary fault", -1, 1 },
1238  { PCIEPINT, "PCI core primary fault", -1, 1 },
1239  { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1240  { 0 }
1241  };
1242 
1243  int fat;
1244 
1245  fat = t4_handle_intr_status(adapter,
1246  PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1247  sysbus_intr_info) +
1248  t4_handle_intr_status(adapter,
1249  PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1250  pcie_port_intr_info) +
1251  t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1252  if (fat)
1253  t4_fatal_err(adapter);
1254 }
1255 
1256 /*
1257  * TP interrupt handler.
1258  */
1259 static void tp_intr_handler(struct adapter *adapter)
1260 {
1261  static const struct intr_info tp_intr_info[] = {
1262  { 0x3fffffff, "TP parity error", -1, 1 },
1263  { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1264  { 0 }
1265  };
1266 
1267  if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1268  t4_fatal_err(adapter);
1269 }
1270 
1271 /*
1272  * SGE interrupt handler.
1273  */
1274 static void sge_intr_handler(struct adapter *adapter)
1275 {
1276  u64 v;
1277 
1278  static const struct intr_info sge_intr_info[] = {
1280  "SGE received CPL exceeding IQE size", -1, 1 },
1282  "SGE GTS CIDX increment too large", -1, 0 },
1283  { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1284  { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1285  { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1286  { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1288  "SGE IQID > 1023 received CPL for FL", -1, 0 },
1289  { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1290  0 },
1291  { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1292  0 },
1293  { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1294  0 },
1295  { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1296  0 },
1298  "SGE too many priority ingress contexts", -1, 0 },
1300  "SGE too many priority egress contexts", -1, 0 },
1301  { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1302  { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1303  { 0 }
1304  };
1305 
1306  v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1307  ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1308  if (v) {
1309  dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1310  (unsigned long long)v);
1311  t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1312  t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1313  }
1314 
1315  if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1316  v != 0)
1317  t4_fatal_err(adapter);
1318 }
1319 
1320 /*
1321  * CIM interrupt handler.
1322  */
1323 static void cim_intr_handler(struct adapter *adapter)
1324 {
1325  static const struct intr_info cim_intr_info[] = {
1326  { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1327  { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1328  { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1329  { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1330  { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1331  { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1332  { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1333  { 0 }
1334  };
1335  static const struct intr_info cim_upintr_info[] = {
1336  { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1337  { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1338  { ILLWRINT, "CIM illegal write", -1, 1 },
1339  { ILLRDINT, "CIM illegal read", -1, 1 },
1340  { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1341  { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1342  { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1343  { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1344  { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1345  { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1346  { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1347  { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1348  { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1349  { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1350  { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1351  { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1352  { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1353  { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1354  { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1355  { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1356  { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1357  { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1358  { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1359  { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1360  { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1361  { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1362  { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1363  { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1364  { 0 }
1365  };
1366 
1367  int fat;
1368 
1369  fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1370  cim_intr_info) +
1371  t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1372  cim_upintr_info);
1373  if (fat)
1374  t4_fatal_err(adapter);
1375 }
1376 
1377 /*
1378  * ULP RX interrupt handler.
1379  */
1380 static void ulprx_intr_handler(struct adapter *adapter)
1381 {
1382  static const struct intr_info ulprx_intr_info[] = {
1383  { 0x1800000, "ULPRX context error", -1, 1 },
1384  { 0x7fffff, "ULPRX parity error", -1, 1 },
1385  { 0 }
1386  };
1387 
1388  if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1389  t4_fatal_err(adapter);
1390 }
1391 
1392 /*
1393  * ULP TX interrupt handler.
1394  */
1395 static void ulptx_intr_handler(struct adapter *adapter)
1396 {
1397  static const struct intr_info ulptx_intr_info[] = {
1398  { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1399  0 },
1400  { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1401  0 },
1402  { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1403  0 },
1404  { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1405  0 },
1406  { 0xfffffff, "ULPTX parity error", -1, 1 },
1407  { 0 }
1408  };
1409 
1410  if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1411  t4_fatal_err(adapter);
1412 }
1413 
1414 /*
1415  * PM TX interrupt handler.
1416  */
1417 static void pmtx_intr_handler(struct adapter *adapter)
1418 {
1419  static const struct intr_info pmtx_intr_info[] = {
1420  { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1421  { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1422  { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1423  { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1424  { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1425  { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1426  { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1427  { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1428  { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1429  { 0 }
1430  };
1431 
1432  if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1433  t4_fatal_err(adapter);
1434 }
1435 
1436 /*
1437  * PM RX interrupt handler.
1438  */
1439 static void pmrx_intr_handler(struct adapter *adapter)
1440 {
1441  static const struct intr_info pmrx_intr_info[] = {
1442  { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1443  { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1444  { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1445  { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1446  { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1447  { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1448  { 0 }
1449  };
1450 
1451  if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1452  t4_fatal_err(adapter);
1453 }
1454 
1455 /*
1456  * CPL switch interrupt handler.
1457  */
1458 static void cplsw_intr_handler(struct adapter *adapter)
1459 {
1460  static const struct intr_info cplsw_intr_info[] = {
1461  { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1462  { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1463  { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1464  { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1465  { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1466  { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1467  { 0 }
1468  };
1469 
1470  if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1471  t4_fatal_err(adapter);
1472 }
1473 
1474 /*
1475  * LE interrupt handler.
1476  */
1477 static void le_intr_handler(struct adapter *adap)
1478 {
1479  static const struct intr_info le_intr_info[] = {
1480  { LIPMISS, "LE LIP miss", -1, 0 },
1481  { LIP0, "LE 0 LIP error", -1, 0 },
1482  { PARITYERR, "LE parity error", -1, 1 },
1483  { UNKNOWNCMD, "LE unknown command", -1, 1 },
1484  { REQQPARERR, "LE request queue parity error", -1, 1 },
1485  { 0 }
1486  };
1487 
1488  if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1489  t4_fatal_err(adap);
1490 }
1491 
1492 /*
1493  * MPS interrupt handler.
1494  */
1495 static void mps_intr_handler(struct adapter *adapter)
1496 {
1497  static const struct intr_info mps_rx_intr_info[] = {
1498  { 0xffffff, "MPS Rx parity error", -1, 1 },
1499  { 0 }
1500  };
1501  static const struct intr_info mps_tx_intr_info[] = {
1502  { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1503  { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1504  { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1505  { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1506  { BUBBLE, "MPS Tx underflow", -1, 1 },
1507  { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1508  { FRMERR, "MPS Tx framing error", -1, 1 },
1509  { 0 }
1510  };
1511  static const struct intr_info mps_trc_intr_info[] = {
1512  { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1513  { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1514  { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1515  { 0 }
1516  };
1517  static const struct intr_info mps_stat_sram_intr_info[] = {
1518  { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1519  { 0 }
1520  };
1521  static const struct intr_info mps_stat_tx_intr_info[] = {
1522  { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1523  { 0 }
1524  };
1525  static const struct intr_info mps_stat_rx_intr_info[] = {
1526  { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1527  { 0 }
1528  };
1529  static const struct intr_info mps_cls_intr_info[] = {
1530  { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1531  { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1532  { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1533  { 0 }
1534  };
1535 
1536  int fat;
1537 
1538  fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1539  mps_rx_intr_info) +
1540  t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1541  mps_tx_intr_info) +
1542  t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1543  mps_trc_intr_info) +
1544  t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1545  mps_stat_sram_intr_info) +
1546  t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1547  mps_stat_tx_intr_info) +
1548  t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1549  mps_stat_rx_intr_info) +
1550  t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1551  mps_cls_intr_info);
1552 
1553  t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1554  RXINT | TXINT | STATINT);
1555  t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1556  if (fat)
1557  t4_fatal_err(adapter);
1558 }
1559 
1560 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1561 
1562 /*
1563  * EDC/MC interrupt handler.
1564  */
1565 static void mem_intr_handler(struct adapter *adapter, int idx)
1566 {
1567  static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1568 
1569  unsigned int addr, cnt_addr, v;
1570 
1571  if (idx <= MEM_EDC1) {
1572  addr = EDC_REG(EDC_INT_CAUSE, idx);
1573  cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1574  } else {
1575  addr = MC_INT_CAUSE;
1576  cnt_addr = MC_ECC_STATUS;
1577  }
1578 
1579  v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1580  if (v & PERR_INT_CAUSE)
1581  dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1582  name[idx]);
1583  if (v & ECC_CE_INT_CAUSE) {
1584  u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1585 
1586  t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1587  if (printk_ratelimit())
1588  dev_warn(adapter->pdev_dev,
1589  "%u %s correctable ECC data error%s\n",
1590  cnt, name[idx], cnt > 1 ? "s" : "");
1591  }
1592  if (v & ECC_UE_INT_CAUSE)
1593  dev_alert(adapter->pdev_dev,
1594  "%s uncorrectable ECC data error\n", name[idx]);
1595 
1596  t4_write_reg(adapter, addr, v);
1597  if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1598  t4_fatal_err(adapter);
1599 }
1600 
1601 /*
1602  * MA interrupt handler.
1603  */
1604 static void ma_intr_handler(struct adapter *adap)
1605 {
1606  u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1607 
1608  if (status & MEM_PERR_INT_CAUSE)
1609  dev_alert(adap->pdev_dev,
1610  "MA parity error, parity status %#x\n",
1611  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1612  if (status & MEM_WRAP_INT_CAUSE) {
1613  v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1614  dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1615  "client %u to address %#x\n",
1616  MEM_WRAP_CLIENT_NUM_GET(v),
1617  MEM_WRAP_ADDRESS_GET(v) << 4);
1618  }
1619  t4_write_reg(adap, MA_INT_CAUSE, status);
1620  t4_fatal_err(adap);
1621 }
1622 
1623 /*
1624  * SMB interrupt handler.
1625  */
1626 static void smb_intr_handler(struct adapter *adap)
1627 {
1628  static const struct intr_info smb_intr_info[] = {
1629  { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1630  { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1631  { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1632  { 0 }
1633  };
1634 
1635  if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1636  t4_fatal_err(adap);
1637 }
1638 
1639 /*
1640  * NC-SI interrupt handler.
1641  */
1642 static void ncsi_intr_handler(struct adapter *adap)
1643 {
1644  static const struct intr_info ncsi_intr_info[] = {
1645  { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1646  { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1647  { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1648  { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1649  { 0 }
1650  };
1651 
1652  if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1653  t4_fatal_err(adap);
1654 }
1655 
1656 /*
1657  * XGMAC interrupt handler.
1658  */
1659 static void xgmac_intr_handler(struct adapter *adap, int port)
1660 {
1661  u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1662 
1663  v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1664  if (!v)
1665  return;
1666 
1667  if (v & TXFIFO_PRTY_ERR)
1668  dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1669  port);
1670  if (v & RXFIFO_PRTY_ERR)
1671  dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1672  port);
1673  t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1674  t4_fatal_err(adap);
1675 }
1676 
1677 /*
1678  * PL interrupt handler.
1679  */
1680 static void pl_intr_handler(struct adapter *adap)
1681 {
1682  static const struct intr_info pl_intr_info[] = {
1683  { FATALPERR, "T4 fatal parity error", -1, 1 },
1684  { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1685  { 0 }
1686  };
1687 
1688  if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1689  t4_fatal_err(adap);
1690 }
1691 
1692 #define PF_INTR_MASK (PFSW)
1693 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1694  EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1695  CPL_SWITCH | SGE | ULP_TX)
1696 
1705 int t4_slow_intr_handler(struct adapter *adapter)
1706 {
1707  u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1708 
1709  if (!(cause & GLBL_INTR_MASK))
1710  return 0;
1711  if (cause & CIM)
1712  cim_intr_handler(adapter);
1713  if (cause & MPS)
1714  mps_intr_handler(adapter);
1715  if (cause & NCSI)
1716  ncsi_intr_handler(adapter);
1717  if (cause & PL)
1718  pl_intr_handler(adapter);
1719  if (cause & SMB)
1720  smb_intr_handler(adapter);
1721  if (cause & XGMAC0)
1722  xgmac_intr_handler(adapter, 0);
1723  if (cause & XGMAC1)
1724  xgmac_intr_handler(adapter, 1);
1725  if (cause & XGMAC_KR0)
1726  xgmac_intr_handler(adapter, 2);
1727  if (cause & XGMAC_KR1)
1728  xgmac_intr_handler(adapter, 3);
1729  if (cause & PCIE)
1730  pcie_intr_handler(adapter);
1731  if (cause & MC)
1732  mem_intr_handler(adapter, MEM_MC);
1733  if (cause & EDC0)
1734  mem_intr_handler(adapter, MEM_EDC0);
1735  if (cause & EDC1)
1736  mem_intr_handler(adapter, MEM_EDC1);
1737  if (cause & LE)
1738  le_intr_handler(adapter);
1739  if (cause & TP)
1740  tp_intr_handler(adapter);
1741  if (cause & MA)
1742  ma_intr_handler(adapter);
1743  if (cause & PM_TX)
1744  pmtx_intr_handler(adapter);
1745  if (cause & PM_RX)
1746  pmrx_intr_handler(adapter);
1747  if (cause & ULP_RX)
1748  ulprx_intr_handler(adapter);
1749  if (cause & CPL_SWITCH)
1750  cplsw_intr_handler(adapter);
1751  if (cause & SGE)
1752  sge_intr_handler(adapter);
1753  if (cause & ULP_TX)
1754  ulptx_intr_handler(adapter);
1755 
1756  /* Clear the interrupts just processed for which we are the master. */
1757  t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1758  (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1759  return 1;
1760 }
1761 
1775 void t4_intr_enable(struct adapter *adapter)
1776 {
1777  u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1778 
1779  t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1780  ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1781  ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1782  ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1783  ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1784  ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1785  ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1786  DBFIFO_HP_INT | DBFIFO_LP_INT |
1787  EGRESS_SIZE_ERR);
1788  t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1789  t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1790 }
1791 
1800 void t4_intr_disable(struct adapter *adapter)
1801 {
1802  u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1803 
1804  t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1805  t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1806 }
1807 
1815 static int hash_mac_addr(const u8 *addr)
1816 {
1817  u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1818  u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1819  a ^= b;
1820  a ^= (a >> 12);
1821  a ^= (a >> 6);
1822  return a & 0x3f;
1823 }
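
Worked example: for MAC address 00:07:43:00:00:01 the two 24-bit halves are 0x000743 and 0x000001, so a = 0x742 after the XOR; a >> 12 is 0, leaving 0x742, a >> 6 is 0x1d, giving 0x75f, and masking with 0x3f yields hash bucket 31.
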
1824 
1842 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1843  int start, int n, const u16 *rspq, unsigned int nrspq)
1844 {
1845  int ret;
1846  const u16 *rsp = rspq;
1847  const u16 *rsp_end = rspq + nrspq;
1848  struct fw_rss_ind_tbl_cmd cmd;
1849 
1850  memset(&cmd, 0, sizeof(cmd));
1851  cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1852  FW_CMD_REQUEST | FW_CMD_WRITE |
1853  FW_RSS_IND_TBL_CMD_VIID(viid));
1854  cmd.retval_len16 = htonl(FW_LEN16(cmd));
1855 
1856  /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1857  while (n > 0) {
1858  int nq = min(n, 32);
1859  __be32 *qp = &cmd.iq0_to_iq2;
1860 
1861  cmd.niqid = htons(nq);
1862  cmd.startidx = htons(start);
1863 
1864  start += nq;
1865  n -= nq;
1866 
1867  while (nq > 0) {
1868  unsigned int v;
1869 
1870  v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1871  if (++rsp >= rsp_end)
1872  rsp = rspq;
1873  v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1874  if (++rsp >= rsp_end)
1875  rsp = rspq;
1876  v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1877  if (++rsp >= rsp_end)
1878  rsp = rspq;
1879 
1880  *qp++ = htonl(v);
1881  nq -= 3;
1882  }
1883 
1884  ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1885  if (ret)
1886  return ret;
1887  }
1888  return 0;
1889 }
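
Because the routine wraps around rspq when it runs out of entries, a short queue list can be replicated across a larger table slice. A minimal sketch (the queue IDs are hypothetical):

static int example_setup_rss(struct adapter *adap, int mbox, unsigned int viid)
{
	static const u16 rspq[] = { 10, 11, 12, 13 };	/* hypothetical IQ ids */

	/* fill 64 indirection-table slots by cycling over the 4 queues */
	return t4_config_rss_range(adap, mbox, viid, 0, 64,
				   rspq, ARRAY_SIZE(rspq));
}
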
1890 
1900 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1901  unsigned int flags)
1902 {
1903  struct fw_rss_glb_config_cmd c;
1904 
1905  memset(&c, 0, sizeof(c));
1906  c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1907  FW_CMD_REQUEST | FW_CMD_WRITE);
1908  c.retval_len16 = htonl(FW_LEN16(c));
1909  if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1910  c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1911  } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1912  c.u.basicvirtual.mode_pkd =
1913  htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1914  c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1915  } else
1916  return -EINVAL;
1917  return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1918 }
1919 
1929 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1930  struct tp_tcp_stats *v6)
1931 {
1932  u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1933 
1934 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1935 #define STAT(x) val[STAT_IDX(x)]
1936 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1937 
1938  if (v4) {
1939  t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1940  ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1941  v4->tcpOutRsts = STAT(OUT_RST);
1942  v4->tcpInSegs = STAT64(IN_SEG);
1943  v4->tcpOutSegs = STAT64(OUT_SEG);
1944  v4->tcpRetransSegs = STAT64(RXT_SEG);
1945  }
1946  if (v6) {
1947  t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1948  ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1949  v6->tcpOutRsts = STAT(OUT_RST);
1950  v6->tcpInSegs = STAT64(IN_SEG);
1951  v6->tcpOutSegs = STAT64(OUT_SEG);
1952  v6->tcpRetransSegs = STAT64(RXT_SEG);
1953  }
1954 #undef STAT64
1955 #undef STAT
1956 #undef STAT_IDX
1957 }
1958 
1967 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1968 {
1969  u32 v;
1970  int i;
1971 
1972  for (i = 0; i < NMTUS; ++i) {
1973  t4_write_reg(adap, TP_MTU_TABLE,
1974  MTUINDEX(0xff) | MTUVALUE(i));
1975  v = t4_read_reg(adap, TP_MTU_TABLE);
1976  mtus[i] = MTUVALUE_GET(v);
1977  if (mtu_log)
1978  mtu_log[i] = MTUWIDTH_GET(v);
1979  }
1980 }
1981 
1991 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1992  unsigned int mask, unsigned int val)
1993 {
1994  t4_write_reg(adap, TP_PIO_ADDR, addr);
1995  val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
1996  t4_write_reg(adap, TP_PIO_DATA, val);
1997 }
1998 
2006 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2007 {
2008  a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2009  a[9] = 2;
2010  a[10] = 3;
2011  a[11] = 4;
2012  a[12] = 5;
2013  a[13] = 6;
2014  a[14] = 7;
2015  a[15] = 8;
2016  a[16] = 9;
2017  a[17] = 10;
2018  a[18] = 14;
2019  a[19] = 17;
2020  a[20] = 21;
2021  a[21] = 25;
2022  a[22] = 30;
2023  a[23] = 35;
2024  a[24] = 45;
2025  a[25] = 60;
2026  a[26] = 80;
2027  a[27] = 100;
2028  a[28] = 200;
2029  a[29] = 300;
2030  a[30] = 400;
2031  a[31] = 500;
2032 
2033  b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2034  b[9] = b[10] = 1;
2035  b[11] = b[12] = 2;
2036  b[13] = b[14] = b[15] = b[16] = 3;
2037  b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2038  b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2039  b[28] = b[29] = 6;
2040  b[30] = b[31] = 7;
2041 }
2042 
2043 /* The minimum additive increment value for the congestion control table */
2044 #define CC_MIN_INCR 2U
2045 
2058 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2059  const unsigned short *alpha, const unsigned short *beta)
2060 {
2061  static const unsigned int avg_pkts[NCCTRL_WIN] = {
2062  2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2063  896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2064  28672, 40960, 57344, 81920, 114688, 163840, 229376
2065  };
2066 
2067  unsigned int i, w;
2068 
2069  for (i = 0; i < NMTUS; ++i) {
2070  unsigned int mtu = mtus[i];
2071  unsigned int log2 = fls(mtu);
2072 
2073  if (!(mtu & ((1 << log2) >> 2))) /* round */
2074  log2--;
2075  t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2076  MTUWIDTH(log2) | MTUVALUE(mtu));
2077 
2078  for (w = 0; w < NCCTRL_WIN; ++w) {
2079  unsigned int inc;
2080 
2081  inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2082  CC_MIN_INCR);
2083 
2084  t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2085  (w << 16) | (beta[w] << 13) | inc);
2086  }
2087  }
2088 }
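
Worked example of the rounding above: fls(1536) = 11 and bit 9 ((1 << 11) >> 2 = 0x200) is set in 0x600, so log2 stays 11; for 1500 (0x5dc) bit 9 is clear, so log2 drops to 10. The effect is to round the MTU to the nearest power of two before programming MTUWIDTH.
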
2089 
2099 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2100 {
2101  u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2102 
2103  if (n == 0)
2104  return idx == 0 ? 0xf : 0;
2105  if (n == 1)
2106  return idx < 2 ? (3 << (2 * idx)) : 0;
2107  return 1 << idx;
2108 }
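
Worked example: with one port (n == 0) port 0 owns all four buffer groups (map 0xf); with two ports (n == 1) port 0 maps to groups 0-1 (0x3) and port 1 to groups 2-3 (0xc); with four ports each port gets only its own group (1 << idx).
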
2109 
2118 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2119 {
2120  u32 bgmap = get_mps_bg_map(adap, idx);
2121 
2122 #define GET_STAT(name) \
2123  t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2124 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2125 
2126  p->tx_octets = GET_STAT(TX_PORT_BYTES);
2127  p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2128  p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2129  p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2130  p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2131  p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2132  p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2133  p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2134  p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2135  p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2136  p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2137  p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2138  p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2139  p->tx_drop = GET_STAT(TX_PORT_DROP);
2140  p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2141  p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2142  p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2143  p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2144  p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2145  p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2146  p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2147  p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2148  p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2149 
2150  p->rx_octets = GET_STAT(RX_PORT_BYTES);
2151  p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2152  p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2153  p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2154  p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2155  p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2156  p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2157  p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2158  p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2159  p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2160  p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2161  p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2162  p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2163  p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2164  p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2165  p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2166  p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2167  p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2168  p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2169  p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2170  p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2171  p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2172  p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2173  p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2174  p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2175  p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2176  p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2177 
2178  p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2179  p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2180  p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2181  p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2182  p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2183  p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2184  p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2185  p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2186 
2187 #undef GET_STAT
2188 #undef GET_STAT_COM
2189 }
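/*
 * Usage sketch (hypothetical caller, not part of the driver): sample the
 * MPS counters twice and difference them to get one interval's traffic.
 */
static u64 example_tx_frames_per_sec(struct adapter *adap, int idx)
{
        struct port_stats a, b;

        t4_get_port_stats(adap, idx, &a);
        msleep(1000);                   /* one-second measurement window */
        t4_get_port_stats(adap, idx, &b);
        return b.tx_frames - a.tx_frames;
}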
2190 
2199 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2200  const u8 *addr)
2201 {
2202  if (addr) {
2203  t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2204  (addr[2] << 24) | (addr[3] << 16) |
2205  (addr[4] << 8) | addr[5]);
2206  t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2207  (addr[0] << 8) | addr[1]);
2208  }
2209  t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2210  addr ? MAGICEN : 0);
2211 }
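/*
 * Usage sketch (hypothetical caller): arm and later disarm Magic Packet
 * wake-up on a port; passing NULL leaves the MAC-ID registers untouched
 * and just clears MAGICEN.
 */
static void example_magic_wol(struct adapter *adap, unsigned int port,
                              const u8 *dev_mac)
{
        t4_wol_magic_enable(adap, port, dev_mac);  /* arm on dev_mac */
        t4_wol_magic_enable(adap, port, NULL);     /* disarm */
}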
2212 
2228 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2229  u64 mask0, u64 mask1, unsigned int crc, bool enable)
2230 {
2231  int i;
2232 
2233  if (!enable) {
2234  t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2235  PATEN, 0);
2236  return 0;
2237  }
2238  if (map > 0xff)
2239  return -EINVAL;
2240 
2241 #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2242 
2243  t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2244  t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2245  t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2246 
2247  for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2248  if (!(map & 1))
2249  continue;
2250 
2251  /* write byte masks */
2252  t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2253  t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2254  t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2255  if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2256  return -ETIMEDOUT;
2257 
2258  /* write CRC */
2259  t4_write_reg(adap, EPIO_REG(DATA0), crc);
2260  t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2261  t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2262  if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2263  return -ETIMEDOUT;
2264  }
2265 #undef EPIO_REG
2266 
2267  t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2268  return 0;
2269 }
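/*
 * Usage sketch (hypothetical values, not part of the driver): program
 * pattern 0 alone (map = 1) with a byte mask in mask0/mask1 and an
 * expected CRC, then set PATEN. A real caller would derive the mask and
 * CRC from the frame it wants to wake on.
 */
static int example_pattern_wol(struct adapter *adap, unsigned int port)
{
        return t4_wol_pat_enable(adap, port, 1, 0xffffffffffffffffULL,
                                 0, 0x1234, true);
}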
2270 
2271 #define INIT_CMD(var, cmd, rd_wr) do { \
2272  (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2273  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2274  (var).retval_len16 = htonl(FW_LEN16(var)); \
2275 } while (0)
2276 
2277 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2278  u32 addr, u32 val)
2279 {
2280  struct fw_ldst_cmd c;
2281 
2282  memset(&c, 0, sizeof(c));
2283  c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2284  FW_CMD_WRITE |
2285  FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2286  c.cycles_to_len16 = htonl(FW_LEN16(c));
2287  c.u.addrval.addr = htonl(addr);
2288  c.u.addrval.val = htonl(val);
2289 
2290  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2291 }
2292 
2304 int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2305 {
2306  int i;
2307  int off;
2308 
2309  /*
2310  * Align on a 16B boundary.
2311  */
2312  off = addr & 15;
2313  if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2314  return -EINVAL;
2315 
2316  t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
2317  t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
2318 
2319  for (i = 0; i < len; i += 4)
2320  *data++ = (__force __be32) t4_read_reg(adap,
2321  (MEMWIN0_BASE + off + i));
2322 
2323  return 0;
2324 }
2325 
2337 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2338  unsigned int mmd, unsigned int reg, u16 *valp)
2339 {
2340  int ret;
2341  struct fw_ldst_cmd c;
2342 
2343  memset(&c, 0, sizeof(c));
2344  c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2345  FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2346  c.cycles_to_len16 = htonl(FW_LEN16(c));
2347  c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2348  FW_LDST_CMD_MMD(mmd));
2349  c.u.mdio.raddr = htons(reg);
2350 
2351  ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2352  if (ret == 0)
2353  *valp = ntohs(c.u.mdio.rval);
2354  return ret;
2355 }
2356 
2368 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2369  unsigned int mmd, unsigned int reg, u16 val)
2370 {
2371  struct fw_ldst_cmd c;
2372 
2373  memset(&c, 0, sizeof(c));
2374  c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2375  FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2376  c.cycles_to_len16 = htonl(FW_LEN16(c));
2377  c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2378  FW_LDST_CMD_MMD(mmd));
2379  c.u.mdio.raddr = htons(reg);
2380  c.u.mdio.rval = htons(val);
2381 
2382  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2383 }
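/*
 * Usage sketch (not part of the driver): read-modify-write a clause-45
 * PHY register through the firmware. MDIO_MMD_PMAPMD comes from
 * <linux/mdio.h> and is assumed available here; register 0 and bit 15
 * are purely illustrative.
 */
static int example_mdio_rmw(struct adapter *adap, unsigned int mbox,
                            unsigned int phy_addr)
{
        u16 v;
        int ret = t4_mdio_rd(adap, mbox, phy_addr, MDIO_MMD_PMAPMD, 0, &v);

        if (ret)
                return ret;
        return t4_mdio_wr(adap, mbox, phy_addr, MDIO_MMD_PMAPMD, 0,
                          v | 0x8000);
}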
2384 
2396 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2397  enum dev_master master, enum dev_state *state)
2398 {
2399  int ret;
2400  struct fw_hello_cmd c;
2401  u32 v;
2402  unsigned int master_mbox;
2403  int retries = FW_CMD_HELLO_RETRIES;
2404 
2405 retry:
2406  memset(&c, 0, sizeof(c));
2407  INIT_CMD(c, HELLO, WRITE);
2408  c.err_to_mbasyncnot = htonl(
2409  FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2410  FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2411  FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2412  FW_HELLO_CMD_MBMASTER_MASK) |
2413  FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2414  FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2415  FW_HELLO_CMD_CLEARINIT);
2416 
2417  /*
2418  * Issue the HELLO command to the firmware. If it's not successful
2419  * but indicates that we got a "busy" or "timeout" condition, retry
2420  * the HELLO until we exhaust our retry limit.
2421  */
2422  ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2423  if (ret < 0) {
2424  if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2425  goto retry;
2426  return ret;
2427  }
2428 
2429  v = ntohl(c.err_to_mbasyncnot);
2430  master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2431  if (state) {
2432  if (v & FW_HELLO_CMD_ERR)
2433  *state = DEV_STATE_ERR;
2434  else if (v & FW_HELLO_CMD_INIT)
2435  *state = DEV_STATE_INIT;
2436  else
2437  *state = DEV_STATE_UNINIT;
2438  }
2439 
2440  /*
2441  * If we're not the Master PF then we need to wait around for the
2442  * Master PF Driver to finish setting up the adapter.
2443  *
2444  * Note that we also do this wait if we're a non-Master-capable PF and
2445  * there is no current Master PF; a Master PF may show up momentarily
2446  * and we wouldn't want to fail pointlessly. (This can happen when an
2447  * OS loads lots of different drivers rapidly at the same time). In
2448  * this case, the Master PF returned by the firmware will be
2449  * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2450  */
2451  if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2452  master_mbox != mbox) {
2453  int waiting = FW_CMD_HELLO_TIMEOUT;
2454 
2455  /*
2456  * Wait for the firmware to either indicate an error or
2457  * initialized state. If we see either of these we bail out
2458  * and report the issue to the caller. If we exhaust the
2459  * "hello timeout" and we haven't exhausted our retries, try
2460  * again. Otherwise bail with a timeout error.
2461  */
2462  for (;;) {
2463  u32 pcie_fw;
2464 
2465  msleep(50);
2466  waiting -= 50;
2467 
2468  /*
2469  * If neither Error nor Initialized is indicated
2470  * by the firmware, keep waiting until we exhaust our
2471  * timeout ... and then retry if we haven't exhausted
2472  * our retries ...
2473  */
2474  pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2475  if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2476  if (waiting <= 0) {
2477  if (retries-- > 0)
2478  goto retry;
2479 
2480  return -ETIMEDOUT;
2481  }
2482  continue;
2483  }
2484 
2485  /*
2486  * We either have an Error or an Initialized condition;
2487  * report errors preferentially.
2488  */
2489  if (state) {
2490  if (pcie_fw & FW_PCIE_FW_ERR)
2491  *state = DEV_STATE_ERR;
2492  else if (pcie_fw & FW_PCIE_FW_INIT)
2493  *state = DEV_STATE_INIT;
2494  }
2495 
2496  /*
2497  * If we arrived before a Master PF was selected and
2499  * there's now a valid Master PF, grab its identity
2499  * for our caller.
2500  */
2501  if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2502  (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2503  master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2504  break;
2505  }
2506  }
2507 
2508  return master_mbox;
2509 }
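/*
 * Usage sketch (simplified from a typical probe path, error handling
 * abbreviated): say HELLO, then only run full initialization if we were
 * chosen Master PF and the device is still uninitialized.
 */
static int example_attach(struct adapter *adap, unsigned int mbox)
{
        enum dev_state state;
        int ret = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);

        if (ret < 0)
                return ret;
        if (ret == mbox && state == DEV_STATE_UNINIT) {
                /* we are the Master PF: configure the adapter here ... */
        }
        return 0;
}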
2510 
2518 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2519 {
2520  struct fw_bye_cmd c;
2521 
2522  memset(&c, 0, sizeof(c));
2523  INIT_CMD(c, BYE, WRITE);
2524  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2525 }
2526 
2535 int t4_early_init(struct adapter *adap, unsigned int mbox)
2536 {
2537  struct fw_initialize_cmd c;
2538 
2539  memset(&c, 0, sizeof(c));
2540  INIT_CMD(c, INITIALIZE, WRITE);
2541  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2542 }
2543 
2552 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2553 {
2554  struct fw_reset_cmd c;
2555 
2556  memset(&c, 0, sizeof(c));
2557  INIT_CMD(c, RESET, WRITE);
2558  c.val = htonl(reset);
2559  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2560 }
2561 
2578 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2579 {
2580  int ret = 0;
2581 
2582  /*
2583  * If a legitimate mailbox is provided, issue a RESET command
2584  * with a HALT indication.
2585  */
2586  if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2587  struct fw_reset_cmd c;
2588 
2589  memset(&c, 0, sizeof(c));
2590  INIT_CMD(c, RESET, WRITE);
2591  c.val = htonl(PIORST | PIORSTMODE);
2592  c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2593  ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2594  }
2595 
2596  /*
2597  * Normally we won't complete the operation if the firmware RESET
2598  * command fails, but if our caller insists we'll go ahead and put the
2599  * uP into RESET. This can be useful if the firmware is hung or even
2600  * missing ... We'll have to take the risk of putting the uP into
2601  * RESET without the cooperation of firmware in that case.
2602  *
2603  * We also force the firmware's HALT flag to be on in case we bypassed
2604  * the firmware RESET command above or we're dealing with old firmware
2605  * which doesn't have the HALT capability. This will serve as a flag
2606  * for the incoming firmware to know that it's coming out of a HALT
2607  * rather than a RESET ... if it's new enough to understand that ...
2608  */
2609  if (ret == 0 || force) {
2610  t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2611  t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2612  FW_PCIE_FW_HALT);
2613  }
2614 
2615  /*
2616  * And we always return the result of the firmware RESET command
2617  * even when we force the uP into RESET ...
2618  */
2619  return ret;
2620 }
2621 
2643 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
2644 {
2645  if (reset) {
2646  /*
2647  * Since we're directing the RESET instead of the firmware
2648  * doing it automatically, we need to clear the PCIE_FW.HALT
2649  * bit.
2650  */
2651  t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2652 
2653  /*
2654  * If we've been given a valid mailbox, first try to get the
2655  * firmware to do the RESET. If that works, great and we can
2656  * return success. Otherwise, if we haven't been given a
2657  * valid mailbox or the RESET command failed, fall back to
2658  * hitting the chip with a hammer.
2659  */
2660  if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2661  t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2662  msleep(100);
2663  if (t4_fw_reset(adap, mbox,
2664  PIORST | PIORSTMODE) == 0)
2665  return 0;
2666  }
2667 
2668  t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2669  msleep(2000);
2670  } else {
2671  int ms;
2672 
2673  t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2674  for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
2675  if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
2676  return 0;
2677  msleep(100);
2678  ms += 100;
2679  }
2680  return -ETIMEDOUT;
2681  }
2682  return 0;
2683 }
2684 
2706 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
2707  const u8 *fw_data, unsigned int size, int force)
2708 {
2709  const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
2710  int reset, ret;
2711 
2712  ret = t4_fw_halt(adap, mbox, force);
2713  if (ret < 0 && !force)
2714  return ret;
2715 
2716  ret = t4_load_fw(adap, fw_data, size);
2717  if (ret < 0)
2718  return ret;
2719 
2720  /*
2721  * Older versions of the firmware don't understand the new
2722  * PCIE_FW.HALT flag and so won't know to perform a RESET when they
2723  * restart. So for newly loaded older firmware we'll have to do the
2724  * RESET for it so it starts up on a clean slate. We can tell if
2725  * the newly loaded firmware will handle this right by checking
2726  * its header flags to see if it advertises the capability.
2727  */
2728  reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
2729  return t4_fw_restart(adap, mbox, reset);
2730 }
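/*
 * Usage sketch (hypothetical ethtool-style flash handler): push a new
 * image and let t4_fw_upgrade() drive the halt / load / restart
 * sequence. struct firmware is from <linux/firmware.h>, assumed
 * available to the caller.
 */
static int example_flash_fw(struct adapter *adap, unsigned int mbox,
                            const struct firmware *fw)
{
        return t4_fw_upgrade(adap, mbox, fw->data, fw->size, 0);
}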
2731 
2732 
2759 int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
2760  unsigned int mtype, unsigned int maddr,
2761  u32 *finiver, u32 *finicsum, u32 *cfcsum)
2762 {
2763  struct fw_caps_config_cmd caps_cmd;
2764  int ret;
2765 
2766  /*
2767  * Tell the firmware to process the indicated Configuration File.
2768  * If there are no errors and the caller has provided return value
2769  * pointers for the [fini] section version, checksum and computed
2770  * checksum, pass those back to the caller.
2771  */
2772  memset(&caps_cmd, 0, sizeof(caps_cmd));
2773  caps_cmd.op_to_write =
2774  htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2775  FW_CMD_REQUEST |
2776  FW_CMD_READ);
2777  caps_cmd.retval_len16 =
2778  htonl(FW_CAPS_CONFIG_CMD_CFVALID |
2779  FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2780  FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
2781  FW_LEN16(caps_cmd));
2782  ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
2783  if (ret < 0)
2784  return ret;
2785 
2786  if (finiver)
2787  *finiver = ntohl(caps_cmd.finiver);
2788  if (finicsum)
2789  *finicsum = ntohl(caps_cmd.finicsum);
2790  if (cfcsum)
2791  *cfcsum = ntohl(caps_cmd.cfcsum);
2792 
2793  /*
2794  * And now tell the firmware to use the configuration we just loaded.
2795  */
2796  caps_cmd.op_to_write =
2797  htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2798  FW_CMD_REQUEST |
2799  FW_CMD_WRITE);
2800  caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
2801  return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
2802 }
2803 
2814 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
2815  unsigned int cache_line_size)
2816 {
2817  unsigned int page_shift = fls(page_size) - 1;
2818  unsigned int sge_hps = page_shift - 10;
2819  unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
2820  unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
2821  unsigned int fl_align_log = fls(fl_align) - 1;
2822 
2823  t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
2824  HOSTPAGESIZEPF0(sge_hps) |
2825  HOSTPAGESIZEPF1(sge_hps) |
2826  HOSTPAGESIZEPF2(sge_hps) |
2827  HOSTPAGESIZEPF3(sge_hps) |
2828  HOSTPAGESIZEPF4(sge_hps) |
2829  HOSTPAGESIZEPF5(sge_hps) |
2830  HOSTPAGESIZEPF6(sge_hps) |
2831  HOSTPAGESIZEPF7(sge_hps));
2832 
2833  t4_set_reg_field(adap, SGE_CONTROL,
2834  INGPADBOUNDARY_MASK |
2835  EGRSTATUSPAGESIZE_MASK,
2836  INGPADBOUNDARY(fl_align_log - 5) |
2837  EGRSTATUSPAGESIZE(stat_len != 64));
2838 
2839  /*
2840  * Adjust various SGE Free List Host Buffer Sizes.
2841  *
2842  * This is something of a crock since we're using fixed indices into
2843  * the array which are also known by the sge.c code and the T4
2844  * Firmware Configuration File. We need to come up with a much better
2845  * approach to managing this array. For now, the first four entries
2846  * are:
2847  *
2848  * 0: Host Page Size
2849  * 1: 64KB
2850  * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
2851  * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
2852  *
2853  * For the single-MTU buffers in unpacked mode we need to include
2854  * space for the SGE Control Packet Shift, 14 byte Ethernet header,
2855  * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
2856  * Padding boundary. All of these are accommodated in the Factory
2857  * Default Firmware Configuration File but we need to adjust it for
2858  * this host's cache line size.
2859  */
2860  t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
2861  t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
2862  (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
2863  & ~(fl_align-1));
2864  t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
2865  (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
2866  & ~(fl_align-1));
2867 
2868  t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
2869 
2870  return 0;
2871 }
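/*
 * Worked example (illustrative): with page_size = 4096 and
 * cache_line_size = 64 the code above computes page_shift = 12,
 * sge_hps = 2 (so the host page size is programmed as 2^(10 + 2)),
 * stat_len = 64, fl_align = 64 and fl_align_log = 6, so INGPADBOUNDARY
 * gets 6 - 5 = 1 (the pad boundary field encodes 2^(5 + field) = 64).
 */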
2872 
2881 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2882 {
2883  struct fw_initialize_cmd c;
2884 
2885  memset(&c, 0, sizeof(c));
2886  INIT_CMD(c, INITIALIZE, WRITE);
2887  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2888 }
2889 
2903 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2904  unsigned int vf, unsigned int nparams, const u32 *params,
2905  u32 *val)
2906 {
2907  int i, ret;
2908  struct fw_params_cmd c;
2909  __be32 *p = &c.param[0].mnem;
2910 
2911  if (nparams > 7)
2912  return -EINVAL;
2913 
2914  memset(&c, 0, sizeof(c));
2915  c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2916  FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2917  FW_PARAMS_CMD_VFN(vf));
2918  c.retval_len16 = htonl(FW_LEN16(c));
2919  for (i = 0; i < nparams; i++, p += 2)
2920  *p = htonl(*params++);
2921 
2922  ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2923  if (ret == 0)
2924  for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2925  *val++ = ntohl(*p);
2926  return ret;
2927 }
2928 
2942 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2943  unsigned int vf, unsigned int nparams, const u32 *params,
2944  const u32 *val)
2945 {
2946  struct fw_params_cmd c;
2947  __be32 *p = &c.param[0].mnem;
2948 
2949  if (nparams > 7)
2950  return -EINVAL;
2951 
2952  memset(&c, 0, sizeof(c));
2953  c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2954  FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2955  FW_PARAMS_CMD_VFN(vf));
2956  c.retval_len16 = htonl(FW_LEN16(c));
2957  while (nparams--) {
2958  *p++ = htonl(*params++);
2959  *p++ = htonl(*val++);
2960  }
2961 
2962  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2963 }
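/*
 * Usage sketch (mirrors how the driver's init path uses this API): read
 * the device's port vector as a single firmware parameter.
 */
static int example_get_portvec(struct adapter *adap, unsigned int mbox,
                               unsigned int pf, unsigned int vf, u32 *vec)
{
        u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);

        return t4_query_params(adap, mbox, pf, vf, 1, &param, vec);
}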
2964 
2986 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2987  unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2988  unsigned int rxqi, unsigned int rxq, unsigned int tc,
2989  unsigned int vi, unsigned int cmask, unsigned int pmask,
2990  unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2991 {
2992  struct fw_pfvf_cmd c;
2993 
2994  memset(&c, 0, sizeof(c));
2995  c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2996  FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2997  FW_PFVF_CMD_VFN(vf));
2998  c.retval_len16 = htonl(FW_LEN16(c));
2999  c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3000  FW_PFVF_CMD_NIQ(rxq));
3001  c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
3002  FW_PFVF_CMD_PMASK(pmask) |
3003  FW_PFVF_CMD_NEQ(txq));
3004  c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3005  FW_PFVF_CMD_NEXACTF(nexact));
3006  c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3007  FW_PFVF_CMD_WX_CAPS(wxcaps) |
3008  FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3009  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3010 }
3011 
3029 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3030  unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3031  unsigned int *rss_size)
3032 {
3033  int ret;
3034  struct fw_vi_cmd c;
3035 
3036  memset(&c, 0, sizeof(c));
3037  c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3038  FW_CMD_WRITE | FW_CMD_EXEC |
3039  FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3040  c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3041  c.portid_pkd = FW_VI_CMD_PORTID(port);
3042  c.nmac = nmac - 1;
3043 
3044  ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3045  if (ret)
3046  return ret;
3047 
3048  if (mac) {
3049  memcpy(mac, c.mac, sizeof(c.mac));
3050  switch (nmac) {
3051  case 5:
3052  memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* fall through */
3053  case 4:
3054  memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* fall through */
3055  case 3:
3056  memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* fall through */
3057  case 2:
3058  memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3059  }
3060  }
3061  if (rss_size)
3062  *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
3063  return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
3064 }
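/*
 * Usage sketch (hypothetical caller): allocate a Virtual Interface on
 * port 0 with a single MAC address; a non-negative return is the VI id.
 */
static int example_alloc_vi(struct adapter *adap, unsigned int mbox,
                            unsigned int pf, unsigned int vf)
{
        u8 mac[6];
        unsigned int rss_size;

        return t4_alloc_vi(adap, mbox, 0, pf, vf, 1, mac, &rss_size);
}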
3065 
3080 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3081  int mtu, int promisc, int all_multi, int bcast, int vlanex,
3082  bool sleep_ok)
3083 {
3084  struct fw_vi_rxmode_cmd c;
3085 
3086  /* convert to FW values */
3087  if (mtu < 0)
3088  mtu = FW_RXMODE_MTU_NO_CHG;
3089  if (promisc < 0)
3090  promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3091  if (all_multi < 0)
3092  all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3093  if (bcast < 0)
3094  bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
3095  if (vlanex < 0)
3096  vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
3097 
3098  memset(&c, 0, sizeof(c));
3099  c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3100  FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3101  c.retval_len16 = htonl(FW_LEN16(c));
3102  c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3103  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3104  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3105  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3106  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
3107  return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3108 }
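/*
 * Usage sketch (hypothetical caller): turn on promiscuous mode for a VI;
 * every -1 below means "leave that setting unchanged".
 */
static int example_set_promisc(struct adapter *adap, unsigned int mbox,
                               unsigned int viid)
{
        return t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
}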
3109 
3132 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3133  unsigned int viid, bool free, unsigned int naddr,
3134  const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3135 {
3136  int i, ret;
3137  struct fw_vi_mac_cmd c;
3138  struct fw_vi_mac_exact *p;
3139 
3140  if (naddr > 7)
3141  return -EINVAL;
3142 
3143  memset(&c, 0, sizeof(c));
3144  c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3145  FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3146  FW_VI_MAC_CMD_VIID(viid));
3147  c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3148  FW_CMD_LEN16((naddr + 2) / 2));
3149 
3150  for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3151  p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3152  FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3153  memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3154  }
3155 
3156  ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3157  if (ret)
3158  return ret;
3159 
3160  for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3161  u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3162 
3163  if (idx)
3164  idx[i] = index >= NEXACT_MAC ? 0xffff : index;
3165  if (index < NEXACT_MAC)
3166  ret++;
3167  else if (hash)
3168  *hash |= (1ULL << hash_mac_addr(addr[i]));
3169  }
3170  return ret;
3171 }
3172 
3192 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3193  int idx, const u8 *addr, bool persist, bool add_smt)
3194 {
3195  int ret, mode;
3196  struct fw_vi_mac_cmd c;
3197  struct fw_vi_mac_exact *p = c.u.exact;
3198 
3199  if (idx < 0) /* new allocation */
3200  idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3201  mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3202 
3203  memset(&c, 0, sizeof(c));
3204  c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3205  FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3206  c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3207  p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3208  FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3209  FW_VI_MAC_CMD_IDX(idx));
3210  memcpy(p->macaddr, addr, sizeof(p->macaddr));
3211 
3212  ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3213  if (ret == 0) {
3214  ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3215  if (ret >= NEXACT_MAC)
3216  ret = -ENOMEM;
3217  }
3218  return ret;
3219 }
3220 
3232 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3233  bool ucast, u64 vec, bool sleep_ok)
3234 {
3235  struct fw_vi_mac_cmd c;
3236 
3237  memset(&c, 0, sizeof(c));
3238  c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3239  FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3240  c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3241  FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3242  FW_CMD_LEN16(1));
3243  c.u.hash.hashvec = cpu_to_be64(vec);
3244  return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3245 }
3246 
3257 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3258  bool rx_en, bool tx_en)
3259 {
3260  struct fw_vi_enable_cmd c;
3261 
3262  memset(&c, 0, sizeof(c));
3263  c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3264  FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3265  c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3266  FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
3267  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3268 }
3269 
3279 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3280  unsigned int nblinks)
3281 {
3282  struct fw_vi_enable_cmd c;
3283 
3284  memset(&c, 0, sizeof(c));
3285  c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3286  FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3287  c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3288  c.blinkdur = htons(nblinks);
3289  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3290 }
3291 
3305 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3306  unsigned int vf, unsigned int iqtype, unsigned int iqid,
3307  unsigned int fl0id, unsigned int fl1id)
3308 {
3309  struct fw_iq_cmd c;
3310 
3311  memset(&c, 0, sizeof(c));
3312  c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3313  FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3314  FW_IQ_CMD_VFN(vf));
3315  c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3316  c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3317  c.iqid = htons(iqid);
3318  c.fl0id = htons(fl0id);
3319  c.fl1id = htons(fl1id);
3320  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3321 }
3322 
3333 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3334  unsigned int vf, unsigned int eqid)
3335 {
3336  struct fw_eq_eth_cmd c;
3337 
3338  memset(&c, 0, sizeof(c));
3339  c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3340  FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3341  FW_EQ_ETH_CMD_VFN(vf));
3342  c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3343  c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3344  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3345 }
3346 
3357 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3358  unsigned int vf, unsigned int eqid)
3359 {
3360  struct fw_eq_ctrl_cmd c;
3361 
3362  memset(&c, 0, sizeof(c));
3363  c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3364  FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3365  FW_EQ_CTRL_CMD_VFN(vf));
3366  c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3367  c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3368  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3369 }
3370 
3381 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3382  unsigned int vf, unsigned int eqid)
3383 {
3384  struct fw_eq_ofld_cmd c;
3385 
3386  memset(&c, 0, sizeof(c));
3387  c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3388  FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3389  FW_EQ_OFLD_CMD_VFN(vf));
3390  c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3391  c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3392  return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3393 }
3394 
3402 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3403 {
3404  u8 opcode = *(const u8 *)rpl;
3405 
3406  if (opcode == FW_PORT_CMD) { /* link/module state change message */
3407  int speed = 0, fc = 0;
3408  const struct fw_port_cmd *p = (void *)rpl;
3409  int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3410  int port = adap->chan_map[chan];
3411  struct port_info *pi = adap2pinfo(adap, port);
3412  struct link_config *lc = &pi->link_cfg;
3413  u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3414  int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3415  u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3416 
3417  if (stat & FW_PORT_CMD_RXPAUSE)
3418  fc |= PAUSE_RX;
3419  if (stat & FW_PORT_CMD_TXPAUSE)
3420  fc |= PAUSE_TX;
3421  if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3422  speed = SPEED_100;
3423  else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3424  speed = SPEED_1000;
3425  else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3426  speed = SPEED_10000;
3427 
3428  if (link_ok != lc->link_ok || speed != lc->speed ||
3429  fc != lc->fc) { /* something changed */
3430  lc->link_ok = link_ok;
3431  lc->speed = speed;
3432  lc->fc = fc;
3433  t4_os_link_changed(adap, port, link_ok);
3434  }
3435  if (mod != pi->mod_type) {
3436  pi->mod_type = mod;
3437  t4_os_portmod_changed(adap, port);
3438  }
3439  }
3440  return 0;
3441 }
3442 
3443 static void __devinit get_pci_mode(struct adapter *adapter,
3444  struct pci_params *p)
3445 {
3446  u16 val;
3447 
3448  if (pci_is_pcie(adapter->pdev)) {
3449  pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3450  p->speed = val & PCI_EXP_LNKSTA_CLS;
3451  p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3452  }
3453 }
3454 
3463 static void __devinit init_link_config(struct link_config *lc,
3464  unsigned int caps)
3465 {
3466  lc->supported = caps;
3467  lc->requested_speed = 0;
3468  lc->speed = 0;
3469  lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3470  if (lc->supported & FW_PORT_CAP_ANEG) {
3471  lc->advertising = lc->supported & ADVERT_MASK;
3472  lc->autoneg = AUTONEG_ENABLE;
3473  lc->requested_fc |= PAUSE_AUTONEG;
3474  } else {
3475  lc->advertising = 0;
3476  lc->autoneg = AUTONEG_DISABLE;
3477  }
3478 }
3479 
3480 int t4_wait_dev_ready(struct adapter *adap)
3481 {
3482  if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3483  return 0;
3484  msleep(500);
3485  return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3486 }
3487 
3488 static int __devinit get_flash_params(struct adapter *adap)
3489 {
3490  int ret;
3491  u32 info;
3492 
3493  ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3494  if (!ret)
3495  ret = sf1_read(adap, 3, 0, 1, &info);
3496  t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3497  if (ret)
3498  return ret;
3499 
3500  if ((info & 0xff) != 0x20) /* not a Numonix flash */
3501  return -EINVAL;
3502  info >>= 16; /* log2 of size */
3503  if (info >= 0x14 && info < 0x18)
3504  adap->params.sf_nsec = 1 << (info - 16);
3505  else if (info == 0x18)
3506  adap->params.sf_nsec = 64;
3507  else
3508  return -EINVAL;
3509  adap->params.sf_size = 1 << info;
3510  adap->params.sf_fw_start =
3511  t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3512  return 0;
3513 }
3514 
3524 int __devinit t4_prep_adapter(struct adapter *adapter)
3525 {
3526  int ret;
3527 
3528  ret = t4_wait_dev_ready(adapter);
3529  if (ret < 0)
3530  return ret;
3531 
3532  get_pci_mode(adapter, &adapter->params.pci);
3533  adapter->params.rev = t4_read_reg(adapter, PL_REV);
3534 
3535  ret = get_flash_params(adapter);
3536  if (ret < 0) {
3537  dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3538  return ret;
3539  }
3540 
3541  init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3542 
3543  /*
3544  * Default port for debugging in case we can't reach FW.
3545  */
3546  adapter->params.nports = 1;
3547  adapter->params.portvec = 1;
3548  adapter->params.vpd.cclk = 50000;
3549  return 0;
3550 }
3551 
3552 int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3553 {
3554  u8 addr[6];
3555  int ret, i, j = 0;
3556  struct fw_port_cmd c;
3557  struct fw_rss_vi_config_cmd rvc;
3558 
3559  memset(&c, 0, sizeof(c));
3560  memset(&rvc, 0, sizeof(rvc));
3561 
3562  for_each_port(adap, i) {
3563  unsigned int rss_size;
3564  struct port_info *p = adap2pinfo(adap, i);
3565 
3566  while ((adap->params.portvec & (1 << j)) == 0)
3567  j++;
3568 
3569  c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3570  FW_CMD_REQUEST | FW_CMD_READ |
3571  FW_PORT_CMD_PORTID(j));
3572  c.action_to_len16 = htonl(
3573  FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3574  FW_LEN16(c));
3575  ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3576  if (ret)
3577  return ret;
3578 
3579  ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3580  if (ret < 0)
3581  return ret;
3582 
3583  p->viid = ret;
3584  p->tx_chan = j;
3585  p->lport = j;
3586  p->rss_size = rss_size;
3587  memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3588  memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3589  adap->port[i]->dev_id = j;
3590 
3591  ret = ntohl(c.u.info.lstatus_to_modtype);
3592  p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3593  FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3594  p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3595  p->mod_type = FW_PORT_MOD_TYPE_NA;
3596 
3597  rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
3598  FW_CMD_REQUEST | FW_CMD_READ |
3599  FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
3600  rvc.retval_len16 = htonl(FW_LEN16(rvc));
3601  ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
3602  if (ret)
3603  return ret;
3604  p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
3605 
3606  init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3607  j++;
3608  }
3609  return 0;
3610 }