Linux Kernel  3.7.1
cxgb3_main.c
1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses. You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  * Redistribution and use in source and binary forms, with or
11  * without modification, are permitted provided that the following
12  * conditions are met:
13  *
14  * - Redistributions of source code must retain the above
15  * copyright notice, this list of conditions and the following
16  * disclaimer.
17  *
18  * - Redistributions in binary form must reproduce the above
19  * copyright notice, this list of conditions and the following
20  * disclaimer in the documentation and/or other materials
21  * provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mdio.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <linux/stringify.h>
48 #include <linux/sched.h>
49 #include <linux/slab.h>
50 #include <asm/uaccess.h>
51 
52 #include "common.h"
53 #include "cxgb3_ioctl.h"
54 #include "regs.h"
55 #include "cxgb3_offload.h"
56 #include "version.h"
57 
58 #include "cxgb3_ctl_defs.h"
59 #include "t3_cpl.h"
60 #include "firmware_exports.h"
61 
62 enum {
63  MAX_TXQ_ENTRIES = 16384,
64  MAX_CTRL_TXQ_ENTRIES = 1024,
65  MAX_RSPQ_ENTRIES = 16384,
66  MAX_RX_BUFFERS = 16384,
67  MAX_RX_JUMBO_BUFFERS = 16384,
68  MIN_TXQ_ENTRIES = 4,
69  MIN_CTRL_TXQ_ENTRIES = 4,
70  MIN_RSPQ_ENTRIES = 32,
71  MIN_FL_ENTRIES = 32
72 };
73 
74 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
75 
76 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
77  NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
78  NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
79 
80 #define EEPROM_MAGIC 0x38E2F10C
81 
82 #define CH_DEVICE(devid, idx) \
83  { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
84 
85 static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
86  CH_DEVICE(0x20, 0), /* PE9000 */
87  CH_DEVICE(0x21, 1), /* T302E */
88  CH_DEVICE(0x22, 2), /* T310E */
89  CH_DEVICE(0x23, 3), /* T320X */
90  CH_DEVICE(0x24, 1), /* T302X */
91  CH_DEVICE(0x25, 3), /* T320E */
92  CH_DEVICE(0x26, 2), /* T310X */
93  CH_DEVICE(0x30, 2), /* T3B10 */
94  CH_DEVICE(0x31, 3), /* T3B20 */
95  CH_DEVICE(0x32, 1), /* T3B02 */
96  CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
97  CH_DEVICE(0x36, 3), /* S320E-CR */
98  CH_DEVICE(0x37, 7), /* N320E-G2 */
99  {0,}
100 };
101 
102 MODULE_DESCRIPTION(DRV_DESC);
103 MODULE_AUTHOR("Chelsio Communications");
104 MODULE_LICENSE("Dual BSD/GPL");
105 MODULE_VERSION(DRV_VERSION);
106 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
107 
108 static int dflt_msg_enable = DFLT_MSG_ENABLE;
109 
110 module_param(dflt_msg_enable, int, 0644);
111 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
112 
113 /*
114  * The driver uses the best interrupt scheme available on a platform in the
115  * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
116  * of these schemes the driver may consider as follows:
117  *
118  * msi = 2: choose from among all three options
119  * msi = 1: only consider MSI and pin interrupts
120  * msi = 0: force pin interrupts
121  */
122 static int msi = 2;
123 
124 module_param(msi, int, 0644);
125 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
126 
127 /*
128  * The driver enables offload as a default.
129  * To disable it, use ofld_disable = 1.
130  */
131 
132 static int ofld_disable = 0;
133 
134 module_param(ofld_disable, int, 0644);
135 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
136 
137 /*
138  * We have work elements that we need to cancel when an interface is taken
139  * down. Normally the work elements would be executed by keventd but that
140  * can deadlock because of linkwatch. If our close method takes the rtnl
141  * lock and linkwatch is ahead of our work elements in keventd, linkwatch
142  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
143  * for our work to complete. Get our own work queue to solve this.
144  */
145 struct workqueue_struct *cxgb3_wq;
146 
153 static void link_report(struct net_device *dev)
154 {
155  if (!netif_carrier_ok(dev))
156  printk(KERN_INFO "%s: link down\n", dev->name);
157  else {
158  const char *s = "10Mbps";
159  const struct port_info *p = netdev_priv(dev);
160 
161  switch (p->link_config.speed) {
162  case SPEED_10000:
163  s = "10Gbps";
164  break;
165  case SPEED_1000:
166  s = "1000Mbps";
167  break;
168  case SPEED_100:
169  s = "100Mbps";
170  break;
171  }
172 
173  printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
174  p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
175  }
176 }
177 
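/*
 * enable_tx_fifo_drain/disable_tx_fifo_drain work around MAC TX hangs across
 * link faults: setting F_ENDROPPKT makes the XGMAC discard packets so the TX
 * FIFO drains while the link is faulted, and clearing it restores normal
 * operation once the fault is gone.
 */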
178 static void enable_tx_fifo_drain(struct adapter *adapter,
179  struct port_info *pi)
180 {
181  t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
182  F_ENDROPPKT);
183  t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
184  t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
185  t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
186 }
187 
188 static void disable_tx_fifo_drain(struct adapter *adapter,
189  struct port_info *pi)
190 {
191  t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
192  F_ENDROPPKT, 0);
193 }
194 
195 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
196 {
197  struct net_device *dev = adap->port[port_id];
198  struct port_info *pi = netdev_priv(dev);
199 
200  if (state == netif_carrier_ok(dev))
201  return;
202 
203  if (state) {
204  struct cmac *mac = &pi->mac;
205 
206  netif_carrier_on(dev);
207 
208  disable_tx_fifo_drain(adap, pi);
209 
210  /* Clear local faults */
211  t3_xgm_intr_disable(adap, pi->port_id);
212  t3_read_reg(adap, A_XGM_INT_STATUS +
213  pi->mac.offset);
214  t3_write_reg(adap,
215  A_XGM_INT_CAUSE + pi->mac.offset,
216  F_XGM_INT);
217 
218  t3_set_reg_field(adap,
219  A_XGM_INT_ENABLE +
220  pi->mac.offset,
221  F_XGM_INT, F_XGM_INT);
222  t3_xgm_intr_enable(adap, pi->port_id);
223 
224  t3_mac_enable(mac, MAC_DIRECTION_TX);
225  } else {
226  netif_carrier_off(dev);
227 
228  /* Flush TX FIFO */
229  enable_tx_fifo_drain(adap, pi);
230  }
231  link_report(dev);
232 }
233 
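/*
 * t3_os_link_changed - handle a link state change reported by the HW layer
 *
 * Called by the common code when it detects a change in link status, speed,
 * duplex, or pause.  On link up it re-arms the XGMAC fault interrupts and
 * sets the carrier; on link down it powers down 10G PHYs, restarts link
 * negotiation, and drains the TX FIFO.  The new state is then logged.
 */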
247 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
248  int speed, int duplex, int pause)
249 {
250  struct net_device *dev = adapter->port[port_id];
251  struct port_info *pi = netdev_priv(dev);
252  struct cmac *mac = &pi->mac;
253 
254  /* Skip changes from disabled ports. */
255  if (!netif_running(dev))
256  return;
257 
258  if (link_stat != netif_carrier_ok(dev)) {
259  if (link_stat) {
260  disable_tx_fifo_drain(adapter, pi);
261 
262  t3_mac_enable(mac, MAC_DIRECTION_RX);
263 
264  /* Clear local faults */
265  t3_xgm_intr_disable(adapter, pi->port_id);
266  t3_read_reg(adapter, A_XGM_INT_STATUS +
267  pi->mac.offset);
268  t3_write_reg(adapter,
269  A_XGM_INT_CAUSE + pi->mac.offset,
270  F_XGM_INT);
271 
272  t3_set_reg_field(adapter,
273  A_XGM_INT_ENABLE + pi->mac.offset,
274  F_XGM_INT, F_XGM_INT);
275  t3_xgm_intr_enable(adapter, pi->port_id);
276 
277  netif_carrier_on(dev);
278  } else {
279  netif_carrier_off(dev);
280 
281  t3_xgm_intr_disable(adapter, pi->port_id);
282  t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
283  t3_set_reg_field(adapter,
284  A_XGM_INT_ENABLE + pi->mac.offset,
285  F_XGM_INT, 0);
286 
287  if (is_10G(adapter))
288  pi->phy.ops->power_down(&pi->phy, 1);
289 
290  t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
291  t3_mac_disable(mac, MAC_DIRECTION_RX);
292  t3_link_start(&pi->phy, mac, &pi->link_config);
293 
294  /* Flush TX FIFO */
295  enable_tx_fifo_drain(adapter, pi);
296  }
297 
298  link_report(dev);
299  }
300 }
301 
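/*
 * t3_os_phymod_changed - handle PHY module insertion or removal
 *
 * Logs which module type (SR, LR, LRM, TWINAX, or unknown) was plugged into
 * the given port, or that the module was unplugged.
 */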
311 void t3_os_phymod_changed(struct adapter *adap, int port_id)
312 {
313  static const char *mod_str[] = {
314  NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
315  };
316 
317  const struct net_device *dev = adap->port[port_id];
318  const struct port_info *pi = netdev_priv(dev);
319 
320  if (pi->phy.modtype == phy_modtype_none)
321  printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
322  else
323  printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
324  mod_str[pi->phy.modtype]);
325 }
326 
327 static void cxgb_set_rxmode(struct net_device *dev)
328 {
329  struct port_info *pi = netdev_priv(dev);
330 
331  t3_mac_set_rx_mode(&pi->mac, dev);
332 }
333 
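/*
 * link_start - (re)program the MAC for a port and restart the link
 *
 * Resets the MAC, programs its MTU, addresses and RX mode from the
 * net_device, and kicks off link negotiation on the PHY.
 */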
340 static void link_start(struct net_device *dev)
341 {
342  struct port_info *pi = netdev_priv(dev);
343  struct cmac *mac = &pi->mac;
344 
345  t3_mac_reset(mac);
346  t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
347  t3_mac_set_mtu(mac, dev->mtu);
348  t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
349  t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
350  t3_mac_set_rx_mode(mac, dev);
351  t3_link_start(&pi->phy, mac, &pi->link_config);
352  t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
353 }
354 
355 static inline void cxgb_disable_msi(struct adapter *adapter)
356 {
357  if (adapter->flags & USING_MSIX) {
358  pci_disable_msix(adapter->pdev);
359  adapter->flags &= ~USING_MSIX;
360  } else if (adapter->flags & USING_MSI) {
361  pci_disable_msi(adapter->pdev);
362  adapter->flags &= ~USING_MSI;
363  }
364 }
365 
366 /*
367  * Interrupt handler for asynchronous events used with MSI-X.
368  */
369 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
370 {
371  t3_slow_intr_handler(cookie);
372  return IRQ_HANDLED;
373 }
374 
375 /*
376  * Name the MSI-X interrupts.
377  */
378 static void name_msix_vecs(struct adapter *adap)
379 {
380  int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
381 
382  snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
383  adap->msix_info[0].desc[n] = 0;
384 
385  for_each_port(adap, j) {
386  struct net_device *d = adap->port[j];
387  const struct port_info *pi = netdev_priv(d);
388 
389  for (i = 0; i < pi->nqsets; i++, msi_idx++) {
390  snprintf(adap->msix_info[msi_idx].desc, n,
391  "%s-%d", d->name, pi->first_qset + i);
392  adap->msix_info[msi_idx].desc[n] = 0;
393  }
394  }
395 }
396 
397 static int request_msix_data_irqs(struct adapter *adap)
398 {
399  int i, j, err, qidx = 0;
400 
401  for_each_port(adap, i) {
402  int nqsets = adap2pinfo(adap, i)->nqsets;
403 
404  for (j = 0; j < nqsets; ++j) {
405  err = request_irq(adap->msix_info[qidx + 1].vec,
406  t3_intr_handler(adap,
407  adap->sge.qs[qidx].
408  rspq.polling), 0,
409  adap->msix_info[qidx + 1].desc,
410  &adap->sge.qs[qidx]);
411  if (err) {
412  while (--qidx >= 0)
413  free_irq(adap->msix_info[qidx + 1].vec,
414  &adap->sge.qs[qidx]);
415  return err;
416  }
417  qidx++;
418  }
419  }
420  return 0;
421 }
422 
423 static void free_irq_resources(struct adapter *adapter)
424 {
425  if (adapter->flags & USING_MSIX) {
426  int i, n = 0;
427 
428  free_irq(adapter->msix_info[0].vec, adapter);
429  for_each_port(adapter, i)
430  n += adap2pinfo(adapter, i)->nqsets;
431 
432  for (i = 0; i < n; ++i)
433  free_irq(adapter->msix_info[i + 1].vec,
434  &adapter->sge.qs[i]);
435  } else
436  free_irq(adapter->pdev->irq, adapter);
437 }
438 
439 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
440  unsigned long n)
441 {
442  int attempts = 10;
443 
444  while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
445  if (!--attempts)
446  return -ETIMEDOUT;
447  msleep(10);
448  }
449  return 0;
450 }
451 
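/*
 * init_tp_parity - initialize TP on-chip memory parity
 *
 * Writes benign entries to every SMT, L2T and routing-table slot via
 * management-queue messages so the TP memories start with valid parity,
 * then waits for the firmware replies before disabling offload mode again.
 * Falls back to the pre-allocated nofail_skb when skb allocation fails.
 */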
452 static int init_tp_parity(struct adapter *adap)
453 {
454  int i;
455  struct sk_buff *skb;
456  struct cpl_set_tcb_field *greq;
457  unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
458 
459  t3_tp_set_offload_mode(adap, 1);
460 
461  for (i = 0; i < 16; i++) {
462  struct cpl_smt_write_req *req;
463 
464  skb = alloc_skb(sizeof(*req), GFP_KERNEL);
465  if (!skb)
466  skb = adap->nofail_skb;
467  if (!skb)
468  goto alloc_skb_fail;
469 
470  req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
471  memset(req, 0, sizeof(*req));
472  req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
473  OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
474  req->mtu_idx = NMTUS - 1;
475  req->iff = i;
476  t3_mgmt_tx(adap, skb);
477  if (skb == adap->nofail_skb) {
478  await_mgmt_replies(adap, cnt, i + 1);
479  adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
480  if (!adap->nofail_skb)
481  goto alloc_skb_fail;
482  }
483  }
484 
485  for (i = 0; i < 2048; i++) {
486  struct cpl_l2t_write_req *req;
487 
488  skb = alloc_skb(sizeof(*req), GFP_KERNEL);
489  if (!skb)
490  skb = adap->nofail_skb;
491  if (!skb)
492  goto alloc_skb_fail;
493 
494  req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
495  memset(req, 0, sizeof(*req));
496  req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
497  OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
498  req->params = htonl(V_L2T_W_IDX(i));
499  t3_mgmt_tx(adap, skb);
500  if (skb == adap->nofail_skb) {
501  await_mgmt_replies(adap, cnt, 16 + i + 1);
502  adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
503  if (!adap->nofail_skb)
504  goto alloc_skb_fail;
505  }
506  }
507 
508  for (i = 0; i < 2048; i++) {
509  struct cpl_rte_write_req *req;
510 
511  skb = alloc_skb(sizeof(*req), GFP_KERNEL);
512  if (!skb)
513  skb = adap->nofail_skb;
514  if (!skb)
515  goto alloc_skb_fail;
516 
517  req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
518  memset(req, 0, sizeof(*req));
519  req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
520  OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
521  req->l2t_idx = htonl(V_L2T_W_IDX(i));
522  t3_mgmt_tx(adap, skb);
523  if (skb == adap->nofail_skb) {
524  await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
525  adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
526  if (!adap->nofail_skb)
527  goto alloc_skb_fail;
528  }
529  }
530 
531  skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
532  if (!skb)
533  skb = adap->nofail_skb;
534  if (!skb)
535  goto alloc_skb_fail;
536 
537  greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
538  memset(greq, 0, sizeof(*greq));
539  greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
540  OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
541  greq->mask = cpu_to_be64(1);
542  t3_mgmt_tx(adap, skb);
543 
544  i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
545  if (skb == adap->nofail_skb) {
546  i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
547  adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
548  }
549 
550  t3_tp_set_offload_mode(adap, 0);
551  return i;
552 
553 alloc_skb_fail:
554  t3_tp_set_offload_mode(adap, 0);
555  return -ENOMEM;
556 }
557 
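/*
 * setup_rss - configure Receive Side Steering
 *
 * Sets up the RSS indirection: the first half of the lookup table is spread
 * across port 0's queue sets and the second half across port 1's, so received
 * flows are hashed onto the available response queues.
 */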
569 static void setup_rss(struct adapter *adap)
570 {
571  int i;
572  unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
573  unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
574  u8 cpus[SGE_QSETS + 1];
575  u16 rspq_map[RSS_TABLE_SIZE];
576 
577  for (i = 0; i < SGE_QSETS; ++i)
578  cpus[i] = i;
579  cpus[SGE_QSETS] = 0xff; /* terminator */
580 
581  for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
582  rspq_map[i] = i % nq0;
583  rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
584  }
585 
586  t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
587  F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
588  V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
589 }
590 
591 static void ring_dbs(struct adapter *adap)
592 {
593  int i, j;
594 
595  for (i = 0; i < SGE_QSETS; i++) {
596  struct sge_qset *qs = &adap->sge.qs[i];
597 
598  if (qs->adap)
599  for (j = 0; j < SGE_TXQ_PER_SET; j++)
600  t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
601  }
602 }
603 
604 static void init_napi(struct adapter *adap)
605 {
606  int i;
607 
608  for (i = 0; i < SGE_QSETS; i++) {
609  struct sge_qset *qs = &adap->sge.qs[i];
610 
611  if (qs->adap)
612  netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
613  64);
614  }
615 
616  /*
617  * netif_napi_add() can be called only once per napi_struct because it
618  * adds each new napi_struct to a list. Be careful not to call it a
619  * second time, e.g., during EEH recovery, by making a note of it.
620  */
621  adap->flags |= NAPI_INIT;
622 }
623 
624 /*
625  * Wait until all NAPI handlers are descheduled. This includes the handlers of
626  * both netdevices representing interfaces and the dummy ones for the extra
627  * queues.
628  */
629 static void quiesce_rx(struct adapter *adap)
630 {
631  int i;
632 
633  for (i = 0; i < SGE_QSETS; i++)
634  if (adap->sge.qs[i].adap)
635  napi_disable(&adap->sge.qs[i].napi);
636 }
637 
638 static void enable_all_napi(struct adapter *adap)
639 {
640  int i;
641  for (i = 0; i < SGE_QSETS; i++)
642  if (adap->sge.qs[i].adap)
643  napi_enable(&adap->sge.qs[i].napi);
644 }
645 
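/*
 * setup_sge_qsets - allocate the SGE queue sets for all ports
 *
 * Allocates one queue set (TX queues, free lists and response queue) per
 * configured queue of every port, binding each to its interrupt vector
 * (a per-qset MSI-X vector, or the single MSI/INTx vector otherwise), and
 * frees all SGE resources again on failure.
 */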
654 static int setup_sge_qsets(struct adapter *adap)
655 {
656  int i, j, err, irq_idx = 0, qset_idx = 0;
657  unsigned int ntxq = SGE_TXQ_PER_SET;
658 
659  if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
660  irq_idx = -1;
661 
662  for_each_port(adap, i) {
663  struct net_device *dev = adap->port[i];
664  struct port_info *pi = netdev_priv(dev);
665 
666  pi->qs = &adap->sge.qs[pi->first_qset];
667  for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
668  err = t3_sge_alloc_qset(adap, qset_idx, 1,
669  (adap->flags & USING_MSIX) ? qset_idx + 1 :
670  irq_idx,
671  &adap->params.sge.qset[qset_idx], ntxq, dev,
672  netdev_get_tx_queue(dev, j));
673  if (err) {
674  t3_free_sge_resources(adap);
675  return err;
676  }
677  }
678  }
679 
680  return 0;
681 }
682 
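/*
 * attr_show/attr_store implement the per-device sysfs attributes defined
 * below (cam_size, nfilters, nservers).  They take the RTNL lock to
 * serialize against ioctls that may be reconfiguring the device, and
 * attr_store additionally enforces CAP_NET_ADMIN and a min/max range.
 * The attributes appear under the netdev's sysfs directory, e.g.
 * (illustrative path) /sys/class/net/<iface>/nfilters.
 */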
683 static ssize_t attr_show(struct device *d, char *buf,
684  ssize_t(*format) (struct net_device *, char *))
685 {
686  ssize_t len;
687 
688  /* Synchronize with ioctls that may shut down the device */
689  rtnl_lock();
690  len = (*format) (to_net_dev(d), buf);
691  rtnl_unlock();
692  return len;
693 }
694 
695 static ssize_t attr_store(struct device *d,
696  const char *buf, size_t len,
697  ssize_t(*set) (struct net_device *, unsigned int),
698  unsigned int min_val, unsigned int max_val)
699 {
700  char *endp;
701  ssize_t ret;
702  unsigned int val;
703 
704  if (!capable(CAP_NET_ADMIN))
705  return -EPERM;
706 
707  val = simple_strtoul(buf, &endp, 0);
708  if (endp == buf || val < min_val || val > max_val)
709  return -EINVAL;
710 
711  rtnl_lock();
712  ret = (*set) (to_net_dev(d), val);
713  if (!ret)
714  ret = len;
715  rtnl_unlock();
716  return ret;
717 }
718 
719 #define CXGB3_SHOW(name, val_expr) \
720 static ssize_t format_##name(struct net_device *dev, char *buf) \
721 { \
722  struct port_info *pi = netdev_priv(dev); \
723  struct adapter *adap = pi->adapter; \
724  return sprintf(buf, "%u\n", val_expr); \
725 } \
726 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
727  char *buf) \
728 { \
729  return attr_show(d, buf, format_##name); \
730 }
731 
732 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
733 {
734  struct port_info *pi = netdev_priv(dev);
735  struct adapter *adap = pi->adapter;
736  int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
737 
738  if (adap->flags & FULL_INIT_DONE)
739  return -EBUSY;
740  if (val && adap->params.rev == 0)
741  return -EINVAL;
742  if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
743  min_tids)
744  return -EINVAL;
745  adap->params.mc5.nfilters = val;
746  return 0;
747 }
748 
749 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
750  const char *buf, size_t len)
751 {
752  return attr_store(d, buf, len, set_nfilters, 0, ~0);
753 }
754 
755 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
756 {
757  struct port_info *pi = netdev_priv(dev);
758  struct adapter *adap = pi->adapter;
759 
760  if (adap->flags & FULL_INIT_DONE)
761  return -EBUSY;
762  if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
763  MC5_MIN_TIDS)
764  return -EINVAL;
765  adap->params.mc5.nservers = val;
766  return 0;
767 }
768 
769 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
770  const char *buf, size_t len)
771 {
772  return attr_store(d, buf, len, set_nservers, 0, ~0);
773 }
774 
775 #define CXGB3_ATTR_R(name, val_expr) \
776 CXGB3_SHOW(name, val_expr) \
777 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
778 
779 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
780 CXGB3_SHOW(name, val_expr) \
781 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
782 
783 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
784 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
785 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
786 
787 static struct attribute *cxgb3_attrs[] = {
788  &dev_attr_cam_size.attr,
789  &dev_attr_nfilters.attr,
790  &dev_attr_nservers.attr,
791  NULL
792 };
793 
794 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
795 
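/*
 * tm_attr_show/tm_attr_store expose the eight TX traffic schedulers
 * (sched0..sched7) via sysfs.  Each scheduler's rate limit is stored in
 * hardware as a (bytes-per-tick, clocks-per-tick) pair; tm_attr_show
 * converts it back to kilobits per second as
 *
 *	rate [Kbps] = (cclk_Hz / cpt) * bpt * 8 / 1000
 *		    = ((vpd.cclk * 1000) / cpt) * bpt / 125
 *
 * where vpd.cclk is the core clock in kHz, matching the arithmetic in the
 * function body.  A cpt value of 0 means the scheduler is disabled.
 */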
796 static ssize_t tm_attr_show(struct device *d,
797  char *buf, int sched)
798 {
799  struct port_info *pi = netdev_priv(to_net_dev(d));
800  struct adapter *adap = pi->adapter;
801  unsigned int v, addr, bpt, cpt;
802  ssize_t len;
803 
804  addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
805  rtnl_lock();
806  t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
807  v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
808  if (sched & 1)
809  v >>= 16;
810  bpt = (v >> 8) & 0xff;
811  cpt = v & 0xff;
812  if (!cpt)
813  len = sprintf(buf, "disabled\n");
814  else {
815  v = (adap->params.vpd.cclk * 1000) / cpt;
816  len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
817  }
818  rtnl_unlock();
819  return len;
820 }
821 
822 static ssize_t tm_attr_store(struct device *d,
823  const char *buf, size_t len, int sched)
824 {
825  struct port_info *pi = netdev_priv(to_net_dev(d));
826  struct adapter *adap = pi->adapter;
827  unsigned int val;
828  char *endp;
829  ssize_t ret;
830 
831  if (!capable(CAP_NET_ADMIN))
832  return -EPERM;
833 
834  val = simple_strtoul(buf, &endp, 0);
835  if (endp == buf || val > 10000000)
836  return -EINVAL;
837 
838  rtnl_lock();
839  ret = t3_config_sched(adap, val, sched);
840  if (!ret)
841  ret = len;
842  rtnl_unlock();
843  return ret;
844 }
845 
846 #define TM_ATTR(name, sched) \
847 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
848  char *buf) \
849 { \
850  return tm_attr_show(d, buf, sched); \
851 } \
852 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
853  const char *buf, size_t len) \
854 { \
855  return tm_attr_store(d, buf, len, sched); \
856 } \
857 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
858 
859 TM_ATTR(sched0, 0);
860 TM_ATTR(sched1, 1);
861 TM_ATTR(sched2, 2);
862 TM_ATTR(sched3, 3);
863 TM_ATTR(sched4, 4);
864 TM_ATTR(sched5, 5);
865 TM_ATTR(sched6, 6);
866 TM_ATTR(sched7, 7);
867 
868 static struct attribute *offload_attrs[] = {
869  &dev_attr_sched0.attr,
870  &dev_attr_sched1.attr,
871  &dev_attr_sched2.attr,
872  &dev_attr_sched3.attr,
873  &dev_attr_sched4.attr,
874  &dev_attr_sched5.attr,
875  &dev_attr_sched6.attr,
876  &dev_attr_sched7.attr,
877  NULL
878 };
879 
880 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
881 
882 /*
883  * Sends an sk_buff to an offload queue driver
884  * after dealing with any active network taps.
885  */
886 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
887 {
888  int ret;
889 
890  local_bh_disable();
891  ret = t3_offload_tx(tdev, skb);
892  local_bh_enable();
893  return ret;
894 }
895 
896 static int write_smt_entry(struct adapter *adapter, int idx)
897 {
898  struct cpl_smt_write_req *req;
899  struct port_info *pi = netdev_priv(adapter->port[idx]);
900  struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
901 
902  if (!skb)
903  return -ENOMEM;
904 
905  req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
906  req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
907  OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
908  req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
909  req->iff = idx;
910  memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
911  memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
912  skb->priority = 1;
913  offload_tx(&adapter->tdev, skb);
914  return 0;
915 }
916 
917 static int init_smt(struct adapter *adapter)
918 {
919  int i;
920 
921  for_each_port(adapter, i)
922  write_smt_entry(adapter, i);
923  return 0;
924 }
925 
926 static void init_port_mtus(struct adapter *adapter)
927 {
928  unsigned int mtus = adapter->port[0]->mtu;
929 
930  if (adapter->port[1])
931  mtus |= adapter->port[1]->mtu << 16;
932  t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
933 }
934 
935 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
936  int hi, int port)
937 {
938  struct sk_buff *skb;
939  struct mngt_pktsched_wr *req;
940  int ret;
941 
942  skb = alloc_skb(sizeof(*req), GFP_KERNEL);
943  if (!skb)
944  skb = adap->nofail_skb;
945  if (!skb)
946  return -ENOMEM;
947 
948  req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
949  req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
950  req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
951  req->sched = sched;
952  req->idx = qidx;
953  req->min = lo;
954  req->max = hi;
955  req->binding = port;
956  ret = t3_mgmt_tx(adap, skb);
957  if (skb == adap->nofail_skb) {
958  adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
959  GFP_KERNEL);
960  if (!adap->nofail_skb)
961  ret = -ENOMEM;
962  }
963 
964  return ret;
965 }
966 
967 static int bind_qsets(struct adapter *adap)
968 {
969  int i, j, err = 0;
970 
971  for_each_port(adap, i) {
972  const struct port_info *pi = adap2pinfo(adap, i);
973 
974  for (j = 0; j < pi->nqsets; ++j) {
975  int ret = send_pktsched_cmd(adap, 1,
976  pi->first_qset + j, -1,
977  -1, i);
978  if (ret)
979  err = ret;
980  }
981  }
982 
983  return err;
984 }
985 
986 #define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
987  __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
988 #define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
989 #define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
990  __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
991 #define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
992 #define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
993 #define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
994 #define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
995 MODULE_FIRMWARE(FW_FNAME);
996 MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
997 MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
998 MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
999 MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
1000 MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
1001 
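/*
 * EDC (electronic dispersion compensation) firmware selection for the
 * AEL2005/AEL2020 10G PHYs: get_edc_fw_name() maps an EDC program index to
 * its firmware file name, and t3_get_edc_fw() loads that file, verifies the
 * checksum (the 32-bit words must sum to 0xffffffff), and unpacks the words
 * into the PHY's 16-bit register cache.
 */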
1002 static inline const char *get_edc_fw_name(int edc_idx)
1003 {
1004  const char *fw_name = NULL;
1005 
1006  switch (edc_idx) {
1007  case EDC_OPT_AEL2005:
1008  fw_name = AEL2005_OPT_EDC_NAME;
1009  break;
1010  case EDC_TWX_AEL2005:
1011  fw_name = AEL2005_TWX_EDC_NAME;
1012  break;
1013  case EDC_TWX_AEL2020:
1014  fw_name = AEL2020_TWX_EDC_NAME;
1015  break;
1016  }
1017  return fw_name;
1018 }
1019 
1020 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1021 {
1022  struct adapter *adapter = phy->adapter;
1023  const struct firmware *fw;
1024  char buf[64];
1025  u32 csum;
1026  const __be32 *p;
1027  u16 *cache = phy->phy_cache;
1028  int i, ret;
1029 
1030  snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));
1031 
1032  ret = request_firmware(&fw, buf, &adapter->pdev->dev);
1033  if (ret < 0) {
1034  dev_err(&adapter->pdev->dev,
1035  "could not upgrade firmware: unable to load %s\n",
1036  buf);
1037  return ret;
1038  }
1039 
1040  /* check size, take checksum in account */
1041  if (fw->size > size + 4) {
1042  CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1043  (unsigned int)fw->size, size + 4);
1044  ret = -EINVAL;
1045  }
1046 
1047  /* compute checksum */
1048  p = (const __be32 *)fw->data;
1049  for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1050  csum += ntohl(p[i]);
1051 
1052  if (csum != 0xffffffff) {
1053  CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1054  csum);
1055  ret = -EINVAL;
1056  }
1057 
1058  for (i = 0; i < size / 4 ; i++) {
1059  *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1060  *cache++ = be32_to_cpu(p[i]) & 0xffff;
1061  }
1062 
1063  release_firmware(fw);
1064 
1065  return ret;
1066 }
1067 
1068 static int upgrade_fw(struct adapter *adap)
1069 {
1070  int ret;
1071  const struct firmware *fw;
1072  struct device *dev = &adap->pdev->dev;
1073 
1074  ret = request_firmware(&fw, FW_FNAME, dev);
1075  if (ret < 0) {
1076  dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1077  FW_FNAME);
1078  return ret;
1079  }
1080  ret = t3_load_fw(adap, fw->data, fw->size);
1081  release_firmware(fw);
1082 
1083  if (ret == 0)
1084  dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1085  FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1086  else
1087  dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1088  FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1089 
1090  return ret;
1091 }
1092 
1093 static inline char t3rev2char(struct adapter *adapter)
1094 {
1095  char rev = 0;
1096 
1097  switch(adapter->params.rev) {
1098  case T3_REV_B:
1099  case T3_REV_B2:
1100  rev = 'b';
1101  break;
1102  case T3_REV_C:
1103  rev = 'c';
1104  break;
1105  }
1106  return rev;
1107 }
1108 
1109 static int update_tpsram(struct adapter *adap)
1110 {
1111  const struct firmware *tpsram;
1112  char buf[64];
1113  struct device *dev = &adap->pdev->dev;
1114  int ret;
1115  char rev;
1116 
1117  rev = t3rev2char(adap);
1118  if (!rev)
1119  return 0;
1120 
1121  snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1122 
1123  ret = request_firmware(&tpsram, buf, dev);
1124  if (ret < 0) {
1125  dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1126  buf);
1127  return ret;
1128  }
1129 
1130  ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1131  if (ret)
1132  goto release_tpsram;
1133 
1134  ret = t3_set_proto_sram(adap, tpsram->data);
1135  if (ret == 0)
1136  dev_info(dev,
1137  "successful update of protocol engine "
1138  "to %d.%d.%d\n",
1139  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1140  else
1141  dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
1142  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1143  if (ret)
1144  dev_err(dev, "loading protocol SRAM failed\n");
1145 
1146 release_tpsram:
1147  release_firmware(tpsram);
1148 
1149  return ret;
1150 }
1151 
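/*
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 *
 * Acquires and releases each of the port's response-queue locks in turn,
 * which guarantees that any response-queue handler running at the time of
 * the call has finished before this returns.
 */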
1161 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1162 {
1163  int i;
1164 
1165  for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1166  struct sge_rspq *q = &adap->sge.qs[i].rspq;
1167 
1168  spin_lock_irq(&q->lock);
1169  spin_unlock_irq(&q->lock);
1170  }
1171 }
1172 
1173 static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1174 {
1175  struct port_info *pi = netdev_priv(dev);
1176  struct adapter *adapter = pi->adapter;
1177 
1178  if (adapter->params.rev > 0) {
1179  t3_set_vlan_accel(adapter, 1 << pi->port_id,
1180  features & NETIF_F_HW_VLAN_RX);
1181  } else {
1182  /* single control for all ports */
1183  unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
1184 
1185  for_each_port(adapter, i)
1186  have_vlans |=
1187  adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
1188 
1189  t3_set_vlan_accel(adapter, 1, have_vlans);
1190  }
1191  t3_synchronize_rx(adapter, pi);
1192 }
1193 
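/*
 * cxgb_up - bring the adapter up
 *
 * Called when the first port is enabled.  On first invocation it upgrades
 * the firmware and TP SRAM if they are out of date, initializes the HW,
 * allocates the SGE queue sets and NAPI contexts, and programs RSS; on
 * every invocation it requests the interrupt vectors, enables NAPI and
 * interrupts, runs TP parity initialization on rev-C offload-capable parts,
 * and binds the queue sets to their ports.
 */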
1204 static int cxgb_up(struct adapter *adap)
1205 {
1206  int i, err;
1207 
1208  if (!(adap->flags & FULL_INIT_DONE)) {
1209  err = t3_check_fw_version(adap);
1210  if (err == -EINVAL) {
1211  err = upgrade_fw(adap);
1212  CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1213  FW_VERSION_MAJOR, FW_VERSION_MINOR,
1214  FW_VERSION_MICRO, err ? "failed" : "succeeded");
1215  }
1216 
1217  err = t3_check_tpsram_version(adap);
1218  if (err == -EINVAL) {
1219  err = update_tpsram(adap);
1220  CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1221  TP_VERSION_MAJOR, TP_VERSION_MINOR,
1222  TP_VERSION_MICRO, err ? "failed" : "succeeded");
1223  }
1224 
1225  /*
1226  * Clear interrupts now to catch errors if t3_init_hw fails.
1227  * We clear them again later as initialization may trigger
1228  * conditions that can interrupt.
1229  */
1230  t3_intr_clear(adap);
1231 
1232  err = t3_init_hw(adap, 0);
1233  if (err)
1234  goto out;
1235 
1236  t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1237  t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1238 
1239  err = setup_sge_qsets(adap);
1240  if (err)
1241  goto out;
1242 
1243  for_each_port(adap, i)
1244  cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1245 
1246  setup_rss(adap);
1247  if (!(adap->flags & NAPI_INIT))
1248  init_napi(adap);
1249 
1250  t3_start_sge_timers(adap);
1251  adap->flags |= FULL_INIT_DONE;
1252  }
1253 
1254  t3_intr_clear(adap);
1255 
1256  if (adap->flags & USING_MSIX) {
1257  name_msix_vecs(adap);
1258  err = request_irq(adap->msix_info[0].vec,
1259  t3_async_intr_handler, 0,
1260  adap->msix_info[0].desc, adap);
1261  if (err)
1262  goto irq_err;
1263 
1264  err = request_msix_data_irqs(adap);
1265  if (err) {
1266  free_irq(adap->msix_info[0].vec, adap);
1267  goto irq_err;
1268  }
1269  } else if ((err = request_irq(adap->pdev->irq,
1270  t3_intr_handler(adap,
1271  adap->sge.qs[0].rspq.
1272  polling),
1273  (adap->flags & USING_MSI) ?
1274  0 : IRQF_SHARED,
1275  adap->name, adap)))
1276  goto irq_err;
1277 
1278  enable_all_napi(adap);
1279  t3_sge_start(adap);
1280  t3_intr_enable(adap);
1281 
1282  if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1283  is_offload(adap) && init_tp_parity(adap) == 0)
1284  adap->flags |= TP_PARITY_INIT;
1285 
1286  if (adap->flags & TP_PARITY_INIT) {
1287  t3_write_reg(adap, A_TP_INT_CAUSE,
1288  F_CMCACHEPERR | F_ARPLUTPERR);
1289  t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1290  }
1291 
1292  if (!(adap->flags & QUEUES_BOUND)) {
1293  int ret = bind_qsets(adap);
1294 
1295  if (ret < 0) {
1296  CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1297  t3_intr_disable(adap);
1298  free_irq_resources(adap);
1299  err = ret;
1300  goto out;
1301  }
1302  adap->flags |= QUEUES_BOUND;
1303  }
1304 
1305 out:
1306  return err;
1307 irq_err:
1308  CH_ERR(adap, "request_irq failed, err %d\n", err);
1309  goto out;
1310 }
1311 
1312 /*
1313  * Release resources when all the ports and offloading have been stopped.
1314  */
1315 static void cxgb_down(struct adapter *adapter, int on_wq)
1316 {
1317  t3_sge_stop(adapter);
1318  spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1319  t3_intr_disable(adapter);
1320  spin_unlock_irq(&adapter->work_lock);
1321 
1322  free_irq_resources(adapter);
1323  quiesce_rx(adapter);
1324  t3_sge_stop(adapter);
1325  if (!on_wq)
1326  flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1327 }
1328 
1329 static void schedule_chk_task(struct adapter *adap)
1330 {
1331  unsigned int timeo;
1332 
1333  timeo = adap->params.linkpoll_period ?
1334  (HZ * adap->params.linkpoll_period) / 10 :
1335  adap->params.stats_update_period * HZ;
1336  if (timeo)
1337  queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1338 }
1339 
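/*
 * offload_open - enable the offload capabilities of a port
 *
 * Brings the adapter up if necessary, switches TP into offload mode,
 * activates the offload layer, programs the MTU tables and SMT entries,
 * creates the scheduler sysfs attributes, and notifies all registered
 * offload clients that the device is available.
 */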
1340 static int offload_open(struct net_device *dev)
1341 {
1342  struct port_info *pi = netdev_priv(dev);
1343  struct adapter *adapter = pi->adapter;
1344  struct t3cdev *tdev = dev2t3cdev(dev);
1345  int adap_up = adapter->open_device_map & PORT_MASK;
1346  int err;
1347 
1348  if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1349  return 0;
1350 
1351  if (!adap_up && (err = cxgb_up(adapter)) < 0)
1352  goto out;
1353 
1354  t3_tp_set_offload_mode(adapter, 1);
1355  tdev->lldev = adapter->port[0];
1356  err = cxgb3_offload_activate(adapter);
1357  if (err)
1358  goto out;
1359 
1360  init_port_mtus(adapter);
1361  t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1362  adapter->params.b_wnd,
1363  adapter->params.rev == 0 ?
1364  adapter->port[0]->mtu : 0xffff);
1365  init_smt(adapter);
1366 
1367  if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1368  dev_dbg(&dev->dev, "cannot create sysfs group\n");
1369 
1370  /* Call back all registered clients */
1371  cxgb3_add_clients(tdev);
1372 
1373 out:
1374  /* restore them in case the offload module has changed them */
1375  if (err) {
1376  t3_tp_set_offload_mode(adapter, 0);
1377  clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1378  cxgb3_set_dummy_ops(tdev);
1379  }
1380  return err;
1381 }
1382 
1383 static int offload_close(struct t3cdev *tdev)
1384 {
1385  struct adapter *adapter = tdev2adap(tdev);
1386  struct t3c_data *td = T3C_DATA(tdev);
1387 
1388  if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1389  return 0;
1390 
1391  /* Call back all registered clients */
1392  cxgb3_remove_clients(tdev);
1393 
1394  sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1395 
1396  /* Flush work scheduled while releasing TIDs */
1397  flush_work(&td->tid_release_task);
1398 
1399  tdev->lldev = NULL;
1400  cxgb3_set_dummy_ops(tdev);
1401  t3_tp_set_offload_mode(adapter, 0);
1402  clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1403 
1404  if (!adapter->open_device_map)
1405  cxgb_down(adapter, 0);
1406 
1407  cxgb3_offload_deactivate(adapter);
1408  return 0;
1409 }
1410 
1411 static int cxgb_open(struct net_device *dev)
1412 {
1413  struct port_info *pi = netdev_priv(dev);
1414  struct adapter *adapter = pi->adapter;
1415  int other_ports = adapter->open_device_map & PORT_MASK;
1416  int err;
1417 
1418  if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1419  return err;
1420 
1421  set_bit(pi->port_id, &adapter->open_device_map);
1422  if (is_offload(adapter) && !ofld_disable) {
1423  err = offload_open(dev);
1424  if (err)
1425  printk(KERN_WARNING
1426  "Could not initialize offload capabilities\n");
1427  }
1428 
1429  netif_set_real_num_tx_queues(dev, pi->nqsets);
1430  err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1431  if (err)
1432  return err;
1433  link_start(dev);
1434  t3_port_intr_enable(adapter, pi->port_id);
1435  netif_tx_start_all_queues(dev);
1436  if (!other_ports)
1437  schedule_chk_task(adapter);
1438 
1439  cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1440  return 0;
1441 }
1442 
1443 static int __cxgb_close(struct net_device *dev, int on_wq)
1444 {
1445  struct port_info *pi = netdev_priv(dev);
1446  struct adapter *adapter = pi->adapter;
1447 
1448 
1449  if (!adapter->open_device_map)
1450  return 0;
1451 
1452  /* Stop link fault interrupts */
1453  t3_xgm_intr_disable(adapter, pi->port_id);
1454  t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1455 
1456  t3_port_intr_disable(adapter, pi->port_id);
1457  netif_tx_stop_all_queues(dev);
1458  pi->phy.ops->power_down(&pi->phy, 1);
1459  netif_carrier_off(dev);
1460  t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1461 
1462  spin_lock_irq(&adapter->work_lock); /* sync with update task */
1463  clear_bit(pi->port_id, &adapter->open_device_map);
1464  spin_unlock_irq(&adapter->work_lock);
1465 
1466  if (!(adapter->open_device_map & PORT_MASK))
1467  cancel_delayed_work_sync(&adapter->adap_check_task);
1468 
1469  if (!adapter->open_device_map)
1470  cxgb_down(adapter, on_wq);
1471 
1472  cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1473  return 0;
1474 }
1475 
1476 static int cxgb_close(struct net_device *dev)
1477 {
1478  return __cxgb_close(dev, 0);
1479 }
1480 
1481 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1482 {
1483  struct port_info *pi = netdev_priv(dev);
1484  struct adapter *adapter = pi->adapter;
1485  struct net_device_stats *ns = &pi->netstats;
1486  const struct mac_stats *pstats;
1487 
1488  spin_lock(&adapter->stats_lock);
1489  pstats = t3_mac_update_stats(&pi->mac);
1490  spin_unlock(&adapter->stats_lock);
1491 
1492  ns->tx_bytes = pstats->tx_octets;
1493  ns->tx_packets = pstats->tx_frames;
1494  ns->rx_bytes = pstats->rx_octets;
1495  ns->rx_packets = pstats->rx_frames;
1496  ns->multicast = pstats->rx_mcast_frames;
1497 
1498  ns->tx_errors = pstats->tx_underrun;
1499  ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1500  pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1501  pstats->rx_fifo_ovfl;
1502 
1503  /* detailed rx_errors */
1504  ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1505  ns->rx_over_errors = 0;
1506  ns->rx_crc_errors = pstats->rx_fcs_errs;
1507  ns->rx_frame_errors = pstats->rx_symbol_errs;
1508  ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1509  ns->rx_missed_errors = pstats->rx_cong_drops;
1510 
1511  /* detailed tx_errors */
1512  ns->tx_aborted_errors = 0;
1513  ns->tx_carrier_errors = 0;
1514  ns->tx_fifo_errors = pstats->tx_underrun;
1515  ns->tx_heartbeat_errors = 0;
1516  ns->tx_window_errors = 0;
1517  return ns;
1518 }
1519 
1520 static u32 get_msglevel(struct net_device *dev)
1521 {
1522  struct port_info *pi = netdev_priv(dev);
1523  struct adapter *adapter = pi->adapter;
1524 
1525  return adapter->msg_enable;
1526 }
1527 
1528 static void set_msglevel(struct net_device *dev, u32 val)
1529 {
1530  struct port_info *pi = netdev_priv(dev);
1531  struct adapter *adapter = pi->adapter;
1532 
1533  adapter->msg_enable = val;
1534 }
1535 
1536 static char stats_strings[][ETH_GSTRING_LEN] = {
1537  "TxOctetsOK ",
1538  "TxFramesOK ",
1539  "TxMulticastFramesOK",
1540  "TxBroadcastFramesOK",
1541  "TxPauseFrames ",
1542  "TxUnderrun ",
1543  "TxExtUnderrun ",
1544 
1545  "TxFrames64 ",
1546  "TxFrames65To127 ",
1547  "TxFrames128To255 ",
1548  "TxFrames256To511 ",
1549  "TxFrames512To1023 ",
1550  "TxFrames1024To1518 ",
1551  "TxFrames1519ToMax ",
1552 
1553  "RxOctetsOK ",
1554  "RxFramesOK ",
1555  "RxMulticastFramesOK",
1556  "RxBroadcastFramesOK",
1557  "RxPauseFrames ",
1558  "RxFCSErrors ",
1559  "RxSymbolErrors ",
1560  "RxShortErrors ",
1561  "RxJabberErrors ",
1562  "RxLengthErrors ",
1563  "RxFIFOoverflow ",
1564 
1565  "RxFrames64 ",
1566  "RxFrames65To127 ",
1567  "RxFrames128To255 ",
1568  "RxFrames256To511 ",
1569  "RxFrames512To1023 ",
1570  "RxFrames1024To1518 ",
1571  "RxFrames1519ToMax ",
1572 
1573  "PhyFIFOErrors ",
1574  "TSO ",
1575  "VLANextractions ",
1576  "VLANinsertions ",
1577  "TxCsumOffload ",
1578  "RxCsumGood ",
1579  "LroAggregated ",
1580  "LroFlushed ",
1581  "LroNoDesc ",
1582  "RxDrops ",
1583 
1584  "CheckTXEnToggled ",
1585  "CheckResets ",
1586 
1587  "LinkFaults ",
1588 };
1589 
1590 static int get_sset_count(struct net_device *dev, int sset)
1591 {
1592  switch (sset) {
1593  case ETH_SS_STATS:
1594  return ARRAY_SIZE(stats_strings);
1595  default:
1596  return -EOPNOTSUPP;
1597  }
1598 }
1599 
1600 #define T3_REGMAP_SIZE (3 * 1024)
1601 
1602 static int get_regs_len(struct net_device *dev)
1603 {
1604  return T3_REGMAP_SIZE;
1605 }
1606 
1607 static int get_eeprom_len(struct net_device *dev)
1608 {
1609  return EEPROMSIZE;
1610 }
1611 
1612 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1613 {
1614  struct port_info *pi = netdev_priv(dev);
1615  struct adapter *adapter = pi->adapter;
1616  u32 fw_vers = 0;
1617  u32 tp_vers = 0;
1618 
1619  spin_lock(&adapter->stats_lock);
1620  t3_get_fw_version(adapter, &fw_vers);
1621  t3_get_tp_version(adapter, &tp_vers);
1622  spin_unlock(&adapter->stats_lock);
1623 
1624  strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1625  strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1626  strlcpy(info->bus_info, pci_name(adapter->pdev),
1627  sizeof(info->bus_info));
1628  if (fw_vers)
1629  snprintf(info->fw_version, sizeof(info->fw_version),
1630  "%s %u.%u.%u TP %u.%u.%u",
1631  G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1632  G_FW_VERSION_MAJOR(fw_vers),
1633  G_FW_VERSION_MINOR(fw_vers),
1634  G_FW_VERSION_MICRO(fw_vers),
1635  G_TP_VERSION_MAJOR(tp_vers),
1636  G_TP_VERSION_MINOR(tp_vers),
1637  G_TP_VERSION_MICRO(tp_vers));
1638 }
1639 
1640 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1641 {
1642  if (stringset == ETH_SS_STATS)
1643  memcpy(data, stats_strings, sizeof(stats_strings));
1644 }
1645 
1646 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1647  struct port_info *p, int idx)
1648 {
1649  int i;
1650  unsigned long tot = 0;
1651 
1652  for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1653  tot += adapter->sge.qs[i].port_stats[idx];
1654  return tot;
1655 }
1656 
1657 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1658  u64 *data)
1659 {
1660  struct port_info *pi = netdev_priv(dev);
1661  struct adapter *adapter = pi->adapter;
1662  const struct mac_stats *s;
1663 
1664  spin_lock(&adapter->stats_lock);
1665  s = t3_mac_update_stats(&pi->mac);
1666  spin_unlock(&adapter->stats_lock);
1667 
1668  *data++ = s->tx_octets;
1669  *data++ = s->tx_frames;
1670  *data++ = s->tx_mcast_frames;
1671  *data++ = s->tx_bcast_frames;
1672  *data++ = s->tx_pause;
1673  *data++ = s->tx_underrun;
1674  *data++ = s->tx_fifo_urun;
1675 
1676  *data++ = s->tx_frames_64;
1677  *data++ = s->tx_frames_65_127;
1678  *data++ = s->tx_frames_128_255;
1679  *data++ = s->tx_frames_256_511;
1680  *data++ = s->tx_frames_512_1023;
1681  *data++ = s->tx_frames_1024_1518;
1682  *data++ = s->tx_frames_1519_max;
1683 
1684  *data++ = s->rx_octets;
1685  *data++ = s->rx_frames;
1686  *data++ = s->rx_mcast_frames;
1687  *data++ = s->rx_bcast_frames;
1688  *data++ = s->rx_pause;
1689  *data++ = s->rx_fcs_errs;
1690  *data++ = s->rx_symbol_errs;
1691  *data++ = s->rx_short;
1692  *data++ = s->rx_jabber;
1693  *data++ = s->rx_too_long;
1694  *data++ = s->rx_fifo_ovfl;
1695 
1696  *data++ = s->rx_frames_64;
1697  *data++ = s->rx_frames_65_127;
1698  *data++ = s->rx_frames_128_255;
1699  *data++ = s->rx_frames_256_511;
1700  *data++ = s->rx_frames_512_1023;
1701  *data++ = s->rx_frames_1024_1518;
1702  *data++ = s->rx_frames_1519_max;
1703 
1704  *data++ = pi->phy.fifo_errors;
1705 
1706  *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1707  *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1708  *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1709  *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1710  *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1711  *data++ = 0;
1712  *data++ = 0;
1713  *data++ = 0;
1714  *data++ = s->rx_cong_drops;
1715 
1716  *data++ = s->num_toggled;
1717  *data++ = s->num_resets;
1718 
1719  *data++ = s->link_faults;
1720 }
1721 
1722 static inline void reg_block_dump(struct adapter *ap, void *buf,
1723  unsigned int start, unsigned int end)
1724 {
1725  u32 *p = buf + start;
1726 
1727  for (; start <= end; start += sizeof(u32))
1728  *p++ = t3_read_reg(ap, start);
1729 }
1730 
1731 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1732  void *buf)
1733 {
1734  struct port_info *pi = netdev_priv(dev);
1735  struct adapter *ap = pi->adapter;
1736 
1737  /*
1738  * Version scheme:
1739  * bits 0..9: chip version
1740  * bits 10..15: chip revision
1741  * bit 31: set for PCIe cards
1742  */
1743  regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1744 
1745  /*
1746  * We skip the MAC statistics registers because they are clear-on-read.
1747  * Also reading multi-register stats would need to synchronize with the
1748  * periodic mac stats accumulation. Hard to justify the complexity.
1749  */
1750  memset(buf, 0, T3_REGMAP_SIZE);
1751  reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1752  reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1753  reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1754  reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1755  reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1756  reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1757  XGM_REG(A_XGM_SERDES_STAT3, 1));
1758  reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1759  XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1760 }
1761 
1762 static int restart_autoneg(struct net_device *dev)
1763 {
1764  struct port_info *p = netdev_priv(dev);
1765 
1766  if (!netif_running(dev))
1767  return -EAGAIN;
1768  if (p->link_config.autoneg != AUTONEG_ENABLE)
1769  return -EINVAL;
1770  p->phy.ops->autoneg_restart(&p->phy);
1771  return 0;
1772 }
1773 
1774 static int set_phys_id(struct net_device *dev,
1775  enum ethtool_phys_id_state state)
1776 {
1777  struct port_info *pi = netdev_priv(dev);
1778  struct adapter *adapter = pi->adapter;
1779 
1780  switch (state) {
1781  case ETHTOOL_ID_ACTIVE:
1782  return 1; /* cycle on/off once per second */
1783 
1784  case ETHTOOL_ID_OFF:
1785  t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1786  break;
1787 
1788  case ETHTOOL_ID_ON:
1789  case ETHTOOL_ID_INACTIVE:
1790  t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1791  F_GPIO0_OUT_VAL);
1792  }
1793 
1794  return 0;
1795 }
1796 
1797 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1798 {
1799  struct port_info *p = netdev_priv(dev);
1800 
1801  cmd->supported = p->link_config.supported;
1802  cmd->advertising = p->link_config.advertising;
1803 
1804  if (netif_carrier_ok(dev)) {
1805  ethtool_cmd_speed_set(cmd, p->link_config.speed);
1806  cmd->duplex = p->link_config.duplex;
1807  } else {
1808  ethtool_cmd_speed_set(cmd, -1);
1809  cmd->duplex = -1;
1810  }
1811 
1812  cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1813  cmd->phy_address = p->phy.mdio.prtad;
1814  cmd->transceiver = XCVR_EXTERNAL;
1815  cmd->autoneg = p->link_config.autoneg;
1816  cmd->maxtxpkt = 0;
1817  cmd->maxrxpkt = 0;
1818  return 0;
1819 }
1820 
1821 static int speed_duplex_to_caps(int speed, int duplex)
1822 {
1823  int cap = 0;
1824 
1825  switch (speed) {
1826  case SPEED_10:
1827  if (duplex == DUPLEX_FULL)
1828  cap = SUPPORTED_10baseT_Full;
1829  else
1830  cap = SUPPORTED_10baseT_Half;
1831  break;
1832  case SPEED_100:
1833  if (duplex == DUPLEX_FULL)
1834  cap = SUPPORTED_100baseT_Full;
1835  else
1836  cap = SUPPORTED_100baseT_Half;
1837  break;
1838  case SPEED_1000:
1839  if (duplex == DUPLEX_FULL)
1840  cap = SUPPORTED_1000baseT_Full;
1841  else
1842  cap = SUPPORTED_1000baseT_Half;
1843  break;
1844  case SPEED_10000:
1845  if (duplex == DUPLEX_FULL)
1846  cap = SUPPORTED_10000baseT_Full;
1847  }
1848  return cap;
1849 }
1850 
1851 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1852  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1853  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1854  ADVERTISED_10000baseT_Full)
1855 
1856 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1857 {
1858  struct port_info *p = netdev_priv(dev);
1859  struct link_config *lc = &p->link_config;
1860 
1861  if (!(lc->supported & SUPPORTED_Autoneg)) {
1862  /*
1863  * PHY offers a single speed/duplex. See if that's what's
1864  * being requested.
1865  */
1866  if (cmd->autoneg == AUTONEG_DISABLE) {
1867  u32 speed = ethtool_cmd_speed(cmd);
1868  int cap = speed_duplex_to_caps(speed, cmd->duplex);
1869  if (lc->supported & cap)
1870  return 0;
1871  }
1872  return -EINVAL;
1873  }
1874 
1875  if (cmd->autoneg == AUTONEG_DISABLE) {
1876  u32 speed = ethtool_cmd_speed(cmd);
1877  int cap = speed_duplex_to_caps(speed, cmd->duplex);
1878 
1879  if (!(lc->supported & cap) || (speed == SPEED_1000))
1880  return -EINVAL;
1881  lc->requested_speed = speed;
1882  lc->requested_duplex = cmd->duplex;
1883  lc->advertising = 0;
1884  } else {
1885  cmd->advertising &= ADVERTISED_MASK;
1886  cmd->advertising &= lc->supported;
1887  if (!cmd->advertising)
1888  return -EINVAL;
1889  lc->requested_speed = SPEED_INVALID;
1890  lc->requested_duplex = DUPLEX_INVALID;
1891  lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1892  }
1893  lc->autoneg = cmd->autoneg;
1894  if (netif_running(dev))
1895  t3_link_start(&p->phy, &p->mac, lc);
1896  return 0;
1897 }
1898 
1899 static void get_pauseparam(struct net_device *dev,
1900  struct ethtool_pauseparam *epause)
1901 {
1902  struct port_info *p = netdev_priv(dev);
1903 
1904  epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1905  epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1906  epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1907 }
1908 
1909 static int set_pauseparam(struct net_device *dev,
1910  struct ethtool_pauseparam *epause)
1911 {
1912  struct port_info *p = netdev_priv(dev);
1913  struct link_config *lc = &p->link_config;
1914 
1915  if (epause->autoneg == AUTONEG_DISABLE)
1916  lc->requested_fc = 0;
1917  else if (lc->supported & SUPPORTED_Autoneg)
1918  lc->requested_fc = PAUSE_AUTONEG;
1919  else
1920  return -EINVAL;
1921 
1922  if (epause->rx_pause)
1923  lc->requested_fc |= PAUSE_RX;
1924  if (epause->tx_pause)
1925  lc->requested_fc |= PAUSE_TX;
1926  if (lc->autoneg == AUTONEG_ENABLE) {
1927  if (netif_running(dev))
1928  t3_link_start(&p->phy, &p->mac, lc);
1929  } else {
1930  lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1931  if (netif_running(dev))
1932  t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1933  }
1934  return 0;
1935 }
1936 
1937 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1938 {
1939  struct port_info *pi = netdev_priv(dev);
1940  struct adapter *adapter = pi->adapter;
1941  const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1942 
1943  e->rx_max_pending = MAX_RX_BUFFERS;
1944  e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1945  e->tx_max_pending = MAX_TXQ_ENTRIES;
1946 
1947  e->rx_pending = q->fl_size;
1948  e->rx_mini_pending = q->rspq_size;
1949  e->rx_jumbo_pending = q->jumbo_size;
1950  e->tx_pending = q->txq_size[0];
1951 }
1952 
1953 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1954 {
1955  struct port_info *pi = netdev_priv(dev);
1956  struct adapter *adapter = pi->adapter;
1957  struct qset_params *q;
1958  int i;
1959 
1960  if (e->rx_pending > MAX_RX_BUFFERS ||
1961  e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1962  e->tx_pending > MAX_TXQ_ENTRIES ||
1963  e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1964  e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1965  e->rx_pending < MIN_FL_ENTRIES ||
1966  e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1967  e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1968  return -EINVAL;
1969 
1970  if (adapter->flags & FULL_INIT_DONE)
1971  return -EBUSY;
1972 
1973  q = &adapter->params.sge.qset[pi->first_qset];
1974  for (i = 0; i < pi->nqsets; ++i, ++q) {
1975  q->rspq_size = e->rx_mini_pending;
1976  q->fl_size = e->rx_pending;
1977  q->jumbo_size = e->rx_jumbo_pending;
1978  q->txq_size[0] = e->tx_pending;
1979  q->txq_size[1] = e->tx_pending;
1980  q->txq_size[2] = e->tx_pending;
1981  }
1982  return 0;
1983 }
1984 
1985 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1986 {
1987  struct port_info *pi = netdev_priv(dev);
1988  struct adapter *adapter = pi->adapter;
1989  struct qset_params *qsp;
1990  struct sge_qset *qs;
1991  int i;
1992 
1993  if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1994  return -EINVAL;
1995 
1996  for (i = 0; i < pi->nqsets; i++) {
1997  qsp = &adapter->params.sge.qset[i];
1998  qs = &adapter->sge.qs[i];
1999  qsp->coalesce_usecs = c->rx_coalesce_usecs;
2000  t3_update_qset_coalesce(qs, qsp);
2001  }
2002 
2003  return 0;
2004 }
2005 
2006 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2007 {
2008  struct port_info *pi = netdev_priv(dev);
2009  struct adapter *adapter = pi->adapter;
2010  struct qset_params *q = adapter->params.sge.qset;
2011 
2012  c->rx_coalesce_usecs = q->coalesce_usecs;
2013  return 0;
2014 }
2015 
2016 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2017  u8 * data)
2018 {
2019  struct port_info *pi = netdev_priv(dev);
2020  struct adapter *adapter = pi->adapter;
2021  int i, err = 0;
2022 
2023  u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2024  if (!buf)
2025  return -ENOMEM;
2026 
2027  e->magic = EEPROM_MAGIC;
2028  for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2029  err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
2030 
2031  if (!err)
2032  memcpy(data, buf + e->offset, e->len);
2033  kfree(buf);
2034  return err;
2035 }
2036 
2037 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2038  u8 * data)
2039 {
2040  struct port_info *pi = netdev_priv(dev);
2041  struct adapter *adapter = pi->adapter;
2042  u32 aligned_offset, aligned_len;
2043  __le32 *p;
2044  u8 *buf;
2045  int err;
2046 
2047  if (eeprom->magic != EEPROM_MAGIC)
2048  return -EINVAL;
2049 
2050  aligned_offset = eeprom->offset & ~3;
2051  aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2052 
2053  if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2054  buf = kmalloc(aligned_len, GFP_KERNEL);
2055  if (!buf)
2056  return -ENOMEM;
2057  err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2058  if (!err && aligned_len > 4)
2059  err = t3_seeprom_read(adapter,
2060  aligned_offset + aligned_len - 4,
2061  (__le32 *) & buf[aligned_len - 4]);
2062  if (err)
2063  goto out;
2064  memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2065  } else
2066  buf = data;
2067 
2068  err = t3_seeprom_wp(adapter, 0);
2069  if (err)
2070  goto out;
2071 
2072  for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2073  err = t3_seeprom_write(adapter, aligned_offset, *p);
2074  aligned_offset += 4;
2075  }
2076 
2077  if (!err)
2078  err = t3_seeprom_wp(adapter, 1);
2079 out:
2080  if (buf != data)
2081  kfree(buf);
2082  return err;
2083 }
2084 
2085 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2086 {
2087  wol->supported = 0;
2088  wol->wolopts = 0;
2089  memset(&wol->sopass, 0, sizeof(wol->sopass));
2090 }
2091 
2092 static const struct ethtool_ops cxgb_ethtool_ops = {
2093  .get_settings = get_settings,
2094  .set_settings = set_settings,
2095  .get_drvinfo = get_drvinfo,
2096  .get_msglevel = get_msglevel,
2097  .set_msglevel = set_msglevel,
2098  .get_ringparam = get_sge_param,
2099  .set_ringparam = set_sge_param,
2100  .get_coalesce = get_coalesce,
2101  .set_coalesce = set_coalesce,
2102  .get_eeprom_len = get_eeprom_len,
2103  .get_eeprom = get_eeprom,
2104  .set_eeprom = set_eeprom,
2105  .get_pauseparam = get_pauseparam,
2106  .set_pauseparam = set_pauseparam,
2107  .get_link = ethtool_op_get_link,
2108  .get_strings = get_strings,
2109  .set_phys_id = set_phys_id,
2110  .nway_reset = restart_autoneg,
2111  .get_sset_count = get_sset_count,
2112  .get_ethtool_stats = get_stats,
2113  .get_regs_len = get_regs_len,
2114  .get_regs = get_regs,
2115  .get_wol = get_wol,
2116 };
2117 
2118 static int in_range(int val, int lo, int hi)
2119 {
2120  return val < 0 || (val <= hi && val >= lo);
2121 }
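A note on in_range(): negative values deliberately pass the check. The extension ioctl below uses negative field values (typically -1) to mean "leave this parameter unchanged", so validation must accept them and the individual ">= 0" tests later decide whether each field is applied. A minimal standalone sketch, using illustrative bounds rather than the driver's actual MIN_/MAX_ constants:

#include <assert.h>

static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

int main(void)
{
	assert(in_range(-1, 32, 16384));    /* "don't change" sentinel passes */
	assert(in_range(1024, 32, 16384));  /* value inside the bounds passes */
	assert(!in_range(8, 32, 16384));    /* value outside the bounds fails */
	return 0;
}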
2122 
2123 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2124 {
2125  struct port_info *pi = netdev_priv(dev);
2126  struct adapter *adapter = pi->adapter;
2127  u32 cmd;
2128  int ret;
2129 
2130  if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2131  return -EFAULT;
2132 
2133  switch (cmd) {
 2134  case CHELSIO_SET_QSET_PARAMS:{
 2135  int i;
2136  struct qset_params *q;
2137  struct ch_qset_params t;
2138  int q1 = pi->first_qset;
2139  int nqsets = pi->nqsets;
2140 
2141  if (!capable(CAP_NET_ADMIN))
2142  return -EPERM;
2143  if (copy_from_user(&t, useraddr, sizeof(t)))
2144  return -EFAULT;
2145  if (t.qset_idx >= SGE_QSETS)
2146  return -EINVAL;
2147  if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2148  !in_range(t.cong_thres, 0, 255) ||
2149  !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2150  MAX_TXQ_ENTRIES) ||
2151  !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2152  MAX_TXQ_ENTRIES) ||
 2153  !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
 2154  MAX_CTRL_TXQ_ENTRIES) ||
 2155  !in_range(t.fl_size[0], MIN_FL_ENTRIES,
 2156  MAX_RX_BUFFERS) ||
 2157  !in_range(t.fl_size[1], MIN_FL_ENTRIES,
 2158  MAX_RX_JUMBO_BUFFERS) ||
 2159  !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
 2160  MAX_RSPQ_ENTRIES))
 2161  return -EINVAL;
2162 
2163  if ((adapter->flags & FULL_INIT_DONE) &&
2164  (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2165  t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2166  t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2167  t.polling >= 0 || t.cong_thres >= 0))
2168  return -EBUSY;
2169 
2170  /* Allow setting of any available qset when offload enabled */
2171  if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2172  q1 = 0;
2173  for_each_port(adapter, i) {
2174  pi = adap2pinfo(adapter, i);
2175  nqsets += pi->first_qset + pi->nqsets;
2176  }
2177  }
2178 
2179  if (t.qset_idx < q1)
2180  return -EINVAL;
2181  if (t.qset_idx > q1 + nqsets - 1)
2182  return -EINVAL;
2183 
2184  q = &adapter->params.sge.qset[t.qset_idx];
2185 
2186  if (t.rspq_size >= 0)
2187  q->rspq_size = t.rspq_size;
2188  if (t.fl_size[0] >= 0)
2189  q->fl_size = t.fl_size[0];
2190  if (t.fl_size[1] >= 0)
2191  q->jumbo_size = t.fl_size[1];
2192  if (t.txq_size[0] >= 0)
2193  q->txq_size[0] = t.txq_size[0];
2194  if (t.txq_size[1] >= 0)
2195  q->txq_size[1] = t.txq_size[1];
2196  if (t.txq_size[2] >= 0)
2197  q->txq_size[2] = t.txq_size[2];
2198  if (t.cong_thres >= 0)
2199  q->cong_thres = t.cong_thres;
2200  if (t.intr_lat >= 0) {
2201  struct sge_qset *qs =
2202  &adapter->sge.qs[t.qset_idx];
2203 
2204  q->coalesce_usecs = t.intr_lat;
2205  t3_update_qset_coalesce(qs, q);
2206  }
2207  if (t.polling >= 0) {
2208  if (adapter->flags & USING_MSIX)
2209  q->polling = t.polling;
2210  else {
2211  /* No polling with INTx for T3A */
2212  if (adapter->params.rev == 0 &&
2213  !(adapter->flags & USING_MSI))
2214  t.polling = 0;
2215 
2216  for (i = 0; i < SGE_QSETS; i++) {
2217  q = &adapter->params.sge.
2218  qset[i];
2219  q->polling = t.polling;
2220  }
2221  }
2222  }
2223 
2224  if (t.lro >= 0) {
2225  if (t.lro)
2226  dev->wanted_features |= NETIF_F_GRO;
2227  else
2228  dev->wanted_features &= ~NETIF_F_GRO;
 2229  netdev_update_features(dev);
 2230  }
2231 
2232  break;
2233  }
 2234  case CHELSIO_GET_QSET_PARAMS:{
 2235  struct qset_params *q;
2236  struct ch_qset_params t;
2237  int q1 = pi->first_qset;
2238  int nqsets = pi->nqsets;
2239  int i;
2240 
2241  if (copy_from_user(&t, useraddr, sizeof(t)))
2242  return -EFAULT;
2243 
2244  /* Display qsets for all ports when offload enabled */
2245  if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2246  q1 = 0;
2247  for_each_port(adapter, i) {
2248  pi = adap2pinfo(adapter, i);
2249  nqsets = pi->first_qset + pi->nqsets;
2250  }
2251  }
2252 
2253  if (t.qset_idx >= nqsets)
2254  return -EINVAL;
2255 
2256  q = &adapter->params.sge.qset[q1 + t.qset_idx];
2257  t.rspq_size = q->rspq_size;
2258  t.txq_size[0] = q->txq_size[0];
2259  t.txq_size[1] = q->txq_size[1];
2260  t.txq_size[2] = q->txq_size[2];
2261  t.fl_size[0] = q->fl_size;
2262  t.fl_size[1] = q->jumbo_size;
2263  t.polling = q->polling;
2264  t.lro = !!(dev->features & NETIF_F_GRO);
2265  t.intr_lat = q->coalesce_usecs;
2266  t.cong_thres = q->cong_thres;
2267  t.qnum = q1;
2268 
2269  if (adapter->flags & USING_MSIX)
2270  t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2271  else
2272  t.vector = adapter->pdev->irq;
2273 
2274  if (copy_to_user(useraddr, &t, sizeof(t)))
2275  return -EFAULT;
2276  break;
2277  }
2278  case CHELSIO_SET_QSET_NUM:{
2279  struct ch_reg edata;
2280  unsigned int i, first_qset = 0, other_qsets = 0;
2281 
2282  if (!capable(CAP_NET_ADMIN))
2283  return -EPERM;
2284  if (adapter->flags & FULL_INIT_DONE)
2285  return -EBUSY;
2286  if (copy_from_user(&edata, useraddr, sizeof(edata)))
2287  return -EFAULT;
2288  if (edata.val < 1 ||
2289  (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2290  return -EINVAL;
2291 
2292  for_each_port(adapter, i)
2293  if (adapter->port[i] && adapter->port[i] != dev)
2294  other_qsets += adap2pinfo(adapter, i)->nqsets;
2295 
2296  if (edata.val + other_qsets > SGE_QSETS)
2297  return -EINVAL;
2298 
2299  pi->nqsets = edata.val;
2300 
2301  for_each_port(adapter, i)
2302  if (adapter->port[i]) {
2303  pi = adap2pinfo(adapter, i);
2304  pi->first_qset = first_qset;
2305  first_qset += pi->nqsets;
2306  }
2307  break;
2308  }
2309  case CHELSIO_GET_QSET_NUM:{
2310  struct ch_reg edata;
2311 
2312  memset(&edata, 0, sizeof(struct ch_reg));
2313 
 2314  edata.cmd = CHELSIO_GET_QSET_NUM;
 2315  edata.val = pi->nqsets;
2316  if (copy_to_user(useraddr, &edata, sizeof(edata)))
2317  return -EFAULT;
2318  break;
2319  }
2320  case CHELSIO_LOAD_FW:{
2321  u8 *fw_data;
2322  struct ch_mem_range t;
2323 
2324  if (!capable(CAP_SYS_RAWIO))
2325  return -EPERM;
2326  if (copy_from_user(&t, useraddr, sizeof(t)))
2327  return -EFAULT;
2328  /* Check t.len sanity ? */
2329  fw_data = memdup_user(useraddr + sizeof(t), t.len);
2330  if (IS_ERR(fw_data))
2331  return PTR_ERR(fw_data);
2332 
2333  ret = t3_load_fw(adapter, fw_data, t.len);
2334  kfree(fw_data);
2335  if (ret)
2336  return ret;
2337  break;
2338  }
2339  case CHELSIO_SETMTUTAB:{
2340  struct ch_mtus m;
2341  int i;
2342 
2343  if (!is_offload(adapter))
2344  return -EOPNOTSUPP;
2345  if (!capable(CAP_NET_ADMIN))
2346  return -EPERM;
2347  if (offload_running(adapter))
2348  return -EBUSY;
2349  if (copy_from_user(&m, useraddr, sizeof(m)))
2350  return -EFAULT;
2351  if (m.nmtus != NMTUS)
2352  return -EINVAL;
2353  if (m.mtus[0] < 81) /* accommodate SACK */
2354  return -EINVAL;
2355 
2356  /* MTUs must be in ascending order */
2357  for (i = 1; i < NMTUS; ++i)
2358  if (m.mtus[i] < m.mtus[i - 1])
2359  return -EINVAL;
2360 
2361  memcpy(adapter->params.mtus, m.mtus,
2362  sizeof(adapter->params.mtus));
2363  break;
2364  }
2365  case CHELSIO_GET_PM:{
2366  struct tp_params *p = &adapter->params.tp;
2367  struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2368 
2369  if (!is_offload(adapter))
2370  return -EOPNOTSUPP;
2371  m.tx_pg_sz = p->tx_pg_size;
2372  m.tx_num_pg = p->tx_num_pgs;
2373  m.rx_pg_sz = p->rx_pg_size;
2374  m.rx_num_pg = p->rx_num_pgs;
2375  m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2376  if (copy_to_user(useraddr, &m, sizeof(m)))
2377  return -EFAULT;
2378  break;
2379  }
2380  case CHELSIO_SET_PM:{
2381  struct ch_pm m;
2382  struct tp_params *p = &adapter->params.tp;
2383 
2384  if (!is_offload(adapter))
2385  return -EOPNOTSUPP;
2386  if (!capable(CAP_NET_ADMIN))
2387  return -EPERM;
2388  if (adapter->flags & FULL_INIT_DONE)
2389  return -EBUSY;
2390  if (copy_from_user(&m, useraddr, sizeof(m)))
2391  return -EFAULT;
2392  if (!is_power_of_2(m.rx_pg_sz) ||
2393  !is_power_of_2(m.tx_pg_sz))
2394  return -EINVAL; /* not power of 2 */
2395  if (!(m.rx_pg_sz & 0x14000))
2396  return -EINVAL; /* not 16KB or 64KB */
2397  if (!(m.tx_pg_sz & 0x1554000))
2398  return -EINVAL;
2399  if (m.tx_num_pg == -1)
2400  m.tx_num_pg = p->tx_num_pgs;
2401  if (m.rx_num_pg == -1)
2402  m.rx_num_pg = p->rx_num_pgs;
2403  if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2404  return -EINVAL;
2405  if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2406  m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2407  return -EINVAL;
2408  p->rx_pg_size = m.rx_pg_sz;
2409  p->tx_pg_size = m.tx_pg_sz;
2410  p->rx_num_pgs = m.rx_num_pg;
2411  p->tx_num_pgs = m.tx_num_pg;
2412  break;
2413  }
2414  case CHELSIO_GET_MEM:{
2415  struct ch_mem_range t;
2416  struct mc7 *mem;
2417  u64 buf[32];
2418 
2419  if (!is_offload(adapter))
2420  return -EOPNOTSUPP;
2421  if (!(adapter->flags & FULL_INIT_DONE))
2422  return -EIO; /* need the memory controllers */
2423  if (copy_from_user(&t, useraddr, sizeof(t)))
2424  return -EFAULT;
2425  if ((t.addr & 7) || (t.len & 7))
2426  return -EINVAL;
2427  if (t.mem_id == MEM_CM)
2428  mem = &adapter->cm;
2429  else if (t.mem_id == MEM_PMRX)
2430  mem = &adapter->pmrx;
2431  else if (t.mem_id == MEM_PMTX)
2432  mem = &adapter->pmtx;
2433  else
2434  return -EINVAL;
2435 
2436  /*
2437  * Version scheme:
2438  * bits 0..9: chip version
2439  * bits 10..15: chip revision
2440  */
2441  t.version = 3 | (adapter->params.rev << 10);
2442  if (copy_to_user(useraddr, &t, sizeof(t)))
2443  return -EFAULT;
2444 
2445  /*
2446  * Read 256 bytes at a time as len can be large and we don't
2447  * want to use huge intermediate buffers.
2448  */
2449  useraddr += sizeof(t); /* advance to start of buffer */
2450  while (t.len) {
2451  unsigned int chunk =
2452  min_t(unsigned int, t.len, sizeof(buf));
2453 
2454  ret =
2455  t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2456  buf);
2457  if (ret)
2458  return ret;
2459  if (copy_to_user(useraddr, buf, chunk))
2460  return -EFAULT;
2461  useraddr += chunk;
2462  t.addr += chunk;
2463  t.len -= chunk;
2464  }
2465  break;
2466  }
 2467  case CHELSIO_SET_TRACE_FILTER:{
 2468  struct ch_trace t;
2469  const struct trace_params *tp;
2470 
2471  if (!capable(CAP_NET_ADMIN))
2472  return -EPERM;
2473  if (!offload_running(adapter))
2474  return -EAGAIN;
2475  if (copy_from_user(&t, useraddr, sizeof(t)))
2476  return -EFAULT;
2477 
2478  tp = (const struct trace_params *)&t.sip;
2479  if (t.config_tx)
2480  t3_config_trace_filter(adapter, tp, 0,
2481  t.invert_match,
2482  t.trace_tx);
2483  if (t.config_rx)
2484  t3_config_trace_filter(adapter, tp, 1,
2485  t.invert_match,
2486  t.trace_rx);
2487  break;
2488  }
2489  default:
2490  return -EOPNOTSUPP;
2491  }
2492  return 0;
2493 }
2494 
2495 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2496 {
2497  struct mii_ioctl_data *data = if_mii(req);
2498  struct port_info *pi = netdev_priv(dev);
2499  struct adapter *adapter = pi->adapter;
2500 
2501  switch (cmd) {
2502  case SIOCGMIIREG:
2503  case SIOCSMIIREG:
2504  /* Convert phy_id from older PRTAD/DEVAD format */
2505  if (is_10G(adapter) &&
2506  !mdio_phy_id_is_c45(data->phy_id) &&
2507  (data->phy_id & 0x1f00) &&
2508  !(data->phy_id & 0xe0e0))
2509  data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2510  data->phy_id & 0x1f);
2511  /* FALLTHRU */
2512  case SIOCGMIIPHY:
2513  return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2514  case SIOCCHIOCTL:
2515  return cxgb_extension_ioctl(dev, req->ifr_data);
2516  default:
2517  return -EOPNOTSUPP;
2518  }
2519 }
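For context, SIOCCHIOCTL hands a driver-private command block to cxgb_extension_ioctl(), which reads the leading u32 command word from ifr_data and dispatches on it. The sketch below shows how a userspace tool might query CHELSIO_GET_QSET_NUM; it assumes the struct ch_reg layout and the SIOCCHIOCTL/CHELSIO_GET_QSET_NUM definitions from the driver's cxgb3_ioctl.h (or a local copy of it), and "eth0" is only an example interface name.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include "cxgb3_ioctl.h"   /* driver-private: struct ch_reg, CHELSIO_*, SIOCCHIOCTL */

int main(void)
{
	struct ch_reg reg;   /* first field is the command word the driver reads */
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&reg, 0, sizeof(reg));
	reg.cmd = CHELSIO_GET_QSET_NUM;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&reg;

	if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
		printf("%s uses %u queue sets\n", ifr.ifr_name, reg.val);
	return 0;
}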
2520 
2521 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2522 {
2523  struct port_info *pi = netdev_priv(dev);
2524  struct adapter *adapter = pi->adapter;
2525  int ret;
2526 
2527  if (new_mtu < 81) /* accommodate SACK */
2528  return -EINVAL;
2529  if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2530  return ret;
2531  dev->mtu = new_mtu;
2532  init_port_mtus(adapter);
2533  if (adapter->params.rev == 0 && offload_running(adapter))
2534  t3_load_mtus(adapter, adapter->params.mtus,
2535  adapter->params.a_wnd, adapter->params.b_wnd,
2536  adapter->port[0]->mtu);
2537  return 0;
2538 }
2539 
2540 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2541 {
2542  struct port_info *pi = netdev_priv(dev);
2543  struct adapter *adapter = pi->adapter;
2544  struct sockaddr *addr = p;
2545 
2546  if (!is_valid_ether_addr(addr->sa_data))
2547  return -EADDRNOTAVAIL;
2548 
2549  memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 2550  t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
 2551  if (offload_running(adapter))
2552  write_smt_entry(adapter, pi->port_id);
2553  return 0;
2554 }
2555 
2556 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2557  netdev_features_t features)
2558 {
2559  /*
2560  * Since there is no support for separate rx/tx vlan accel
2561  * enable/disable make sure tx flag is always in same state as rx.
2562  */
2563  if (features & NETIF_F_HW_VLAN_RX)
2564  features |= NETIF_F_HW_VLAN_TX;
2565  else
2566  features &= ~NETIF_F_HW_VLAN_TX;
2567 
2568  return features;
2569 }
2570 
2571 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2572 {
 2573  netdev_features_t changed = dev->features ^ features;
 2574 
2575  if (changed & NETIF_F_HW_VLAN_RX)
2576  cxgb_vlan_mode(dev, features);
2577 
2578  return 0;
2579 }
2580 
2581 #ifdef CONFIG_NET_POLL_CONTROLLER
2582 static void cxgb_netpoll(struct net_device *dev)
2583 {
2584  struct port_info *pi = netdev_priv(dev);
2585  struct adapter *adapter = pi->adapter;
2586  int qidx;
2587 
2588  for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2589  struct sge_qset *qs = &adapter->sge.qs[qidx];
2590  void *source;
2591 
2592  if (adapter->flags & USING_MSIX)
2593  source = qs;
2594  else
2595  source = adapter;
2596 
2597  t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2598  }
2599 }
2600 #endif
2601 
2602 /*
2603  * Periodic accumulation of MAC statistics.
2604  */
2605 static void mac_stats_update(struct adapter *adapter)
2606 {
2607  int i;
2608 
2609  for_each_port(adapter, i) {
2610  struct net_device *dev = adapter->port[i];
2611  struct port_info *p = netdev_priv(dev);
2612 
2613  if (netif_running(dev)) {
2614  spin_lock(&adapter->stats_lock);
2615  t3_mac_update_stats(&p->mac);
2616  spin_unlock(&adapter->stats_lock);
2617  }
2618  }
2619 }
2620 
2621 static void check_link_status(struct adapter *adapter)
2622 {
2623  int i;
2624 
2625  for_each_port(adapter, i) {
2626  struct net_device *dev = adapter->port[i];
2627  struct port_info *p = netdev_priv(dev);
2628  int link_fault;
2629 
2630  spin_lock_irq(&adapter->work_lock);
2631  link_fault = p->link_fault;
2632  spin_unlock_irq(&adapter->work_lock);
2633 
2634  if (link_fault) {
2635  t3_link_fault(adapter, i);
2636  continue;
2637  }
2638 
2639  if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2640  t3_xgm_intr_disable(adapter, i);
2641  t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2642 
2643  t3_link_changed(adapter, i);
2644  t3_xgm_intr_enable(adapter, i);
2645  }
2646  }
2647 }
2648 
2649 static void check_t3b2_mac(struct adapter *adapter)
2650 {
2651  int i;
2652 
2653  if (!rtnl_trylock()) /* synchronize with ifdown */
2654  return;
2655 
2656  for_each_port(adapter, i) {
2657  struct net_device *dev = adapter->port[i];
2658  struct port_info *p = netdev_priv(dev);
2659  int status;
2660 
2661  if (!netif_running(dev))
2662  continue;
2663 
2664  status = 0;
2665  if (netif_running(dev) && netif_carrier_ok(dev))
2666  status = t3b2_mac_watchdog_task(&p->mac);
2667  if (status == 1)
2668  p->mac.stats.num_toggled++;
2669  else if (status == 2) {
2670  struct cmac *mac = &p->mac;
2671 
2672  t3_mac_set_mtu(mac, dev->mtu);
 2673  t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
 2674  cxgb_set_rxmode(dev);
2675  t3_link_start(&p->phy, mac, &p->link_config);
 2676  t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 2677  t3_port_intr_enable(adapter, p->port_id);
2678  p->mac.stats.num_resets++;
2679  }
2680  }
2681  rtnl_unlock();
2682 }
2683 
2684 
2685 static void t3_adap_check_task(struct work_struct *work)
2686 {
2687  struct adapter *adapter = container_of(work, struct adapter,
2688  adap_check_task.work);
2689  const struct adapter_params *p = &adapter->params;
2690  int port;
2691  unsigned int v, status, reset;
2692 
2693  adapter->check_task_cnt++;
2694 
2695  check_link_status(adapter);
2696 
2697  /* Accumulate MAC stats if needed */
2698  if (!p->linkpoll_period ||
2699  (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2700  p->stats_update_period) {
2701  mac_stats_update(adapter);
2702  adapter->check_task_cnt = 0;
2703  }
2704 
2705  if (p->rev == T3_REV_B2)
2706  check_t3b2_mac(adapter);
2707 
2708  /*
2709  * Scan the XGMAC's to check for various conditions which we want to
2710  * monitor in a periodic polling manner rather than via an interrupt
2711  * condition. This is used for conditions which would otherwise flood
2712  * the system with interrupts and we only really need to know that the
2713  * conditions are "happening" ... For each condition we count the
2714  * detection of the condition and reset it for the next polling loop.
2715  */
2716  for_each_port(adapter, port) {
2717  struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2718  u32 cause;
2719 
2720  cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2721  reset = 0;
2722  if (cause & F_RXFIFO_OVERFLOW) {
2723  mac->stats.rx_fifo_ovfl++;
2724  reset |= F_RXFIFO_OVERFLOW;
2725  }
2726 
2727  t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2728  }
2729 
2730  /*
2731  * We do the same as above for FL_EMPTY interrupts.
2732  */
2733  status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2734  reset = 0;
2735 
2736  if (status & F_FLEMPTY) {
2737  struct sge_qset *qs = &adapter->sge.qs[0];
2738  int i = 0;
2739 
2740  reset |= F_FLEMPTY;
2741 
2742  v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2743  0xffff;
2744 
2745  while (v) {
2746  qs->fl[i].empty += (v & 1);
2747  if (i)
2748  qs++;
2749  i ^= 1;
2750  v >>= 1;
2751  }
2752  }
2753 
2754  t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2755 
2756  /* Schedule the next check update if any port is active. */
2757  spin_lock_irq(&adapter->work_lock);
2758  if (adapter->open_device_map & PORT_MASK)
2759  schedule_chk_task(adapter);
2760  spin_unlock_irq(&adapter->work_lock);
2761 }
2762 
2763 static void db_full_task(struct work_struct *work)
2764 {
2765  struct adapter *adapter = container_of(work, struct adapter,
2766  db_full_task);
2767 
2768  cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2769 }
2770 
2771 static void db_empty_task(struct work_struct *work)
2772 {
2773  struct adapter *adapter = container_of(work, struct adapter,
2774  db_empty_task);
2775 
2776  cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2777 }
2778 
2779 static void db_drop_task(struct work_struct *work)
2780 {
2781  struct adapter *adapter = container_of(work, struct adapter,
2782  db_drop_task);
2783  unsigned long delay = 1000;
2784  unsigned short r;
2785 
2786  cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2787 
2788  /*
2789  * Sleep a while before ringing the driver qset dbs.
2790  * The delay is between 1000-2023 usecs.
2791  */
2792  get_random_bytes(&r, 2);
2793  delay += r & 1023;
 2794  set_current_state(TASK_UNINTERRUPTIBLE);
 2795  schedule_timeout(usecs_to_jiffies(delay));
 2796  ring_dbs(adapter);
2797 }
2798 
2799 /*
2800  * Processes external (PHY) interrupts in process context.
2801  */
2802 static void ext_intr_task(struct work_struct *work)
2803 {
2804  struct adapter *adapter = container_of(work, struct adapter,
 2805  ext_intr_handler_task);
 2806  int i;
2807 
2808  /* Disable link fault interrupts */
2809  for_each_port(adapter, i) {
2810  struct net_device *dev = adapter->port[i];
2811  struct port_info *p = netdev_priv(dev);
2812 
2813  t3_xgm_intr_disable(adapter, i);
2814  t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2815  }
2816 
2817  /* Re-enable link fault interrupts */
2818  t3_phy_intr_handler(adapter);
2819 
2820  for_each_port(adapter, i)
2821  t3_xgm_intr_enable(adapter, i);
2822 
2823  /* Now reenable external interrupts */
2824  spin_lock_irq(&adapter->work_lock);
2825  if (adapter->slow_intr_mask) {
2826  adapter->slow_intr_mask |= F_T3DBG;
2827  t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2828  t3_write_reg(adapter, A_PL_INT_ENABLE0,
2829  adapter->slow_intr_mask);
2830  }
2831  spin_unlock_irq(&adapter->work_lock);
2832 }
2833 
2834 /*
2835  * Interrupt-context handler for external (PHY) interrupts.
2836  */
2837 void t3_os_ext_intr_handler(struct adapter *adapter)
2838 {
2839  /*
2840  * Schedule a task to handle external interrupts as they may be slow
2841  * and we use a mutex to protect MDIO registers. We disable PHY
2842  * interrupts in the meantime and let the task reenable them when
2843  * it's done.
2844  */
2845  spin_lock(&adapter->work_lock);
2846  if (adapter->slow_intr_mask) {
2847  adapter->slow_intr_mask &= ~F_T3DBG;
2848  t3_write_reg(adapter, A_PL_INT_ENABLE0,
2849  adapter->slow_intr_mask);
2850  queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2851  }
2852  spin_unlock(&adapter->work_lock);
2853 }
2854 
2855 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2856 {
2857  struct net_device *netdev = adapter->port[port_id];
2858  struct port_info *pi = netdev_priv(netdev);
2859 
2860  spin_lock(&adapter->work_lock);
2861  pi->link_fault = 1;
2862  spin_unlock(&adapter->work_lock);
2863 }
2864 
2865 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2866 {
2867  int i, ret = 0;
2868 
2869  if (is_offload(adapter) &&
 2870  test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
 2871  cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
 2872  offload_close(&adapter->tdev);
2873  }
2874 
2875  /* Stop all ports */
2876  for_each_port(adapter, i) {
2877  struct net_device *netdev = adapter->port[i];
2878 
2879  if (netif_running(netdev))
2880  __cxgb_close(netdev, on_wq);
2881  }
2882 
2883  /* Stop SGE timers */
2884  t3_stop_sge_timers(adapter);
2885 
2886  adapter->flags &= ~FULL_INIT_DONE;
2887 
2888  if (reset)
2889  ret = t3_reset_adapter(adapter);
2890 
2891  pci_disable_device(adapter->pdev);
2892 
2893  return ret;
2894 }
2895 
2896 static int t3_reenable_adapter(struct adapter *adapter)
2897 {
2898  if (pci_enable_device(adapter->pdev)) {
2899  dev_err(&adapter->pdev->dev,
2900  "Cannot re-enable PCI device after reset.\n");
2901  goto err;
2902  }
2903  pci_set_master(adapter->pdev);
2904  pci_restore_state(adapter->pdev);
2905  pci_save_state(adapter->pdev);
2906 
2907  /* Free sge resources */
2908  t3_free_sge_resources(adapter);
2909 
2910  if (t3_replay_prep_adapter(adapter))
2911  goto err;
2912 
2913  return 0;
2914 err:
2915  return -1;
2916 }
2917 
2918 static void t3_resume_ports(struct adapter *adapter)
2919 {
2920  int i;
2921 
2922  /* Restart the ports */
2923  for_each_port(adapter, i) {
2924  struct net_device *netdev = adapter->port[i];
2925 
2926  if (netif_running(netdev)) {
2927  if (cxgb_open(netdev)) {
2928  dev_err(&adapter->pdev->dev,
2929  "can't bring device back up"
2930  " after reset\n");
2931  continue;
2932  }
2933  }
2934  }
2935 
2936  if (is_offload(adapter) && !ofld_disable)
2937  cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2938 }
2939 
2940 /*
2941  * processes a fatal error.
2942  * Bring the ports down, reset the chip, bring the ports back up.
2943  */
2944 static void fatal_error_task(struct work_struct *work)
2945 {
2946  struct adapter *adapter = container_of(work, struct adapter,
 2947  fatal_error_handler_task);
 2948  int err = 0;
2949 
2950  rtnl_lock();
2951  err = t3_adapter_error(adapter, 1, 1);
2952  if (!err)
2953  err = t3_reenable_adapter(adapter);
2954  if (!err)
2955  t3_resume_ports(adapter);
2956 
2957  CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2958  rtnl_unlock();
2959 }
2960 
2961 void t3_fatal_err(struct adapter *adapter)
2962 {
2963  unsigned int fw_status[4];
2964 
2965  if (adapter->flags & FULL_INIT_DONE) {
2966  t3_sge_stop(adapter);
2967  t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2968  t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2969  t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2970  t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2971 
2972  spin_lock(&adapter->work_lock);
2973  t3_intr_disable(adapter);
2974  queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2975  spin_unlock(&adapter->work_lock);
2976  }
2977  CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2978  if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2979  CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2980  fw_status[0], fw_status[1],
2981  fw_status[2], fw_status[3]);
2982 }
2983 
 2984 /**
 2985  * t3_io_error_detected - called when PCI error is detected
 2986  * @pdev: Pointer to PCI device
 2987  * @state: The current pci connection state
 2988  *
 2989  * This function is called after a PCI bus error affecting
 2990  * this device has been detected.
 2991  */
 2992 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2993  pci_channel_state_t state)
2994 {
2995  struct adapter *adapter = pci_get_drvdata(pdev);
2996 
2997  if (state == pci_channel_io_perm_failure)
 2998  return PCI_ERS_RESULT_DISCONNECT;
 2999 
3000  t3_adapter_error(adapter, 0, 0);
3001 
3002  /* Request a slot reset. */
 3003  return PCI_ERS_RESULT_NEED_RESET;
 3004 }
3005 
 3006 /**
 3007  * t3_io_slot_reset - called after the pci bus has been reset.
 3008  * @pdev: Pointer to PCI device
 3009  *
 3010  * Restart the card from scratch, as if from a cold-boot.
 3011  */
 3012 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3013 {
3014  struct adapter *adapter = pci_get_drvdata(pdev);
3015 
3016  if (!t3_reenable_adapter(adapter))
3017  return PCI_ERS_RESULT_RECOVERED;
3018 
 3019  return PCI_ERS_RESULT_DISCONNECT;
 3020 }
3021 
 3022 /**
 3023  * t3_io_resume - called when traffic can start flowing again.
 3024  * @pdev: Pointer to PCI device
 3025  *
 3026  * This callback is called when the error recovery driver tells us that
 3027  * it's OK to resume normal operation.
 3028  */
 3029 static void t3_io_resume(struct pci_dev *pdev)
3030 {
3031  struct adapter *adapter = pci_get_drvdata(pdev);
3032 
3033  CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3034  t3_read_reg(adapter, A_PCIE_PEX_ERR));
3035 
3036  t3_resume_ports(adapter);
3037 }
3038 
3039 static const struct pci_error_handlers t3_err_handler = {
3040  .error_detected = t3_io_error_detected,
3041  .slot_reset = t3_io_slot_reset,
3042  .resume = t3_io_resume,
3043 };
3044 
3045 /*
3046  * Set the number of qsets based on the number of CPUs and the number of ports,
3047  * not to exceed the number of available qsets, assuming there are enough qsets
3048  * per port in HW.
3049  */
3050 static void set_nqsets(struct adapter *adap)
3051 {
3052  int i, j = 0;
3053  int num_cpus = netif_get_num_default_rss_queues();
3054  int hwports = adap->params.nports;
3055  int nqsets = adap->msix_nvectors - 1;
3056 
3057  if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3058  if (hwports == 2 &&
3059  (hwports * nqsets > SGE_QSETS ||
3060  num_cpus >= nqsets / hwports))
3061  nqsets /= hwports;
3062  if (nqsets > num_cpus)
3063  nqsets = num_cpus;
3064  if (nqsets < 1 || hwports == 4)
3065  nqsets = 1;
3066  } else
3067  nqsets = 1;
3068 
3069  for_each_port(adap, i) {
3070  struct port_info *pi = adap2pinfo(adap, i);
3071 
3072  pi->first_qset = j;
3073  pi->nqsets = nqsets;
3074  j = pi->first_qset + nqsets;
3075 
3076  dev_info(&adap->pdev->dev,
3077  "Port %d using %d queue sets.\n", i, nqsets);
3078  }
3079 }
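As a worked illustration (hypothetical numbers, not from the source, and assuming SGE_QSETS is 8): on a two-port, rev > 0 adapter running MSI-X with 9 vectors and 8 online CPUs, nqsets starts at 9 - 1 = 8; since 2 * 8 exceeds SGE_QSETS it is halved to 4, which does not exceed the CPU count, so port 0 is assigned queue sets 0-3 (first_qset = 0) and port 1 is assigned queue sets 4-7 (first_qset = 4).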
3080 
3081 static int __devinit cxgb_enable_msix(struct adapter *adap)
3082 {
3083  struct msix_entry entries[SGE_QSETS + 1];
3084  int vectors;
3085  int i, err;
3086 
3087  vectors = ARRAY_SIZE(entries);
3088  for (i = 0; i < vectors; ++i)
3089  entries[i].entry = i;
3090 
3091  while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
3092  vectors = err;
3093 
3094  if (err < 0)
3095  pci_disable_msix(adap->pdev);
3096 
3097  if (!err && vectors < (adap->params.nports + 1)) {
3098  pci_disable_msix(adap->pdev);
3099  err = -1;
3100  }
3101 
3102  if (!err) {
3103  for (i = 0; i < vectors; ++i)
3104  adap->msix_info[i].vec = entries[i].vector;
3105  adap->msix_nvectors = vectors;
3106  }
3107 
3108  return err;
3109 }
3110 
3111 static void __devinit print_port_info(struct adapter *adap,
3112  const struct adapter_info *ai)
3113 {
3114  static const char *pci_variant[] = {
3115  "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3116  };
3117 
3118  int i;
3119  char buf[80];
3120 
3121  if (is_pcie(adap))
3122  snprintf(buf, sizeof(buf), "%s x%d",
3123  pci_variant[adap->params.pci.variant],
3124  adap->params.pci.width);
3125  else
3126  snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3127  pci_variant[adap->params.pci.variant],
3128  adap->params.pci.speed, adap->params.pci.width);
3129 
3130  for_each_port(adap, i) {
3131  struct net_device *dev = adap->port[i];
3132  const struct port_info *pi = netdev_priv(dev);
3133 
3134  if (!test_bit(i, &adap->registered_device_map))
3135  continue;
3136  printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
3137  dev->name, ai->desc, pi->phy.desc,
3138  is_offload(adap) ? "R" : "", adap->params.rev, buf,
3139  (adap->flags & USING_MSIX) ? " MSI-X" :
3140  (adap->flags & USING_MSI) ? " MSI" : "");
3141  if (adap->name == dev->name && adap->params.vpd.mclk)
3143  "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3144  adap->name, t3_mc7_size(&adap->cm) >> 20,
3145  t3_mc7_size(&adap->pmtx) >> 20,
3146  t3_mc7_size(&adap->pmrx) >> 20,
3147  adap->params.vpd.sn);
3148  }
3149 }
3150 
3151 static const struct net_device_ops cxgb_netdev_ops = {
3152  .ndo_open = cxgb_open,
3153  .ndo_stop = cxgb_close,
3154  .ndo_start_xmit = t3_eth_xmit,
3155  .ndo_get_stats = cxgb_get_stats,
3156  .ndo_validate_addr = eth_validate_addr,
3157  .ndo_set_rx_mode = cxgb_set_rxmode,
3158  .ndo_do_ioctl = cxgb_ioctl,
3159  .ndo_change_mtu = cxgb_change_mtu,
3160  .ndo_set_mac_address = cxgb_set_mac_addr,
3161  .ndo_fix_features = cxgb_fix_features,
3162  .ndo_set_features = cxgb_set_features,
3163 #ifdef CONFIG_NET_POLL_CONTROLLER
3164  .ndo_poll_controller = cxgb_netpoll,
3165 #endif
3166 };
3167 
3168 static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3169 {
3170  struct port_info *pi = netdev_priv(dev);
3171 
3172  memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3173  pi->iscsic.mac_addr[3] |= 0x80;
3174 }
3175 
3176 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3177 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3178  NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3179 static int __devinit init_one(struct pci_dev *pdev,
3180  const struct pci_device_id *ent)
3181 {
3182  static int version_printed;
3183 
3184  int i, err, pci_using_dac = 0;
3185  resource_size_t mmio_start, mmio_len;
3186  const struct adapter_info *ai;
3187  struct adapter *adapter = NULL;
3188  struct port_info *pi;
3189 
3190  if (!version_printed) {
3191  printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3192  ++version_printed;
3193  }
3194 
3195  if (!cxgb3_wq) {
 3196  cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 3197  if (!cxgb3_wq) {
 3198  printk(KERN_ERR DRV_NAME
 3199  ": cannot initialize work queue\n");
3200  return -ENOMEM;
3201  }
3202  }
3203 
3204  err = pci_enable_device(pdev);
3205  if (err) {
3206  dev_err(&pdev->dev, "cannot enable PCI device\n");
3207  goto out;
3208  }
3209 
3210  err = pci_request_regions(pdev, DRV_NAME);
3211  if (err) {
3212  /* Just info, some other driver may have claimed the device. */
3213  dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3214  goto out_disable_device;
3215  }
3216 
3217  if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3218  pci_using_dac = 1;
3219  err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3220  if (err) {
3221  dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3222  "coherent allocations\n");
3223  goto out_release_regions;
3224  }
3225  } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3226  dev_err(&pdev->dev, "no usable DMA configuration\n");
3227  goto out_release_regions;
3228  }
3229 
3230  pci_set_master(pdev);
3231  pci_save_state(pdev);
3232 
3233  mmio_start = pci_resource_start(pdev, 0);
3234  mmio_len = pci_resource_len(pdev, 0);
3235  ai = t3_get_adapter_info(ent->driver_data);
3236 
3237  adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3238  if (!adapter) {
3239  err = -ENOMEM;
3240  goto out_release_regions;
3241  }
3242 
3243  adapter->nofail_skb =
3244  alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3245  if (!adapter->nofail_skb) {
3246  dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3247  err = -ENOMEM;
3248  goto out_free_adapter;
3249  }
3250 
3251  adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3252  if (!adapter->regs) {
3253  dev_err(&pdev->dev, "cannot map device registers\n");
3254  err = -ENOMEM;
3255  goto out_free_adapter;
3256  }
3257 
3258  adapter->pdev = pdev;
3259  adapter->name = pci_name(pdev);
3260  adapter->msg_enable = dflt_msg_enable;
3261  adapter->mmio_len = mmio_len;
3262 
3263  mutex_init(&adapter->mdio_lock);
3264  spin_lock_init(&adapter->work_lock);
3265  spin_lock_init(&adapter->stats_lock);
3266 
3267  INIT_LIST_HEAD(&adapter->adapter_list);
3268  INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3269  INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3270 
3271  INIT_WORK(&adapter->db_full_task, db_full_task);
3272  INIT_WORK(&adapter->db_empty_task, db_empty_task);
3273  INIT_WORK(&adapter->db_drop_task, db_drop_task);
3274 
3275  INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3276 
3277  for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3278  struct net_device *netdev;
3279 
3280  netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3281  if (!netdev) {
3282  err = -ENOMEM;
3283  goto out_free_dev;
3284  }
3285 
3286  SET_NETDEV_DEV(netdev, &pdev->dev);
3287 
3288  adapter->port[i] = netdev;
3289  pi = netdev_priv(netdev);
3290  pi->adapter = adapter;
3291  pi->port_id = i;
3292  netif_carrier_off(netdev);
3293  netdev->irq = pdev->irq;
3294  netdev->mem_start = mmio_start;
3295  netdev->mem_end = mmio_start + mmio_len - 1;
3296  netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
 3297  NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
 3298  netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
3299  netdev->vlan_features |= netdev->features & VLAN_FEAT;
3300  if (pci_using_dac)
3301  netdev->features |= NETIF_F_HIGHDMA;
3302 
3303  netdev->netdev_ops = &cxgb_netdev_ops;
3304  SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3305  }
3306 
3307  pci_set_drvdata(pdev, adapter);
3308  if (t3_prep_adapter(adapter, ai, 1) < 0) {
3309  err = -ENODEV;
3310  goto out_free_dev;
3311  }
3312 
3313  /*
3314  * The card is now ready to go. If any errors occur during device
3315  * registration we do not fail the whole card but rather proceed only
3316  * with the ports we manage to register successfully. However we must
3317  * register at least one net device.
3318  */
3319  for_each_port(adapter, i) {
3320  err = register_netdev(adapter->port[i]);
3321  if (err)
3322  dev_warn(&pdev->dev,
3323  "cannot register net device %s, skipping\n",
3324  adapter->port[i]->name);
3325  else {
3326  /*
3327  * Change the name we use for messages to the name of
3328  * the first successfully registered interface.
3329  */
3330  if (!adapter->registered_device_map)
3331  adapter->name = adapter->port[i]->name;
3332 
3333  __set_bit(i, &adapter->registered_device_map);
3334  }
3335  }
3336  if (!adapter->registered_device_map) {
3337  dev_err(&pdev->dev, "could not register any net devices\n");
3338  goto out_free_dev;
3339  }
3340 
3341  for_each_port(adapter, i)
3342  cxgb3_init_iscsi_mac(adapter->port[i]);
3343 
3344  /* Driver's ready. Reflect it on LEDs */
3345  t3_led_ready(adapter);
3346 
3347  if (is_offload(adapter)) {
 3348  __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
 3349  cxgb3_adapter_ofld(adapter);
3350  }
3351 
3352  /* See what interrupts we'll be using */
3353  if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3354  adapter->flags |= USING_MSIX;
3355  else if (msi > 0 && pci_enable_msi(pdev) == 0)
3356  adapter->flags |= USING_MSI;
3357 
3358  set_nqsets(adapter);
3359 
3360  err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3361  &cxgb3_attr_group);
3362 
3363  print_port_info(adapter, ai);
3364  return 0;
3365 
3366 out_free_dev:
3367  iounmap(adapter->regs);
3368  for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3369  if (adapter->port[i])
3370  free_netdev(adapter->port[i]);
3371 
3372 out_free_adapter:
3373  kfree(adapter);
3374 
3375 out_release_regions:
3376  pci_release_regions(pdev);
3377 out_disable_device:
3378  pci_disable_device(pdev);
3379  pci_set_drvdata(pdev, NULL);
3380 out:
3381  return err;
3382 }
3383 
3384 static void __devexit remove_one(struct pci_dev *pdev)
3385 {
3386  struct adapter *adapter = pci_get_drvdata(pdev);
3387 
3388  if (adapter) {
3389  int i;
3390 
3391  t3_sge_stop(adapter);
3392  sysfs_remove_group(&adapter->port[0]->dev.kobj,
3393  &cxgb3_attr_group);
3394 
3395  if (is_offload(adapter)) {
3396  cxgb3_adapter_unofld(adapter);
 3397  if (test_bit(OFFLOAD_DEVMAP_BIT,
 3398  &adapter->open_device_map))
3399  offload_close(&adapter->tdev);
3400  }
3401 
3402  for_each_port(adapter, i)
3403  if (test_bit(i, &adapter->registered_device_map))
3404  unregister_netdev(adapter->port[i]);
3405 
3406  t3_stop_sge_timers(adapter);
3407  t3_free_sge_resources(adapter);
3408  cxgb_disable_msi(adapter);
3409 
3410  for_each_port(adapter, i)
3411  if (adapter->port[i])
3412  free_netdev(adapter->port[i]);
3413 
3414  iounmap(adapter->regs);
3415  if (adapter->nofail_skb)
3416  kfree_skb(adapter->nofail_skb);
3417  kfree(adapter);
3418  pci_release_regions(pdev);
3419  pci_disable_device(pdev);
3420  pci_set_drvdata(pdev, NULL);
3421  }
3422 }
3423 
3424 static struct pci_driver driver = {
3425  .name = DRV_NAME,
3426  .id_table = cxgb3_pci_tbl,
3427  .probe = init_one,
3428  .remove = __devexit_p(remove_one),
3429  .err_handler = &t3_err_handler,
3430 };
3431 
3432 static int __init cxgb3_init_module(void)
3433 {
3434  int ret;
3435 
 3436  cxgb3_offload_init();
 3437 
3438  ret = pci_register_driver(&driver);
3439  return ret;
3440 }
3441 
3442 static void __exit cxgb3_cleanup_module(void)
3443 {
 3444  pci_unregister_driver(&driver);
 3445  if (cxgb3_wq)
3446  destroy_workqueue(cxgb3_wq);
3447 }
3448 
3449 module_init(cxgb3_init_module);
3450 module_exit(cxgb3_cleanup_module);