32 #include <linux/module.h>
35 #include <linux/pci.h>
37 #include <linux/netdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mdio.h>
44 #include <linux/rtnetlink.h>
48 #include <linux/sched.h>
49 #include <linux/slab.h>
50 #include <asm/uaccess.h>
74 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
76 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
78 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
80 #define EEPROM_MAGIC 0x38E2F10C
82 #define CH_DEVICE(devid, idx) \
83 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
111 MODULE_PARM_DESC(dflt_msg_enable,
"Chelsio T3 default message enable bitmap");
132 static int ofld_disable = 0;
135 MODULE_PARM_DESC(ofld_disable,
"whether to enable offload at init time or not");
155 if (!netif_carrier_ok(dev))
158 const char *
s =
"10Mbps";
200 if (state == netif_carrier_ok(dev))
208 disable_tx_fifo_drain(adap, pi);
229 enable_tx_fifo_drain(adap, pi);
255 if (!netif_running(dev))
258 if (link_stat != netif_carrier_ok(dev)) {
260 disable_tx_fifo_drain(adapter, pi);
268 t3_write_reg(adapter,
288 pi->
phy.ops->power_down(&pi->
phy, 1);
295 enable_tx_fifo_drain(adapter, pi);
313 static const char *mod_str[] = {
314 NULL,
"SR",
"LR",
"LRM",
"TWINAX",
"TWINAX",
"unknown"
318 const struct port_info *pi = netdev_priv(dev);
324 mod_str[pi->
phy.modtype]);
378 static void name_msix_vecs(
struct adapter *adap)
380 int i,
j, msi_idx = 1,
n =
sizeof(adap->
msix_info[0].desc) - 1;
387 const struct port_info *pi = netdev_priv(d);
389 for (i = 0; i < pi->
nqsets; i++, msi_idx++) {
397 static int request_msix_data_irqs(
struct adapter *adap)
399 int i,
j,
err, qidx = 0;
402 int nqsets = adap2pinfo(adap, i)->nqsets;
410 &adap->
sge.qs[qidx]);
414 &adap->
sge.qs[qidx]);
423 static void free_irq_resources(
struct adapter *adapter)
430 n += adap2pinfo(adapter, i)->nqsets;
432 for (i = 0; i < n; ++i)
433 free_irq(adapter->msix_info[i + 1].vec,
434 &adapter->
sge.
qs[i]);
436 free_irq(adapter->pdev->irq, adapter);
439 static
int await_mgmt_replies(
struct adapter *adap,
unsigned long init_cnt,
444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
452 static int init_tp_parity(
struct adapter *adap)
457 unsigned long cnt = adap->
sge.qs[0].rspq.offload_pkts;
461 for (i = 0; i < 16; i++) {
471 memset(req, 0,
sizeof(*req));
478 await_mgmt_replies(adap, cnt, i + 1);
485 for (i = 0; i < 2048; i++) {
495 memset(req, 0,
sizeof(*req));
501 await_mgmt_replies(adap, cnt, 16 + i + 1);
508 for (i = 0; i < 2048; i++) {
518 memset(req, 0,
sizeof(*req));
524 await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
538 memset(greq, 0,
sizeof(*greq));
544 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
546 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
569 static void setup_rss(
struct adapter *adap)
572 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
573 unsigned int nq1 = adap->
port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
582 rspq_map[
i] = i % nq0;
591 static void ring_dbs(
struct adapter *adap)
604 static void init_napi(
struct adapter *adap)
629 static void quiesce_rx(
struct adapter *adap)
634 if (adap->
sge.qs[i].adap)
635 napi_disable(&adap->
sge.qs[i].napi);
638 static void enable_all_napi(
struct adapter *adap)
642 if (adap->
sge.qs[i].adap)
643 napi_enable(&adap->
sge.qs[i].napi);
654 static int setup_sge_qsets(
struct adapter *adap)
656 int i,
j,
err, irq_idx = 0, qset_idx = 0;
667 for (j = 0; j < pi->
nqsets; ++
j, ++qset_idx) {
671 &adap->
params.sge.qset[qset_idx], ntxq, dev,
672 netdev_get_tx_queue(dev, j));
696 const char *buf,
size_t len,
698 unsigned int min_val,
unsigned int max_val)
708 if (endp == buf || val < min_val || val > max_val)
719 #define CXGB3_SHOW(name, val_expr) \
720 static ssize_t format_##name(struct net_device *dev, char *buf) \
722 struct port_info *pi = netdev_priv(dev); \
723 struct adapter *adap = pi->adapter; \
724 return sprintf(buf, "%u\n", val_expr); \
726 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
729 return attr_show(d, buf, format_##name); \
735 struct adapter *adap = pi->
adapter;
740 if (val && adap->
params.rev == 0)
742 if (val > t3_mc5_size(&adap->
mc5) - adap->
params.mc5.nservers -
750 const char *buf,
size_t len)
752 return attr_store(d, buf, len, set_nfilters, 0, ~0);
758 struct adapter *adap = pi->
adapter;
762 if (val > t3_mc5_size(&adap->
mc5) - adap->
params.mc5.nfilters -
770 const char *buf,
size_t len)
772 return attr_store(d, buf, len, set_nservers, 0, ~0);
775 #define CXGB3_ATTR_R(name, val_expr) \
776 CXGB3_SHOW(name, val_expr) \
777 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
779 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
780 CXGB3_SHOW(name, val_expr) \
781 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
787 static struct attribute *cxgb3_attrs[] = {
788 &dev_attr_cam_size.attr,
789 &dev_attr_nfilters.attr,
790 &dev_attr_nservers.attr,
794 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
797 char *buf,
int sched)
800 struct adapter *adap = pi->
adapter;
810 bpt = (v >> 8) & 0xff;
813 len =
sprintf(buf,
"disabled\n");
815 v = (adap->
params.vpd.cclk * 1000) / cpt;
816 len =
sprintf(buf,
"%u Kbps\n", (v * bpt) / 125);
823 const char *buf,
size_t len,
int sched)
826 struct adapter *adap = pi->
adapter;
835 if (endp == buf || val > 10000000)
846 #define TM_ATTR(name, sched) \
847 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
850 return tm_attr_show(d, buf, sched); \
852 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
853 const char *buf, size_t len) \
855 return tm_attr_store(d, buf, len, sched); \
857 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
868 static struct attribute *offload_attrs[] = {
869 &dev_attr_sched0.attr,
870 &dev_attr_sched1.attr,
871 &dev_attr_sched2.attr,
872 &dev_attr_sched3.attr,
873 &dev_attr_sched4.attr,
874 &dev_attr_sched5.attr,
875 &dev_attr_sched6.attr,
876 &dev_attr_sched7.attr,
880 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
886 static inline int offload_tx(
struct t3cdev *tdev,
struct sk_buff *skb)
896 static int write_smt_entry(
struct adapter *adapter,
int idx)
913 offload_tx(&adapter->
tdev, skb);
917 static int init_smt(
struct adapter *adapter)
922 write_smt_entry(adapter, i);
926 static
void init_port_mtus(
struct adapter *adapter)
928 unsigned int mtus = adapter->port[0]->mtu;
930 if (adapter->port[1])
931 mtus |= adapter->port[1]->mtu << 16;
935 static int send_pktsched_cmd(
struct adapter *adap,
int sched,
int qidx,
int lo,
967 static int bind_qsets(
struct adapter *adap)
972 const struct port_info *pi = adap2pinfo(adap, i);
974 for (j = 0; j < pi->
nqsets; ++
j) {
975 int ret = send_pktsched_cmd(adap, 1,
986 #define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
987 __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
988 #define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
989 #define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
990 __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
991 #define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
992 #define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
993 #define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
994 #define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
1002 static inline const char *get_edc_fw_name(
int edc_idx)
1022 struct adapter *adapter = phy->
adapter;
/*
 * NOTE(review): the EDC firmware name returned by get_edc_fw_name() is
 * used directly as the printf-style format string.  The known names
 * (AEL2005/AEL2020 *.bin) contain no '%', but passing a non-literal
 * format is fragile and trips -Wformat-security; the safer form is
 * snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx)).
 */
1030 snprintf(buf,
sizeof(buf), get_edc_fw_name(edc_idx));
1035 "could not upgrade firmware: unable to load %s\n",
1041 if (fw->
size > size + 4) {
1042 CH_ERR(adapter,
"firmware image too large %u, expected %d\n",
1043 (
unsigned int)fw->
size, size + 4);
1049 for (csum = 0, i = 0; i < fw->
size /
sizeof(
csum); i++)
1050 csum +=
ntohl(p[i]);
1052 if (csum != 0xffffffff) {
1053 CH_ERR(adapter,
"corrupted firmware image, checksum %u\n",
1058 for (i = 0; i < size / 4 ; i++) {
1059 *cache++ = (
be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1068 static int upgrade_fw(
struct adapter *adap)
1076 dev_err(dev,
"could not upgrade firmware: unable to load %s\n",
1084 dev_info(dev,
"successful upgrade to firmware %d.%d.%d\n",
1087 dev_err(dev,
"failed to upgrade to firmware %d.%d.%d\n",
1093 static inline char t3rev2char(
struct adapter *adapter)
1097 switch(adapter->
params.rev) {
1109 static int update_tpsram(
struct adapter *adap)
1117 rev = t3rev2char(adap);
1125 dev_err(dev,
"could not load TP SRAM: unable to load %s\n",
1132 goto release_tpsram;
1137 "successful update of protocol engine "
1141 dev_err(dev,
"failed to update of protocol engine %d.%d.%d\n",
1144 dev_err(dev,
"loading protocol SRAM failed\n");
1161 static void t3_synchronize_rx(
struct adapter *adap,
const struct port_info *
p)
1168 spin_lock_irq(&q->
lock);
1169 spin_unlock_irq(&q->
lock);
1175 struct port_info *pi = netdev_priv(dev);
1176 struct adapter *adapter = pi->
adapter;
1178 if (adapter->
params.rev > 0) {
1187 adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
1191 t3_synchronize_rx(adapter, pi);
1204 static
int cxgb_up(
struct adapter *adap)
1211 err = upgrade_fw(adap);
1212 CH_WARN(adap,
"FW upgrade to %d.%d.%d %s\n",
1219 err = update_tpsram(adap);
1220 CH_WARN(adap,
"TP upgrade to %d.%d.%d %s\n",
1239 err = setup_sge_qsets(adap);
1244 cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1257 name_msix_vecs(adap);
1259 t3_async_intr_handler, 0,
1260 adap->msix_info[0].desc, adap);
1264 err = request_msix_data_irqs(adap);
1266 free_irq(adap->msix_info[0].vec, adap);
1271 adap->sge.qs[0].rspq.
1278 enable_all_napi(adap);
1283 is_offload(adap) && init_tp_parity(adap) == 0)
1293 int ret = bind_qsets(adap);
1296 CH_ERR(adap,
"failed to bind qsets, err %d\n", ret);
1298 free_irq_resources(adap);
1308 CH_ERR(adap,
"request_irq failed, err %d\n", err);
1315 static void cxgb_down(
struct adapter *adapter,
int on_wq)
1322 free_irq_resources(adapter);
1323 quiesce_rx(adapter);
1329 static void schedule_chk_task(
struct adapter *adap)
1333 timeo = adap->
params.linkpoll_period ?
1334 (
HZ * adap->
params.linkpoll_period) / 10 :
1335 adap->
params.stats_update_period *
HZ;
1340 static int offload_open(
struct net_device *dev)
1342 struct port_info *pi = netdev_priv(dev);
1343 struct adapter *adapter = pi->
adapter;
1351 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1360 init_port_mtus(adapter);
1363 adapter->
params.rev == 0 ?
1364 adapter->
port[0]->mtu : 0xffff);
1368 dev_dbg(&dev->
dev,
"cannot create sysfs group\n");
1383 static int offload_close(
struct t3cdev *tdev)
1385 struct adapter *adapter =
tdev2adap(tdev);
1405 cxgb_down(adapter, 0);
1413 struct port_info *pi = netdev_priv(dev);
1414 struct adapter *adapter = pi->
adapter;
1422 if (is_offload(adapter) && !ofld_disable) {
1423 err = offload_open(dev);
1426 "Could not initialize offload capabilities\n");
1430 err = netif_set_real_num_rx_queues(dev, pi->
nqsets);
1435 netif_tx_start_all_queues(dev);
1437 schedule_chk_task(adapter);
1443 static int __cxgb_close(
struct net_device *dev,
int on_wq)
1445 struct port_info *pi = netdev_priv(dev);
1446 struct adapter *adapter = pi->
adapter;
1457 netif_tx_stop_all_queues(dev);
1458 pi->
phy.ops->power_down(&pi->
phy, 1);
1470 cxgb_down(adapter, on_wq);
/*
 * cxgb_close - ndo_stop entry point for a port's net_device
 * @dev: the net_device being brought down
 *
 * Thin wrapper that delegates to __cxgb_close() with on_wq == 0,
 * indicating we are not being invoked from a workqueue context
 * (the workqueue path passes on_wq == 1 via t3_adapter_error()).
 */
static int cxgb_close(struct net_device *dev)
{
	const int on_wq = 0;

	return __cxgb_close(dev, on_wq);
}
1483 struct port_info *pi = netdev_priv(dev);
1484 struct adapter *adapter = pi->
adapter;
1522 struct port_info *pi = netdev_priv(dev);
1523 struct adapter *adapter = pi->
adapter;
1530 struct port_info *pi = netdev_priv(dev);
1531 struct adapter *adapter = pi->
adapter;
1539 "TxMulticastFramesOK",
1540 "TxBroadcastFramesOK",
1547 "TxFrames128To255 ",
1548 "TxFrames256To511 ",
1549 "TxFrames512To1023 ",
1550 "TxFrames1024To1518 ",
1551 "TxFrames1519ToMax ",
1555 "RxMulticastFramesOK",
1556 "RxBroadcastFramesOK",
1567 "RxFrames128To255 ",
1568 "RxFrames256To511 ",
1569 "RxFrames512To1023 ",
1570 "RxFrames1024To1518 ",
1571 "RxFrames1519ToMax ",
1584 "CheckTXEnToggled ",
1590 static int get_sset_count(
struct net_device *dev,
int sset)
1600 #define T3_REGMAP_SIZE (3 * 1024)
1602 static int get_regs_len(
struct net_device *dev)
1607 static int get_eeprom_len(
struct net_device *dev)
1614 struct port_info *pi = netdev_priv(dev);
1615 struct adapter *adapter = pi->
adapter;
1630 "%s %u.%u.%u TP %u.%u.%u",
1643 memcpy(data, stats_strings,
sizeof(stats_strings));
1646 static unsigned long collect_sge_port_stats(
struct adapter *adapter,
1650 unsigned long tot = 0;
1653 tot += adapter->
sge.qs[
i].port_stats[
idx];
1660 struct port_info *pi = netdev_priv(dev);
1661 struct adapter *adapter = pi->
adapter;
1704 *data++ = pi->
phy.fifo_errors;
1706 *data++ = collect_sge_port_stats(adapter, pi,
SGE_PSTAT_TSO);
1722 static inline void reg_block_dump(
struct adapter *ap,
void *buf,
1723 unsigned int start,
unsigned int end)
1727 for (; start <=
end; start +=
sizeof(
u32))
1728 *p++ = t3_read_reg(ap, start);
1734 struct port_info *pi = netdev_priv(dev);
1735 struct adapter *ap = pi->
adapter;
1743 regs->
version = 3 | (ap->
params.rev << 10) | (is_pcie(ap) << 31);
1762 static int restart_autoneg(
struct net_device *dev)
1766 if (!netif_running(dev))
1770 p->
phy.ops->autoneg_restart(&p->
phy);
1774 static int set_phys_id(
struct net_device *dev,
1777 struct port_info *pi = netdev_priv(dev);
1778 struct adapter *adapter = pi->
adapter;
1804 if (netif_carrier_ok(dev)) {
1808 ethtool_cmd_speed_set(cmd, -1);
1821 static int speed_duplex_to_caps(
int speed,
int duplex)
1851 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1852 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1853 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1854 ADVERTISED_10000baseT_Full)
1867 u32 speed = ethtool_cmd_speed(cmd);
1868 int cap = speed_duplex_to_caps(speed, cmd->
duplex);
1876 u32 speed = ethtool_cmd_speed(cmd);
1877 int cap = speed_duplex_to_caps(speed, cmd->
duplex);
1894 if (netif_running(dev))
1899 static void get_pauseparam(
struct net_device *dev,
1909 static int set_pauseparam(
struct net_device *dev,
1927 if (netif_running(dev))
1931 if (netif_running(dev))
1939 struct port_info *pi = netdev_priv(dev);
1940 struct adapter *adapter = pi->
adapter;
1955 struct port_info *pi = netdev_priv(dev);
1956 struct adapter *adapter = pi->
adapter;
1970 if (adapter->
flags & FULL_INIT_DONE)
1974 for (i = 0; i < pi->
nqsets; ++
i, ++
q) {
1987 struct port_info *pi = netdev_priv(dev);
1988 struct adapter *adapter = pi->
adapter;
1996 for (i = 0; i < pi->
nqsets; i++) {
1997 qsp = &adapter->
params.sge.qset[
i];
1998 qs = &adapter->
sge.qs[
i];
2008 struct port_info *pi = netdev_priv(dev);
2009 struct adapter *adapter = pi->
adapter;
2019 struct port_info *pi = netdev_priv(dev);
2020 struct adapter *adapter = pi->
adapter;
2040 struct port_info *pi = netdev_priv(dev);
2041 struct adapter *adapter = pi->
adapter;
2042 u32 aligned_offset, aligned_len;
2050 aligned_offset = eeprom->
offset & ~3;
2051 aligned_len = (eeprom->
len + (eeprom->
offset & 3) + 3) & ~3;
2053 if (aligned_offset != eeprom->
offset || aligned_len != eeprom->
len) {
2058 if (!err && aligned_len > 4)
2060 aligned_offset + aligned_len - 4,
2061 (
__le32 *) & buf[aligned_len - 4]);
2072 for (p = (
__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2074 aligned_offset += 4;
2092 static const struct ethtool_ops cxgb_ethtool_ops = {
2098 .get_ringparam = get_sge_param,
2099 .set_ringparam = set_sge_param,
2110 .nway_reset = restart_autoneg,
/*
 * in_range - validate an optional integer setting
 * @val: value to check; callers in this file use negative values to mean
 *       "field not supplied" (see the t.*_size >= 0 tests in the ioctl path)
 * @lo:  lowest acceptable value
 * @hi:  highest acceptable value
 *
 * Returns nonzero when @val is negative (treated as "leave unchanged") or
 * lies within [@lo, @hi]; returns zero otherwise.
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)	/* negative = not specified, always acceptable */
		return 1;
	return lo <= val && val <= hi;
}
2123 static int cxgb_extension_ioctl(
struct net_device *dev,
void __user *useraddr)
2125 struct port_info *pi = netdev_priv(dev);
2126 struct adapter *adapter = pi->
adapter;
2145 if (
t.qset_idx >= SGE_QSETS)
2163 if ((adapter->
flags & FULL_INIT_DONE) &&
2164 (
t.rspq_size >= 0 ||
t.fl_size[0] >= 0 ||
2165 t.fl_size[1] >= 0 ||
t.txq_size[0] >= 0 ||
2166 t.txq_size[1] >= 0 ||
t.txq_size[2] >= 0 ||
2167 t.polling >= 0 ||
t.cong_thres >= 0))
2174 pi = adap2pinfo(adapter, i);
2179 if (
t.qset_idx < q1)
2181 if (
t.qset_idx > q1 + nqsets - 1)
2184 q = &adapter->
params.sge.qset[
t.qset_idx];
2186 if (
t.rspq_size >= 0)
2188 if (
t.fl_size[0] >= 0)
2190 if (
t.fl_size[1] >= 0)
2192 if (
t.txq_size[0] >= 0)
2194 if (
t.txq_size[1] >= 0)
2196 if (
t.txq_size[2] >= 0)
2198 if (
t.cong_thres >= 0)
2200 if (
t.intr_lat >= 0) {
2202 &adapter->
sge.qs[
t.qset_idx];
2207 if (
t.polling >= 0) {
2208 if (adapter->
flags & USING_MSIX)
2212 if (adapter->
params.rev == 0 &&
2217 q = &adapter->
params.sge.
2248 pi = adap2pinfo(adapter, i);
2253 if (
t.qset_idx >= nqsets)
2256 q = &adapter->
params.sge.qset[q1 +
t.qset_idx];
2269 if (adapter->
flags & USING_MSIX)
2270 t.vector = adapter->
msix_info[q1 +
t.qset_idx + 1].vec;
2272 t.vector = adapter->
pdev->irq;
2280 unsigned int i, first_qset = 0, other_qsets = 0;
2284 if (adapter->
flags & FULL_INIT_DONE)
2288 if (
edata.val < 1 ||
2289 (
edata.val > 1 && !(adapter->
flags & USING_MSIX)))
2293 if (adapter->port[i] && adapter->port[i] != dev)
2294 other_qsets += adap2pinfo(adapter, i)->nqsets;
2296 if (edata.val + other_qsets > SGE_QSETS)
2299 pi->nqsets = edata.val;
2302 if (adapter->port[i]) {
2303 pi = adap2pinfo(adapter, i);
2305 first_qset += pi->
nqsets;
2330 if (IS_ERR(fw_data))
2331 return PTR_ERR(fw_data);
2343 if (!is_offload(adapter))
2347 if (offload_running(adapter))
2357 for (i = 1; i <
NMTUS; ++
i)
2358 if (
m.mtus[i] <
m.mtus[i - 1])
2362 sizeof(adapter->
params.mtus));
2369 if (!is_offload(adapter))
2384 if (!is_offload(adapter))
2388 if (adapter->
flags & FULL_INIT_DONE)
2419 if (!is_offload(adapter))
2421 if (!(adapter->
flags & FULL_INIT_DONE))
2425 if ((
t.
addr & 7) || (
t.len & 7))
2430 mem = &adapter->
pmrx;
2432 mem = &adapter->
pmtx;
2441 t.version = 3 | (adapter->
params.rev << 10);
2449 useraddr +=
sizeof(
t);
2451 unsigned int chunk =
2452 min_t(
unsigned int,
t.len,
sizeof(buf));
2473 if (!offload_running(adapter))
2495 static int cxgb_ioctl(
struct net_device *dev,
struct ifreq *req,
int cmd)
2498 struct port_info *pi = netdev_priv(dev);
2499 struct adapter *adapter = pi->
adapter;
2506 !mdio_phy_id_is_c45(data->
phy_id) &&
2507 (data->
phy_id & 0x1f00) &&
2508 !(data->
phy_id & 0xe0e0))
2515 return cxgb_extension_ioctl(dev, req->ifr_data);
2521 static int cxgb_change_mtu(
struct net_device *dev,
int new_mtu)
2523 struct port_info *pi = netdev_priv(dev);
2524 struct adapter *adapter = pi->
adapter;
2532 init_port_mtus(adapter);
2533 if (adapter->
params.rev == 0 && offload_running(adapter))
2536 adapter->
port[0]->mtu);
2540 static int cxgb_set_mac_addr(
struct net_device *dev,
void *p)
2542 struct port_info *pi = netdev_priv(dev);
2543 struct adapter *adapter = pi->
adapter;
2546 if (!is_valid_ether_addr(addr->
sa_data))
2551 if (offload_running(adapter))
2552 write_smt_entry(adapter, pi->
port_id);
2563 if (features & NETIF_F_HW_VLAN_RX)
2575 if (changed & NETIF_F_HW_VLAN_RX)
2576 cxgb_vlan_mode(dev, features);
2581 #ifdef CONFIG_NET_POLL_CONTROLLER
2582 static void cxgb_netpoll(
struct net_device *dev)
2584 struct port_info *pi = netdev_priv(dev);
2585 struct adapter *adapter = pi->
adapter;
2592 if (adapter->
flags & USING_MSIX)
2605 static void mac_stats_update(
struct adapter *adapter)
2613 if (netif_running(dev)) {
2621 static void check_link_status(
struct adapter *adapter)
2649 static void check_t3b2_mac(
struct adapter *adapter)
2661 if (!netif_running(dev))
2665 if (netif_running(dev) && netif_carrier_ok(dev))
2668 p->
mac.stats.num_toggled++;
2669 else if (status == 2) {
2674 cxgb_set_rxmode(dev);
2678 p->
mac.stats.num_resets++;
2687 struct adapter *adapter =
container_of(work,
struct adapter,
2695 check_link_status(adapter);
2701 mac_stats_update(adapter);
2706 check_t3b2_mac(adapter);
2717 struct cmac *
mac = &adap2pinfo(adapter, port)->
mac;
2723 mac->
stats.rx_fifo_ovfl++;
2746 qs->
fl[
i].empty += (v & 1);
2759 schedule_chk_task(adapter);
2763 static void db_full_task(
struct work_struct *work)
2765 struct adapter *adapter =
container_of(work,
struct adapter,
2773 struct adapter *adapter =
container_of(work,
struct adapter,
2781 struct adapter *adapter =
container_of(work,
struct adapter,
2783 unsigned long delay = 1000;
2802 static void ext_intr_task(
struct work_struct *work)
2804 struct adapter *adapter =
container_of(work,
struct adapter,
2824 spin_lock_irq(&adapter->work_lock);
2825 if (adapter->slow_intr_mask) {
2826 adapter->slow_intr_mask |=
F_T3DBG;
2829 adapter->slow_intr_mask);
2858 struct port_info *pi = netdev_priv(netdev);
2865 static int t3_adapter_error(
struct adapter *adapter,
int reset,
int on_wq)
2869 if (is_offload(adapter) &&
2872 offload_close(&adapter->
tdev);
2879 if (netif_running(netdev))
2880 __cxgb_close(netdev, on_wq);
2886 adapter->
flags &= ~FULL_INIT_DONE;
2896 static int t3_reenable_adapter(
struct adapter *adapter)
2900 "Cannot re-enable PCI device after reset.\n");
2918 static void t3_resume_ports(
struct adapter *adapter)
2926 if (netif_running(netdev)) {
2927 if (cxgb_open(netdev)) {
2929 "can't bring device back up"
2936 if (is_offload(adapter) && !ofld_disable)
2944 static void fatal_error_task(
struct work_struct *work)
2946 struct adapter *adapter =
container_of(work,
struct adapter,
2951 err = t3_adapter_error(adapter, 1, 1);
2953 err = t3_reenable_adapter(adapter);
2955 t3_resume_ports(adapter);
2957 CH_ALERT(adapter,
"adapter reset %s\n", err ?
"failed" :
"succeeded");
2965 if (adapter->
flags & FULL_INIT_DONE) {
2977 CH_ALERT(adapter,
"encountered fatal error, operation suspended\n");
2979 CH_ALERT(adapter,
"FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2980 fw_status[0], fw_status[1],
2981 fw_status[2], fw_status[3]);
2995 struct adapter *adapter = pci_get_drvdata(pdev);
3000 t3_adapter_error(adapter, 0, 0);
3014 struct adapter *adapter = pci_get_drvdata(pdev);
3016 if (!t3_reenable_adapter(adapter))
3029 static void t3_io_resume(
struct pci_dev *pdev)
3031 struct adapter *adapter = pci_get_drvdata(pdev);
3033 CH_ALERT(adapter,
"adapter recovering, PEX ERR 0x%x\n",
3036 t3_resume_ports(adapter);
3040 .error_detected = t3_io_error_detected,
3041 .slot_reset = t3_io_slot_reset,
3042 .resume = t3_io_resume,
3050 static void set_nqsets(
struct adapter *adap)
3054 int hwports = adap->
params.nports;
3057 if (adap->
params.rev > 0 && adap->
flags & USING_MSIX) {
3059 (hwports * nqsets > SGE_QSETS ||
3060 num_cpus >= nqsets / hwports))
3062 if (nqsets > num_cpus)
3064 if (nqsets < 1 || hwports == 4)
3070 struct port_info *pi = adap2pinfo(adap, i);
3077 "Port %d using %d queue sets.\n", i, nqsets);
3081 static int __devinit cxgb_enable_msix(
struct adapter *adap)
3083 struct msix_entry
entries[SGE_QSETS + 1];
3097 if (!err && vectors < (adap->
params.nports + 1)) {
3111 static void __devinit print_port_info(
struct adapter *adap,
3114 static const char *pci_variant[] = {
3115 "PCI",
"PCI-X",
"PCI-X ECC",
"PCI-X 266",
"PCI Express"
3122 snprintf(buf,
sizeof(buf),
"%s x%d",
3123 pci_variant[adap->
params.pci.variant],
3126 snprintf(buf,
sizeof(buf),
"%s %dMHz/%d-bit",
3127 pci_variant[adap->
params.pci.variant],
3132 const struct port_info *pi = netdev_priv(dev);
3138 is_offload(adap) ?
"R" :
"", adap->
params.rev, buf,
3139 (adap->
flags & USING_MSIX) ?
" MSI-X" :
3143 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3144 adap->
name, t3_mc7_size(&adap->
cm) >> 20,
3145 t3_mc7_size(&adap->
pmtx) >> 20,
3146 t3_mc7_size(&adap->
pmrx) >> 20,
3152 .ndo_open = cxgb_open,
3153 .ndo_stop = cxgb_close,
3155 .ndo_get_stats = cxgb_get_stats,
3157 .ndo_set_rx_mode = cxgb_set_rxmode,
3158 .ndo_do_ioctl = cxgb_ioctl,
3159 .ndo_change_mtu = cxgb_change_mtu,
3160 .ndo_set_mac_address = cxgb_set_mac_addr,
3161 .ndo_fix_features = cxgb_fix_features,
3162 .ndo_set_features = cxgb_set_features,
3163 #ifdef CONFIG_NET_POLL_CONTROLLER
3164 .ndo_poll_controller = cxgb_netpoll,
3170 struct port_info *pi = netdev_priv(dev);
3173 pi->
iscsic.mac_addr[3] |= 0x80;
3176 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3177 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3178 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3182 static int version_printed;
3184 int i,
err, pci_using_dac = 0;
3187 struct adapter *adapter =
NULL;
3190 if (!version_printed) {
3199 ": cannot initialize work queue\n");
3206 dev_err(&pdev->
dev,
"cannot enable PCI device\n");
3213 dev_info(&pdev->
dev,
"cannot obtain PCI resources\n");
3214 goto out_disable_device;
3219 err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(64));
3221 dev_err(&pdev->
dev,
"unable to obtain 64-bit DMA for "
3222 "coherent allocations\n");
3223 goto out_release_regions;
3225 }
else if ((err = pci_set_dma_mask(pdev,
DMA_BIT_MASK(32))) != 0) {
3226 dev_err(&pdev->
dev,
"no usable DMA configuration\n");
3227 goto out_release_regions;
3237 adapter = kzalloc(
sizeof(*adapter),
GFP_KERNEL);
3240 goto out_release_regions;
3246 dev_err(&pdev->
dev,
"cannot allocate nofail buffer\n");
3248 goto out_free_adapter;
3252 if (!adapter->
regs) {
3253 dev_err(&pdev->
dev,
"cannot map device registers\n");
3255 goto out_free_adapter;
3258 adapter->
pdev = pdev;
3259 adapter->
name = pci_name(pdev);
3280 netdev = alloc_etherdev_mq(
sizeof(
struct port_info), SGE_QSETS);
3288 adapter->
port[
i] = netdev;
3289 pi = netdev_priv(netdev);
3295 netdev->
mem_end = mmio_start + mmio_len - 1;
3307 pci_set_drvdata(pdev, adapter);
3323 "cannot register net device %s, skipping\n",
3324 adapter->
port[i]->name);
3337 dev_err(&pdev->
dev,
"could not register any net devices\n");
3342 cxgb3_init_iscsi_mac(adapter->
port[i]);
3347 if (is_offload(adapter)) {
3353 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3355 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3358 set_nqsets(adapter);
3363 print_port_info(adapter, ai);
3369 if (adapter->
port[i])
3375 out_release_regions:
3379 pci_set_drvdata(pdev,
NULL);
3386 struct adapter *adapter = pci_get_drvdata(pdev);
3395 if (is_offload(adapter)) {
3399 offload_close(&adapter->
tdev);
3408 cxgb_disable_msi(adapter);
3411 if (adapter->port[i])
3420 pci_set_drvdata(pdev,
NULL);
3426 .id_table = cxgb3_pci_tbl,
3429 .err_handler = &t3_err_handler,
3432 static int __init cxgb3_init_module(
void)
3438 ret = pci_register_driver(&
driver);
3442 static void __exit cxgb3_cleanup_module(
void)