36 #include <linux/module.h>
39 #include <linux/pci.h>
41 #include <linux/netdevice.h>
44 #include <linux/ethtool.h>
49 #include "../cxgb4/t4_regs.h"
50 #include "../cxgb4/t4_msg.h"
/* Driver version string and one-line description (reported via ethtool). */
#define DRV_VERSION "1.0.0"
#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
/*
 * Default ethtool message-enable bitmap applied to new interfaces
 * (module parameter text calls this the "adapter ethtool message level
 * bitmap").
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | \
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
74 "default adapter ethtool message level bitmap");
90 #define MSI_DEFAULT MSI_MSIX
131 static struct dentry *cxgb4vf_debugfs_root;
149 if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
159 const struct port_info *pi = netdev_priv(dev);
261 "%s-FWeventq", adapter->
name);
269 const struct port_info *pi = netdev_priv(dev);
274 "%s-%d", dev->
name, qs);
283 static int request_msix_queue_irqs(
struct adapter *adapter)
321 static void free_msix_queue_irqs(
struct adapter *adapter)
323 struct sge *s = &adapter->
sge;
329 free_irq(adapter->msix_info[msi++].vec,
338 napi_enable(&rspq->napi);
353 static void enable_rx(
struct adapter *adapter)
356 struct sge *s = &adapter->
sge;
359 qenable(&s->
ethrxq[rxq].rspq);
360 qenable(&s->fw_evtq);
377 static
void quiesce_rx(
struct adapter *adapter)
379 struct sge *s = &adapter->sge;
384 napi_disable(&s->fw_evtq.napi);
396 struct adapter *adapter = rspq->adapter;
398 void *cpl = (
void *)(rsp + 1);
423 struct sge *s = &adapter->
sge;
438 "Egress Update QID %d out of range\n", qid);
444 "Egress Update QID %d TXQ=NULL\n", qid);
450 "Egress Update QID %d refers to TXQ %d\n",
460 netif_tx_wake_queue(txq->
txq);
466 "unexpected CPL %#x on FW event queue\n", opcode);
477 static int setup_sge_queues(
struct adapter *adapter)
479 struct sge *s = &adapter->
sge;
486 bitmap_zero(s->starving_fl,
MAX_EGRQ);
500 goto err_free_queues;
509 goto err_free_queues;
525 for (qs = 0; qs < pi->
nqsets; qs++, rxq++, txq++) {
530 goto err_free_queues;
533 netdev_get_tx_queue(dev, qs),
534 s->fw_evtq.cntxt_id);
536 goto err_free_queues;
539 memset(&rxq->stats, 0,
sizeof(rxq->stats));
548 IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
556 for (qs = 0; qs < pi->
nqsets; qs++, rxq++, txq++) {
557 IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
569 rxq->fl.abs_id = rxq->fl.cntxt_id + s->
egr_base;
570 EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
588 static int setup_rss(
struct adapter *adapter)
593 struct port_info *pi = adap2pinfo(adapter, pidx);
598 for (qs = 0; qs < pi->
nqsets; qs++)
599 rss[qs] = rxq[qs].rspq.abs_id;
609 switch (adapter->
params.rss.mode) {
618 if (!adapter->
params.rss.u.basicvirtual.tnlalllookup) {
625 config.basicvirtual.defaultq =
647 static int adapter_up(
struct adapter *adapter)
657 err = setup_sge_queues(adapter);
660 err = setup_rss(adapter);
667 name_msix_vecs(adapter);
676 err = request_msix_queue_irqs(adapter);
680 adapter->
name, adapter);
700 static void adapter_down(
struct adapter *adapter)
706 free_msix_queue_irqs(adapter);
719 static int cxgb4vf_open(
struct net_device *dev)
723 struct adapter *adapter = pi->
adapter;
730 err = adapter_up(adapter);
739 err = netif_set_real_num_rx_queues(dev, pi->
nqsets);
742 err = link_start(dev);
746 netif_tx_start_all_queues(dev);
752 adapter_down(adapter);
760 static int cxgb4vf_stop(
struct net_device *dev)
763 struct adapter *adapter = pi->
adapter;
765 netif_tx_stop_all_queues(dev);
772 adapter_down(adapter);
782 struct port_info *pi = netdev2pinfo(dev);
783 struct adapter *adapter = pi->
adapter;
791 memset(ns, 0,
sizeof(*ns));
796 stats.tx_ucast_bytes +
stats.tx_offload_bytes);
798 stats.tx_ucast_frames +
stats.tx_offload_frames);
800 stats.rx_ucast_bytes);
802 stats.rx_ucast_frames);
815 static inline unsigned int collect_netdev_uc_list_addrs(
const struct net_device *dev,
818 unsigned int maxaddrs)
820 unsigned int index = 0;
821 unsigned int naddr = 0;
825 if (index++ >= offset) {
826 addr[naddr++] = ha->
addr;
827 if (naddr >= maxaddrs)
838 static inline unsigned int collect_netdev_mc_list_addrs(
const struct net_device *dev,
841 unsigned int maxaddrs)
843 unsigned int index = 0;
844 unsigned int naddr = 0;
848 if (index++ >= offset) {
849 addr[naddr++] = ha->
addr;
850 if (naddr >= maxaddrs)
860 static int set_addr_filters(
const struct net_device *dev,
bool sleep)
865 unsigned int offset, naddr;
868 const struct port_info *pi = netdev_priv(dev);
871 for (offset = 0; ; offset += naddr) {
872 naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
878 naddr, addr,
NULL, &uhash, sleep);
886 for (offset = 0; ; offset += naddr) {
887 naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
893 naddr, addr,
NULL, &mhash, sleep);
900 uhash | mhash, sleep);
907 static int set_rxmode(
struct net_device *dev,
int mtu,
bool sleep_ok)
912 ret = set_addr_filters(dev, sleep_ok);
924 static void cxgb4vf_set_rxmode(
struct net_device *dev)
927 set_rxmode(dev, -1,
false);
934 static int closest_timer(
const struct sge *s,
int us)
936 int i, timer_idx = 0, min_delta =
INT_MAX;
942 if (delta < min_delta) {
950 static int closest_thres(
const struct sge *s,
int thres)
958 if (delta < min_delta) {
969 static unsigned int qtimer_val(
const struct adapter *adapter,
975 ? adapter->
sge.timer_val[timer_idx]
990 static int set_rxq_intr_params(
struct adapter *adapter,
struct sge_rspq *rspq,
991 unsigned int us,
unsigned int cnt)
993 unsigned int timer_idx;
1012 pktcnt_idx = closest_thres(&adapter->
sge, cnt);
1029 timer_idx = (us == 0
1031 : closest_timer(&adapter->
sge, us));
/*
 * Compose the constant version word placed in ethtool register dumps
 * by cxgb4vf_get_regs().
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/* The adapter argument is not consulted: every VF reports the
	 * same value — chip version 4 with 0x3f in the revision field. */
	return (0x3f << 10) | 4;
}
1079 static int cxgb4vf_change_mtu(
struct net_device *dev,
int new_mtu)
1082 struct port_info *pi = netdev_priv(dev);
1089 -1, -1, -1, -1,
true);
1110 static int cxgb4vf_set_features(
struct net_device *dev,
1113 struct port_info *pi = netdev_priv(dev);
1126 static int cxgb4vf_set_mac_addr(
struct net_device *dev,
void *_addr)
1130 struct port_info *pi = netdev_priv(dev);
1132 if (!is_valid_ether_addr(addr->
sa_data))
1145 #ifdef CONFIG_NET_POLL_CONTROLLER
1150 static void cxgb4vf_poll_controller(
struct net_device *dev)
1152 struct port_info *pi = netdev_priv(dev);
1153 struct adapter *adapter = pi->
adapter;
1160 for (nqsets = pi->
nqsets; nqsets; nqsets--) {
1180 static int cxgb4vf_get_settings(
struct net_device *dev,
1183 const struct port_info *pi = netdev_priv(dev);
1187 ethtool_cmd_speed_set(cmd,
1188 netif_carrier_ok(dev) ? pi->
link_cfg.speed : -1);
1203 static void cxgb4vf_get_drvinfo(
struct net_device *dev,
1206 struct adapter *adapter = netdev2adap(dev);
1213 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1229 return netdev2adap(dev)->msg_enable;
1235 static void cxgb4vf_set_msglevel(
struct net_device *dev,
u32 msglevel)
1237 netdev2adap(dev)->msg_enable = msglevel;
1246 static void cxgb4vf_get_ringparam(
struct net_device *dev,
1249 const struct port_info *pi = netdev_priv(dev);
1269 static int cxgb4vf_set_ringparam(
struct net_device *dev,
1272 const struct port_info *pi = netdev_priv(dev);
1273 struct adapter *adapter = pi->
adapter;
1274 struct sge *s = &adapter->
sge;
1302 static int cxgb4vf_get_coalesce(
struct net_device *dev,
1305 const struct port_info *pi = netdev_priv(dev);
1306 const struct adapter *adapter = pi->
adapter;
1322 static int cxgb4vf_set_coalesce(
struct net_device *dev,
1325 const struct port_info *pi = netdev_priv(dev);
1326 struct adapter *adapter = pi->
adapter;
1328 return set_rxq_intr_params(adapter,
1337 static void cxgb4vf_get_pauseparam(
struct net_device *dev,
1340 struct port_info *pi = netdev_priv(dev);
1350 static int cxgb4vf_phys_id(
struct net_device *dev,
1354 struct port_info *pi = netdev_priv(dev);
1388 "TxBroadcastBytes ",
1389 "TxBroadcastFrames ",
1390 "TxMulticastBytes ",
1391 "TxMulticastFrames ",
1397 "RxBroadcastBytes ",
1398 "RxBroadcastFrames ",
1399 "RxMulticastBytes ",
1400 "RxMulticastFrames ",
1421 static int cxgb4vf_get_sset_count(
struct net_device *dev,
int sset)
1435 static void cxgb4vf_get_strings(
struct net_device *dev,
1441 memcpy(data, stats_strings,
sizeof(stats_strings));
1450 static void collect_sge_port_stats(
const struct adapter *adapter,
1458 memset(stats, 0,
sizeof(*stats));
1459 for (qs = 0; qs < pi->
nqsets; qs++, rxq++, txq++) {
1462 stats->
rx_csum += rxq->stats.rx_cso;
1463 stats->
vlan_ex += rxq->stats.vlan_ex;
1465 stats->
lro_pkts += rxq->stats.lro_pkts;
1473 static void cxgb4vf_get_ethtool_stats(
struct net_device *dev,
1477 struct port_info *pi = netdev2pinfo(dev);
1478 struct adapter *adapter = pi->
adapter;
1491 static int cxgb4vf_get_regs_len(
struct net_device *dev)
1499 static void reg_block_dump(
struct adapter *adapter,
void *regbuf,
1500 unsigned int start,
unsigned int end)
1504 for ( ; start <=
end; start +=
sizeof(
u32)) {
1513 *bp++ = t4_read_reg(adapter, start);
1520 static void cxgb4vf_get_regs(
struct net_device *dev,
1524 struct adapter *adapter = netdev2adap(dev);
1526 regs->
version = mk_adap_vers(adapter);
1533 reg_block_dump(adapter, regbuf,
1536 reg_block_dump(adapter, regbuf,
1539 reg_block_dump(adapter, regbuf,
1542 reg_block_dump(adapter, regbuf,
1546 reg_block_dump(adapter, regbuf,
1554 static void cxgb4vf_get_wol(
struct net_device *dev,
1565 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1567 static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1568 .get_settings = cxgb4vf_get_settings,
1569 .get_drvinfo = cxgb4vf_get_drvinfo,
1570 .get_msglevel = cxgb4vf_get_msglevel,
1571 .set_msglevel = cxgb4vf_set_msglevel,
1572 .get_ringparam = cxgb4vf_get_ringparam,
1573 .set_ringparam = cxgb4vf_set_ringparam,
1574 .get_coalesce = cxgb4vf_get_coalesce,
1575 .set_coalesce = cxgb4vf_set_coalesce,
1576 .get_pauseparam = cxgb4vf_get_pauseparam,
1578 .get_strings = cxgb4vf_get_strings,
1579 .set_phys_id = cxgb4vf_phys_id,
1580 .get_sset_count = cxgb4vf_get_sset_count,
1581 .get_ethtool_stats = cxgb4vf_get_ethtool_stats,
1582 .get_regs_len = cxgb4vf_get_regs_len,
1583 .get_regs = cxgb4vf_get_regs,
1584 .get_wol = cxgb4vf_get_wol,
1597 static int sge_qinfo_show(
struct seq_file *seq,
void *v)
1599 struct adapter *adapter = seq->
private;
1606 #define S3(fmt_spec, s, v) \
1608 seq_printf(seq, "%-12s", s); \
1609 for (qs = 0; qs < n; ++qs) \
1610 seq_printf(seq, " %16" fmt_spec, v); \
1611 seq_putc(seq, '\n'); \
1613 #define S(s, v) S3("s", s, v)
1614 #define T(s, v) S3("u", s, txq[qs].v)
1615 #define R(s, v) S3("u", s, rxq[qs].v)
1617 if (r < eth_entries) {
1622 S(
"QType:",
"Ethernet");
1625 ? rxq[qs].
rspq.netdev->name
1630 netdev_priv(rxq[qs].rspq.
netdev))->port_id
1632 T(
"TxQ ID:",
q.abs_id);
1633 T(
"TxQ size:",
q.size);
1634 T(
"TxQ inuse:",
q.in_use);
1635 T(
"TxQ PIdx:",
q.pidx);
1636 T(
"TxQ CIdx:",
q.cidx);
1638 R(
"RspQ size:", rspq.
size);
1640 S3(
"u",
"Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
1641 S3(
"u",
"Intr pktcnt:",
1642 adapter->
sge.counter_val[rxq[qs].
rspq.pktcnt_idx]);
1643 R(
"RspQ CIdx:", rspq.
cidx);
1644 R(
"RspQ Gen:", rspq.
gen);
1645 R(
"FL ID:",
fl.abs_id);
1647 R(
"FL avail:",
fl.avail);
1648 R(
"FL PIdx:",
fl.pidx);
1649 R(
"FL CIdx:",
fl.cidx);
1657 seq_printf(seq,
"%-12s %16s\n",
"QType:",
"FW event queue");
1659 seq_printf(seq,
"%-12s %16u\n",
"Intr delay:",
1660 qtimer_val(adapter, evtq));
1661 seq_printf(seq,
"%-12s %16u\n",
"Intr pktcnt:",
1665 }
else if (r == 1) {
1666 const struct sge_rspq *intrq = &adapter->
sge.intrq;
1668 seq_printf(seq,
"%-12s %16s\n",
"QType:",
"Interrupt Queue");
1670 seq_printf(seq,
"%-12s %16u\n",
"Intr delay:",
1671 qtimer_val(adapter, intrq));
1672 seq_printf(seq,
"%-12s %16u\n",
"Intr pktcnt:",
1694 static int sge_queue_entries(
const struct adapter *adapter)
1700 static void *sge_queue_start(
struct seq_file *seq, loff_t *
pos)
1704 return *pos < entries ? (
void *)((
uintptr_t)*pos + 1) :
NULL;
1707 static void sge_queue_stop(
struct seq_file *seq,
void *v)
1711 static void *sge_queue_next(
struct seq_file *seq,
void *v, loff_t *pos)
1713 int entries = sge_queue_entries(seq->
private);
1716 return *pos < entries ? (
void *)((
uintptr_t)*pos + 1) :
NULL;
1720 .start = sge_queue_start,
1721 .next = sge_queue_next,
1722 .stop = sge_queue_stop,
1723 .show = sge_qinfo_show
1739 .open = sge_qinfo_open,
1750 static int sge_qstats_show(
struct seq_file *seq,
void *v)
1752 struct adapter *adapter = seq->
private;
1759 #define S3(fmt, s, v) \
1761 seq_printf(seq, "%-16s", s); \
1762 for (qs = 0; qs < n; ++qs) \
1763 seq_printf(seq, " %8" fmt, v); \
1764 seq_putc(seq, '\n'); \
1766 #define S(s, v) S3("s", s, v)
1768 #define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
1769 #define T(s, v) T3("lu", s, v)
1771 #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
1772 #define R(s, v) R3("lu", s, v)
1774 if (r < eth_entries) {
1779 S(
"QType:",
"Ethernet");
1782 ? rxq[qs].
rspq.netdev->name
1785 R(
"RxPackets:", stats.pkts);
1786 R(
"RxCSO:", stats.rx_cso);
1787 R(
"VLANxtract:", stats.vlan_ex);
1788 R(
"LROmerged:", stats.lro_merged);
1789 R(
"LROpackets:", stats.lro_pkts);
1790 R(
"RxDrops:", stats.rx_drops);
1794 T(
"TxQFull:",
q.stops);
1795 T(
"TxQRestarts:",
q.restarts);
1797 R(
"FLAllocErr:",
fl.alloc_failed);
1798 R(
"FLLrgAlcErr:",
fl.large_alloc_failed);
1799 R(
"FLStarving:",
fl.starving);
1805 const struct sge_rspq *evtq = &adapter->
sge.fw_evtq;
1807 seq_printf(seq,
"%-8s %16s\n",
"QType:",
"FW event queue");
1808 seq_printf(seq,
"%-16s %8u\n",
"RspQNullInts:",
1812 }
else if (r == 1) {
1813 const struct sge_rspq *intrq = &adapter->
sge.intrq;
1815 seq_printf(seq,
"%-8s %16s\n",
"QType:",
"Interrupt Queue");
1816 seq_printf(seq,
"%-16s %8u\n",
"RspQNullInts:",
1840 static int sge_qstats_entries(
const struct adapter *adapter)
1846 static void *sge_qstats_start(
struct seq_file *seq, loff_t *pos)
1848 int entries = sge_qstats_entries(seq->
private);
1850 return *pos < entries ? (
void *)((
uintptr_t)*pos + 1) :
NULL;
1853 static void sge_qstats_stop(
struct seq_file *seq,
void *v)
1857 static void *sge_qstats_next(
struct seq_file *seq,
void *v, loff_t *pos)
1859 int entries = sge_qstats_entries(seq->
private);
1862 return *pos < entries ? (
void *)((
uintptr_t)*pos + 1) :
NULL;
1866 .start = sge_qstats_start,
1867 .next = sge_qstats_next,
1868 .stop = sge_qstats_stop,
1869 .show = sge_qstats_show
1872 static int sge_qstats_open(
struct inode *inode,
struct file *file)
1874 int res =
seq_open(file, &sge_qstats_seq_ops);
1885 .open = sge_qstats_open,
1894 static int resources_show(
struct seq_file *seq,
void *v)
1896 struct adapter *adapter = seq->
private;
1899 #define S(desc, fmt, var) \
1900 seq_printf(seq, "%-60s " fmt "\n", \
1901 desc " (" #var "):", vfres->var)
1903 S(
"Virtual Interfaces",
"%d",
nvi);
1904 S(
"Egress Queues",
"%d",
neq);
1906 S(
"Ingress Queues/w Free Lists/Interrupts",
"%d",
niqflint);
1907 S(
"Ingress Queues",
"%d",
niq);
1908 S(
"Traffic Class",
"%d",
tc);
1909 S(
"Port Access Rights Mask",
"%#x",
pmask);
1910 S(
"MAC Address Filters",
"%d",
nexactf);
1911 S(
"Firmware Command Read Capabilities",
"%#x",
r_caps);
1912 S(
"Firmware Command Write/Execute Capabilities",
"%#x",
wx_caps);
1919 static int resources_open(
struct inode *inode,
struct file *file)
1926 .open = resources_open,
1935 static int interfaces_show(
struct seq_file *seq,
void *v)
1938 seq_puts(seq,
"Interface Port VIID\n");
1940 struct adapter *adapter = seq->
private;
1943 struct port_info *pi = netdev_priv(dev);
1951 static inline void *interfaces_get_idx(
struct adapter *adapter, loff_t pos)
1953 return pos <= adapter->
params.nports
1958 static void *interfaces_start(
struct seq_file *seq, loff_t *pos)
1961 ? interfaces_get_idx(seq->
private, *pos)
1965 static void *interfaces_next(
struct seq_file *seq,
void *v, loff_t *pos)
1968 return interfaces_get_idx(seq->
private, *pos);
1971 static void interfaces_stop(
struct seq_file *seq,
void *v)
1976 .start = interfaces_start,
1977 .next = interfaces_next,
1978 .stop = interfaces_stop,
1979 .show = interfaces_show
1982 static int interfaces_open(
struct inode *inode,
struct file *file)
1984 int res =
seq_open(file, &interfaces_seq_ops);
1995 .open = interfaces_open,
2011 {
"sge_qinfo",
S_IRUGO, &sge_qinfo_debugfs_fops },
2012 {
"sge_qstats",
S_IRUGO, &sge_qstats_proc_fops },
2013 {
"resources",
S_IRUGO, &resources_proc_fops },
2014 {
"interfaces",
S_IRUGO, &interfaces_proc_fops },
2026 static int __devinit setup_debugfs(
struct adapter *adapter)
2035 for (i = 0; i <
ARRAY_SIZE(debugfs_files); i++)
2037 debugfs_files[i].
mode,
2040 debugfs_files[i].
fops);
2049 static void cleanup_debugfs(
struct adapter *adapter)
2067 static int __devinit adap_init0(
struct adapter *adapter)
2071 struct sge *s = &adapter->
sge;
2112 " device parameters: err=%d\n", err);
2118 " VPD parameters: err=%d\n", err);
2124 " SGE parameters: err=%d\n", err);
2130 " RSS parameters: err=%d\n", err);
2133 if (adapter->
params.rss.mode !=
2136 " mode %d\n", adapter->
params.rss.mode);
2150 s->
timer_val[0] = core_ticks_to_us(adapter,
2152 s->
timer_val[1] = core_ticks_to_us(adapter,
2154 s->
timer_val[2] = core_ticks_to_us(adapter,
2156 s->
timer_val[3] = core_ticks_to_us(adapter,
2158 s->
timer_val[4] = core_ticks_to_us(adapter,
2160 s->
timer_val[5] = core_ticks_to_us(adapter,
2180 " resources: err=%d\n", err);
2210 " ingress/egress queues (%d/%d); using minimum for"
2211 " number of Queue Sets\n", ethqsets, vfres->
nethctrl);
2214 if (vfres->
neq < ethqsets*2) {
2216 " to support Queue Sets (%d); reducing allowed Queue"
2217 " Sets\n", vfres->
neq, ethqsets);
2218 ethqsets = vfres->
neq/2;
2225 if (vfres->
niq != 0 || vfres->
neq > ethqsets*2) {
2227 " ignored\n", vfres->
niq, vfres->
neq - ethqsets*2);
2236 if (adapter->
sge.max_ethqsets < adapter->
params.nports) {
2238 " virtual interfaces (too few Queue Sets)\n",
2239 adapter->
sge.max_ethqsets, adapter->
params.nports);
2240 adapter->
params.nports = adapter->
sge.max_ethqsets;
2242 if (adapter->
params.nports == 0) {
2250 static inline void init_rspq(
struct sge_rspq *rspq,
u8 timer_idx,
2251 u8 pkt_cnt_idx,
unsigned int size,
2252 unsigned int iqe_size)
2269 static void __devinit cfg_queues(
struct adapter *adapter)
2271 struct sge *s = &adapter->
sge;
2272 int q10g, n10g, qidx, pidx,
qs;
2287 n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2296 int n1g = (adapter->params.nports - n10g);
2297 q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2309 struct port_info *pi = adap2pinfo(adapter, pidx);
2332 init_rspq(&rxq->
rspq, 0, 0, 1024, iqe_size);
2364 static void __devinit reduce_ethqs(
struct adapter *adapter,
int n)
2376 pi = adap2pinfo(adapter, i);
2379 adapter->
sge.ethqsets--;
2380 if (adapter->
sge.ethqsets <= n)
2390 pi = adap2pinfo(adapter, i);
2403 static int __devinit enable_msix(
struct adapter *adapter)
2405 int i,
err, want, need;
2407 struct sge *s = &adapter->
sge;
2410 entries[i].
entry = i;
2428 " for %d Queue Sets\n", nqsets);
2430 if (nqsets < s->ethqsets)
2431 reduce_ethqs(adapter, nqsets);
2433 for (i = 0; i < want; ++
i)
2434 adapter->
msix_info[i].vec = entries[i].vector;
2435 }
else if (err > 0) {
2438 " not using MSI-X\n", err);
2444 .ndo_open = cxgb4vf_open,
2445 .ndo_stop = cxgb4vf_stop,
2447 .ndo_get_stats = cxgb4vf_get_stats,
2448 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2449 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2451 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2452 .ndo_change_mtu = cxgb4vf_change_mtu,
2453 .ndo_fix_features = cxgb4vf_fix_features,
2454 .ndo_set_features = cxgb4vf_set_features,
2455 #ifdef CONFIG_NET_POLL_CONTROLLER
2456 .ndo_poll_controller = cxgb4vf_poll_controller,
2468 static int version_printed;
2481 if (version_printed == 0) {
2483 version_printed = 1;
2491 dev_err(&pdev->
dev,
"cannot enable PCI device\n");
2501 dev_err(&pdev->
dev,
"cannot obtain PCI resources\n");
2502 goto err_disable_device;
2511 err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(64));
2513 dev_err(&pdev->
dev,
"unable to obtain 64-bit DMA for"
2514 " coherent allocations\n");
2515 goto err_release_regions;
2521 dev_err(&pdev->
dev,
"no usable DMA configuration\n");
2522 goto err_release_regions;
2535 adapter = kzalloc(
sizeof(*adapter),
GFP_KERNEL);
2538 goto err_release_regions;
2540 pci_set_drvdata(pdev, adapter);
2541 adapter->
pdev = pdev;
2553 if (!adapter->
regs) {
2554 dev_err(&pdev->
dev,
"cannot map device registers\n");
2556 goto err_free_adapter;
2562 adapter->
name = pci_name(pdev);
2564 err = adap_init0(adapter);
2571 pmask = adapter->
params.vfres.pmask;
2583 port_id =
ffs(pmask) - 1;
2587 dev_err(&pdev->
dev,
"cannot allocate VI for port %d:"
2588 " err=%d\n", port_id, viid);
2596 netdev = alloc_etherdev_mq(
sizeof(
struct port_info),
2598 if (netdev ==
NULL) {
2603 adapter->
port[pidx] = netdev;
2605 pi = netdev_priv(netdev);
2639 dev_err(&pdev->
dev,
"cannot initialize port %d\n",
2652 netdev = adapter->
port[pidx];
2658 dev_warn(&pdev->
dev,
"cannot register net device %s,"
2659 " skipping\n", netdev->
name);
2666 dev_err(&pdev->
dev,
"could not register any net devices\n");
2673 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2676 cxgb4vf_debugfs_root);
2681 setup_debugfs(adapter);
2690 if (msi ==
MSI_MSIX && enable_msix(adapter) == 0)
2693 err = pci_enable_msi(pdev);
2695 dev_err(&pdev->
dev,
"Unable to allocate %s interrupts;"
2697 msi ==
MSI_MSIX ?
"MSI-X or MSI" :
"MSI", err);
2698 goto err_free_debugfs;
2708 cfg_queues(adapter);
2716 adapter->
port[pidx]->name,
2733 cleanup_debugfs(adapter);
2739 netdev = adapter->
port[pidx];
2742 pi = netdev_priv(netdev);
2754 pci_set_drvdata(pdev,
NULL);
2756 err_release_regions:
2758 pci_set_drvdata(pdev,
NULL);
2774 struct adapter *adapter = pci_get_drvdata(pdev);
2792 adapter->flags &= ~USING_MSIX;
2802 cleanup_debugfs(adapter);
2817 pi = netdev_priv(netdev);
2823 pci_set_drvdata(pdev,
NULL);
2843 adapter = pci_get_drvdata(pdev);
2863 pi = netdev_priv(netdev);
/*
 * Build a PCI device-table entry matching the given Chelsio device ID
 * with any subvendor/subdevice; idx occupies the entry's final field.
 */
#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
2905 .name = KBUILD_MODNAME,
2906 .id_table = cxgb4vf_pci_tbl,
2907 .probe = cxgb4vf_pci_probe,
2915 static int __init cxgb4vf_module_init(
void)
2924 ": bad module parameter msi=%d; must be %d"
2925 " (MSI-X or MSI) or %d (MSI)\n",
2932 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2934 " debugfs entry, continuing\n");
2936 ret = pci_register_driver(&cxgb4vf_driver);
2937 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2945 static void __exit cxgb4vf_module_exit(
void)