35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 #include <linux/bitmap.h>
39 #include <linux/ctype.h>
45 #include <linux/if_vlan.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
63 #include <asm/uaccess.h>
71 #define DRV_VERSION "1.3.0-ko"
72 #define DRV_DESC "Chelsio T4 Network Driver"
79 #define MAX_SGE_TIMERVAL 200U
109 VFRES_NVI = VFRES_NPORTS,
110 VFRES_NETHCTRL = VFRES_NQSETS,
111 VFRES_NIQFLINT = VFRES_NQSETS+2,
112 VFRES_NEQ = VFRES_NQSETS*2,
129 unsigned int pf, unsigned int vf)
131 unsigned int portn, portvec;
146 if (adapter->params.nports == 0)
149 portn = pf % adapter->params.nports;
150 portvec = adapter->params.portvec;
158 unsigned int pmask = portvec ^ (portvec & (portvec-1));
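/*
 * Note on the line above: portvec & (portvec - 1) clears the lowest set bit,
 * so XOR-ing the result back with portvec isolates that bit.  pmask therefore
 * starts out as a one-hot mask for the lowest-numbered port in portvec
 * (e.g. portvec = 0xa gives pmask = 0x2).
 */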
178 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
179 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
180 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
182 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
211 #define FW_FNAME "cxgb4/t4fw.bin"
212 #define FW_CFNAME "cxgb4/t4-config.txt"
227 static uint force_init;
230 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
238 static uint force_old_init;
246 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
266 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
269 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
270 "0..4 in microseconds");
272 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
276 "thresholds 1..3 for queue interrupt packet counters");
290 static int rx_dma_offset = 2;
294 #ifdef CONFIG_PCI_IOV
296 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
298 static unsigned int num_vf[4];
328 static struct dentry *cxgb4_debugfs_root;
333 static const char *uld_str[] = { "RDMA", "iSCSI" };
337 if (!netif_carrier_ok(dev))
338 netdev_info(dev, "link down\n");
340 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
342 const char *s = "10Mbps";
357 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
367 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
379 static const char *mod_str[] = {
380 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
384 const struct port_info *pi = netdev_priv(dev);
387 netdev_info(dev, "port module unplugged\n");
389 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
396 static int set_addr_filters(const struct net_device *dev, bool sleep)
407 const struct port_info *pi = netdev_priv(dev);
412 addr[naddr++] = ha->addr;
413 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
415 naddr, addr, filt_idx, &uhash, sleep);
426 addr[naddr++] = ha->addr;
427 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
429 naddr, addr, filt_idx, &mhash, sleep);
439 uhash | mhash, sleep);
449 static int dbfifo_drain_delay = 1000;
452 "usecs to sleep while draining the dbfifo");
458 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
463 ret = set_addr_filters(dev, sleep_ok);
484 unsigned int mb = pi->adapter->fn;
523 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
525 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
529 netif_tx_wake_queue(eq->txq);
547 "unexpected CPL %#x on FW event queue\n", opcode);
565 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
594 struct adapter *adap = cookie;
608 static void name_msix_vecs(struct adapter *adap)
610 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
617 adap->port[0]->name);
622 const struct port_info *pi = netdev_priv(d);
624 for (i = 0; i < pi->nqsets; i++, msi_idx++)
635 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
636 adap->port[0]->name, i);
639 static int request_msix_queue_irqs(struct adapter *adap)
641 struct sge *s = &adap->sge;
642 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
645 adap->msix_info[1].desc, &s->fw_evtq);
652 adap->msix_info[msi_index].desc,
653 &s->ethrxq[ethqidx].rspq);
661 adap->msix_info[msi_index].desc,
670 adap->msix_info[msi_index].desc,
679 while (--rdmaqidx >= 0)
680 free_irq(adap->msix_info[--msi_index].vec,
682 while (--ofldqidx >= 0)
683 free_irq(adap->msix_info[--msi_index].vec,
685 while (--ethqidx >= 0)
686 free_irq(adap->msix_info[--msi_index].vec,
687 &s->ethrxq[ethqidx].rspq);
688 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
692 static void free_msix_queue_irqs(struct adapter *adap)
694 int i, msi_index = 2;
695 struct sge *s = &adap->sge;
725 for (i = 0; i < pi->rss_size; i++, queues++)
726 rss[i] = q[*queues].rspq.abs_id;
729 pi->rss_size, rss, pi->rss_size);
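/*
 * The RSS indirection table above is built from the port's queue-set
 * indices (*queues) translated into the absolute ingress-queue IDs
 * (rspq.abs_id) that the hardware expects, and is then written over the
 * port's full rss_size range, presumably by the firmware RSS-range command
 * whose trailing arguments are visible on the line above.
 */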
740 static int setup_rss(struct adapter *adap)
745 const struct port_info *pi = adap2pinfo(adap, i);
747 err = write_rss(pi, pi->rss);
757 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
760 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
766 static void quiesce_rx(struct adapter *adap)
774 napi_disable(&q->napi);
781 static void enable_rx(struct adapter *adap)
791 napi_enable(&q->napi);
807 static int setup_sge_queues(struct adapter *adap)
809 int err, msi_idx, i, j;
810 struct sge *s = &adap->sge;
812 bitmap_zero(s->starving_fl, MAX_EGRQ);
813 bitmap_zero(s->txq_maperr, MAX_EGRQ);
822 msi_idx = -((int)s->intrq.abs_id + 1);
826 msi_idx, NULL, fwevtq_handler);
838 for (j = 0; j < pi->nqsets; j++, q++) {
849 for (j = 0; j < pi->nqsets; j++, t++) {
851 netdev_get_tx_queue(dev, j),
852 s->fw_evtq.cntxt_id);
866 &q->fl, uldrx_handler);
872 s->fw_evtq.cntxt_id);
883 msi_idx, &q->fl, uldrx_handler);
912 static int upgrade_fw(struct adapter *adap)
923 ", error %d\n", ret);
938 vers > adap->params.fw_vers) {
939 dev_info(dev, "upgrading firmware ...\n");
943 dev_info(dev, "firmware successfully upgraded to "
950 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
978 static void t4_free_mem(void *addr)
980 if (is_vmalloc_addr(addr))
986 static inline int is_offload(const struct adapter *adap)
988 return adap->params.offload;
997 return netdev2adap(dev)->msg_enable;
1002 netdev2adap(dev)->msg_enable = val;
1008 "TxBroadcastFrames ",
1009 "TxMulticastFrames ",
1015 "TxFrames128To255 ",
1016 "TxFrames256To511 ",
1017 "TxFrames512To1023 ",
1018 "TxFrames1024To1518 ",
1019 "TxFrames1519ToMax ",
1034 "RxBroadcastFrames ",
1035 "RxMulticastFrames ",
1047 "RxFrames128To255 ",
1048 "RxFrames256To511 ",
1049 "RxFrames512To1023 ",
1050 "RxFrames1024To1518 ",
1051 "RxFrames1519ToMax ",
1063 "RxBG0FramesDropped ",
1064 "RxBG1FramesDropped ",
1065 "RxBG2FramesDropped ",
1066 "RxBG3FramesDropped ",
1067 "RxBG0FramesTrunc ",
1068 "RxBG1FramesTrunc ",
1069 "RxBG2FramesTrunc ",
1070 "RxBG3FramesTrunc ",
1081 static int get_sset_count(struct net_device *dev, int sset)
1091 #define T4_REGMAP_SIZE (160 * 1024)
1093 static int get_regs_len(struct net_device *dev)
1098 static int get_eeprom_len(struct net_device *dev)
1105 struct adapter *adapter = netdev2adap(dev);
1112 if (adapter->params.fw_vers)
1114 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1128 memcpy(data, stats_strings, sizeof(stats_strings));
1145 static void collect_sge_port_stats(const struct adapter *adap,
1152 memset(s, 0, sizeof(*s));
1153 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1156 s->rx_csum += rx->stats.rx_cso;
1157 s->vlan_ex += rx->stats.vlan_ex;
1167 struct port_info *pi = netdev_priv(dev);
1168 struct adapter *adapter = pi->adapter;
1182 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1184 return 4 | (ap->params.rev << 10) | (1 << 16);
1187 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1192 for ( ; start <= end; start += sizeof(u32))
1193 *p++ = t4_read_reg(ap, start);
1199 static const unsigned int reg_ranges[] = {
1420 struct adapter *ap = netdev2adap(dev);
1422 regs->version = mk_adap_vers(ap);
1425 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1426 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1429 static int restart_autoneg(struct net_device *dev)
1433 if (!netif_running(dev))
1441 static int identify_port(struct net_device *dev,
1445 struct adapter *adap = netdev2adap(dev);
1457 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1472 if (caps & FW_PORT_CAP_SPEED_1G)
1494 static unsigned int to_fw_linkcaps(unsigned int caps)
1509 const struct port_info *p = netdev_priv(dev);
1541 ethtool_cmd_speed_set(cmd,
1542 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1550 static unsigned int speed_to_caps(int speed)
1566 u32 speed = ethtool_cmd_speed(cmd);
1583 cap = speed_to_caps(speed);
1599 if (netif_running(dev))
1605 static void get_pauseparam(struct net_device *dev,
1615 static int set_pauseparam(struct net_device *dev,
1632 if (netif_running(dev))
1640 const struct port_info *pi = netdev_priv(dev);
1641 const struct sge *s = &pi->adapter->sge;
1657 const struct port_info *pi = netdev_priv(dev);
1658 struct adapter *adapter = pi->adapter;
1659 struct sge *s = &adapter->sge;
1671 for (i = 0; i < pi->nqsets; ++i) {
1679 static int closest_timer(const struct sge *s, int time)
1687 if (delta < min_delta) {
1695 static int closest_thres(const struct sge *s, int thres)
1703 if (delta < min_delta) {
1714 static unsigned int qtimer_val(const struct adapter *adap,
1732 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1733 unsigned int us, unsigned int cnt)
1735 if ((us | cnt) == 0)
1742 new_idx = closest_thres(&adap->sge, cnt);
1756 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1763 const struct port_info *pi = netdev_priv(dev);
1764 struct adapter *adap = pi->adapter;
1766 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1772 const struct port_info *pi = netdev_priv(dev);
1773 const struct adapter *adap = pi->adapter;
1799 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1802 if (phys_addr < 1024)
1803 return phys_addr + (31 << 10);
1804 if (phys_addr < 1024 + fn)
1805 return 31744 - fn + phys_addr - 1024;
1807 return phys_addr - 1024 - fn;
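/*
 * Mapping sketch for eeprom_ptov() as visible above (fn is assumed to have
 * already been scaled to the per-function window size before these
 * comparisons): the 1KB physical VPD area [0, 1024) maps to the top of the
 * virtual space at offset 31744 (31 << 10), the function's own window sits
 * just below it, and everything else shifts down by 1024 + fn so it starts
 * at virtual offset 0.  E.g. phys_addr = 0 maps to 31744.
 */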
1814 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1820 return vaddr < 0 ? vaddr : 0;
1823 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1829 return vaddr < 0 ? vaddr : 0;
1832 #define EEPROM_MAGIC 0x38E2F10C
1838 struct adapter *adapter = netdev2adap(dev);
1846 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1859 u32 aligned_offset, aligned_len, *p;
1860 struct adapter *adapter = netdev2adap(dev);
1865 aligned_offset = eeprom->offset & ~3;
1866 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1868 if (adapter->fn > 0) {
1871 if (aligned_offset < start ||
1876 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1883 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1884 if (!err && aligned_len > 4)
1885 err = eeprom_rd_phys(adapter,
1886 aligned_offset + aligned_len - 4,
1887 (u32 *)&buf[aligned_len - 4]);
1898 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1899 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1900 aligned_offset += 4;
1915 struct adapter *adap = netdev2adap(netdev);
1917 ef->data[sizeof(ef->data) - 1] = '\0';
1929 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1930 #define BCAST_CRC 0xa0ccc1a6
1935 wol->wolopts = netdev2adap(dev)->wol;
1942 struct port_info *pi = netdev_priv(dev);
1961 const struct port_info *pi = netdev_priv(dev);
1970 !!(features & NETIF_F_HW_VLAN_RX), true);
1978 const struct port_info *pi = netdev_priv(dev);
1985 const struct port_info *pi = netdev_priv(dev);
1993 static int set_rss_table(struct net_device *dev, const u32 *p)
1996 struct port_info *pi = netdev_priv(dev);
2001 return write_rss(pi, pi->rss);
2008 const struct port_info *pi = netdev_priv(dev);
2010 switch (info->cmd) {
2024 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2028 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2034 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2045 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2046 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2049 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2055 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2068 static const struct ethtool_ops cxgb_ethtool_ops = {
2074 .get_ringparam = get_sge_param,
2075 .set_ringparam = set_sge_param,
2085 .set_phys_id = identify_port,
2086 .nway_reset = restart_autoneg,
2094 .get_rxfh_indir_size = get_rss_table_size,
2095 .get_rxfh_indir = get_rss_table,
2096 .set_rxfh_indir = set_rss_table,
2097 .flash_device = set_flash,
2107 loff_t avail = file->f_path.dentry->d_inode->i_size;
2115 if (count > avail - pos)
2116 count = avail - pos;
2130 ofst = pos % sizeof(data);
2131 len = min(count, sizeof(data) - ofst);
2139 count = pos - *ppos;
2151 static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2152 unsigned int idx, unsigned int size_mb)
2157 (void *)adap + idx, &mem_debugfs_fops);
2159 de->d_inode->i_size = size_mb << 20;
2162 static int __devinit setup_debugfs(struct adapter *adap)
2171 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2173 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2175 add_debugfs_mem(adap, "mc", MEM_MC,
2194 spin_lock_bh(&t->atid_lock);
2203 spin_unlock_bh(&t->atid_lock);
2215 spin_lock_bh(&t->atid_lock);
2219 spin_unlock_bh(&t->atid_lock);
2233 if (stid < t->nstids)
2272 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2287 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2310 struct adapter *adap;
2318 p = (void *)p - chan;
2328 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2350 mk_tid_release(skb, chan, tid);
2353 cxgb4_queue_tid_release(t, chan, tid);
2362 static int tid_init(struct tid_info *t)
2365 unsigned int natids = t->natids;
2411 struct adapter *adap;
2418 adap = netdev2adap(dev);
2426 chan = rxq_to_chan(&adap->sge, queue);
2449 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
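/*
 * The loop above walks the sorted hardware MTU table and stops at the
 * largest entry that does not exceed the requested mtu.  For a hypothetical
 * table {1500, 2048, 4096, 9000} and mtu = 3000 it would select 2048.
 */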
2465 return netdev2pinfo(dev)->tx_chan;
2471 struct adapter *adap = netdev2adap(dev);
2487 return netdev2pinfo(dev)->viid;
2499 return netdev2pinfo(dev)->port_id;
2506 struct adapter *adap = pci_get_drvdata(pdev);
2515 const unsigned int *pgsz_order)
2517 struct adapter *adap = netdev2adap(dev);
2521 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2522 HPZ3(pgsz_order[3]));
2528 struct adapter *adap = netdev2adap(dev);
2537 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2554 struct adapter *adap = netdev2adap(dev);
2555 u16 hw_pidx, hw_cidx;
2558 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2562 if (pidx != hw_pidx) {
2565 if (pidx >= hw_pidx)
2566 delta = pidx - hw_pidx;
2568 delta = size - hw_pidx + pidx;
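/*
 * The producer index is an offset into a ring, so when the software pidx
 * has wrapped past the end of the queue while the hardware copy has not,
 * the difference is taken modulo the ring size: e.g. size = 1024,
 * hw_pidx = 1020, pidx = 4 gives delta = 8.
 */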
2580 static void check_neigh_update(struct neighbour *neigh)
2587 parent = netdev->dev.parent;
2588 if (parent && parent->driver == &cxgb4_driver.driver)
2597 check_neigh_update(data);
2606 static bool netevent_registered;
2608 .notifier_call = netevent_cb
2611 static void drain_db_fifo(struct adapter *adap, int usecs)
2624 static void disable_txq_db(struct sge_txq *q)
2631 static void enable_txq_db(struct sge_txq *q)
2638 static void disable_dbs(struct adapter *adap)
2643 disable_txq_db(&adap->sge.ethtxq[i].q);
2645 disable_txq_db(&adap->sge.ofldtxq[i].q);
2647 disable_txq_db(&adap->sge.ctrlq[i].q);
2650 static void enable_dbs(struct adapter *adap)
2655 enable_txq_db(&adap->sge.ethtxq[i].q);
2657 enable_txq_db(&adap->sge.ofldtxq[i].q);
2659 enable_txq_db(&adap->sge.ctrlq[i].q);
2664 u16 hw_pidx, hw_cidx;
2667 spin_lock_bh(&q->db_lock);
2668 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2671 if (q->db_pidx != hw_pidx) {
2674 if (q->db_pidx >= hw_pidx)
2675 delta = q->db_pidx - hw_pidx;
2677 delta = q->size - hw_pidx + q->db_pidx;
2680 QID(q->cntxt_id) | PIDX(delta));
2684 spin_unlock_bh(&q->db_lock);
2686 CH_WARN(adap, "DB drop recovery failed.\n");
2688 static void recover_all_queues(struct adapter *adap)
2693 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2695 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2697 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2709 static void process_db_full(struct work_struct *work)
2711 struct adapter *adap;
2716 drain_db_fifo(adap, dbfifo_drain_delay);
2723 static void process_db_drop(struct work_struct *work)
2725 struct adapter *adap;
2732 drain_db_fifo(adap, 1);
2733 recover_all_queues(adap);
2749 static void uld_attach(struct adapter *adap, unsigned int uld)
2761 lli.rxq_ids = adap->sge.rdma_rxq;
2762 lli.nrxq = adap->sge.rdmaqs;
2764 lli.rxq_ids = adap->sge.ofld_rxq;
2765 lli.nrxq = adap->sge.ofldqsets;
2767 lli.ntxq = adap->sge.ofldqsets;
2770 lli.wr_cred = adap->params.ofldq_wr_cred;
2784 handle = ulds[uld].add(&lli);
2785 if (IS_ERR(handle)) {
2787 "could not attach to the %s driver, error %ld\n",
2788 uld_str[uld], PTR_ERR(handle));
2794 if (!netevent_registered) {
2796 netevent_registered = true;
2803 static void attach_ulds(struct adapter *adap)
2811 uld_attach(adap, i);
2815 static void detach_ulds(struct adapter *adap)
2827 if (netevent_registered && list_empty(&adapter_list)) {
2829 netevent_registered = false;
2841 ulds[i].state_change(adap->uld_handle[i], new_state);
2857 struct adapter *adap;
2859 if (type >= CXGB4_ULD_MAX)
2862 if (ulds[type].add) {
2868 uld_attach(adap, type);
2882 struct adapter *adap;
2884 if (type >= CXGB4_ULD_MAX)
2905 static int cxgb_up(struct adapter *adap)
2909 err = setup_sge_queues(adap);
2912 err = setup_rss(adap);
2917 name_msix_vecs(adap);
2923 err = request_msix_queue_irqs(adap);
2931 adap->port[0]->name, adap);
2949 static void cxgb_down(struct adapter *adapter)
2959 free_msix_queue_irqs(adapter);
2963 quiesce_rx(adapter);
2975 struct port_info *pi = netdev_priv(dev);
2976 struct adapter *adapter = pi->adapter;
2981 err = cxgb_up(adapter);
2986 err = link_start(dev);
2988 netif_tx_start_all_queues(dev);
2992 static int cxgb_close(struct net_device *dev)
2994 struct port_info *pi = netdev_priv(dev);
2995 struct adapter *adapter = pi->adapter;
2997 netif_tx_stop_all_queues(dev);
3007 struct adapter *adapter = p->adapter;
3026 stats.rx_ovflow2 + stats.rx_ovflow3 +
3027 stats.rx_trunc0 + stats.rx_trunc1 +
3028 stats.rx_trunc2 + stats.rx_trunc3;
3039 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3044 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3047 int ret = 0, prtad, devad;
3048 struct port_info *pi = netdev_priv(dev);
3059 if (mdio_phy_id_is_c45(data->phy_id)) {
3060 prtad = mdio_phy_id_prtad(data->phy_id);
3061 devad = mdio_phy_id_devad(data->phy_id);
3062 } else if (data->phy_id < 32) {
3083 static void cxgb_set_rxmode(struct net_device *dev)
3086 set_rxmode(dev, -1, false);
3089 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3092 struct port_info *pi = netdev_priv(dev);
3094 if (new_mtu < 81 || new_mtu > MAX_MTU)
3103 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3107 struct port_info *pi = netdev_priv(dev);
3109 if (!is_valid_ether_addr(addr->sa_data))
3122 #ifdef CONFIG_NET_POLL_CONTROLLER
3123 static void cxgb_netpoll(struct net_device *dev)
3125 struct port_info *pi = netdev_priv(dev);
3126 struct adapter *adap = pi->adapter;
3132 for (i = pi->nqsets; i; i--, rx++)
3140 .ndo_open = cxgb_open,
3141 .ndo_stop = cxgb_close,
3143 .ndo_get_stats64 = cxgb_get_stats,
3144 .ndo_set_rx_mode = cxgb_set_rxmode,
3145 .ndo_set_mac_address = cxgb_set_mac_addr,
3146 .ndo_set_features = cxgb_set_features,
3148 .ndo_do_ioctl = cxgb_ioctl,
3149 .ndo_change_mtu = cxgb_change_mtu,
3150 #ifdef CONFIG_NET_POLL_CONTROLLER
3151 .ndo_poll_controller = cxgb_netpoll,
3159 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3162 static void setup_memwin(struct adapter *adap)
3178 static void setup_memwin_rdma(struct adapter *adap)
3180 if (adap->vres.ocq.size) {
3181 unsigned int start, sz_kb;
3191 adap->vres.ocq.start);
3203 memset(c, 0, sizeof(*c));
3207 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
3217 } else if (vf_acls) {
3223 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
3255 #define MAX_ATIDS 8192U
3273 static int adap_init0_tweaks(struct adapter *adapter)
3285 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3287 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3308 static int adap_init0_config(struct adapter *adapter, int reset)
3312 unsigned long mtype = 0, maddr = 0;
3313 u32 finiver, finicsum, cfcsum;
3314 int ret, using_flash;
3346 adapter->fn, 0, 1, params, val);
3358 size_t resid = cf->size & 0x3;
3359 size_t size = cf->size & ~0x3;
3367 if (ret == 0 && resid != 0) {
3374 last.word = data[size >> 2];
3375 for (i = resid; i < 4; i++)
3395 memset(&caps_cmd, 0, sizeof(caps_cmd));
3396 caps_cmd.op_to_write =
3400 caps_cmd.retval_len16 =
3405 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3410 finiver = ntohl(caps_cmd.finiver);
3411 finicsum = ntohl(caps_cmd.finicsum);
3412 cfcsum = ntohl(caps_cmd.cfcsum);
3413 if (finicsum != cfcsum)
3415 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3421 caps_cmd.op_to_write =
3426 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3435 ret = adap_init0_tweaks(adapter);
3454 "Configuration File %s, version %#x, computed checksum %#x\n",
3477 static int adap_init0_no_config(struct adapter *adapter, int reset)
3479 struct sge *s = &adapter->sge;
3497 memset(&caps_cmd, 0, sizeof(caps_cmd));
3501 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3511 } else if (vf_acls) {
3517 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3526 ret = adap_init0_tweaks(adapter);
3553 pfvfres_pmask(adapter, adapter->fn, 0),
3574 #ifdef CONFIG_PCI_IOV
3586 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3587 if (num_vf[pf] <= 0)
3591 for (vf = 1; vf <= num_vf[pf]; vf++) {
3594 VFRES_NEQ, VFRES_NETHCTRL,
3595 VFRES_NIQFLINT, VFRES_NIQ,
3596 VFRES_TC, VFRES_NVI,
3602 VFRES_R_CAPS, VFRES_WX_CAPS);
3606 "provision pf/vf=%d/%d; "
3607 "err=%d\n", pf, vf, ret);
3624 switch (tp_vlan_pri_map & (1 << j)) {
3662 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
3663 " using %#x\n", tp_vlan_pri_map, bits,
3668 v = tp_vlan_pri_map;
3682 if (tp_vlan_pri_map)
3707 "driver parameters\n");
3720 static int adap_init0(struct adapter *adap)
3725 u32 params[7], val[7];
3742 if (ret == adap->mbox)
3756 if (ret == -EINVAL || ret > 0) {
3757 if (upgrade_fw(adap) >= 0) {
3795 adap->params.portvec = port_vec;
3805 "Adapter already initialized\n",
3810 "Initializing adapter\n");
3818 "configuration file.\n");
3820 ret = adap_init0_no_config(adap, reset);
3840 ret = adap_init0_no_config(adap, reset);
3849 ret = adap_init0_config(adap, reset);
3852 "No Configuration File present "
3853 "on adapter. Using hard-wired "
3854 "configuration parameters.\n");
3855 ret = adap_init0_no_config(adap, reset);
3861 "could not initialize adapter, error %d\n",
3880 if (is_bypass_device(adap->pdev->device))
3886 #define FW_PARAM_DEV(param) \
3887 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3888 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3890 #define FW_PARAM_PFVF(param) \
3891 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3892 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
3893 FW_PARAMS_PARAM_Y(0) | \
3894 FW_PARAMS_PARAM_Z(0)
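/*
 * Illustrative use of these helpers (a sketch modelled on the query calls in
 * the surrounding function; t4_query_params() and the EQ_START/FILTER_START
 * parameter names are assumed, not shown in this excerpt):
 *     params[0] = FW_PARAM_PFVF(EQ_START);
 *     params[1] = FW_PARAM_PFVF(FILTER_START);
 *     ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
 */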
3905 adap->sge.egr_start = val[0];
3908 adap->tids.ftid_base = val[3];
3909 adap->tids.nftids = val[4] - val[3] + 1;
3910 adap->sge.ingr_start = val[5];
3919 if ((val[0] != val[1]) && (ret >= 0)) {
3921 adap->tids.aftid_base = val[0];
3922 adap->tids.aftid_end = val[1];
3929 memset(&caps_cmd, 0, sizeof(caps_cmd));
3933 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3938 if (caps_cmd.ofldcaps) {
3950 adap->tids.ntids = val[0];
3952 adap->tids.stid_base = val[1];
3953 adap->tids.nstids = val[2] - val[1] + 1;
3964 adap->tids.sftid_base = adap->tids.ftid_base +
3966 adap->tids.nsftids = adap->tids.nftids -
3968 adap->tids.nftids = adap->tids.sftid_base -
3969 adap->tids.ftid_base;
3971 adap->vres.ddp.start = val[3];
3972 adap->vres.ddp.size = val[4] - val[3] + 1;
3973 adap->params.ofldq_wr_cred = val[5];
3975 adap->params.offload = 1;
3977 if (caps_cmd.rdmacaps) {
3988 adap->vres.stag.start = val[0];
3989 adap->vres.stag.size = val[1] - val[0] + 1;
3990 adap->vres.rq.start = val[2];
3991 adap->vres.rq.size = val[3] - val[2] + 1;
3992 adap->vres.pbl.start = val[4];
3993 adap->vres.pbl.size = val[5] - val[4] + 1;
4004 adap->vres.qp.start = val[0];
4005 adap->vres.qp.size = val[1] - val[0] + 1;
4006 adap->vres.cq.start = val[2];
4007 adap->vres.cq.size = val[3] - val[2] + 1;
4008 adap->vres.ocq.start = val[4];
4009 adap->vres.ocq.size = val[5] - val[4] + 1;
4011 if (caps_cmd.iscsicaps) {
4018 adap->vres.iscsi.start = val[0];
4019 adap->vres.iscsi.size = val[1] - val[0] + 1;
4021 #undef FW_PARAM_PFVF
4035 for (j = 0; j < NCHAN; j++)
4036 adap->params.tp.tx_modq[j] = j;
4058 struct adapter *adap = pci_get_drvdata(pdev);
4084 struct adapter *adap = pci_get_drvdata(pdev);
4093 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
4107 if (adap_init1(adap, &c))
4111 struct port_info *p = adap2pinfo(adap, i);
4129 static void eeh_resume(struct pci_dev *pdev)
4132 struct adapter *adap = pci_get_drvdata(pdev);
4141 if (netif_running(dev)) {
4143 cxgb_set_rxmode(dev);
4151 .error_detected = eeh_err_detected,
4152 .slot_reset = eeh_slot_reset,
4153 .resume = eeh_resume,
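/*
 * The three callbacks above form this driver's PCIe EEH recovery path:
 * error_detected quiesces the adapter when an error is reported, slot_reset
 * re-enables the device and re-runs basic initialization, and resume brings
 * back the ports that were running when the error hit (as the fragments
 * above suggest).
 */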
4156 static inline bool is_10g_port(const struct link_config *lc)
4161 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
4162 unsigned int size, unsigned int iqe_size)
4166 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
4176 static void __devinit cfg_queues(struct adapter *adap)
4178 struct sge *s = &adap->sge;
4179 int i, q10g = 0, n10g = 0, qidx = 0;
4182 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
4189 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
4194 struct port_info *pi = adap2pinfo(adap, i);
4204 if (is_offload(adap)) {
4223 init_rspq(&r->rspq, 0, 0, 1024, 64);
4228 s->ethtxq[i].q.size = 1024;
4231 s->ctrlq[i].q.size = 512;
4239 init_rspq(&r->rspq, 0, 0, 1024, 64);
4247 init_rspq(&r->rspq, 0, 0, 511, 64);
4252 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
4253 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
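/*
 * Queue-set budgeting in cfg_queues() above: the q10g formula reserves one
 * Ethernet queue set for each non-10G port and splits the remainder evenly
 * across the 10G ports.  Assuming MAX_ETH_QSETS is 32, a 4-port adapter
 * with two 10G ports gets q10g = (32 - 2) / 2 = 15 queue sets per 10G port.
 */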
4260 static void __devinit reduce_ethqs(struct adapter *adap, int n)
4265 while (n < adap->sge.ethqsets)
4267 pi = adap2pinfo(adap, i);
4270 adap->sge.ethqsets--;
4271 if (adap->sge.ethqsets <= n)
4278 pi = adap2pinfo(adap, i);
4285 #define EXTRA_VECS 2
4287 static int __devinit enable_msix(struct adapter *adap)
4290 int i, err, want, need;
4291 struct sge *s = &adap->sge;
4292 unsigned int nchan = adap->params.nports;
4293 struct msix_entry entries[MAX_INGQ + 1];
4296 entries[i].entry = i;
4299 if (is_offload(adap)) {
4302 ofld_need = 2 * nchan;
4316 if (i < s->max_ethqsets) {
4318 if (i < s->ethqsets)
4319 reduce_ethqs(adap, i);
4321 if (is_offload(adap)) {
4323 i -= ofld_need - nchan;
4326 for (i = 0; i < want; ++i)
4327 adap->msix_info[i].vec = entries[i].vector;
4330 "only %d MSI-X vectors left, not using MSI-X\n", err);
4336 static int __devinit init_rss(struct adapter *adap)
4341 struct port_info *pi = adap2pinfo(adap, i);
4347 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
4354 static const char *base[] = {
4355 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
4356 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
4361 const char *spd = "";
4362 const struct port_info *pi = netdev_priv(dev);
4363 const struct adapter *adap = pi->adapter;
4370 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4371 bufp += sprintf(bufp, "100/");
4372 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4373 bufp += sprintf(bufp, "1000/");
4375 bufp += sprintf(bufp, "10G/");
4380 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
4382 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
4385 netdev_info(dev, "S/N: %s, E/C: %s\n",
4401 static void free_some_resources(struct adapter *adapter)
4405 t4_free_mem(adapter->l2t);
4406 t4_free_mem(adapter->tids.tid_tab);
4407 disable_msi(adapter);
4411 kfree(adap2pinfo(adapter, i)->rss);
4418 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4419 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4420 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4427 bool highdma = false;
4428 struct adapter *adapter = NULL;
4435 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
4448 dev_err(&pdev->dev, "cannot enable PCI device\n");
4449 goto out_release_regions;
4454 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4456 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
4457 "coherent allocations\n");
4458 goto out_disable_device;
4463 dev_err(&pdev->dev, "no usable DMA configuration\n");
4464 goto out_disable_device;
4469 enable_pcie_relaxed_ordering(pdev);
4473 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4476 goto out_disable_device;
4480 if (!adapter->regs) {
4481 dev_err(&pdev->dev, "cannot map device registers\n");
4483 goto out_free_adapter;
4503 setup_memwin(adapter);
4504 err = adap_init0(adapter);
4505 setup_memwin_rdma(adapter);
4512 netdev = alloc_etherdev_mq(sizeof(struct port_info),
4521 adapter->port[i] = netdev;
4522 pi = netdev_priv(netdev);
4543 pci_set_drvdata(pdev, adapter);
4555 cfg_queues(adapter);
4558 if (!adapter->l2t) {
4560 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
4561 adapter->params.offload = 0;
4564 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
4565 dev_warn(&pdev->dev, "could not allocate TID table, "
4567 adapter->params.offload = 0;
4571 if (msi > 1 && enable_msix(adapter) == 0)
4573 else if (msi > 0 && pci_enable_msi(pdev) == 0)
4576 err = init_rss(adapter);
4587 pi = adap2pinfo(adapter, i);
4589 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
4595 print_port_info(adapter->port[i]);
4598 dev_err(&pdev->dev, "could not register any net devices\n");
4602 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
4606 if (cxgb4_debugfs_root) {
4608 cxgb4_debugfs_root);
4609 setup_debugfs(adapter);
4615 if (is_offload(adapter))
4616 attach_ulds(adapter);
4619 #ifdef CONFIG_PCI_IOV
4620 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
4623 "instantiated %u virtual functions\n",
4629 free_some_resources(adapter);
4637 out_release_regions:
4639 pci_set_drvdata(pdev, NULL);
4645 struct adapter *adapter = pci_get_drvdata(pdev);
4647 #ifdef CONFIG_PCI_IOV
4655 if (is_offload(adapter))
4656 detach_ulds(adapter);
4659 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
4668 free_some_resources(adapter);
4674 pci_set_drvdata(pdev, NULL);
4680 .name = KBUILD_MODNAME,
4681 .id_table = cxgb4_pci_tbl,
4684 .err_handler = &cxgb4_eeh,
4687 static int __init cxgb4_init_module(void)
4697 if (!cxgb4_debugfs_root)
4698 pr_warning("could not create debugfs entry, continuing\n");
4700 ret = pci_register_driver(&cxgb4_driver);
4706 static void __exit cxgb4_cleanup_module(void)