#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/module.h>

	"Virtualized Server Adapter");

	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};

static inline int is_vxge_card_up(struct vxgedev *vdev)
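/*
 * VXGE_COMPLETE_VPATH_TX - reap completed transmit descriptors on one fifo.
 * While holding the netdev tx queue lock (__netif_tx_trylock) it polls for
 * up to NR_SKB_COMPLETED finished skbs, releases the lock and then frees
 * the collected skbs.
 */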
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)

#define NR_SKB_COMPLETED	128

	if (__netif_tx_trylock(fifo->txq)) {
				NR_SKB_COMPLETED, &more);
		__netif_tx_unlock(fifo->txq);

		for (temp = completed; temp != skb_ptr; temp++)

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)

		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
	struct vxgedev *vdev = netdev_priv(dev);

		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_tx_wake_all_queues(vdev->ndev);

		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
	struct vxgedev *vdev = netdev_priv(dev);

		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");
	vdev->stats.link_down++;

	netif_tx_stop_all_queues(vdev->ndev);

		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
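/*
 * vxge_rx_alloc - allocate a receive skb for a descriptor.
 * Allocates an skb of skb_size (plus alignment headroom) with
 * netdev_alloc_skb() and records it in the descriptor's private data
 * (rx_priv); on failure it bumps ring->stats.skb_alloc_fail.
 */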
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)

		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	skb = netdev_alloc_skb(dev, skb_size +

			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;

		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
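/*
 * vxge_rx_map - DMA-map the freshly allocated receive buffer.
 * Maps rx_priv->skb_data with pci_map_single(); on a mapping error it
 * increments ring->stats.pci_map_fail, otherwise it programs the
 * descriptor with vxge_hw_ring_rxd_1b_set().
 */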
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)

		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;

		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
vxge_rx_initial_replenish(void *dtrh, void *userdata)

		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
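/*
 * vxge_rx_complete - hand a received frame to the network stack.
 * Records the rx queue, updates the ring's rx_frms/rx_mcast counters
 * under the u64_stats syncp, applies the hardware-accelerated VLAN tag
 * when ext_info->vlan is set, and passes the skb up.
 */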
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);

	u64_stats_update_begin(&ring->stats.syncp);
	ring->stats.rx_frms++;

		ring->stats.rx_mcast++;
	u64_stats_update_end(&ring->stats.syncp);

		"%s: %s:%d skb protocol = %d",

	if (ext_info->vlan &&
		__vlan_hwaccel_put_tag(skb, ext_info->vlan);

		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
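/*
 * vxge_re_pre_post / vxge_post - re-arm and post receive descriptors.
 * vxge_re_pre_post() syncs the buffer back to the device with
 * pci_dma_sync_single_for_device(); vxge_post() queues the descriptor,
 * remembering the first descriptor posted in *first_dtr.
 */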
static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,

	pci_dma_sync_single_for_device(ring->pdev,

static inline void vxge_post(int *dtr_cnt, void **first_dtr,

	int dtr_count = *dtr_cnt;

		*first_dtr = post_dtr;

	*dtr_cnt = dtr_count;
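/*
 * vxge_rx_1b_compl - receive completion callback (1-buffer mode).
 * For every completed descriptor it reads the packet length and extended
 * info, drops errored frames (rx_errors/rx_dropped accounting), tries to
 * replace or copy the buffer, then re-posts the descriptor and delivers
 * good frames via vxge_rx_complete().
 */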
	u8 t_code, void *userdata)

	unsigned int dma_sizes;
	void *first_dtr = NULL;

		ring->ndev->name, __func__, __LINE__);

		rx_priv = vxge_hw_ring_rxd_private_get(dtr);

			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

			ring->stats.rx_errors++;
				"%s: %s :%d Rx T_code is %d",
				ring->ndev->name, __func__,

			vxge_re_pre_post(dtr, ring, rx_priv);

			vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
			ring->stats.rx_dropped++;

			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {

					pci_unmap_single(ring->pdev, data_dma,

					vxge_post(&dtr_cnt, &first_dtr, dtr,

					dev_kfree_skb(rx_priv->skb);

					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
					ring->stats.rx_dropped++;

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;

			skb_up = netdev_alloc_skb(dev, pkt_length +

			if (skb_up != NULL) {

				pci_dma_sync_single_for_cpu(ring->pdev,

					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
					"%s: vxge_rx_1b_compl: out of memory",
					dev->name);
				ring->stats.skb_alloc_fail++;

		skb_checksum_none_assert(skb);

			skb_hwts = skb_hwtstamps(skb);
			skb_hwts->hwtstamp = ns_to_ktime(ns);

		vxge_rx_complete(ring, skb, ext_info.vlan,
				 pkt_length, &ext_info);
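/*
 * vxge_xmit_compl - transmit completion callback for a fifo.
 * Walks the completed TxDLs, unmaps every fragment with pci_unmap_page(),
 * updates tx_frms/tx_errors under the u64_stats syncp, collects the skbs
 * for freeing by the caller, and wakes the tx queue if it had been stopped.
 */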
	struct sk_buff ***skb_ptr, int nr_skb, int *more)

		"%s:%d Entered....", __func__, __LINE__);

			vxge_hw_fifo_txdl_private_get(dtr);

		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);

			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);

			fifo->stats.tx_errors++;
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,

		u64_stats_update_begin(&fifo->stats.syncp);
		fifo->stats.tx_frms++;
		u64_stats_update_end(&fifo->stats.syncp);

	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
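/*
 * TCP/IP based tx steering: for non-fragmented IP packets the TCP header
 * is located past the IP header and used to derive a queue index, which
 * is then clamped to queue_len - 1.
 */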
	if (!ip_is_fragment(ip)) {
		th = (struct tcphdr *)(((unsigned char *)ip) +

	if (counter >= queue_len)
		counter = queue_len - 1;

	if (!new_mac_entry) {
			"%s: memory allocation failed",

	mac_address = (u8 *)&new_mac_entry->macaddr;

	if (is_multicast_ether_addr(mac->macaddr))

	if (is_multicast_ether_addr(mac->macaddr))

			"DA config add entry failed for vpath:%d",

	if (FALSE == vxge_mac_list_add(vpath, mac))

	hldev = pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];

		status = vxge_add_mac_addr(vdev, &mac_info);

		vpath = &vdev->vpaths[vpath_idx];

			"%s: Unable to set the vpath-%d in catch-basin mode",
	int frg_cnt, first_frg_len;

		dev->name, __func__, __LINE__);

			"%s: Buffer has no data..", dev->name);

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
			"%s: vdev not initialized", dev->name);

	if (vdev->config.addr_learn_en) {
		if (vpath_no == -EPERM) {
				"%s: Failed to store the mac address",

		vpath_no = skb_get_queue_mapping(skb);
		vpath_no = vxge_get_vpath_no(vdev, skb);

	fifo = &vdev->vpaths[vpath_no].fifo;

	if (netif_tx_queue_stopped(fifo->txq))

			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;

		netif_tx_stop_queue(fifo->txq);
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;

		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		fifo->stats.pci_map_fail++;

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);

	frg_cnt = skb_shinfo(skb)->nr_frags;
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		if (!skb_frag_size(frag))

		dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
				0, skb_frag_size(frag),

			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
			dev->name, __func__, __LINE__, i,
			(unsigned long long)dma_pointer);

			skb_frag_size(frag));

	offload_type = vxge_offload_type(skb);

		int mss = vxge_tcp_mss(skb);
			dev->name, __func__, __LINE__, mss);
		vxge_hw_fifo_txdl_mss_set(dtr, mss);

	vxge_hw_fifo_txdl_cksum_set_bits(dtr,

		dev->name, __func__, __LINE__);

	frag = &skb_shinfo(skb)->frags[0];

	netif_tx_stop_queue(fifo->txq);
		vxge_hw_ring_rxd_private_get(dtrh);

		ring->ndev->name, __func__, __LINE__);

	dev_kfree_skb(rx_priv->skb);

		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);

	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);

	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	for (j = 0; j < frg_cnt; j++) {

		"%s:%d Exiting...", __func__, __LINE__);

	u8 *mac_address = (u8 *)(&del_mac);

	if (is_multicast_ether_addr(mac->macaddr))

			"DA config delete entry failed for vpath:%d",

	vxge_mac_list_del(vpath, mac);
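/*
 * vxge_set_multicast - .ndo_set_rx_mode handler.
 * Either programs each multicast address individually into the vpath DA
 * tables (removing stale entries first) or, when the list no longer fits,
 * falls back to enabling all-multicast on every vpath.
 */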
static void vxge_set_multicast(struct net_device *dev)

	int i, mcast_cnt = 0;

		"%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev)))

				"multicast, status %d", status);

				"multicast, status %d", status);

	if (!vdev->config.addr_learn_en) {

				"enable" : "disable", status);

		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		for (i = 0; i < mcast_cnt; i++) {

			mac_address = (u8 *)&mac_entry->macaddr;

			if (is_multicast_ether_addr(mac_info.macaddr)) {
				for (vpath_idx = 0; vpath_idx <

					status = vxge_del_mac_addr(

		for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;

			status = vxge_add_mac_addr(vdev, &mac_info);

					"%s:%d Setting individual "
					"multicast address failed",
					__func__, __LINE__);
				goto _set_all_mcast;

		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;

		for (i = 0; i < mcast_cnt; i++) {

			mac_address = (u8 *)&mac_entry->macaddr;

			if (is_multicast_ether_addr(mac_info.macaddr))

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;

				status = vxge_del_mac_addr(vdev, &mac_info);

				"%s:%d Enabling all multicasts failed",
				__func__, __LINE__);

		"%s:%d Exiting...", __func__, __LINE__);
static int vxge_set_mac_addr(struct net_device *dev, void *p)

	struct macInfo mac_info_new, mac_info_old;

	vdev = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))

		__func__, __LINE__);

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {

		vxge_mac_list_del(vpath, &mac_info_old);

		vxge_mac_list_add(vpath, &mac_info_new);

		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);

	if (unlikely(!is_vxge_card_up(vdev))) {

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;

		status = vxge_add_mac_addr(vdev, &mac_info_new);
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)

	int tim_msix_id[4] = {0, 1, 0, 0};

		msix_id = (vpath->handle->vpath->hldev->first_vp_id *

static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)

	hldev = pci_get_drvdata(vdev->pdev);

		msix_id = (vpath->handle->vpath->hldev->first_vp_id *

			"DA config list entry failed for vpath:%d",

		status = vxge_search_mac_addr_in_da_table(vpath,

				"DA add entry failed for vpath:%d",

vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
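/*
 * vxge_reset_vpath - reset a single vpath and bring it back into service.
 * Resets the vpath through the HW layer, recovers it, restores its MAC
 * and VLAN tables (and all-multicast state if set), re-enables its
 * interrupts and wakes the associated tx queue.
 */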
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)

	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];

	if (unlikely(!is_vxge_card_up(vdev)))

		if (is_vxge_card_up(vdev) &&
				"vxge_hw_vpath_recover_from_reset "
				"failed for vpath:%d", vp_id);

			"vxge_hw_vpath_reset failed for"

	vxge_restore_vpath_mac_addr(vpath);
	vxge_restore_vpath_vid_table(vpath);

	if (vdev->all_multi_flg) {
				"%s:%d Enabling multicast failed",
				__func__, __LINE__);

	vxge_vpath_intr_enable(vdev, vp_id);

	if (netif_tx_queue_stopped(vpath->fifo.txq))
		netif_tx_wake_queue(vpath->fifo.txq);

static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)

		hw_ring = vdev->vpaths[i].ring.handle;

		if ((vdev->config.intr_type == INTA) && (i == 0))
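/*
 * do_vxge_reset - full or partial adapter reset driven by 'event'.
 * Stops the tx queues, resets and recovers every vpath, restores MAC and
 * VLAN tables, re-enables vpath interrupts, wakes the queues and finally
 * reprograms the TTI/RTI continuous-interrupt settings.
 */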
static int do_vxge_reset(struct vxgedev *vdev, int event)

	if (unlikely(!is_vxge_card_up(vdev)))

		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {

				"%s: execution mode is debug, returning..",
			netif_tx_stop_all_queues(vdev->ndev);

			netif_tx_stop_all_queues(vdev->ndev);
				"fatal: %s: Disabling device due to"

			netif_tx_stop_all_queues(vdev->ndev);
				"fatal: %s: Disabling device due to"

			netif_tx_stop_all_queues(vdev->ndev);
				"fatal: %s: Disabling device due to"

			netif_tx_stop_all_queues(vdev->ndev);
				"fatal: %s: Disabling device due to"

		netif_tx_stop_all_queues(vdev->ndev);

				"fatal: %s: can not reset vpaths",

			if (vdev->vpaths[i].handle) {
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "

					"vxge_hw_vpath_reset failed for "

		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);

			vxge_vpath_intr_enable(vdev, i);

		netif_tx_wake_all_queues(vdev->ndev);

	vxge_config_ci_for_tti_rti(vdev);

		"%s:%d Exiting...", __func__, __LINE__);

	if (!netif_running(vdev->ndev))
static int vxge_poll_inta(struct napi_struct *napi, int budget)

	int pkts_processed = 0;
	int budget_org = budget;

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {

	return pkts_processed;

#ifdef CONFIG_NET_POLL_CONTROLLER

static void vxge_netpoll(struct net_device *dev)

	struct vxgedev *vdev = netdev_priv(dev);
	const int irq = pdev->irq;

	if (pci_channel_offline(pdev))

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

		"%s:%d Exiting...", __func__, __LINE__);
	u8 itable[256] = {0};
	u8 mtable[256] = {0};

	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {

				vdev->config.rth_bkt_sz);
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);

	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
			vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	for (index = 0; index < vdev->no_of_vpath; index++) {
			vdev->vpaths[index].handle,
			vdev->config.rth_algorithm,
			vdev->config.rth_bkt_sz);

				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);

		if (is_vxge_card_up(vdev) &&
					"vxge_hw_vpath_recover_"
					"from_reset failed for vpath: "

				"vxge_hw_vpath_reset failed for "
static void vxge_close_vpaths(struct vxgedev *vdev, int index)

		vdev->stats.vpaths_open--;

static int vxge_open_vpaths(struct vxgedev *vdev)

		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.userdata = &vpath->fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.userdata = &vpath->ring;

			vpath->fifo.handle =
			vpath->ring.handle =
			vpath->fifo.tx_steering_type =
				vdev->config.tx_steering_type;

			if (vdev->config.tx_steering_type)
					netdev_get_tx_queue(vdev->ndev, i);
					netdev_get_tx_queue(vdev->ndev, 0);
			vpath->fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vpath->fifo.tx_vector_no = 0;
			vpath->ring.rx_vector_no = 0;

			vdev->stats.vpaths_open++;

			vdev->stats.vpath_open_fail++;
				"open with status: %d",
			vxge_close_vpaths(vdev, 0);

		vp_id = vpath->handle->vpath->vp_id;
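/*
 * adaptive_coalesce_tx_interrupts / adaptive_coalesce_rx_interrupts -
 * roughly every 10 ms (HZ/100) look at how busy a fifo/ring has been and
 * adjust the restriggering timer (rtimer) to trade interrupt rate against
 * latency.
 */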
static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)

	if (jiffies > fifo->jiffies + HZ / 100) {

	} else if (hw_fifo->rtimer != 0) {

static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)

	if (jiffies > ring->jiffies + HZ / 100) {

	} else if (hw_ring->rtimer != 0) {

	hldev = pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))

	if (unlikely(!is_vxge_card_up(vdev)))

		napi_schedule(&vdev->napi);

		"%s:%d Exiting...", __func__, __LINE__);
#ifdef CONFIG_PCI_MSI

static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)

	adaptive_coalesce_tx_interrupts(fifo);

	VXGE_COMPLETE_VPATH_TX(fifo);

static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)

	adaptive_coalesce_rx_interrupts(ring);

	napi_schedule(&ring->napi);

vxge_alarm_msix_handle(int irq, void *dev_id)

	int msix_id = (vpath->handle->vpath->vp_id *

			"%s: vxge_hw_vpath_alarm_process failed %x ",

static int vxge_alloc_msix(struct vxgedev *vdev)

	int msix_intr_vect = 0, temp;

			"%s: memory allocation failed",
		goto alloc_entries_failed;

		goto alloc_vxge_entries_failed;

			vdev->entries[j].entry = msix_intr_vect;

			vdev->entries[j].entry = msix_intr_vect + 1;

			"%s: MSI-X enable failed for %d vectors, ret: %d",
			goto enable_msix_failed;

		vxge_close_vpaths(vdev, temp);

	} else if (ret < 0) {
		goto enable_msix_failed;

alloc_vxge_entries_failed:
alloc_entries_failed:

static int vxge_enable_msix(struct vxgedev *vdev)

	int tim_msix_id[4] = {0, 1, 0, 0};

	ret = vxge_alloc_msix(vdev);

static void vxge_rem_msix_isr(struct vxgedev *vdev)

	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);

static void vxge_rem_isr(struct vxgedev *vdev)

	hldev = pci_get_drvdata(vdev->pdev);

#ifdef CONFIG_PCI_MSI
		vxge_rem_msix_isr(vdev);
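/*
 * vxge_add_isr - register interrupt handlers.
 * With MSI-X enabled it requests one Tx and one Rx vector per vpath plus a
 * single alarm vector; if any request fails it tears the MSI-X setup down
 * and falls back to legacy INTA.
 */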
static int vxge_add_isr(struct vxgedev *vdev)

#ifdef CONFIG_PCI_MSI
	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;

		ret = vxge_enable_msix(vdev);

					"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
					vdev->entries[intr_cnt].entry,

					vdev->entries[intr_cnt].vector,
					vxge_tx_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].fifo);
					&vdev->vpaths[vp_idx].fifo;

					"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
					vdev->entries[intr_cnt].entry,

					vdev->entries[intr_cnt].vector,
					vxge_rx_msix_napi_handle,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].ring);
					&vdev->vpaths[vp_idx].ring;

					"%s: MSIX - %d Registration failed",
					vdev->ndev->name, intr_cnt);

				vxge_rem_msix_isr(vdev);

					"%s: Defaulting to INTA",
					vdev->ndev->name);

			msix_idx += vdev->vpaths[vp_idx].device_id *

				vdev->vpaths[vp_idx].handle,

			"%s:vxge:MSI-X %d - Alarm - fn:%d",
			vdev->entries[intr_cnt].entry,

			vxge_alarm_msix_handle, 0,
			vdev->desc[intr_cnt],

				"%s: MSIX - %d Registration failed",
				vdev->ndev->name, intr_cnt);

			vxge_rem_msix_isr(vdev);

				"%s: Defaulting to INTA",

		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *

			"%s:vxge:INTA", vdev->ndev->name);

				"%s %s-%d: ISR registration failed",

				"new %s-%d line allocated",
				"IRQ", vdev->pdev->irq);
static void vxge_poll_vp_reset(unsigned long data)

			vxge_reset_vpath(vdev, i);

static void vxge_poll_vp_lockup(unsigned long data)

	unsigned long rx_frms;

		if (ring->stats.prev_rx_frms == rx_frms) {

				vxge_vpath_intr_disable(vdev, i);

				netif_tx_stop_queue(vpath->fifo.txq);

		ring->stats.prev_rx_frms = rx_frms;

	struct vxgedev *vdev = netdev_priv(dev);

	if (!(changed & NETIF_F_RXHASH))
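/*
 * vxge_open - .ndo_open handler.
 * Opens all vpaths, registers the ISR(s), enables NAPI (one context for
 * INTA, one per ring for MSI-X), configures RTH, MTU, MAC/VLAN tables,
 * multicast and flow control, then starts the tx queues. On any failure
 * it unwinds in reverse order.
 */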
	u64 val64, function_mode;

		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);
	function_mode = vdev->config.device_hw_info.function_mode;

	status = vxge_open_vpaths(vdev);
			"%s: fatal: Vpath open failed", vdev->ndev->name);

	status = vxge_add_isr(vdev);
			"%s: fatal: ISR add failed", dev->name);

			       vdev->config.napi_weight);
		napi_enable(&vdev->napi);

			       vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vpath->ring.napi);
			vpath->ring.napi_p = &vpath->ring.napi;

	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
				"%s: fatal: RTH configuration failed",

		hldev->config.rth_en ? "enabled" : "disabled");

				"%s: fatal: can not set new MTU", dev->name);

		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);

		vxge_restore_vpath_mac_addr(vpath);
		vxge_restore_vpath_vid_table(vpath);

				"%s:%d Enabling multicast failed",
				__func__, __LINE__);

				rxmac_authorize_all_addr),

				rxmac_authorize_all_vid),

	vxge_set_multicast(dev);

				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);

			vdev->config.tx_pause_enable,
			vdev->config.rx_pause_enable);

		netdev_notice(vdev->ndev, "Link Up\n");
		vdev->stats.link_up++;

	netif_tx_start_all_queues(vdev->ndev);

	vxge_config_ci_for_tti_rti(vdev);

		napi_disable(&vdev->napi);

			napi_disable(&vdev->vpaths[i].ring.napi);

	vxge_close_vpaths(vdev, 0);

		"%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)

static void vxge_napi_del_all(struct vxgedev *vdev)
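/*
 * do_vxge_close - common teardown for .ndo_stop and error paths.
 * When do_io is set it also clears the catch-basin and authorize-all
 * registers; it then disables NAPI, stops the tx queues and closes all
 * vpaths.
 */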
static int do_vxge_close(struct net_device *dev, int do_io)

	u64 val64, vpath_vector;

		dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))

				rts_mgr_cbasin_cfg),

		val64 &= ~vpath_vector;

				rts_mgr_cbasin_cfg),

				rxmac_authorize_all_addr),

				rxmac_authorize_all_vid),

		napi_disable(&vdev->napi);

			napi_disable(&vdev->vpaths[i].ring.napi);

	netdev_notice(vdev->ndev, "Link Down\n");
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_napi_del_all(vdev);

	vxge_close_vpaths(vdev, 0);

		"%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
static int vxge_close(struct net_device *dev)

	do_vxge_close(dev, 1);

static int vxge_change_mtu(struct net_device *dev, int new_mtu)

	struct vxgedev *vdev = netdev_priv(dev);

		"%s:%d", __func__, __LINE__);

			"%s: mtu size is invalid", dev->name);

	if (unlikely(!is_vxge_card_up(vdev))) {
			"%s", "device is down on MTU change");

		"trying to apply new MTU %d", new_mtu);

	if (vxge_close(dev))

	vdev->mtu = new_mtu;

		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

		"%s:%d Exiting...", __func__, __LINE__);
	struct vxgedev *vdev = netdev_priv(dev);

		start = u64_stats_fetch_begin_bh(&rxstats->syncp);

		} while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));

		start = u64_stats_fetch_begin_bh(&txstats->syncp);

		} while (u64_stats_fetch_retry_bh(&txstats->syncp, start));

static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)

	switch (config.tx_type) {

	switch (config.rx_filter) {

	struct vxgedev *vdev = netdev_priv(dev);

		ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
static void vxge_tx_watchdog(struct net_device *dev)

	vdev = netdev_priv(dev);

		"%s:%d Exiting...", __func__, __LINE__);

vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)

	struct vxgedev *vdev = netdev_priv(dev);

	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];

vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)

	struct vxgedev *vdev = netdev_priv(dev);

	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];

		"%s:%d Exiting...", __func__, __LINE__);
	.ndo_open               = vxge_open,
	.ndo_stop               = vxge_close,
	.ndo_get_stats64        = vxge_get_stats64,
	.ndo_start_xmit         = vxge_xmit,
	.ndo_set_rx_mode        = vxge_set_multicast,
	.ndo_do_ioctl           = vxge_ioctl,
	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_fix_features       = vxge_fix_features,
	.ndo_set_features       = vxge_set_features,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,
	.ndo_tx_timeout         = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = vxge_netpoll,
	int high_dma, int no_of_vpath,

	int ret = 0, no_of_queue = 1;

	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),

			"%s : device allocation failed", __func__);

		"%s: %s:%d Entering...",
		ndev->name, __func__, __LINE__);

	vdev = netdev_priv(ndev);

			"%s: vpath memory allocation failed",

		"%s : checksumming enabled", __func__);

			"%s : using High DMA", __func__);

			"%s: %s : device registration failed!",
			ndev->name, __func__);

		"%s: Ethernet device registered",

			"%s: device stats clear returns "
			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);

		"%s: %s:%d Exiting...",
		ndev->name, __func__, __LINE__);
	vdev = netdev_priv(dev);

		__func__, __LINE__);

		__func__, __LINE__);

	struct vxgedev *vdev = netdev_priv(dev);

		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];

			"%s: Slot is frozen", vdev->ndev->name);

			"%s: Encountered Serious Error",

			"%s: Encountered Critical Error",

			vxge_vpath_intr_disable(vdev, vpath_idx);

			netif_tx_stop_queue(vpath->fifo.txq);

		"%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);
static void verify_bandwidth(void)

	int i, band_width, total = 0, equal_priority = 0;

		if (bw_percentage[i] == 0) {

	if (!equal_priority) {
			if (bw_percentage[i] == 0xFF)

			total += bw_percentage[i];

	if (!equal_priority) {
		if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);

		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)

	if (equal_priority) {
			"%s: Assigning equal bandwidth to all the vpaths",

			bw_percentage[i] = bw_percentage[0];
static int __devinit vxge_config_vpaths(

	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;

	if (default_no_vpath < driver_config->vpath_per_dev)

			"%s: Disable tx and rx steering, "

		device_config->rth_en = 0;

			device_config->vp_config[i].min_bandwidth = bw_percentage[i];

		if (no_of_vpaths < driver_config->vpath_per_dev) {
				"%s: vpath: %d is not available",
				"%s: vpath: %d available",

				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",

		device_config->vp_config[i].fifo.memblock_size =

		txdl_size = device_config->vp_config[i].fifo.max_frags *

		device_config->vp_config[i].fifo.fifo_blocks =

		device_config->vp_config[i].tti.intr_enable =

		device_config->vp_config[i].tti.timer_ac_en =

		device_config->vp_config[i].tti.timer_ci_en =

		device_config->vp_config[i].tti.timer_ri_en =

		device_config->vp_config[i].ring.ring_blocks =

		device_config->vp_config[i].ring.buffer_mode =

		device_config->vp_config[i].ring.rxds_limit =

		device_config->vp_config[i].ring.scatter_mode =

		device_config->vp_config[i].rti.intr_enable =

		device_config->vp_config[i].rti.timer_ac_en =

		device_config->vp_config[i].rti.timer_ci_en =

		device_config->vp_config[i].rti.timer_ri_en =

		device_config->vp_config[i].rpa_strip_vlan_tag =

	return no_of_vpaths;
static void __devinit vxge_device_config_init(

#ifndef CONFIG_PCI_MSI
		"%s: This Kernel does not support "

	switch (*intr_type) {

	device_config->rts_mac_en = addr_learn_en;

		device_config->rth_en);

		device_config->rth_it_type);
3920 "%s: %d Vpath(s) opened",
3923 switch (vdev->
config.intr_type) {
3926 "%s: Interrupt type INTA", vdev->
ndev->name);
3931 "%s: Interrupt type MSI-X", vdev->
ndev->name);
3935 if (vdev->
config.rth_steering) {
3937 "%s: RTH steering enabled for TCP_IPV4",
3941 "%s: RTH steering disabled", vdev->
ndev->name);
3944 switch (vdev->
config.tx_steering_type) {
3947 "%s: Tx steering disabled", vdev->
ndev->name);
3951 "%s: Unsupported tx steering option",
3954 "%s: Tx steering disabled", vdev->
ndev->name);
3955 vdev->
config.tx_steering_type = 0;
3959 "%s: Unsupported tx steering option",
3962 "%s: Tx steering disabled", vdev->
ndev->name);
3963 vdev->
config.tx_steering_type = 0;
3967 "%s: Tx multiqueue steering enabled",
3972 "%s: Tx port steering enabled",
3977 "%s: Unsupported tx steering type",
3980 "%s: Tx steering disabled", vdev->
ndev->name);
3981 vdev->
config.tx_steering_type = 0;
3984 if (vdev->
config.addr_learn_en)
3986 "%s: MAC Address learning enabled", vdev->
ndev->name);
3992 "%s: MTU size - %d", vdev->
ndev->name,
3994 config.vp_config[i].mtu);
3996 "%s: VLAN tag stripping %s", vdev->
ndev->name,
3998 config.vp_config[i].rpa_strip_vlan_tag
3999 ?
"Enabled" :
"Disabled");
4001 "%s: Max frags : %d", vdev->
ndev->name,
4003 config.vp_config[i].fifo.max_frags);
static int vxge_pm_resume(struct pci_dev *pdev)

	if (netif_running(netdev)) {
		do_vxge_close(netdev, 0);

	struct vxgedev *vdev = netdev_priv(netdev);

		netdev_err(netdev, "Cannot re-enable device after reset\n");

static void vxge_io_resume(struct pci_dev *pdev)

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
				"Can't bring device back up after reset\n");
static inline u32 vxge_get_num_vfs(u64 function_mode)

	switch (function_mode) {

			"%s: FW image download to adapter failed '%s'.",

			"%s: Upgrade read version failed '%s'.",

	cmaj = vdev->config.device_hw_info.fw_version.major;
	cmin = vdev->config.device_hw_info.fw_version.minor;
	cbld = vdev->config.device_hw_info.fw_version.build;

		"hard reset before using, thus requiring a system reboot or a "
		"hotplug event.\n");

static int vxge_probe_fw_update(struct vxgedev *vdev)

	maj = vdev->config.device_hw_info.fw_version.major;
	min = vdev->config.device_hw_info.fw_version.minor;
	bld = vdev->config.device_hw_info.fw_version.build;

			"version, unable to load driver\n",

		if (vdev->devh->eprom_versions[i]) {

		fw_name = "vxge/X3fw-pxe.ncf";
		fw_name = "vxge/X3fw.ncf";

			" be used with this driver.",
	.link_up = vxge_callback_link_up,
	.link_down = vxge_callback_link_down,
	.crit_err = vxge_callback_crit_err,
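/*
 * vxge_probe - PCI probe routine.
 * Enables the PCI device, sets the DMA mask (64-bit preferred, 32-bit
 * fallback), maps BAR0, reads the hardware info and vpath mask, optionally
 * enables SR-IOV, initializes the HW device, registers the net_device,
 * checks for a firmware update and sets up the per-vpath MAC address lists.
 */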
	int i, j, no_of_vpath = 0, max_vpath_supported = 0;

	bus = pdev->bus->number;

			"%s: Configured %d of %d devices",

	if (!device_config) {
			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);

			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);

	vxge_device_config_init(device_config, &ll_config->intr_type);

			"%s : can not enable PCI device", __func__);

			"%s : using 64bit DMA", __func__);

		if (pci_set_consistent_dma_mask(pdev,
				"%s : unable to obtain 64bit DMA for "
				"consistent allocations", __func__);

	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
			"%s : using 32bit DMA", __func__);

			"%s : request regions failed", __func__);

			"%s : cannot remap io memory bar0", __func__);

		"pci ioremap bar0: %p:0x%llx",

			"%s: Reading of hardware info failed."

	if (vpath_mask == 0) {

		"%s:%d Vpath mask = %llx", __func__, __LINE__,
		(unsigned long long)vpath_mask);

			max_vpath_supported++;

	num_vfs = vxge_get_num_vfs(function_mode) - 1;

	if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
				"Failed in enabling SRIOV mode: %d\n", ret);

	no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);

	attr.uld_callbacks = &vxge_callbacks;

			"Failed to initialize device (%d)", status);

		status = vxge_timestamp_config(hldev);

	pci_set_drvdata(pdev, hldev);

	ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,

	ret = vxge_probe_fw_update(vdev);

		vdev->vpaths[j].is_configured = 1;
		vdev->vpaths[j].ring.driver_id = j;
		vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;

		INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

		vdev->vpaths[j].mac_addr_cnt = 0;
		vdev->vpaths[j].mcast_addr_cnt = 0;

	macaddr = (u8 *)vdev->vpaths[0].macaddr;

		vdev->ndev->name, macaddr);

		"%s: Firmware version : %s Date : %s", vdev->ndev->name,

			"%s: Single Function Mode Enabled", vdev->ndev->name);

			"%s: Multi Function Mode Enabled", vdev->ndev->name);

			"%s: Single Root IOV Mode Enabled", vdev->ndev->name);

			"%s: Multi Root IOV Mode Enabled", vdev->ndev->name);

	vxge_print_parm(vdev, vpath_mask);

		if (NULL == entry) {
				"%s: mac_addr_list : memory allocation failed",

		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
		vdev->vpaths[i].mac_addr_cnt = 1;

	kfree(device_config);

		vdev->ndev->name, __func__, __LINE__);

		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);

	pci_set_drvdata(pdev, NULL);

	kfree(device_config);
	hldev = pci_get_drvdata(pdev);

	vdev = netdev_priv(hldev->ndev);

		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);

	pci_set_drvdata(pdev, NULL);

		__func__, __LINE__);
	.error_detected = vxge_io_error_detected,
	.slot_reset = vxge_io_slot_reset,
	.resume = vxge_io_resume,

	.id_table = vxge_id_table,
	.probe = vxge_probe,

	.suspend = vxge_pm_suspend,
	.resume = vxge_pm_resume,

	.err_handler = &vxge_err_handler,
	pr_info("Copyright(c) 2002-2010 Exar Corp.\n");

	ret = pci_register_driver(&vxge_driver);
		kfree(driver_config);

		"%s: Configured %d of %d devices",

	kfree(driver_config);