#include <linux/prefetch.h>
#include <linux/module.h>
#include <asm/div64.h>

static unsigned int num_vfs;
static ushort rx_frag_size = 2048;
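/*
 * num_vfs and rx_frag_size are module parameters: num_vfs selects how many
 * SR-IOV VFs to enable and rx_frag_size the per-fragment RX buffer size
 * (validated to 2048/4096/8192 in be_init_module() at the end of this file).
 */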
static const char * const ue_status_low_desc[] = {

static const char * const ue_status_hi_desc[] = {

	mem->size = len * entry_size;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	pci_write_config_dword(adapter->pdev,

		bool arm, bool clear_int, u16 num_popped)

static int be_mac_addr_set(struct net_device *netdev, void *p)
	struct be_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(addr->sa_data))

static void populate_be2_stats(struct be_adapter *adapter)

static void populate_be3_stats(struct be_adapter *adapter)

static void populate_lancer_stats(struct be_adapter *adapter)
		pport_stats_from_cmd(adapter);

static void accumulate_16bit_val(u32 *acc, u16 val)
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
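/*
 * accumulate_16bit_val() folds a 16-bit HW counter into a 32-bit software
 * accumulator: the low half of *acc tracks the last HW reading, and a wrap is
 * detected when the new value is smaller than that. A minimal sketch of the
 * assumed remainder of the routine:
 *
 *	u32 newacc = hi(*acc) + val;
 *
 *	if (wrapped)
 *		newacc += 65536;	// account for one 16-bit wrap
 *	ACCESS_ONCE(*acc) = newacc;
 */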
		populate_lancer_stats(adapter);
		populate_be3_stats(adapter);
		populate_be2_stats(adapter);

	accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,

	struct be_adapter *adapter = netdev_priv(netdev);

			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));

			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
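/*
 * The loops above use the u64_stats seqcount pattern: the reader keeps
 * re-fetching the per-queue counters until u64_stats_fetch_retry_bh()
 * confirms no writer updated them in between, e.g. (field names illustrative):
 *
 *	do {
 *		start = u64_stats_fetch_begin_bh(&rx_stats->sync);
 *		pkts  = rx_stats->rx_pkts;
 *		bytes = rx_stats->rx_bytes;
 *	} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
 */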
static void be_tx_stats_update(struct be_tx_obj *txo,
	u64_stats_update_begin(&stats->sync);
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	u64_stats_update_end(&stats->sync);

	cnt += skb_shinfo(skb)->nr_frags;

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,

	memset(hdr, 0, sizeof(*hdr));
	if (skb_is_gso(skb)) {
			hdr, skb_shinfo(skb)->gso_size);
	else if (is_udp_pkt(skb))
	else if (is_udp_pkt(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
	bool map_single = false;
	hdr = queue_head_node(txq);
	map_head = txq->head;
	int len = skb_headlen(skb);
	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		copied += skb_frag_size(frag);
	wrb = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);

	txq->head = map_head;
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
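/*
 * make_tx_wrbs() maps the skb head and each page fragment, filling one WRB
 * per mapping plus a header WRB describing the whole packet. The tail above
 * is the error unwind: if a DMA mapping fails, txq->head is rewound to
 * map_head and the WRBs mapped so far are walked and unmapped with
 * unmap_tx_frag().
 */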
	vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	__vlan_put_tag(skb, vlan_tag);

	struct be_adapter *adapter = netdev_priv(netdev);
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
		ip = (struct iphdr *)ip_hdr(skb);

	    be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
		int gso_segs = skb_shinfo(skb)->gso_segs;
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
		be_txq_notify(adapter, txq->id, wrb_cnt);
		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
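/*
 * be_xmit() flow as seen above: count the WRBs the skb needs, build them,
 * stop the subqueue when the TX ring cannot take another request, ring the
 * doorbell with be_txq_notify(), then record wrb_cnt/copied/gso_segs in the
 * per-queue stats.
 */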
static int be_change_mtu(struct net_device *netdev, int new_mtu)
	struct be_adapter *adapter = netdev_priv(netdev);
			"MTU must be between %d and %d bytes\n",
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

static int be_vid_config(struct be_adapter *adapter)
		goto set_vlan_promisc;
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
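/*
 * be_vid_config() programs the VLAN filter table; when the HW filter slots
 * are exhausted it logs the two messages above and jumps to set_vlan_promisc
 * to fall back to VLAN promiscuous mode.
 */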
	struct be_adapter *adapter = netdev_priv(netdev);
	status = be_vid_config(adapter);

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
	struct be_adapter *adapter = netdev_priv(netdev);
	status = be_vid_config(adapter);

static void be_set_rx_mode(struct net_device *netdev)
	struct be_adapter *adapter = netdev_priv(netdev);
		be_vid_config(adapter);
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");

	struct be_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",

static int be_get_vf_config(struct net_device *netdev, int vf,
	struct be_adapter *adapter = netdev_priv(netdev);

static int be_set_vf_vlan(struct net_device *netdev,
	struct be_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->num_vfs || vlan > 4095)
	if (adapter->vf_cfg[vf].vlan_tag != vlan) {
				vf + 1, adapter->vf_cfg[vf].if_handle);
		vlan = adapter->vf_cfg[vf].def_vid;
				adapter->vf_cfg[vf].if_handle);
			"VLAN %d config on VF %d failed\n", vlan, vf);

static int be_set_vf_tx_rate(struct net_device *netdev,
	struct be_adapter *adapter = netdev_priv(netdev);
	if (rate < 100 || rate > 10000) {
			"tx rate must be between 100 and 10000 Mbps\n");
			"tx rate %d on VF %d failed\n", rate, vf);

	int vfs = 0, assigned_vfs = 0, pos;
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
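/*
 * be_find_vfs() walks the PCI devices counting VFs whose physical function is
 * this PF, and returns either the total or only the VM-assigned ones
 * depending on the vf_state argument (ENABLED vs. ASSIGNED).
 */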
	unsigned int start, eqd;
		start = u64_stats_fetch_begin_bh(&stats->sync);
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));
	eqd = (stats->rx_pps / 110000) << 3;
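/*
 * Adaptive interrupt moderation: the EQ delay appears to be scaled from the
 * measured RX packet rate, roughly eqd = (rx_pps / 110000) * 8, so a busier
 * queue gets a longer coalescing delay.
 */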
static void be_rx_stats_update(struct be_rx_obj *rxo,
	u64_stats_update_begin(&stats->sync);
	u64_stats_update_end(&stats->sync);

	return rx_page_info;

static void be_rx_compl_discard(struct be_rx_obj *rxo,
	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		memset(page_info, 0, sizeof(*page_info));

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
		skb->len = curr_frag_len;
		skb->tail += curr_frag_len;
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
	if (rxcp->pkt_size <= rx_frag_size) {
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		remaining -= curr_frag_len;
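/*
 * skb_fill_rx_data() reassembles a frame the HW scattered across
 * rx_frag_size-sized buffers: the first piece fills the linear part of the
 * skb and the remaining pieces are attached as page frags, with consecutive
 * pieces that land in the same page fragment coalesced via
 * skb_frag_size_add().
 */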
static void be_rx_compl_process(struct be_rx_obj *rxo,
		be_rx_compl_discard(rxo, rxcp);
	skb_fill_rx_data(rxo, skb, rxcp);
		skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	u16 remaining, curr_frag_len;
		be_rx_compl_discard(rxo, rxcp);
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	skb_shinfo(skb)->nr_frags = j + 1;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

		be_parse_rx_compl_v1(compl, rxcp);
		be_parse_rx_compl_v0(compl, rxcp);
	queue_tail_inc(&rxo->cq);

	u64 page_dmaaddr = 0, frag_dmaaddr;
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
			page_info->page_offset = page_offset + rx_frag_size;
		page_info->page = pagep;
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd = queue_head_node(rxq);
		if ((page_offset + rx_frag_size + rx_frag_size) >
		prev_page_info = page_info;
		queue_head_inc(rxq);
		prev_page_info->last_page_user = true;
		be_rxq_notify(adapter, rxq->id, posted);

	queue_tail_inc(tx_cq);

	u16 cur_index, num_wrbs = 1;
	bool unmap_skb_hdr = true;
	sent_skb = sent_skbs[txq->tail];
	queue_tail_inc(txq);
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
	} while (cur_index != last_index);
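/*
 * be_tx_compl_process() walks the TX ring from txq->tail up to the completed
 * index, unmapping each WRB (the skb header mapping only once, hence the
 * unmap_skb_hdr flag) and counting how many WRBs were reclaimed.
 */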
static inline int events_get(struct be_eq_obj *eqo)
	eqe = queue_tail_node(&eqo->q);
		queue_tail_inc(&eqo->q);

static int event_handle(struct be_eq_obj *eqo)
	int num = events_get(eqo);
	be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
		napi_schedule(&eqo->napi);

static void be_eq_clean(struct be_eq_obj *eqo)
	int num = events_get(eqo);
	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);

static void be_rx_cq_clean(struct be_rx_obj *rxo)
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		page_info = get_rx_page_info(rxo, tail);
		memset(page_info, 0, sizeof(*page_info));

static void be_tx_compl_clean(struct be_adapter *adapter)
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	int i, pending_txqs;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				num_wrbs += be_tx_compl_process(adapter, txo,
		if (pending_txqs == 0 || ++timeo > 200)
		dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);

static void be_evt_queues_destroy(struct be_adapter *adapter)
		if (eqo->q.created) {
		be_queue_free(adapter, &eqo->q);

static int be_evt_queues_create(struct be_adapter *adapter)

static void be_mcc_queues_destroy(struct be_adapter *adapter)
	be_queue_free(adapter, q);
	be_queue_free(adapter, q);

static int be_mcc_queues_create(struct be_adapter *adapter)
		goto mcc_cq_destroy;
	be_queue_free(adapter, q);
	be_queue_free(adapter, cq);

static void be_tx_queues_destroy(struct be_adapter *adapter)
		be_queue_free(adapter, q);
		be_queue_free(adapter, q);

static int be_num_txqs_want(struct be_adapter *adapter)
	if (sriov_want(adapter) || be_is_mc(adapter) ||

static int be_tx_cqs_create(struct be_adapter *adapter)
	adapter->num_tx_qs = be_num_txqs_want(adapter);
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,

static int be_tx_qs_create(struct be_adapter *adapter)
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",

static void be_rx_cqs_destroy(struct be_adapter *adapter)
		be_queue_free(adapter, q);

static int be_rx_cqs_create(struct be_adapter *adapter)
		netif_set_real_num_rx_queues(adapter->netdev,
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
		 "created %d RSS queue(s) and 1 default RX queue\n",

	num_evts = event_handle(&adapter->eq_obj[0]);

	return (rxcp->tcpf && !rxcp->err) ? true : false;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
			be_rx_compl_discard(rxo, rxcp);
			be_rx_compl_discard(rxo, rxcp);
			be_rx_compl_process(rxo, rxcp);
		be_rx_stats_update(rxo, rxcp);

			  int budget, int idx)
	int num_wrbs = 0, work_done;
	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		num_wrbs += be_tx_compl_process(adapter, txo,
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			netif_wake_subqueue(adapter->netdev, idx);
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	return (work_done < budget);

	int max_work = 0, work, i;
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	if (max_work < budget) {
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
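/*
 * NAPI completion policy above: if the combined TX/RX work stayed under the
 * budget, the poll is completed and the EQ is re-armed
 * (be_eq_notify(..., true, false, 0)); otherwise the events are only
 * acknowledged without re-arming so polling continues.
 */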
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;

	if (be_crit_error(adapter))
		pci_read_config_dword(adapter->pdev,
		pci_read_config_dword(adapter->pdev,
		pci_read_config_dword(adapter->pdev,
		pci_read_config_dword(adapter->pdev,
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
			"Error detected in the card\n");
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			"ERR: sliport status 0x%x\n", sliport_status);
			"ERR: sliport error1 0x%x\n", sliport_err1);
			"ERR: sliport error2 0x%x\n", sliport_err2);
	for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				"UE: %s bit set\n", ue_status_low_desc[i]);
	for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				"UE: %s bit set\n", ue_status_hi_desc[i]);
static void be_msix_disable(struct be_adapter *adapter)

static void be_msix_enable(struct be_adapter *adapter)
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;
		num_vec += num_roce_vec;
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
	for (i = 0; i < num_vec; i++)
	} else if (status >= BE_MIN_MSIX_VECTORS) {
	dev_warn(dev, "MSIx enable failed\n");
		if (num_vec > num_roce_vec) {

static inline int be_msix_vec_get(struct be_adapter *adapter,

static int be_msix_register(struct be_adapter *adapter)
		vec = be_msix_vec_get(adapter, eqo);
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
	be_msix_disable(adapter);

static int be_irq_register(struct be_adapter *adapter)
		status = be_msix_register(adapter);
	netdev->irq = adapter->pdev->irq;
			"INTx request IRQ failed - err %d\n", status);

static void be_irq_unregister(struct be_adapter *adapter)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	adapter->isr_registered = false;

		be_rx_cq_clean(rxo);
		be_queue_free(adapter, q);

static int be_close(struct net_device *netdev)
	struct be_adapter *adapter = netdev_priv(netdev);
		be_intr_set(adapter, false);
		napi_disable(&eqo->napi);
	be_irq_unregister(adapter);
	be_tx_compl_clean(adapter);
	be_rx_qs_destroy(adapter);

static int be_rx_qs_create(struct be_adapter *adapter)
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
				rsstable[j + i] = rxo->rss_id;
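/*
 * RSS setup: when multiple RX queues exist, the 128-entry indirection table
 * is filled round-robin with the RSS queue ids (rxo->rss_id); the default
 * non-RSS queue is apparently left out of the table.
 */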
	struct be_adapter *adapter = netdev_priv(netdev);
	status = be_rx_qs_create(adapter);
	be_irq_register(adapter);
		be_intr_set(adapter, true);
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	be_close(adapter->netdev);

static int be_setup_wol(struct be_adapter *adapter, bool enable)
		status = pci_write_config_dword(adapter->pdev,
				"Could not enable Wake-on-lan\n");

static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
	be_vf_eth_addr_generate(adapter, mac);
				"Mac address assignment failed for VF %d\n", vf);

static void be_vf_clear(struct be_adapter *adapter)
	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");

static int be_clear(struct be_adapter *adapter)
		be_vf_clear(adapter);
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
	be_msix_disable(adapter);

static int be_vf_setup_init(struct be_adapter *adapter)
	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),

static int be_vf_setup(struct be_adapter *adapter)
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	enabled_vfs = be_find_vfs(adapter, ENABLED);
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			dev_warn(dev, "SRIOV enable failed\n");
	status = be_vf_setup_init(adapter);
	status = be_vf_eth_addr_config(adapter);
			vf_cfg->tx_rate = lnk_speed * 10;

static void be_setup_init(struct be_adapter *adapter)
	adapter->phy.link_speed = -1;

static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		*active_mac = false;
						 active_mac, pmac_id, 0);
						 if_handle, *pmac_id);
		*active_mac = false;

static int be_get_config(struct be_adapter *adapter)

static int be_setup(struct be_adapter *adapter)
	u32 cap_flags, en_flags;
	be_setup_init(adapter);
	be_get_config(adapter);
	be_msix_enable(adapter);
	status = be_evt_queues_create(adapter);
	status = be_tx_cqs_create(adapter);
	status = be_rx_cqs_create(adapter);
	status = be_mcc_queues_create(adapter);
		cap_flags = en_flags;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
	status = be_tx_qs_create(adapter);
	be_vid_config(adapter);
	be_set_rx_mode(adapter->netdev);
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
			be_vf_setup(adapter);
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->phy.fc_autoneg = 1;
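/*
 * be_setup() bring-up order visible above: init defaults, query config,
 * enable MSI-X, create EQs, TX CQs, RX CQs and the MCC queues, resolve the
 * MAC address, create TX queues, program VLANs and the RX mode, apply flow
 * control, then configure SR-IOV VFs if the device supports them. be_clear()
 * tears the same objects down in roughly the reverse order.
 */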
#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
	struct be_adapter *adapter = netdev_priv(netdev);

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "

static bool be_flash_redboot(struct be_adapter *adapter,
	crc_offset = hdr_size + img_start + image_size - 4;
			"could not get crc from flash, not flashing redboot\n");
	if (!memcmp(flashed_crc, p, 4))

static bool phy_flashing_required(struct be_adapter *adapter)

static bool is_comp_in_ufi(struct be_adapter *adapter,
	int i = 0, img_type = 0;
		if (img_type == type)

	while (p < (fw->data + fw->size)) {
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))

static int be_flash_data(struct be_adapter *adapter,
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
		pflashcomp = gen3_flash_types;
		pflashcomp = gen2_flash_types;
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
			"Invalid Cookie. UFI corrupted ?\n");
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			if (!phy_flashing_required(adapter))
			hdr_size = filehdr_size +
				   (num_of_images * sizeof(struct image_hdr));
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
				       pflashcomp[i].size, hdr_size)))
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
				pflashcomp[i].optype, flash_op, num_bytes);
			    (pflashcomp[i].optype ==
					"cmd to write to flash rom failed.\n");

	if (fhdr->build[0] == '3')
	else if (fhdr->build[0] == '2')

static int lancer_wait_idle(struct be_adapter *adapter)
#define SLIPORT_IDLE_TIMEOUT 30
	if (i == SLIPORT_IDLE_TIMEOUT)

static int lancer_fw_reset(struct be_adapter *adapter)
	status = lancer_wait_idle(adapter);

static int lancer_fw_download(struct be_adapter *adapter,
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 data_written = 0;
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		goto lancer_fw_exit;
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	dest_image_ptr = flash_cmd.va +
	image_size = fw->size;
	data_ptr = fw->data;
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
		memcpy(dest_image_ptr, data_ptr, chunk_size);
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
					 LANCER_FW_DOWNLOAD_LOCATION,
					 &data_written, &change_status,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
		status = lancer_fw_reset(adapter);
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
			 "System reboot required for new FW"
	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
	int status = 0, i = 0, num_imgs = 0;
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
			"Memory allocation failure while flashing\n");
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
			status = be_flash_data(adapter, fw, &flash_cmd,
	    (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
			"UFI and Interface are not compatible for flashing\n");
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

	if (!netif_running(adapter->netdev)) {
			"Firmware load not allowed (interface is down)\n");
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
		status = lancer_fw_download(adapter, fw);
		status = be_fw_download(adapter, fw);

	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
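	/*
	 * net_device_ops hook-up: the handlers implemented above (be_xmit,
	 * be_set_rx_mode, be_mac_addr_set, be_change_mtu, the VLAN and
	 * SR-IOV callbacks, etc.) are wired into the stack through this table.
	 */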
static void be_netdev_init(struct net_device *netdev)
	struct be_adapter *adapter = netdev_priv(netdev);
	if (be_multi_rxq(adapter))
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
	addr = pci_iomap(pdev, 2, 0);

static int be_map_pci_bars(struct be_adapter *adapter)
	if (be_type_2_3(adapter)) {
		if (lancer_roce_map_pci_bars(adapter))
	be_unmap_pci_bars(adapter);

static void be_ctrl_cleanup(struct be_adapter *adapter)
	be_unmap_pci_bars(adapter);

static int be_ctrl_init(struct be_adapter *adapter)
	status = be_map_pci_bars(adapter);
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
	if (!mbox_mem_alloc->va) {
		goto unmap_pci_bars;
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);
	be_unmap_pci_bars(adapter);

static void be_stats_cleanup(struct be_adapter *adapter)

static int be_stats_init(struct be_adapter *adapter)

	struct be_adapter *adapter = pci_get_drvdata(pdev);
	be_stats_cleanup(adapter);
	be_ctrl_cleanup(adapter);
	pci_set_drvdata(pdev, NULL);

		!be_is_wol_excluded(adapter)) ? true : false;

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			level = cfgs->module[0].trace_lvl[j].dbg_lvl;

static int be_get_initial_config(struct be_adapter *adapter)
	if (!be_is_wol_excluded(adapter))
		adapter->wol = true;

static int be_dev_type_check(struct be_adapter *adapter)
	u32 sli_intf = 0, if_type;
	    !be_type_2_3(adapter)) {
		dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
		dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");

static int lancer_recover_func(struct be_adapter *adapter)
	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);
	status = be_setup(adapter);
	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		 "Adapter SLIPORT recovery succeeded\n");
		"Adapter SLIPORT recovery failed\n");
		status = lancer_recover_func(adapter);

	if (!netif_running(adapter->netdev)) {
			be_post_rx_frags(rxo, GFP_KERNEL);
		be_eqd_update(adapter, eqo);
	adapter->work_counter++;

	return be_find_vfs(adapter, ENABLED) > 0 ? false : true;

static char *mc_name(struct be_adapter *adapter)

static inline char *func_name(struct be_adapter *adapter)
	return be_physfn(adapter) ? "PF" : "VF";

	if (netdev == NULL) {
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	status = be_dev_type_check(adapter);
	adapter->netdev = netdev;
		dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
	status = be_ctrl_init(adapter);
	if (be_reset_required(adapter)) {
		be_intr_set(adapter, false);
	status = be_stats_init(adapter);
	status = be_get_initial_config(adapter);
	status = be_setup(adapter);
	be_netdev_init(netdev);
	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	be_stats_cleanup(adapter);
	be_ctrl_cleanup(adapter);
	pci_set_drvdata(pdev, NULL);
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	struct be_adapter *adapter = pci_get_drvdata(pdev);
		be_setup_wol(adapter, true);
	if (netif_running(netdev)) {

static int be_resume(struct pci_dev *pdev)
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	if (netif_running(netdev)) {
		be_setup_wol(adapter, false);

static void be_shutdown(struct pci_dev *pdev)
	struct be_adapter *adapter = pci_get_drvdata(pdev);
		be_setup_wol(adapter, true);

	struct be_adapter *adapter = pci_get_drvdata(pdev);
	dev_err(&adapter->pdev->dev, "EEH error detected\n");
	if (netif_running(netdev)) {

	struct be_adapter *adapter = pci_get_drvdata(pdev);
	be_clear_all_error(adapter);

static void be_eeh_resume(struct pci_dev *pdev)
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	status = be_setup(adapter);
	if (netif_running(netdev)) {
		status = be_open(netdev);
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");

	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,

	.id_table = be_dev_ids,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers

static int __init be_init_module(void)
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
			" : Module param rx_frag_size must be 2048/4096/8192."
		rx_frag_size = 2048;
	return pci_register_driver(&be_driver);
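/*
 * be_init_module() sanity-checks the rx_frag_size module parameter, falling
 * back to 2048 for unsupported values, before registering the PCI driver.
 */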
static void __exit be_exit_module(void)