#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
static const u32 default_msg =
static int debug = -1;
static int qlge_mpi_coredump;
        "Option to enable MPI firmware dump. "
        "Default is OFF - Do Not allocate memory. ");
static int qlge_force_coredump;
        "Option to allow force of firmware core dump. "
        "Default is OFF - Do not allow.");
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
        ql_write32(qdev, SEM, sem_bits | sem_mask);
        return !(ql_read32(qdev, SEM) & sem_bits);
        unsigned int wait_count = 30;
                if (!ql_sem_trylock(qdev, sem_mask))
        } while (--wait_count);
        ql_write32(qdev, SEM, sem_mask);
        ql_read32(qdev, SEM);
        temp = ql_read32(qdev, reg);
        if (temp & err_bit) {
                   "register 0x%.08x access error, value = 0x%.08x!.\n",
        } else if (temp & bit)
                  "Timed out waiting for reg %x to come ready.\n", reg);
        temp = ql_read32(qdev, CFG);
        map = pci_map_single(qdev->pdev, ptr, size, direction);
        if (pci_dma_mapping_error(qdev->pdev, map)) {
                netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
        status = ql_wait_cfg(qdev, bit);
                  "Timed out waiting for CFG to come ready.\n");
        ql_write32(qdev, ICB_H, (u32) (map >> 32));
        ql_write32(qdev, CFG, (mask | value));
        status = ql_wait_cfg(qdev, bit);
        pci_unmap_single(qdev->pdev, map, size, direction);
                     "Address type %d not yet supported.\n", type);
                u32 upper = (addr[0] << 8) | addr[1];
                u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | (addr[5]);
                u32 upper = (addr[0] << 8) | addr[1];
                    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
                u32 enable_bit = *((u32 *) &addr[0]);
                     "Address type %d not yet supported.\n", type);
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
                     "Set Mac addr %pM\n", addr);
                addr = &zero_mac_addr[0];
                     "Clearing MAC address\n");
        status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
                  "Failed to init mac address.\n");
        ql_set_mac_addr(qdev, 1);
        ql_set_mac_addr(qdev, 0);
        *value = ql_read32(qdev, RT_DATA);
                     "Mask type %d not yet supported.\n", mask);
        ql_write32(qdev, RT_IDX, value);
        ql_write32(qdev, RT_DATA, enable ? mask : 0);
static void ql_enable_interrupts(struct ql_adapter *qdev)
static void ql_disable_interrupts(struct ql_adapter *qdev)
        var = ql_read32(qdev, STS);
        var = ql_read32(qdev, STS);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        var = ql_read32(qdev, STS);
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
        netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
        for (i = 0; i < size; i++)
                  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                          "Error reading flash.\n");
        status = ql_validate_flash(qdev,
               qdev->ndev->addr_len);
               qdev->ndev->addr_len);
        if (!is_valid_ether_addr(mac_addr)) {
               qdev->ndev->addr_len);
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                          "Error reading flash.\n");
        status = ql_validate_flash(qdev,
               qdev->ndev->addr_len);
        *data = (u64) lo | ((u64) hi << 32);
static int ql_8000_port_initialize(struct ql_adapter *qdev)
static int ql_8012_port_initialize(struct ql_adapter *qdev)
                     "Another function has the semaphore, so wait for the port init bit to come ready.\n");
                  "Port initialize timed out.\n");
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
                          struct rx_ring *rx_ring)
        struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
        pci_dma_sync_single_for_cpu(qdev->pdev,
            == ql_lbq_block_size(qdev))
                pci_unmap_page(qdev->pdev,
                               ql_lbq_block_size(qdev),
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
static void ql_update_cq(struct rx_ring *rx_ring)
static void ql_write_cq_idx(struct rx_ring *rx_ring)
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
                          "page allocation failed.\n");
                                       0, ql_lbq_block_size(qdev),
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                          "PCI mapping failed.\n");
        if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        u32 start_idx = clean_idx;
                     "lbq: try cleaning clean_idx = %d.\n",
                lbq_desc = &rx_ring->lbq[clean_idx];
                if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
                          "Could not get a page chunk, i=%d, clean_idx =%d .\n",
                pci_dma_sync_single_for_device(qdev->pdev, map,
                if (clean_idx == rx_ring->lbq_len)
        if (start_idx != clean_idx) {
                     "lbq: updating prod idx = %d.\n",
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        u32 start_idx = clean_idx;
                sbq_desc = &rx_ring->sbq[clean_idx];
                     "sbq: try cleaning clean_idx = %d.\n",
                     "sbq: getting new skb for index %d.\n",
                        netdev_alloc_skb(qdev->ndev,
                          "Couldn't get an skb.\n");
                map = pci_map_single(qdev->pdev,
                                     sbq_desc->p.skb->data,
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                          "PCI mapping failed.\n");
                if (clean_idx == rx_ring->sbq_len)
        if (start_idx != clean_idx) {
                     "sbq: updating prod idx = %d.\n",
static void ql_update_buffer_queues(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        ql_update_sbq(qdev, rx_ring);
        ql_update_lbq(qdev, rx_ring);
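/*
 * Transmit DMA mapping.  ql_map_send() maps the skb head and each page
 * fragment; when a frame needs more descriptors than fit inline it spills
 * into the outbound address list (OAL), which is itself DMA mapped.
 * ql_unmap_send() walks a completed descriptor and undoes those mappings.
 */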
static void ql_unmap_send(struct ql_adapter *qdev,
        for (i = 0; i < mapped; i++) {
                if (i == 0 || (i == 7 && mapped > 7)) {
                             "unmapping OAL area.\n");
                        pci_unmap_single(qdev->pdev,
                             "unmapping frag %d.\n", i);
                        pci_unmap_page(qdev->pdev,
static int ql_map_send(struct ql_adapter *qdev,
                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
        int len = skb_headlen(skb);
        int frag_idx, err, map_idx = 0;
        int frag_cnt = skb_shinfo(skb)->nr_frags;
                     "frag_cnt = %d.\n", frag_cnt);
        err = pci_dma_mapping_error(qdev->pdev, map);
                  "PCI mapping failed with error: %d\n", err);
        for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
                if (frag_idx == 6 && frag_cnt > 7) {
                        map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
                        err = pci_dma_mapping_error(qdev->pdev, map);
                                  "PCI mapping outbound address list with error: %d\n",
                                  sizeof(struct oal));
                map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
                          "PCI mapping frags failed with error: %d.\n",
                          skb_frag_size(frag));
        tx_ring_desc->map_cnt = map_idx;
        ql_unmap_send(qdev, tx_ring_desc, map_idx);
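/*
 * Receive-path handlers.  Large completions are assembled from lbq page
 * chunks (as GRO frags or regular pages), small frames come from sbq skbs.
 * Each handler records the RX queue and applies any hardware-stripped VLAN
 * tag with __vlan_hwaccel_put_tag() before the skb is passed up.
 */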
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, struct rx_ring *rx_ring,
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
                     "Couldn't get an skb, exiting.\n");
        __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
        skb_shinfo(skb)->nr_frags++;
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if (vlan_id != 0xffff)
                __vlan_hwaccel_put_tag(skb, vlan_id);
static void ql_process_mac_rx_page(struct ql_adapter *qdev, struct rx_ring *rx_ring,
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
        skb = netdev_alloc_skb(ndev, length);
                  "Couldn't get an skb, need to unwind!.\n");
                  "Segment too small, dropping.\n");
                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
        skb_checksum_none_assert(skb);
                             "TCP checksum done!\n");
                             "UDP checksum done!\n");
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if (vlan_id != 0xffff)
                __vlan_hwaccel_put_tag(skb, vlan_id);
static void ql_process_mac_rx_skb(struct ql_adapter *qdev, struct rx_ring *rx_ring,
        struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
        skb = sbq_desc->p.skb;
        if (new_skb == NULL) {
                     "No skb available, drop the packet.\n");
        if (skb->len > ndev->mtu + ETH_HLEN) {
                     "Promiscuous Packet.\n");
        skb_checksum_none_assert(skb);
                             "TCP checksum done!\n");
                             "UDP checksum done!\n");
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if (vlan_id != 0xffff)
                __vlan_hwaccel_put_tag(skb, vlan_id);
static void ql_realign_skb(struct sk_buff *skb, int len)
        void *temp_addr = skb->data;
        skb_copy_to_linear_data(skb, temp_addr,
                                       struct rx_ring *rx_ring,
                     "Header of %d bytes in small buffer.\n", hdr_len);
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                skb = sbq_desc->p.skb;
                ql_realign_skb(skb, hdr_len);
                     "No Data buffer in this packet.\n");
                     "Headers in small, data of %d bytes in small, combine them.\n",
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_dma_sync_single_for_cpu(qdev->pdev,
                                            (sbq_desc, mapaddr),
                       sbq_desc->p.skb->data, length);
                pci_dma_sync_single_for_device(qdev->pdev,
                     "%d bytes in a single small buffer.\n",
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                skb = sbq_desc->p.skb;
                ql_realign_skb(skb, length);
                pci_unmap_single(qdev->pdev,
                     "Header in small, %d bytes in large. Chain large to small!\n",
                lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
                     "Chaining page at offset = %d, for %d bytes to skb.\n",
                skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
                lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
                skb = netdev_alloc_skb(qdev->ndev, length);
                     "No skb available, drop the packet.\n");
                pci_unmap_page(qdev->pdev,
                skb_reserve(skb, NET_IP_ALIGN);
                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
                skb_fill_page_desc(skb, 0,
                sbq_desc = ql_get_curr_sbuf(rx_ring);
                pci_unmap_single(qdev->pdev,
                     "%d bytes of headers & data in chain of large.\n",
                skb = sbq_desc->p.skb;
                skb_reserve(skb, NET_IP_ALIGN);
                while (length > 0) {
                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
                             "Adding page %d to skb for %d bytes.\n",
                        skb_fill_page_desc(skb, i,
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, struct rx_ring *rx_ring,
        skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
                       "No skb available, drop packet.\n");
        if (skb->len > ndev->mtu + ETH_HLEN) {
                     "Promiscuous Packet.\n");
        skb_checksum_none_assert(skb);
                             "TCP checksum done!\n");
                             "TCP checksum done!\n");
        skb_record_rx_queue(skb, rx_ring->cq_id);
                __vlan_hwaccel_put_tag(skb, vlan_id);
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, struct rx_ring *rx_ring,
                ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
                return (unsigned long)length;
                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
                ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
                ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
                ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
        return (unsigned long)length;
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
        struct tx_ring_desc *tx_ring_desc;
        tx_ring_desc = &tx_ring->q[mac_rsp->tid];
        ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
        dev_kfree_skb(tx_ring_desc->skb);
                     "Total descriptor length did not match transfer length.\n");
                     "Frame too short to be valid, not sent.\n");
                     "Frame too long, but sent anyway.\n");
                     "PCI backplane error. Frame not sent.\n");
        ql_disable_interrupts(qdev);
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
        switch (ib_ae_rsp->event) {
                  "Management Processor Fatal Error.\n");
                netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
                netdev_err(qdev->ndev, "This event shouldn't occur.\n");
                netdev_err(qdev->ndev, "Soft ECC error detected.\n");
                netdev_err(qdev->ndev, "PCI error occurred when reading "
                           "anonymous buffers from rx_ring %d.\n",
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
                     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
                switch (net_rsp->opcode) {
                        ql_process_mac_tx_intr(qdev, net_rsp);
                             "Hit default case, not handled! dropping the packet, opcode = %x.\n",
                ql_update_cq(rx_ring);
        ql_write_cq_idx(rx_ring);
        if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
                netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
                     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
                switch (net_rsp->opcode) {
                        ql_process_mac_rx_intr(qdev, rx_ring,
                             "Hit default case, not handled! dropping the packet, opcode = %x.\n",
                ql_update_cq(rx_ring);
                if (count == budget)
        ql_update_buffer_queues(qdev, rx_ring);
        ql_write_cq_idx(rx_ring);
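/*
 * MSI-X NAPI poll: service the TX completion rings that share this vector
 * first, then the RX completion ring, and finish NAPI when work_done stays
 * below the budget.
 */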
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
        struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
        struct rx_ring *trx_ring;
        int i, work_done = 0;
                     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
                             "%s: Servicing TX completion ring %d.\n",
                             __func__, trx_ring->cq_id);
                        ql_clean_outbound_rx_ring(trx_ring);
                     "%s: Servicing RX completion ring %d.\n",
                     __func__, rx_ring->cq_id);
                work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
        if (work_done < budget) {
static int qlge_set_features(struct net_device *ndev,
                qlge_vlan_mode(ndev, features);
        err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
                  "Failed to init vlan address.\n");
static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
        err = __qlge_vlan_rx_add_vid(qdev, vid);
static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
        err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
                  "Failed to clear vlan address.\n");
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
        err = __qlge_vlan_rx_kill_vid(qdev, vid);
static void qlge_restore_vlan(struct ql_adapter *qdev)
                __qlge_vlan_rx_add_vid(qdev, vid);
        struct rx_ring *rx_ring = dev_id;
        napi_schedule(&rx_ring->napi);
        struct rx_ring *rx_ring = dev_id;
                     "Shared Interrupt, Not ours!\n");
        var = ql_disable_completion_interrupt(qdev, intr_context->intr);
                netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
                var = ql_read32(qdev, ERR_STS);
                netdev_err(qdev->ndev, "Resetting chip. "
                           "Error Status Register = 0x%x\n", var);
                     "Got MPI processor interrupt.\n");
                ql_disable_completion_interrupt(qdev, intr_context->intr);
                ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
        var = ql_read32(qdev, ISR1);
        if (var & intr_context->irq_mask) {
                     "Waking handler for rx_ring[0].\n");
                ql_disable_completion_interrupt(qdev, intr_context->intr);
                napi_schedule(&rx_ring->napi);
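/*
 * Outbound offload setup.  For GSO frames the IOCB carries the transport
 * header offset and TCP header length; for plain checksum offload
 * ql_hw_csum_setup() points the hardware at the TCP or UDP checksum field.
 */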
        if (skb_is_gso(skb)) {
                if (skb_header_cloned(skb)) {
                    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
                    skb_transport_offset(skb)
                        struct iphdr *iph = ip_hdr(skb);
                        tcp_hdr(skb)->check =
                                             &ipv6_hdr(skb)->daddr,
static void ql_hw_csum_setup(struct sk_buff *skb,
        struct iphdr *iph = ip_hdr(skb);
                check = &(tcp_hdr(skb)->check);
                        (tcp_hdr(skb)->doff << 2));
                check = &(udp_hdr(skb)->check);
        struct tx_ring_desc *tx_ring_desc;
        tx_ring = &qdev->tx_ring[tx_ring_idx];
                     "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
                     __func__, tx_ring_idx);
                netif_stop_subqueue(ndev, tx_ring->wq_id);
        tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
        memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
        mac_iocb_ptr->tid = tx_ring_desc->index;
        mac_iocb_ptr->txq_idx = tx_ring_idx;
                ql_hw_csum_setup(skb,
        if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
                  "Could not map the segments.\n");
                     "tx queued, slot %d, len %d\n",
                netif_stop_subqueue(ndev, tx_ring->wq_id);
                netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
static void ql_free_shadow_space(struct ql_adapter *qdev)
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
                  "Allocation of RX shadow space failed.\n");
                  "Allocation of TX shadow space failed.\n");
                goto err_wqp_sh_area;
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
        struct tx_ring_desc *tx_ring_desc;
        mac_iocb_ptr = tx_ring->wq_base;
        tx_ring_desc = tx_ring->q;
        for (i = 0; i < tx_ring->wq_len; i++) {
static void ql_free_tx_resources(struct ql_adapter *qdev,
                                 struct tx_ring *tx_ring)
static int ql_alloc_tx_resources(struct ql_adapter *qdev,
                                 struct tx_ring *tx_ring)
        if (tx_ring->q == NULL)
        netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        while (curr_idx != clean_idx) {
                lbq_desc = &rx_ring->lbq[curr_idx];
                pci_unmap_page(qdev->pdev,
                               ql_lbq_block_size(qdev),
                if (++curr_idx == rx_ring->lbq_len)
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        for (i = 0; i < rx_ring->sbq_len; i++) {
                sbq_desc = &rx_ring->sbq[i];
                if (sbq_desc == NULL) {
                          "sbq_desc %d is NULL.\n", i);
                if (sbq_desc->p.skb) {
                        pci_unmap_single(qdev->pdev,
                        dev_kfree_skb(sbq_desc->p.skb);
static void ql_free_rx_buffers(struct ql_adapter *qdev)
                ql_free_lbq_buffers(qdev, rx_ring);
                ql_free_sbq_buffers(qdev, rx_ring);
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
                ql_update_buffer_queues(qdev, rx_ring);
static void ql_init_lbq_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        for (i = 0; i < rx_ring->lbq_len; i++) {
                lbq_desc = &rx_ring->lbq[i];
                memset(lbq_desc, 0, sizeof(*lbq_desc));
                lbq_desc->addr = bq;
static void ql_init_sbq_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        for (i = 0; i < rx_ring->sbq_len; i++) {
                sbq_desc = &rx_ring->sbq[i];
                memset(sbq_desc, 0, sizeof(*sbq_desc));
                sbq_desc->addr = bq;
static void ql_free_rx_resources(struct ql_adapter *qdev,
                                 struct rx_ring *rx_ring)
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
                                 struct rx_ring *rx_ring)
                netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
                          "Small buffer queue allocation failed.\n");
                          "Small buffer queue control block allocation failed.\n");
                ql_init_sbq_ring(qdev, rx_ring);
                          "Large buffer queue allocation failed.\n");
                          "Large buffer queue control block allocation failed.\n");
                ql_init_lbq_ring(qdev, rx_ring);
        ql_free_rx_resources(qdev, rx_ring);
static void ql_tx_ring_clean(struct ql_adapter *qdev)
        struct tx_ring_desc *tx_ring_desc;
                for (i = 0; i < tx_ring->wq_len; i++) {
                        tx_ring_desc = &tx_ring->q[i];
                        if (tx_ring_desc && tx_ring_desc->skb) {
                                     "Freeing lost SKB %p, from queue %d, index %d.\n",
                                     tx_ring_desc->skb, j, tx_ring_desc->index);
                                ql_unmap_send(qdev, tx_ring_desc,
                                dev_kfree_skb(tx_ring_desc->skb);
static void ql_free_mem_resources(struct ql_adapter *qdev)
                ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
                ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
        ql_free_shadow_space(qdev);
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
        if (ql_alloc_shadow_space(qdev))
                if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
                          "RX resource allocation failed.\n");
                if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
                          "TX resource allocation failed.\n");
        ql_free_mem_resources(qdev);
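/*
 * ql_start_rx_ring()/ql_start_tx_ring() build the completion queue (CQICB)
 * and work queue (WQICB) initialization control blocks in shadow memory and
 * load them into the chip before the ring is used.
 */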
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
        __le64 *base_indirect_ptr;
        shadow_reg += sizeof(u64);
        shadow_reg_dma += sizeof(u64);
        memset((void *)cqicb, 0, sizeof(struct cqicb));
                base_indirect_ptr++;
                bq_len = (rx_ring->lbq_len == 65536) ? 0 :
                base_indirect_ptr++;
                bq_len = (rx_ring->sbq_len == 65536) ? 0 :
        switch (rx_ring->type) {
                  "Invalid rx_ring->type = %d.\n", rx_ring->type);
                netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
        struct wqicb *wqicb = (struct wqicb *)tx_ring;
        ql_init_tx_ring(qdev, tx_ring);
                netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
static void ql_disable_msix(struct ql_adapter *qdev)
static void ql_enable_msix(struct ql_adapter *qdev)
                            sizeof(struct msix_entry),
                     "MSI-X Enable failed, trying MSI.\n");
                } else if (err == 0) {
                     "MSI-X Enabled, got %d vectors.\n",
        if (qlge_irq_type == MSI_IRQ) {
                if (!pci_enable_msi(qdev->pdev)) {
                             "Running with MSI interrupts.\n");
                     "Running with legacy interrupts.\n");
static void ql_set_tx_vect(struct ql_adapter *qdev)
             i < qdev->rx_ring_count; i++) {
                if (j == tx_rings_per_vector) {
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
        int j, vect = ctx->intr;
        for (j = 0; j < tx_rings_per_vector; j++) {
                        (vect * tx_rings_per_vector) + j].cq_id);
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
        struct intr_context *intr_context = &qdev->intr_context[0];
        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
                intr_context->intr = i;
                ql_set_irq_mask(qdev, intr_context);
                        intr_context->handler = qlge_isr;
                                qdev->ndev->name, i);
                        intr_context->handler = qlge_msix_rx_isr;
                                qdev->ndev->name, i);
                intr_context->intr = 0;
                intr_context->handler = qlge_isr;
                ql_set_irq_mask(qdev, intr_context);
        ql_set_tx_vect(qdev);
static void ql_free_irq(struct ql_adapter *qdev)
        struct intr_context *intr_context = &qdev->intr_context[0];
        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
                if (intr_context->hooked) {
        ql_disable_msix(qdev);
static int ql_request_irq(struct ql_adapter *qdev)
        struct intr_context *intr_context = &qdev->intr_context[0];
        ql_resolve_queues_to_irqs(qdev);
        for (i = 0; i < qdev->intr_count; i++, intr_context++) {
                          "Failed request for MSIX interrupt %d.\n",
                             "trying msi or legacy interrupts.\n");
                             "%s: irq = %d.\n", __func__, pdev->irq);
                             "%s: context->name = %s.\n", __func__,
                             intr_context->name);
                             "%s: dev_id = 0x%p.\n", __func__,
                     "Hooked intr %d, queue type %s, with name %s.\n",
                     intr_context->name);
                intr_context->hooked = 1;
        netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
static int ql_start_rss(struct ql_adapter *qdev)
        static const u8 init_hash_seed[] = {
                0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
                0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
                0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
                0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
                0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
        memset((void *)ricb, 0, sizeof(*ricb));
        for (i = 0; i < 1024; i++)
                netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
static int ql_clear_routing_entries(struct ql_adapter *qdev)
        for (i = 0; i < 16; i++) {
                status = ql_set_routing_reg(qdev, i, 0, 0);
                          "Failed to init routing register for CAM packets.\n");
static int ql_route_initialize(struct ql_adapter *qdev)
        status = ql_clear_routing_entries(qdev);
                  "Failed to init routing register "
                  "for IP CSUM error packets.\n");
                  "Failed to init routing register "
                  "for TCP/UDP CSUM error packets.\n");
                  "Failed to init routing register for broadcast packets.\n");
                  "Failed to init routing register for MATCH RSS packets.\n");
                  "Failed to init routing register for CAM packets.\n");
        set = ql_read32(qdev, STS);
        status = ql_set_mac_addr(qdev, set);
                netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
        status = ql_route_initialize(qdev);
                netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
static int ql_adapter_initialize(struct ql_adapter *qdev)
        ql_write32(qdev, SYS, mask | value);
        ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
        ql_write32(qdev, FSC, mask | value);
        if (qdev->pdev->subsystem_device == 0x0068 ||
            qdev->pdev->subsystem_device == 0x0180)
                status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
                          "Failed to start rx ring[%d].\n", i);
                status = ql_start_rss(qdev);
                        netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
                status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
                          "Failed to start tx ring[%d].\n", i);
        status = qdev->nic_ops->port_initialize(qdev);
                netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
                  "Failed to init CAM/Routing tables.\n");
                napi_enable(&qdev->rx_ring[i].napi);
static int ql_adapter_reset(struct ql_adapter *qdev)
        unsigned long end_jiffies;
        status = ql_clear_routing_entries(qdev);
                netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
        value = ql_read32(qdev, RST_FO);
                  "ETIMEDOUT!!! errored out of resetting the chip!\n");
static void ql_display_dev_info(struct net_device *ndev)
                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
                    "XG Roll = %d, XG Rev = %d.\n",
                    "MAC address %pM\n", ndev->dev_addr);
                     "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
                          "Failed to set magic packet on %s.\n",
                             "Enabled magic packet successfully on %s.\n",
                   "WOL %s (wol code 0x%x) on %s\n",
                   (status == 0) ? "Successfully set" : "Failed",
                   wol, qdev->ndev->name);
static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
static int ql_adapter_down(struct ql_adapter *qdev)
        ql_cancel_all_work_sync(qdev);
                napi_disable(&qdev->rx_ring[i].napi);
        ql_disable_interrupts(qdev);
        ql_tx_ring_clean(qdev);
        status = ql_adapter_reset(qdev);
                netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
        ql_free_rx_buffers(qdev);
static int ql_adapter_up(struct ql_adapter *qdev)
        err = ql_adapter_initialize(qdev);
                netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
        ql_alloc_rx_buffers(qdev);
                qlge_set_multicast_list(qdev->ndev);
        qlge_restore_vlan(qdev);
        ql_enable_interrupts(qdev);
        ql_enable_all_completion_interrupts(qdev);
        netif_tx_start_all_queues(qdev->ndev);
        ql_adapter_reset(qdev);
static void ql_release_adapter_resources(struct ql_adapter *qdev)
        ql_free_mem_resources(qdev);
static int ql_get_adapter_resources(struct ql_adapter *qdev)
        if (ql_alloc_mem_resources(qdev)) {
                netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
        status = ql_request_irq(qdev);
static int qlge_close(struct net_device *ndev)
                netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
        ql_adapter_down(qdev);
        ql_release_adapter_resources(qdev);
static int ql_configure_rings(struct ql_adapter *qdev)
        unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
        ql_enable_msix(qdev);
                memset((void *)tx_ring, 0, sizeof(*tx_ring));
                memset((void *)rx_ring, 0, sizeof(*rx_ring));
                rx_ring->qdev = qdev;
                if (i < qdev->rss_ring_count) {
static int qlge_open(struct net_device *ndev)
        err = ql_adapter_reset(qdev);
        err = ql_configure_rings(qdev);
        err = ql_get_adapter_resources(qdev);
        err = ql_adapter_up(qdev);
        ql_release_adapter_resources(qdev);
static int ql_change_rx_buffers(struct ql_adapter *qdev)
                     "Waiting for adapter UP...\n");
                  "Timed out waiting for adapter UP\n");
        status = ql_adapter_down(qdev);
        lbq_buf_len = (qdev->ndev->mtu > 1500) ?
        status = ql_adapter_up(qdev);
                  "Driver up/down cycle failed, closing device.\n");
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
        if (ndev->mtu == 1500 && new_mtu == 9000) {
                netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
        } else if (ndev->mtu == 9000 && new_mtu == 1500) {
                netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
        ndev->mtu = new_mtu;
        if (!netif_running(qdev->ndev)) {
        status = ql_change_rx_buffers(qdev);
                  "Changing MTU failed.\n");
        struct rx_ring *rx_ring = &qdev->rx_ring[0];
        struct tx_ring *tx_ring = &qdev->tx_ring[0];
        unsigned long pkts, mcast, dropped, errors, bytes;
        pkts = mcast = dropped = errors = bytes = 0;
        ndev->stats.rx_packets = pkts;
        ndev->stats.rx_dropped = dropped;
        ndev->stats.multicast = mcast;
        pkts = errors = bytes = 0;
        ndev->stats.tx_packets = pkts;
        return &ndev->stats;
static void qlge_set_multicast_list(struct net_device *ndev)
        if (ql_set_routing_reg
                  "Failed to set promiscuous mode.\n");
        if (ql_set_routing_reg
                  "Failed to clear promiscuous mode.\n");
        if (ql_set_routing_reg
                  "Failed to set all-multi mode.\n");
        if (ql_set_routing_reg
                  "Failed to clear all-multi mode.\n");
                if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
                          "Failed to load multicast address.\n");
        if (ql_set_routing_reg
                  "Failed to set multicast match mode.\n");
static int qlge_set_mac_address(struct net_device *ndev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
        status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
static void qlge_tx_timeout(struct net_device *ndev)
        status = ql_adapter_down(qdev);
        status = ql_adapter_up(qdev);
        qlge_set_multicast_list(qdev->ndev);
                  "Driver up/down cycle failed, closing device\n");
        .get_flash = ql_get_8012_flash_params,
        .port_initialize = ql_8012_port_initialize,
        .get_flash = ql_get_8000_flash_params,
        .port_initialize = ql_8000_port_initialize,
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
        u32 nic_func1, nic_func2;
        if (qdev->func == nic_func1)
        else if (qdev->func == nic_func2)
static int ql_get_board_info(struct ql_adapter *qdev)
        status = ql_get_alt_pcie_func(qdev);
                qdev->nic_ops = &qla8012_nic_ops;
                qdev->nic_ops = &qla8000_nic_ops;
static void ql_release_all(struct pci_dev *pdev)
        struct net_device *ndev = pci_get_drvdata(pdev);
        pci_set_drvdata(pdev, NULL);
        memset((void *)qdev, 0, sizeof(*qdev));
                dev_err(&pdev->dev, "PCI device enable failed.\n");
        pci_set_drvdata(pdev, ndev);
                dev_err(&pdev->dev, "PCI region request failed.\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                dev_err(&pdev->dev, "No usable DMA configuration.\n");
                dev_err(&pdev->dev, "Register mapping failed.\n");
                dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
        err = ql_get_board_info(qdev);
                dev_err(&pdev->dev, "Register access failed.\n");
        if (qlge_mpi_coredump) {
                        dev_err(&pdev->dev, "Coredump alloc failed.\n");
                if (qlge_force_coredump)
        err = qdev->nic_ops->get_flash(qdev);
        dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
        ql_release_all(pdev);
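/* net_device_ops table wiring the qlge entry points into the network stack. */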
        .ndo_open = qlge_open,
        .ndo_stop = qlge_close,
        .ndo_start_xmit = qlge_send,
        .ndo_change_mtu = qlge_change_mtu,
        .ndo_get_stats = qlge_get_stats,
        .ndo_set_rx_mode = qlge_set_multicast_list,
        .ndo_set_mac_address = qlge_set_mac_address,
        .ndo_tx_timeout = qlge_tx_timeout,
        .ndo_fix_features = qlge_fix_features,
        .ndo_set_features = qlge_set_features,
        .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
static void ql_timer(unsigned long data)
        var = ql_read32(qdev, STS);
        if (pci_channel_offline(qdev->pdev)) {
                netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
        static int cards_found = 0;
        ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
        err = ql_init_device(pdev, ndev, cards_found);
        qdev = netdev_priv(ndev);
                dev_err(&pdev->dev, "net device registration failed.\n");
                ql_release_all(pdev);
        qdev->timer.function = ql_timer;
        ql_display_dev_info(ndev);
        return qlge_send(skb, ndev);
        return ql_clean_inbound_rx_ring(rx_ring, budget);
        struct net_device *ndev = pci_get_drvdata(pdev);
        ql_cancel_all_work_sync(qdev);
        ql_release_all(pdev);
static void ql_eeh_close(struct net_device *ndev)
        if (netif_carrier_ok(ndev)) {
                netif_stop_queue(ndev);
        ql_cancel_all_work_sync(qdev);
        ql_tx_ring_clean(qdev);
        ql_free_rx_buffers(qdev);
        ql_release_adapter_resources(qdev);
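/*
 * PCI error (EEH/AER) recovery: on a detected channel error the interface is
 * torn down via ql_eeh_close(), the slot is reset, and qlge_io_resume()
 * reopens the device if it was running before the error.
 */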
        struct net_device *ndev = pci_get_drvdata(pdev);
                if (netif_running(ndev))
                   "%s: pci_channel_io_perm_failure.\n", __func__);
        struct net_device *ndev = pci_get_drvdata(pdev);
                  "Cannot re-enable PCI device after reset.\n");
        if (ql_adapter_reset(qdev)) {
static void qlge_io_resume(struct pci_dev *pdev)
        struct net_device *ndev = pci_get_drvdata(pdev);
        if (netif_running(ndev)) {
                err = qlge_open(ndev);
                          "Device initialization failed after reset.\n");
                   "Device was not running prior to EEH.\n");
        .error_detected = qlge_io_error_detected,
        .slot_reset = qlge_io_slot_reset,
        .resume = qlge_io_resume,
        struct net_device *ndev = pci_get_drvdata(pdev);
        if (netif_running(ndev)) {
                err = ql_adapter_down(qdev);
static int qlge_resume(struct pci_dev *pdev)
        struct net_device *ndev = pci_get_drvdata(pdev);
                netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
        if (netif_running(ndev)) {
                err = ql_adapter_up(qdev);
static void qlge_shutdown(struct pci_dev *pdev)
        .id_table = qlge_pci_tbl,
        .probe = qlge_probe,
        .suspend = qlge_suspend,
        .resume = qlge_resume,
        .shutdown = qlge_shutdown,
        .err_handler = &qlge_err_handler
static int __init qlge_init_module(void)
        return pci_register_driver(&qlge_driver);
static void __exit qlge_exit(void)