/* Prefix every pr_*() log message emitted by this file with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 #include <linux/prefetch.h>
/* Human-readable driver identification strings. */
static char ixgb_driver_string[] =
	"Intel(R) PRO/10GbE Network Driver";

/* Version string; the "-NAPI" suffix is appended to record this build flavor. */
#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI

static const char ixgb_copyright[] =
	"Copyright (c) 1999-2008 Intel Corporation.";
/* Default copybreak threshold: RX packets up to this many bytes are copied
 * into a fresh skb instead of consuming the original buffer. */
42 #define IXGB_CB_LENGTH 256
/* NOTE(review): the line below is the tail of a parameter-description call
 * (presumably MODULE_PARM_DESC(copybreak, ...)) whose opening — and the
 * matching module_param() line — is missing from this extraction; confirm
 * against the full file before relying on it. */
46 "Maximum size of packet that is copied to a new buffer on receive");
/*
 * Local function prototypes, de-mangled: the extraction had each declaration
 * hard-wrapped mid-token with the original file's line number fused into the
 * text.  Every token of each prototype below was present in the fragments.
 *
 * NOTE(review): the jumps in the original numbering (74->78, 79->85, 86->90,
 * 91->96) show that additional prototypes existed between these lines; they
 * are missing from this view and are deliberately NOT reconstructed here.
 */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
/* NOTE(review): this #ifdef's guarded content and its matching #endif
 * (original lines ~109-111) are missing from this extraction. */
108 #ifdef CONFIG_NET_POLL_CONTROLLER
/* PCI error-recovery "resume" callback prototype (declaration wrapped
 * mid-token by the extraction). */
116 static void ixgb_io_resume (
struct pci_dev *pdev);
/* NOTE(review): fragments of two designated initializers whose declaration
 * headers and closing braces are missing from this extraction — the first
 * three members belong to a struct pci_error_handlers (AER callbacks), the
 * last two to the driver's struct pci_driver.  Confirm against the full
 * file; kept byte-identical here. */
119 .error_detected = ixgb_io_error_detected,
120 .slot_reset = ixgb_io_slot_reset,
121 .resume = ixgb_io_resume,
126 .id_table = ixgb_pci_tbl,
129 .err_handler = &ixgb_err_handler
/* Default netif message bitmap: log driver, probe and link events only. */
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
/* Module "debug" knob; -1 means "use the default message level"
 * (NOTE(review): presumably consumed by netif_msg_init() elsewhere in the
 * file — confirm). */
static int debug = -1;
/* NOTE(review): fragments of ixgb_init_module()/ixgb_exit_module().  The
 * original numbering jumps (150->153->155->168) show the return-type lines,
 * braces and at least one statement are missing from this extraction, so the
 * bodies are kept byte-identical rather than reconstructed. */
150 ixgb_init_module(
void)
153 pr_info(
"%s\n", ixgb_copyright);
155 return pci_register_driver(&ixgb_driver);
168 ixgb_exit_module(
void)
215 ixgb_set_multi(netdev);
217 ixgb_restore_vlan(adapter);
219 ixgb_configure_tx(adapter);
220 ixgb_setup_rctl(adapter);
221 ixgb_configure_rx(adapter);
229 err = pci_enable_msi(adapter->
pdev);
238 netdev->
name, netdev);
243 "Unable to allocate interrupt Error: %d\n", err);
268 napi_enable(&adapter->
napi);
269 ixgb_irq_enable(adapter);
271 netif_wake_queue(netdev);
286 napi_disable(&adapter->
napi);
288 ixgb_irq_disable(adapter);
300 netif_stop_queue(netdev);
303 ixgb_clean_tx_ring(adapter);
304 ixgb_clean_rx_ring(adapter);
352 if (netif_running(netdev)) {
/* NOTE(review): fragment of the driver's struct net_device_ops designated
 * initializer.  The declaration header, original line 369, the #endif that
 * must close the CONFIG_NET_POLL_CONTROLLER guard (line 377), and the
 * closing brace are all missing from this extraction; members kept
 * byte-identical. */
364 .ndo_open = ixgb_open,
365 .ndo_stop = ixgb_close,
366 .ndo_start_xmit = ixgb_xmit_frame,
367 .ndo_get_stats = ixgb_get_stats,
368 .ndo_set_rx_mode = ixgb_set_multi,
370 .ndo_set_mac_address = ixgb_set_mac,
371 .ndo_change_mtu = ixgb_change_mtu,
372 .ndo_tx_timeout = ixgb_tx_timeout,
373 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
374 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
375 #ifdef CONFIG_NET_POLL_CONTROLLER
376 .ndo_poll_controller = ixgb_netpoll,
378 .ndo_fix_features = ixgb_fix_features,
379 .ndo_set_features = ixgb_set_features,
/* NOTE(review): appears to count adapters probed so far — confirm against
 * ixgb_probe().  Static storage is implicitly zero-initialized, so the
 * explicit "= 0" was redundant (checkpatch: do not initialise statics to 0). */
static int cards_found;
420 pr_err(
"No usable DMA configuration, aborting\n");
428 goto err_request_regions;
435 goto err_alloc_etherdev;
440 pci_set_drvdata(pdev, netdev);
441 adapter = netdev_priv(netdev);
448 if (!adapter->
hw.hw_addr) {
475 err = ixgb_sw_init(adapter);
497 "The EEPROM Checksum Is Not Valid\n");
505 if (!is_valid_ether_addr(netdev->
perm_addr)) {
528 "Intel(R) PRO/10GbE Network Connection\n");
562 ixgb_remove(
struct pci_dev *pdev)
564 struct net_device *netdev = pci_get_drvdata(pdev);
657 netif_start_queue(netdev);
714 "Unable to allocate transmit descriptor ring memory\n");
728 "Unable to allocate transmit descriptor memory\n");
749 u64 tdba = adapter->tx_ring.dma;
803 "Unable to allocate receive descriptor ring\n");
818 "Unable to allocate receive descriptors\n");
929 ixgb_clean_tx_ring(adapter);
931 vfree(adapter->tx_ring.buffer_info);
932 adapter->tx_ring.buffer_info =
NULL;
935 adapter->tx_ring.desc, adapter->tx_ring.dma);
937 adapter->tx_ring.desc =
NULL;
941 ixgb_unmap_and_free_tx_resource(
struct ixgb_adapter *adapter,
944 if (buffer_info->
dma) {
951 buffer_info->
dma = 0;
954 if (buffer_info->
skb) {
979 for (i = 0; i < tx_ring->
count; i++) {
981 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1011 ixgb_clean_rx_ring(adapter);
1038 for (i = 0; i < rx_ring->
count; i++) {
1040 if (buffer_info->
dma) {
1045 buffer_info->
dma = 0;
1049 if (buffer_info->
skb) {
1050 dev_kfree_skb(buffer_info->
skb);
1083 if (!is_valid_ether_addr(addr->
sa_data))
1156 ixgb_vlan_strip_enable(adapter);
1158 ixgb_vlan_strip_disable(adapter);
1168 ixgb_watchdog(
unsigned long data)
1178 netif_stop_queue(netdev);
1181 if (adapter->
hw.link_up) {
1182 if (!netif_carrier_ok(netdev)) {
1184 "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
1196 if (netif_carrier_ok(netdev)) {
1199 netdev_info(netdev,
"NIC Link is Down\n");
1206 if (!netif_carrier_ok(netdev)) {
/* Per-skb transmit flag bits (OR-ed together through the TX path):
 * checksum offload, VLAN tag insertion, TCP segmentation offload. */
#define IXGB_TX_FLAGS_CSUM 0x00000001
#define IXGB_TX_FLAGS_VLAN 0x00000002
#define IXGB_TX_FLAGS_TSO  0x00000004
1241 if (
likely(skb_is_gso(skb))) {
1245 if (skb_header_cloned(skb)) {
1251 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1252 mss = skb_shinfo(skb)->gso_size;
1259 ipcss = skb_network_offset(skb);
1260 ipcso = (
void *)&(iph->
check) - (
void *)skb->
data;
1261 ipcse = skb_transport_offset(skb) - 1;
1262 tucss = skb_transport_offset(skb);
1263 tucso = (
void *)&(tcp_hdr(skb)->check) - (
void *)skb->
data;
1266 i = adapter->tx_ring.next_to_use;
1268 buffer_info = &adapter->tx_ring.buffer_info[
i];
1271 context_desc->
ipcss = ipcss;
1272 context_desc->
ipcso = ipcso;
1274 context_desc->
tucss = tucss;
1275 context_desc->
tucso = tucso;
1279 context_desc->
status = 0;
1286 | (skb->
len - (hdr_len)));
1289 if (++i == adapter->tx_ring.count) i = 0;
1290 adapter->tx_ring.next_to_use =
i;
1307 css = skb_checksum_start_offset(skb);
1310 i = adapter->tx_ring.next_to_use;
1312 buffer_info = &adapter->tx_ring.buffer_info[
i];
1315 context_desc->
tucss = css;
1316 context_desc->
tucso = cso;
1317 context_desc->
tucse = 0;
1319 *(
u32 *)&(context_desc->
ipcss) = 0;
1320 context_desc->
status = 0;
1322 context_desc->
mss = 0;
1327 if (++i == adapter->tx_ring.count) i = 0;
1328 adapter->tx_ring.next_to_use =
i;
/* Maximum payload a single TX descriptor can carry: 2^14 = 16384 bytes. */
#define IXGB_MAX_TXD_PWR 14
#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
1346 int len = skb_headlen(skb);
1348 unsigned int mss = skb_shinfo(skb)->gso_size;
1349 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1359 if (
unlikely(mss && !nr_frags && size == len && size > 8))
1378 if (i == tx_ring->
count)
1383 for (f = 0; f < nr_frags; f++) {
1386 frag = &skb_shinfo(skb)->frags[
f];
1387 len = skb_frag_size(frag);
1392 if (i == tx_ring->
count)
1400 if (
unlikely(mss && (f == (nr_frags - 1))
1401 && size == len && size > 8))
1408 skb_frag_dma_map(&pdev->
dev, frag, offset, size,
1426 buffer_info->
dma = 0;
1432 i += tx_ring->
count;
1435 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1472 tx_desc->
popts = popts;
1475 if (++i == tx_ring->
count) i = 0;
1491 static int __ixgb_maybe_stop_tx(
struct net_device *netdev,
int size)
1496 netif_stop_queue(netdev);
1508 netif_start_queue(netdev);
1513 static int ixgb_maybe_stop_tx(
struct net_device *netdev,
1518 return __ixgb_maybe_stop_tx(netdev, size);
/* Number of TX descriptors needed for S bytes:
 * ceil(S / IXGB_MAX_DATA_PER_TXD), computed with shift + remainder test. */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
	(((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
/* NOTE(review): worst-case descriptor count for one transmit.  The final
 * continuation line of this macro is missing from this extraction (the last
 * visible line still ends in a backslash), so the definition is incomplete
 * here — confirm against the full file; kept byte-identical. */
1525 #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
1526 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 \
1534 unsigned int tx_flags = 0;
1544 if (skb->
len <= 0) {
1549 if (
unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1558 first = adapter->tx_ring.next_to_use;
1560 tso = ixgb_tso(adapter, skb);
1568 else if (ixgb_tx_csum(adapter, skb))
1571 count = ixgb_tx_map(adapter, skb, first);
1574 ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
1576 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
DESC_NEEDED);
1580 adapter->tx_ring.buffer_info[
first].time_stamp = 0;
1581 adapter->tx_ring.next_to_use =
first;
1623 return &netdev->
stats;
1635 ixgb_change_mtu(
struct net_device *netdev,
int new_mtu)
1642 if ((new_mtu < 68) ||
1645 "Invalid MTU setting %d\n", new_mtu);
1649 if (old_max_frame == max_frame)
1652 if (netif_running(netdev))
1657 netdev->
mtu = new_mtu;
1659 if (netif_running(netdev))
1677 if (pci_channel_offline(pdev))
1685 u64 bcast = ((
u64)bcast_h << 32) | bcast_l;
1692 adapter->
stats.mprcl += (multi & 0xFFFFFFFF);
1693 adapter->
stats.mprch += (multi >> 32);
1694 adapter->
stats.bprcl += bcast_l;
1695 adapter->
stats.bprch += bcast_h;
1761 netdev->
stats.rx_packets = adapter->
stats.gprcl;
1762 netdev->
stats.tx_packets = adapter->
stats.gptcl;
1763 netdev->
stats.rx_bytes = adapter->
stats.gorcl;
1764 netdev->
stats.tx_bytes = adapter->
stats.gotcl;
1765 netdev->
stats.multicast = adapter->
stats.mprcl;
1766 netdev->
stats.collisions = 0;
1770 netdev->
stats.rx_errors =
1771 adapter->
stats.crcerrs +
1772 adapter->
stats.ruc +
1773 adapter->
stats.roc +
1774 adapter->
stats.icbc +
1781 netdev->
stats.rx_crc_errors = adapter->
stats.crcerrs;
1782 netdev->
stats.rx_fifo_errors = adapter->
stats.mpc;
1783 netdev->
stats.rx_missed_errors = adapter->
stats.mpc;
1784 netdev->
stats.rx_over_errors = adapter->
stats.mpc;
1786 netdev->
stats.tx_errors = 0;
1787 netdev->
stats.rx_frame_errors = 0;
1788 netdev->
stats.tx_aborted_errors = 0;
1789 netdev->
stats.tx_carrier_errors = 0;
1790 netdev->
stats.tx_fifo_errors = 0;
1791 netdev->
stats.tx_heartbeat_errors = 0;
1792 netdev->
stats.tx_window_errors = 0;
/* NOTE(review): presumably bounds the interrupt handler's cause-register
 * re-read loop — the use site is not visible in this extraction; confirm. */
#define IXGB_MAX_INTR 10
1803 ixgb_intr(
int irq,
void *data)
1817 if (napi_schedule_prep(&adapter->
napi)) {
1840 ixgb_clean_tx_irq(adapter);
1841 ixgb_clean_rx_irq(adapter, &work_done, budget);
1844 if (work_done < budget) {
1847 ixgb_irq_enable(adapter);
1865 unsigned int i, eop;
1866 bool cleaned =
false;
1875 for (cleaned =
false; !cleaned; ) {
1879 if (tx_desc->
popts &
1884 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1888 cleaned = (i == eop);
1889 if (++i == tx_ring->
count) i = 0;
1898 if (
unlikely(cleaned && netif_carrier_ok(netdev) &&
1904 if (netif_queue_stopped(netdev) &&
1906 netif_wake_queue(netdev);
1921 "Detected Tx Unit Hang\n"
1924 " next_to_use <%x>\n"
1925 " next_to_clean <%x>\n"
1926 "buffer_info[next_to_clean]\n"
1927 " time_stamp <%lx>\n"
1928 " next_to_watch <%x>\n"
1930 " next_to_watch.status <%x>\n",
1939 netif_stop_queue(netdev);
1963 skb_checksum_none_assert(skb);
1971 skb_checksum_none_assert(skb);
1984 static void ixgb_check_copybreak(
struct net_device *netdev,
1993 new_skb = netdev_alloc_skb_ip_align(netdev, length);
2011 ixgb_clean_rx_irq(
struct ixgb_adapter *adapter,
int *work_done,
int work_to_do)
2017 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
2020 int cleaned_count = 0;
2021 bool cleaned =
false;
2031 if (*work_done >= work_to_do)
2036 status = rx_desc->
status;
2037 skb = buffer_info->
skb;
2042 if (++i == rx_ring->
count)
2048 if (j == rx_ring->
count)
2062 buffer_info->
dma = 0;
2071 pr_debug(
"Receive packet consumed multiple buffers length<%x>\n",
2085 ixgb_check_copybreak(netdev, buffer_info, length, &skb);
2091 ixgb_rx_checksum(adapter, rx_desc, skb);
2095 __vlan_hwaccel_put_tag(skb,
2106 ixgb_alloc_rx_buffers(adapter, cleaned_count);
2112 buffer_info = next_buffer;
2119 ixgb_alloc_rx_buffers(adapter, cleaned_count);
2130 ixgb_alloc_rx_buffers(
struct ixgb_adapter *adapter,
int cleaned_count)
2147 while (--cleancount > 2 && cleaned_count--) {
2149 skb = buffer_info->
skb;
2155 skb = netdev_alloc_skb_ip_align(netdev, adapter->
rx_buffer_len);
2178 if (++i == rx_ring->
count) i = 0;
2185 i = (rx_ring->
count - 1);
2226 index = (vid >> 5) & 0x7F;
2228 vfta |= (1 << (vid & 0x1F));
2243 index = (vid >> 5) & 0x7F;
2245 vfta &= ~(1 << (vid & 0x1F));
2258 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2261 #ifdef CONFIG_NET_POLL_CONTROLLER
2273 ixgb_intr(adapter->
pdev->irq, dev);
2289 struct net_device *netdev = pci_get_drvdata(pdev);
2297 if (netif_running(netdev))
2317 struct net_device *netdev = pci_get_drvdata(pdev);
2322 "Cannot re-enable PCI device after reset\n");
2333 netif_stop_queue(netdev);
2339 "After reset, the EEPROM checksum is not valid\n");
2345 if (!is_valid_ether_addr(netdev->
perm_addr)) {
2347 "After reset, invalid MAC address\n");
2362 static void ixgb_io_resume(
struct pci_dev *pdev)
2364 struct net_device *netdev = pci_get_drvdata(pdev);
2369 if (netif_running(netdev)) {
2371 pr_err(
"can't bring device back up after reset\n");