24 #define DRV_VERSION "1.0.0.7-NAPI"
28 #define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
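/* DMA maximum payload sizes in bytes; the index appears to correspond to the
 * max-payload-size encoding read from the PCIe device control register */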
88 static const u16 atl1e_pay_load_size[] = {
89 128, 256, 512, 1024, 2048, 4096,
109 static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
121 static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
133 static void atl1e_phy_config(unsigned long data)
141 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
175 if (netif_carrier_ok(netdev)) {
183 netif_stop_queue(netdev);
196 atl1e_setup_mac_ctrl(adapter);
198 "NIC Link is Up <%d Mbps %s Duplex>\n",
204 if (!netif_carrier_ok(netdev)) {
207 netif_wake_queue(netdev);
224 atl1e_check_link(adapter);
225 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
228 static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
241 if (netif_carrier_ok(netdev)) {
243 netdev_info(netdev, "NIC Link is Down\n");
245 netif_stop_queue(netdev);
266 static void atl1e_tx_timeout(struct net_device *netdev)
283 static void atl1e_set_multi(struct net_device *netdev)
288 u32 mac_ctrl_data = 0;
327 static void atl1e_vlan_mode(struct net_device *netdev,
331 u32 mac_ctrl_data = 0;
335 atl1e_irq_disable(adapter);
337 __atl1e_vlan_mode(features, &mac_ctrl_data);
339 atl1e_irq_enable(adapter);
342 static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
345 atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
355 static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
360 if (!is_valid_ether_addr(addr->sa_data))
363 if (netif_running(netdev))
389 static int atl1e_set_features(struct net_device *netdev,
395 atl1e_vlan_mode(netdev, features);
407 static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
410 int old_mtu = netdev->mtu;
415 netdev_warn(adapter->netdev, "invalid MTU setting\n");
419 if (old_mtu != new_mtu && netif_running(netdev)) {
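/* max_frame_size tracks the new MTU; the jumbo threshold is kept in
 * 8-byte units, rounded up */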
422 netdev->mtu = new_mtu;
423 adapter->hw.max_frame_size = new_mtu;
424 adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
435 static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
445 int reg_num, int val)
452 static int atl1e_mii_ioctl(struct net_device *netdev,
460 if (!netif_running(netdev))
497 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
502 static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
508 return atl1e_mii_ioctl(netdev, ifr, cmd);
514 static void atl1e_setup_pcicmd(struct pci_dev *pdev)
554 u32 phy_status_data = 0;
612 if (atl1e_alloc_queues(adapter)) {
613 netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
630 static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
640 ring_count = tx_ring->count;
642 for (index = 0; index < ring_count; index++) {
644 if (tx_buffer->dma) {
646 pci_unmap_single(pdev, tx_buffer->dma,
649 pci_unmap_page(pdev, tx_buffer->dma,
655 for (index = 0; index < ring_count; index++) {
657 if (tx_buffer->skb) {
673 static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
707 static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
714 + adapter->hw.max_frame_size
718 atl1e_cal_ring_size(adapter, &adapter->ring_size);
728 static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
746 *rx_page_desc[i].rx_page[j].write_offset_addr = 0;
747 rx_page_desc[i].rx_page[j].read_offset = 0;
758 static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
762 atl1e_clean_tx_ring(adapter);
763 atl1e_clean_rx_ring(adapter);
771 if (adapter->tx_ring.tx_buffer) {
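/* a single coherent DMA block backs the TPD ring, the per-queue RX pages
 * and their write-back offset words; 'offset' below carves it up and is
 * sanity-checked against the total ring size */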
783 static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
806 netdev_err(adapter->netdev,
807 "pci_alloc_consistent failed, size = %d\n", size);
822 netdev_err(adapter->netdev, "kzalloc failed, size = %d\n",
845 offset += sizeof(u32);
849 rx_page_desc[i].rx_page[j].write_offset_dma =
851 rx_page_desc[i].rx_page[j].write_offset_addr =
853 offset += sizeof(u32);
858 netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
874 static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
901 page_phy_addr = rx_page_desc[i].rx_page[j].dma;
903 rx_page_desc[i].rx_page[j].write_offset_dma;
918 static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
921 u32 dev_ctrl_data = 0;
922 u32 max_pay_load = 0;
923 u32 jumbo_thresh = 0;
961 static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
967 u32 rxf_thresh_data = 0;
968 u32 rxq_ctrl_data = 0;
978 rxf_high = rxf_len * 4 / 5;
979 rxf_low = rxf_len / 5;
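/* RX FIFO watermarks derived from the FIFO length: high at 4/5 full,
 * low at 1/5, used as the flow-control thresholds */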
1014 static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
1017 u32 dma_ctrl_data = 0;
1033 static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
1052 value |= (((u32)adapter->hw.preamble_len &
1055 __atl1e_vlan_mode(netdev->features, &value);
1076 u32 intr_status_data = 0;
1092 atl1e_configure_des_ring(adapter);
1111 VLAN_HLEN + ETH_FCS_LEN);
1114 atl1e_configure_tx(adapter);
1117 atl1e_configure_rx(adapter);
1120 atl1e_configure_dma(adapter);
1127 netdev_err(adapter->netdev,
1128 "atl1e_configure failed, PCIE phy link down\n");
1178 static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
1180 u16 hw_reg_addr = 0;
1181 unsigned long *stats_item = NULL;
1185 stats_item = &adapter->hw_stats.rx_ok;
1193 stats_item = &adapter->hw_stats.tx_ok;
1201 static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
1210 static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
1217 while (next_to_clean != hw_next_to_clean) {
1218 tx_buffer = &tx_ring->tx_buffer[next_to_clean];
1219 if (tx_buffer->dma) {
1221 pci_unmap_single(adapter->pdev, tx_buffer->dma,
1224 pci_unmap_page(adapter->pdev, tx_buffer->dma,
1229 if (tx_buffer->skb) {
1234 if (++next_to_clean == tx_ring->count)
1240 if (netif_queue_stopped(adapter->netdev) &&
1241 netif_carrier_ok(adapter->netdev)) {
1242 netif_wake_queue(adapter->netdev);
1253 static irqreturn_t atl1e_intr(int irq, void *data)
1272 atl1e_clear_phy_int(adapter);
1279 netdev_err(adapter->netdev,
1280 "pcie phy linkdown %x\n", status);
1281 if (netif_running(adapter->netdev)) {
1283 atl1e_irq_reset(adapter);
1291 netdev_err(adapter->netdev,
1292 "PCIE DMA RW error (status = 0x%x)\n",
1294 atl1e_irq_reset(adapter);
1300 atl1e_update_hw_stats(adapter);
1304 netdev->stats.tx_carrier_errors++;
1305 atl1e_link_chg_event(adapter);
1311 atl1e_clean_tx_irq(adapter);
1319 IMR_NORMAL_MASK & ~ISR_RX_EVENT);
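/* RX interrupts stay masked while the NAPI poll runs; they are re-enabled
 * once the poll finishes under budget */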
1321 if (likely(napi_schedule_prep(
1325 } while (--max_ints > 0);
1332 static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
1341 skb_checksum_none_assert(skb);
1346 if (pkt_flags & RRS_IS_IPV4) {
1349 iph = (struct iphdr *) (packet + head_len);
1374 int *work_done, int work_to_do)
1382 u32 packet_size, write_offset;
1385 write_offset = *(rx_page->write_offset_addr);
1386 if (likely(rx_page->read_offset < write_offset)) {
1388 if (*work_done >= work_to_do)
1393 rx_page->read_offset);
1397 "rx sequence number error (rx=%d) (expect=%d)\n",
1416 "rx packet desc error %x\n",
1417 *((u32 *)prrs + 1));
1424 skb = netdev_alloc_skb_ip_align(netdev, packet_size);
1427 "Memory squeeze, deferring packet\n");
1433 atl1e_rx_checksum(adapter, skb, prrs);
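/* the receive return status stores the VLAN tag with its fields shuffled;
 * reassemble the 802.1Q TCI before handing it to the stack */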
1436 u16 vlan_tag = (prrs->vtag >> 4) |
1437 ((prrs->vtag & 7) << 13) |
1438 ((prrs->vtag & 8) << 9);
1440 "RXD VLAN TAG<RRD>=0x%04x\n",
1442 __vlan_hwaccel_put_tag(skb, vlan_tag);
1448 rx_page->read_offset +=
1454 if (rx_page->read_offset >= rx_ring->page_size) {
1459 rx_page->read_offset =
1460 *(rx_page->write_offset_addr) = 0;
1461 rx_using = rx_page_desc[que].rx_using;
1463 atl1e_rx_page_vld_regs[que][rx_using];
1466 rx_page = atl1e_get_rx_page(adapter, que);
1468 write_offset = *(rx_page->write_offset_addr);
1469 } while (rx_page->read_offset < write_offset);
1490 if (!netif_carrier_ok(adapter->netdev))
1493 atl1e_clean_rx_irq(adapter, 0, &work_done, budget);
1496 if (work_done < budget) {
1504 netdev_err(adapter->netdev,
1505 "atl1e_clean is called when AT_DOWN\n");
1514 #ifdef CONFIG_NET_POLL_CONTROLLER
1521 static void atl1e_netpoll(struct net_device *netdev)
1526 atl1e_intr(adapter->pdev->irq, netdev);
1535 u16 next_to_clean = 0;
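/* number of free TPD slots in the circular ring, accounting for wrap-around;
 * one slot is always left empty to distinguish full from empty */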
1541 (next_to_clean - next_to_use - 1) :
1542 (tx_ring->count + next_to_clean - next_to_use - 1);
1553 u16 next_to_use = 0;
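/* worst-case count of transmit descriptors needed for this skb: the linear
 * area, every page fragment, plus extra descriptors when GSO splits the
 * protocol headers from the payload */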
1572 static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
1577 u16 proto_hdr_len = 0;
1579 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1580 fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1584 if (skb_is_gso(skb)) {
1587 proto_hdr_len = skb_transport_offset(skb) +
1589 if (proto_hdr_len < skb_headlen(skb)) {
1590 tpd_req += ((skb_headlen(skb) - proto_hdr_len +
1605 unsigned short offload_type;
1608 if (skb_is_gso(skb)) {
1609 if (skb_header_cloned(skb)) {
1614 offload_type = skb_shinfo(skb)->gso_type;
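/* the frame may carry padding beyond the IP datagram; trim it back to the
 * length reported by the IP header before doing TSO */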
1617 real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
1618 + ntohs(ip_hdr(skb)->tot_len));
1620 if (real_len < skb->len)
1621 pskb_trim(skb, real_len);
1623 hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1626 netdev_warn(adapter->netdev,
1627 "IPV4 tso with zero data??\n");
1630 ip_hdr(skb)->check = 0;
1631 ip_hdr(skb)->tot_len = 0;
1636 tpd->word3 |= (ip_hdr(skb)->ihl &
1639 tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
1642 tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
1654 cso = skb_checksum_start_offset(skb);
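/* the hardware requires an even checksum start offset; odd offsets are
 * rejected with the error below */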
1656 netdev_err(adapter->netdev,
1657 "payload offset should not be an odd number\n");
1677 u16 buf_len = skb_headlen(skb);
1685 nr_frags = skb_shinfo(skb)->nr_frags;
1689 map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1692 tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
1693 tx_buffer->length = map_len;
1694 tx_buffer->dma = pci_map_single(adapter->pdev,
1697 mapped_len += map_len;
1704 while (mapped_len < buf_len) {
1707 if (mapped_len == 0) {
1710 use_tpd = atl1e_get_tpd(adapter);
1713 tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
1716 tx_buffer->length = map_len =
1720 pci_map_single(adapter->pdev, skb->data + mapped_len,
1723 mapped_len += map_len;
1730 for (f = 0; f < nr_frags; f++) {
1735 frag = &skb_shinfo(skb)->frags[f];
1736 buf_len = skb_frag_size(frag);
1739 for (i = 0; i < seg_num; i++) {
1740 use_tpd = atl1e_get_tpd(adapter);
1743 tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
1750 buf_len -= tx_buffer->length;
1752 tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
1773 tx_buffer->skb = skb;
1792 unsigned long flags;
1805 tpd_req = atl1e_cal_tdp_req(skb);
1809 if (atl1e_tpd_avail(adapter) < tpd_req) {
1811 netif_stop_queue(netdev);
1812 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1816 tpd = atl1e_get_tpd(adapter);
1835 if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
1836 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1841 atl1e_tx_map(adapter, skb, tpd);
1842 atl1e_tx_queue(adapter, tpd_req, tpd);
1845 spin_unlock_irqrestore(&adapter->tx_lock, flags);
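/* interrupt setup: MSI is tried first; if allocation fails the driver logs
 * a warning and falls back to the legacy INTx line */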
1867 err = pci_enable_msi(pdev);
1870 "Unable to allocate MSI interrupt Error: %d\n", err);
1879 "Unable to allocate interrupt Error: %d\n", err);
1884 netdev_dbg(netdev, "atl1e_request_irq OK\n");
1900 atl1e_init_ring_ptrs(adapter);
1901 atl1e_set_multi(netdev);
1902 atl1e_restore_vlan(adapter);
1904 if (atl1e_configure(adapter)) {
1910 napi_enable(&adapter->napi);
1911 atl1e_irq_enable(adapter);
1928 netif_stop_queue(netdev);
1934 napi_disable(&adapter->napi);
1935 atl1e_del_timer(adapter);
1936 atl1e_irq_disable(adapter);
1941 atl1e_clean_tx_ring(adapter);
1942 atl1e_clean_rx_ring(adapter);
1957 static int atl1e_open(struct net_device *netdev)
1967 atl1e_init_ring_resources(adapter);
1968 err = atl1e_setup_ring_resources(adapter);
1972 err = atl1e_request_irq(adapter);
1983 atl1e_free_irq(adapter);
1985 atl1e_free_ring_resources(adapter);
2002 static int atl1e_close(struct net_device *netdev)
2008 atl1e_free_irq(adapter);
2009 atl1e_free_ring_resources(adapter);
2016 struct net_device *netdev = pci_get_drvdata(pdev);
2020 u32 mac_ctrl_data = 0;
2021 u32 wol_ctrl_data = 0;
2022 u16 mii_advertise_data = 0;
2023 u16 mii_bmsr_data = 0;
2024 u16 mii_intr_status_data = 0;
2031 if (netif_running(netdev)) {
2066 if (mii_bmsr_data & BMSR_LSTATUS) {
2071 if (mii_bmsr_data & BMSR_LSTATUS)
2075 if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
2077 "Link may change when suspend\n");
2083 "read write phy register failed\n");
2093 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2097 __atl1e_vlan_mode(netdev->features, &mac_ctrl_data);
2100 if (wufc & AT_WUFC_MAG)
2132 if (netif_running(netdev))
2133 atl1e_free_irq(adapter);
2143 static int atl1e_resume(struct pci_dev *pdev)
2145 struct net_device *netdev = pci_get_drvdata(pdev);
2154 netdev_err(adapter->netdev,
2155 "Cannot enable PCI device from suspend\n");
2168 if (netif_running(netdev)) {
2169 err = atl1e_request_irq(adapter);
2176 if (netif_running(netdev))
2185 static void atl1e_shutdown(struct pci_dev *pdev)
2191 .ndo_open = atl1e_open,
2192 .ndo_stop = atl1e_close,
2193 .ndo_start_xmit = atl1e_xmit_frame,
2194 .ndo_get_stats = atl1e_get_stats,
2195 .ndo_set_rx_mode = atl1e_set_multi,
2197 .ndo_set_mac_address = atl1e_set_mac_addr,
2198 .ndo_fix_features = atl1e_fix_features,
2199 .ndo_set_features = atl1e_set_features,
2200 .ndo_change_mtu = atl1e_change_mtu,
2201 .ndo_do_ioctl = atl1e_ioctl,
2202 .ndo_tx_timeout = atl1e_tx_timeout,
2203 #ifdef CONFIG_NET_POLL_CONTROLLER
2204 .ndo_poll_controller = atl1e_netpoll,
2212 pci_set_drvdata(pdev, netdev);
2243 static int cards_found;
2249 dev_err(&pdev->dev, "cannot enable PCI device\n");
2264 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
2265 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
2271 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2278 if (netdev == NULL) {
2280 goto err_alloc_etherdev;
2283 err = atl1e_init_netdev(netdev, pdev);
2285 netdev_err(netdev, "init netdevice failed\n");
2286 goto err_init_netdev;
2288 adapter = netdev_priv(netdev);
2293 adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
2294 if (!adapter->hw.hw_addr) {
2296 netdev_err(netdev, "cannot map device registers\n");
2302 adapter->mii.mdio_read = atl1e_mdio_read;
2303 adapter->mii.mdio_write = atl1e_mdio_write;
2304 adapter->mii.phy_id_mask = 0x1f;
2321 atl1e_setup_pcicmd(pdev);
2323 err = atl1e_sw_init(adapter);
2325 netdev_err(netdev, "net device private data init failed\n");
2341 netdev_err(netdev, "get mac address failed\n");
2347 netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
2353 netdev_err(netdev, "register netdevice failed\n");
2358 netif_stop_queue(netdev);
2392 struct net_device *netdev = pci_get_drvdata(pdev);
2401 atl1e_del_timer(adapter);
2402 atl1e_cancel_work(adapter);
2405 atl1e_free_ring_resources(adapter);
2424 struct net_device *netdev = pci_get_drvdata(pdev);
2432 if (netif_running(netdev))
2450 struct net_device *netdev = pci_get_drvdata(pdev);
2454 netdev_err(adapter->netdev,
2455 "Cannot re-enable PCI device after reset\n");
2476 static void atl1e_io_resume(struct pci_dev *pdev)
2478 struct net_device *netdev = pci_get_drvdata(pdev);
2481 if (netif_running(netdev)) {
2483 netdev_err(adapter->netdev,
2484 "can't bring device back up after reset\n");
2493 .error_detected = atl1e_io_error_detected,
2494 .slot_reset = atl1e_io_slot_reset,
2495 .resume = atl1e_io_resume,
2500 .id_table = atl1e_pci_tbl,
2501 .probe = atl1e_probe,
2505 .suspend = atl1e_suspend,
2506 .resume = atl1e_resume,
2508 .shutdown = atl1e_shutdown,
2509 .err_handler = &atl1e_err_handler
2518 static int __init atl1e_init_module(void)
2520 return pci_register_driver(&atl1e_driver);
2529 static void __exit atl1e_exit_module(void)