25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/pci.h>
37 #include <linux/slab.h>
39 #include <linux/tcp.h>
43 #include <linux/if_vlan.h>
44 #include <linux/prefetch.h>
46 #include <linux/mii.h>
52 #define DRV_NAME "sky2"
53 #define DRV_VERSION "1.30"
61 #define RX_LE_SIZE 1024
62 #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
63 #define RX_MAX_PENDING (RX_LE_SIZE/6 - 2)
64 #define RX_DEF_PENDING RX_MAX_PENDING
68 #define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
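/* Worst case per skb, as the macro above spells out: two setup elements plus
 * one list element per buffer (head + MAX_SKB_FRAGS page fragments), doubled
 * when dma_addr_t is 64-bit and an extra high-address element may be needed. */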
69 #define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
70 #define TX_MAX_PENDING 1024
71 #define TX_DEF_PENDING 63
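/* TX_MIN_PENDING keeps at least one worst-case frame's worth of elements
 * (MAX_SKB_TX_LE) plus one spare slot available in the transmit ring. */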
73 #define TX_WATCHDOG (5 * HZ)
74 #define NAPI_WEIGHT 64
75 #define PHY_RETRIES 1000
77 #define SKY2_EEPROM_MAGIC 0x9955aabb
79 #define RING_NEXT(x, s) (((x)+1) & ((s)-1))
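/* RING_NEXT assumes the ring size s is a power of two, so the wrap is a
 * simple mask: e.g. RING_NEXT(1023, 1024) == 0. */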
81 static const u32 default_msg =
86 static int debug = -1;
94 static int disable_msi = 0;
98 static int legacy_pme = 0;
151 static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
152 static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
178 dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
182 dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
186 static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
206 dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
209 dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
213 static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
216 __gm_phy_read(hw, port, reg, &v);
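/* gm_phy_read() is the convenience wrapper: it calls __gm_phy_read() and
 * returns the value, discarding the error code. */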
221 static void sky2_power_on(struct sky2_hw *hw)
270 static void sky2_power_aux(struct sky2_hw *hw)
292 static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
310 static const u16 copper_fc_adv[] = {
318 static const u16 fiber_fc_adv[] = {
326 static const u16 gm_fc_disable[] = {
334 static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
359 if (sky2_is_copper(hw)) {
427 if (sky2_is_copper(hw)) {
457 switch (sky2->speed) {
476 if (sky2_is_copper(hw))
595 gm_phy_write(hw, port, 0x18, 0xaa99);
596 gm_phy_write(hw, port, 0x17, 0x2011);
600 gm_phy_write(hw, port, 0x18, 0xa204);
601 gm_phy_write(hw, port, 0x17, 0x2002);
616 gm_phy_write(hw, port, 24, 0x2800);
617 gm_phy_write(hw, port, 23, 0x2001);
639 static const struct {
665 gm_phy_write(hw, port, 1, 0x4099);
666 gm_phy_write(hw, port, 3, 0x1120);
667 gm_phy_write(hw, port, 11, 0x113c);
668 gm_phy_write(hw, port, 14, 0x8100);
669 gm_phy_write(hw, port, 15, 0x112a);
670 gm_phy_write(hw, port, 17, 0x1008);
673 gm_phy_write(hw, port, 1, 0x20b0);
679 gm_phy_write(hw, port, 17, eee_afe[i].val);
680 gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
704 static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
710 reg1 &= ~phy_power[port];
713 reg1 |= coma_mode[port];
725 static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
775 reg1 |= phy_power[port];
781 static void sky2_set_ipg(struct sky2_port *sky2)
795 static void sky2_enable_rx_tx(struct sky2_port *sky2)
798 unsigned port = sky2->port;
807 static void sky2_phy_reinit(struct sky2_port *sky2)
810 sky2_phy_init(sky2->hw, sky2->port);
811 sky2_enable_rx_tx(sky2);
816 static void sky2_wol_init(struct sky2_port *sky2)
819 unsigned port = sky2->port;
840 sky2_phy_power_up(hw, port);
841 sky2_phy_init(hw, port);
887 static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
906 static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
939 sky2_phy_power_up(hw, port);
940 sky2_phy_init(hw, port);
948 gma_read16(hw, port, i);
1032 sky2_set_tx_stfwd(hw, port);
1052 end = start + space - 1;
1061 u32 tp = space - space/4;
1070 tp = space - 2048/8;
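/* The lines above appear to derive the RAM-buffer flow-control thresholds
 * from the partition size itself (space - space/4, or space minus 2048/8),
 * so a larger partition pauses later. */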
1085 static void sky2_qset(struct sky2_hw *hw, u16 q)
1096 static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
1118 static void tx_init(struct sky2_port *sky2)
1125 netdev_reset_queue(sky2->netdev);
1127 le = get_tx_le(sky2, &sky2->tx_prod);
1134 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
1153 static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
1161 return (size - 8) / sizeof(u32);
1164 static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
1194 le = sky2_next_rx(sky2);
1199 le = sky2_next_rx(sky2);
1206 static void sky2_rx_submit(struct sky2_port *sky2,
1213 for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
1225 if (pci_dma_mapping_error(pdev, re->data_addr))
1230 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1234 skb_frag_size(frag),
1238 goto map_page_error;
1245 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1267 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1269 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1277 static void rx_set_checksum(struct sky2_port *sky2)
1285 sky2_write32(sky2->hw,
1294 static const uint32_t rss_init_key[10] = {
1295 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43,
1296 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30
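/* Ten 32-bit words = a 40-byte key, the usual length for Toeplitz-style RSS
 * hashing; the loop below presumably writes one word per hardware key
 * register. */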
1302 struct sky2_port *sky2 = netdev_priv(dev);
1314 for (i = 0; i < nkeys; i++)
1339 static void sky2_rx_stop(struct sky2_port *sky2)
1342 unsigned rxq = rxqaddr[sky2->port];
1348 for (i = 0; i < 0xffff; i++)
1353 netdev_warn(sky2->netdev, "receiver stop failed\n");
1363 static void sky2_rx_clean(struct sky2_port *sky2)
1372 sky2_rx_unmap_skb(sky2->hw->pdev, re);
1383 struct sky2_port *sky2 = netdev_priv(dev);
1387 if (!netif_running(dev))
1399 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
1408 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
1416 #define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
1420 struct sky2_port *sky2 = netdev_priv(dev);
1446 static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
1467 unsigned char *start;
1475 skb_reserve(skb, start - skb->data);
1484 skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
1494 static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
1496 sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
1499 static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
1515 dev_kfree_skb(re->skb);
1532 static void sky2_rx_start(struct sky2_port *sky2)
1536 unsigned rxq = rxqaddr[sky2->port];
1543 if (pci_is_pcie(hw->pdev))
1555 rx_set_checksum(sky2);
1562 re = sky2->rx_ring + i;
1563 sky2_rx_submit(sky2, re);
1572 thresh = sky2_get_rx_threshold(sky2);
1581 sky2_rx_update(sky2, rxq);
1606 static int sky2_alloc_buffers(struct sky2_port *sky2)
1634 return sky2_alloc_rx_skbs(sky2);
1639 static void sky2_free_buffers(struct sky2_port *sky2)
1643 sky2_rx_clean(sky2);
1657 kfree(sky2->rx_ring);
1660 sky2->rx_ring = NULL;
1663 static void sky2_hw_up(struct sky2_port *sky2)
1666 unsigned port = sky2->port;
1677 if (otherdev && netif_running(otherdev) &&
1681 cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
1683 sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
1686 sky2_mac_init(hw, port);
1689 ramsize = sky2_read8(hw, B2_E_0) * 4;
1695 rxspace = ramsize / 2;
1697 rxspace = 8 + (2*(ramsize - 16))/3;
1699 sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1700 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
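/* RAM buffer split, per the formulas above: small buffers are divided evenly,
 * otherwise RX gets roughly two thirds (8 + 2*(ramsize - 16)/3) and TX the
 * remainder. */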
1707 sky2_qset(hw, txqaddr[port]);
1718 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1724 sky2_rx_start(sky2);
1728 static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1741 napi_enable(&hw->napi);
1753 struct sky2_port *sky2 = netdev_priv(dev);
1755 unsigned port = sky2->port;
1761 err = sky2_alloc_buffers(sky2);
1766 if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name)))
1772 imask = sky2_read32(hw, B0_IMSK);
1779 imask |= portirq_msk[port];
1780 sky2_write32(hw, B0_IMSK, imask);
1783 netif_info(sky2, ifup, dev, "enabling interface\n");
1788 sky2_free_buffers(sky2);
1793 static inline int tx_inuse(const struct sky2_port *sky2)
1799 static inline int tx_avail(const struct sky2_port *sky2)
1805 static unsigned tx_le_req(const struct sk_buff *skb)
1809 count = (skb_shinfo(skb)->nr_frags + 1)
1812 if (skb_is_gso(skb))
1845 struct sky2_port *sky2 = netdev_priv(dev);
1856 if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
1859 len = skb_headlen(skb);
1862 if (pci_dma_mapping_error(hw->pdev, mapping))
1867 "tx queued, slot %u, len %d\n", slot, skb->len);
1872 le = get_tx_le(sky2, &slot);
1879 mss = skb_shinfo(skb)->gso_size;
1883 mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
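/* Note: for the TSO setup above the MSS pushed to the hardware includes the
 * Ethernet, IP and TCP header lengths (line 1883), not just gso_size. */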
1886 le = get_tx_le(sky2, &slot);
1902 le = get_tx_le(sky2, &slot);
1917 const unsigned offset = skb_transport_offset(skb);
1920 tcpsum = offset << 16;
1930 le = get_tx_le(sky2, &slot);
1944 le = get_tx_le(sky2, &slot);
1951 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1952 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1954 mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
1958 goto mapping_unwind;
1962 le = get_tx_le(sky2, &slot);
1973 le = get_tx_le(sky2, &slot);
1986 netif_stop_queue(dev);
1988 netdev_sent_queue(dev, skb->len);
1989 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1997 sky2_tx_unmap(hw->pdev, re);
2021 unsigned int bytes_compl = 0, pkts_compl = 0;
2025 for (idx = sky2->tx_cons; idx != done;
2030 sky2_tx_unmap(sky2->hw->pdev, re);
2034 "tx done %u\n", idx);
2037 bytes_compl += skb->len;
2049 netdev_completed_queue(dev, pkts_compl, bytes_compl);
2051 u64_stats_update_begin(&sky2->tx_stats.syncp);
2052 sky2->tx_stats.packets += pkts_compl;
2053 sky2->tx_stats.bytes += bytes_compl;
2054 u64_stats_update_end(&sky2->tx_stats.syncp);
2057 static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
2081 static void sky2_hw_down(struct sky2_port *sky2)
2084 unsigned port = sky2->port;
2105 port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
2119 sky2_phy_power_down(hw, port);
2122 sky2_tx_reset(hw, port);
2125 sky2_tx_complete(sky2, sky2->tx_prod);
2129 static int sky2_close(struct net_device *dev)
2131 struct sky2_port *sky2 = netdev_priv(dev);
2138 netif_info(sky2, ifdown, dev, "disabling interface\n");
2140 if (hw->ports == 1) {
2144 napi_disable(&hw->napi);
2151 imask = sky2_read32(hw, B0_IMSK);
2152 imask &= ~portirq_msk[sky2->port];
2153 sky2_write32(hw, B0_IMSK, imask);
2162 sky2_free_buffers(sky2);
2189 static void sky2_link_up(struct sky2_port *sky2)
2192 unsigned port = sky2->port;
2193 static const char *fc_name[] = {
2202 sky2_enable_rx_tx(sky2);
2215 "Link is up at %d Mbps, %s duplex, flow control %s\n",
2221 static void sky2_link_down(struct sky2_port *sky2)
2224 unsigned port = sky2->port;
2240 sky2_phy_init(hw, port);
2251 static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
2254 unsigned port = sky2->port;
2260 netdev_err(sky2->netdev, "remote fault\n");
2265 netdev_err(sky2->netdev, "speed/duplex mismatch\n");
2269 sky2->speed = sky2_phy_speed(hw, aux);
2314 static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
2317 struct sky2_port *sky2 = netdev_priv(dev);
2318 u16 istatus, phystat;
2320 if (!netif_running(dev))
2331 if (sky2_autoneg_done(sky2, phystat) == 0 &&
2332 !netif_carrier_ok(dev))
2338 sky2->speed = sky2_phy_speed(hw, phystat);
2348 sky2_link_down(sky2);
2355 static void sky2_qlink_intr(struct sky2_hw *hw)
2362 imask = sky2_read32(hw, B0_IMSK);
2364 sky2_write32(hw, B0_IMSK, imask);
2378 static void sky2_tx_timeout(struct net_device *dev)
2380 struct sky2_port *sky2 = netdev_priv(dev);
2394 static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2396 struct sky2_port *sky2 = netdev_priv(dev);
2398 unsigned port = sky2->port;
2413 if (!netif_running(dev)) {
2419 imask = sky2_read32(hw, B0_IMSK);
2423 napi_disable(&hw->napi);
2424 netif_tx_disable(dev);
2429 sky2_set_tx_stfwd(hw, port);
2434 sky2_rx_clean(sky2);
2452 err = sky2_alloc_rx_skbs(sky2);
2454 sky2_rx_start(sky2);
2456 sky2_rx_clean(sky2);
2457 sky2_write32(hw, B0_IMSK, imask);
2460 napi_enable(&hw->napi);
2467 netif_wake_queue(dev);
2473 static inline bool needs_copy(const struct rx_ring_info *re,
2476 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2491 skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
2493 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
2495 skb_copy_from_linear_data(re->skb, skb->data, length);
2501 pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
2503 re->skb->vlan_tci = 0;
2504 re->skb->rxhash = 0;
2512 static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
2513 unsigned int length)
2519 size = min(length, hdr_space);
2524 num_frags = skb_shinfo(skb)->nr_frags;
2525 for (i = 0; i < num_frags; i++) {
2530 __skb_frag_unref(frag);
2531 --skb_shinfo(skb)->nr_frags;
2535 skb_frag_size_set(frag, size);
2547 unsigned int length)
2557 if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
2561 sky2_rx_unmap_skb(sky2->hw->pdev, re);
2565 if (skb_shinfo(skb)->nr_frags)
2566 skb_put_frags(skb, hdr_space, length);
2572 dev_kfree_skb(nre.skb);
2584 struct sky2_port *sky2 = netdev_priv(dev);
2590 "rx slot %u status 0x%x len %d\n",
2591 sky2->rx_next, status, length);
2615 if (length != count)
2619 if (needs_copy(re, length))
2620 skb = receive_copy(sky2, re, length);
2622 skb = receive_new(sky2, re, length);
2627 sky2_rx_submit(sky2, re);
2632 ++dev->stats.rx_errors;
2636 "rx error, status 0x%x length %d\n", status, length);
2642 static inline void sky2_tx_done(struct net_device *dev, u16 last)
2644 struct sky2_port *sky2 = netdev_priv(dev);
2646 if (netif_running(dev)) {
2647 sky2_tx_complete(sky2, last);
2651 netif_wake_queue(dev);
2655 static inline void sky2_skb_rx(const struct sky2_port *sky2,
2664 static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
2668 struct sky2_port *sky2 = netdev_priv(dev);
2673 u64_stats_update_begin(&sky2->rx_stats.syncp);
2676 u64_stats_update_end(&sky2->rx_stats.syncp);
2679 sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
2682 static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
2697 dev_notice(&sky2->hw->pdev->dev,
2698 "%s: receive checksum problem (status = %#x)\n",
2699 sky2->netdev->name, status);
2711 static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
2715 skb = sky2->rx_ring[sky2->rx_next].skb;
2719 static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
2723 skb = sky2->rx_ring[sky2->rx_next].skb;
2728 static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2732 unsigned int total_packets[2] = { 0 };
2752 sky2 = netdev_priv(dev);
2757 switch (opcode & ~HW_OWNER) {
2759 total_packets[port]++;
2762 skb = sky2_receive(dev, length, status);
2777 sky2_skb_rx(sky2, skb);
2780 if (++work_done >= to_do)
2785 sky2_rx_tag(sky2, length);
2789 sky2_rx_tag(sky2, length);
2793 sky2_rx_checksum(sky2, status);
2797 sky2_rx_hash(sky2, status);
2802 sky2_tx_done(hw->dev[0], status & 0xfff);
2804 sky2_tx_done(hw->dev[1],
2805 ((status >> 24) & 0xff)
2806 | (u16)(length & 0xf) << 8);
2811 pr_warning("unknown status opcode 0x%x\n", opcode);
2813 } while (hw->st_idx != idx);
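/* A single TX_DONE status entry covers both ports: bits 0-11 of the status
 * word index port 0, while port 1's index is reassembled above from the top
 * status byte plus four bits of the length field. */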
2819 sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
2820 sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);
2825 static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
2830 netdev_info(dev, "hw error interrupt status 0x%x\n", status);
2834 netdev_err(dev, "ram data read parity error\n");
2841 netdev_err(dev, "ram data write parity error\n");
2848 netdev_err(dev, "MAC parity error\n");
2854 netdev_err(dev, "RX parity error\n");
2860 netdev_err(dev, "TCP segmentation error\n");
2865 static void sky2_hw_intr(struct sky2_hw *hw)
2882 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
2899 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
2906 sky2_hw_error(hw, 0, status);
2908 if (status & Y2_HWE_L1_MASK)
2909 sky2_hw_error(hw, 1, status);
2912 static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2915 struct sky2_port *sky2 = netdev_priv(dev);
2918 netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
2927 ++dev->stats.rx_fifo_errors;
2932 ++dev->stats.tx_fifo_errors;
2938 static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
2943 dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
2944 dev->name, (unsigned) q, (unsigned) idx,
2950 static int sky2_rx_hung(struct net_device *dev)
2952 struct sky2_port *sky2 = netdev_priv(dev);
2954 unsigned port = sky2->port;
2955 unsigned rxq = rxqaddr[port];
2963 ((mac_rp == sky2->check.mac_rp &&
2964 mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
2966 (fifo_rp == sky2->check.fifo_rp &&
2967 fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
2969 "hung mac %d:%d fifo %d (%d:%d)\n",
2970 mac_lev, mac_rp, fifo_lev,
2975 sky2->check.mac_rp = mac_rp;
2976 sky2->check.mac_lev = mac_lev;
2977 sky2->check.fifo_rp = fifo_rp;
2978 sky2->check.fifo_lev = fifo_lev;
2983 static void sky2_watchdog(unsigned long arg)
2988 if (sky2_read32(hw, B0_ISRC)) {
2989 napi_schedule(&hw->napi);
2993 for (i = 0; i < hw->ports; i++) {
2995 if (!netif_running(dev))
3001 sky2_rx_hung(dev)) {
3002 netdev_info(dev, "receiver hang detected\n");
3016 static void sky2_err_intr(struct sky2_hw *hw, u32 status)
3019 dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
3025 sky2_mac_intr(hw, 0);
3028 sky2_mac_intr(hw, 1);
3031 sky2_le_error(hw, 0, Q_R1);
3034 sky2_le_error(hw, 1, Q_R2);
3037 sky2_le_error(hw, 0, Q_XA1);
3040 sky2_le_error(hw, 1, Q_XA2);
3051 sky2_err_intr(hw, status);
3054 sky2_phy_intr(hw, 0);
3057 sky2_phy_intr(hw, 1);
3060 sky2_qlink_intr(hw);
3063 work_done += sky2_status_intr(hw, work_limit - work_done, idx);
3065 if (work_done >= work_limit)
3083 if (status == 0 || status == ~0) {
3090 napi_schedule(&hw->napi);
3095 #ifdef CONFIG_NET_POLL_CONTROLLER
3096 static void sky2_netpoll(struct net_device *dev)
3098 struct sky2_port *sky2 = netdev_priv(dev);
3100 napi_schedule(&sky2->hw->napi);
3105 static u32 sky2_mhz(const struct sky2_hw *hw)
3132 static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
3134 return sky2_mhz(hw) * us;
3139 return clk / sky2_mhz(hw);
3183 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
3229 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
3245 if (sky2_read8(hw, B2_E_0))
3251 static void sky2_reset(struct sky2_hw *hw)
3262 status = sky2_read16(hw, HCU_CCSR);
3271 sky2_write16(hw, HCU_CCSR, status);
3291 if (pci_is_pcie(pdev)) {
3297 dev_info(&pdev->dev, "ignoring stuck error report bit\n");
3305 for (i = 0; i < hw->ports; i++) {
3387 for (i = 0; i < hw->ports; i++)
3391 for (i = 0; i < hw->ports; i++) {
3410 for (i = 0; i < hw->ports; i++)
3411 sky2_gmac_reset(hw, i);
3450 static void sky2_detach(struct net_device *dev)
3452 if (netif_running(dev)) {
3455 netif_tx_unlock(dev);
3461 static int sky2_reattach(struct net_device *dev)
3465 if (netif_running(dev)) {
3466 err = sky2_open(dev);
3468 netdev_info(dev, "could not restart %d\n", err);
3472 sky2_set_multicast(dev);
3479 static void sky2_all_down(struct sky2_hw *hw)
3488 napi_disable(&hw->napi);
3491 for (i = 0; i < hw->ports; i++) {
3493 struct sky2_port *sky2 = netdev_priv(dev);
3495 if (!netif_running(dev))
3499 netif_tx_disable(dev);
3504 static void sky2_all_up(struct sky2_hw *hw)
3509 for (i = 0; i < hw->ports; i++) {
3511 struct sky2_port *sky2 = netdev_priv(dev);
3513 if (!netif_running(dev))
3517 sky2_set_multicast(dev);
3518 imask |= portirq_msk[i];
3519 netif_wake_queue(dev);
3523 sky2_write32(hw, B0_IMSK, imask);
3526 napi_enable(&hw->napi);
3543 static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3550 const struct sky2_port *sky2 = netdev_priv(dev);
3558 struct sky2_port *sky2 = netdev_priv(dev);
3560 bool enable_wakeup = false;
3563 if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
3564 !device_can_wakeup(&hw->pdev->dev))
3569 for (i = 0; i < hw->ports; i++) {
3571 struct sky2_port *sky2 = netdev_priv(dev);
3574 enable_wakeup = true;
3581 static u32 sky2_supported_modes(const struct sky2_hw *hw)
3583 if (sky2_is_copper(hw)) {
3600 struct sky2_port *sky2 = netdev_priv(dev);
3604 ecmd->supported = sky2_supported_modes(hw);
3606 if (sky2_is_copper(hw)) {
3608 ethtool_cmd_speed_set(ecmd, sky2->speed);
3625 struct sky2_port *sky2 = netdev_priv(dev);
3627 u32 supported = sky2_supported_modes(hw);
3633 if (sky2_is_copper(hw))
3647 u32 speed = ethtool_cmd_speed(ecmd);
3679 if ((setting & supported) == 0)
3682 sky2->speed = speed;
3687 if (netif_running(dev)) {
3688 sky2_phy_reinit(sky2);
3689 sky2_set_multicast(dev);
3695 static void sky2_get_drvinfo(struct net_device *dev,
3698 struct sky2_port *sky2 = netdev_priv(dev);
3706 static const struct sky2_stat {
3752 struct sky2_port *sky2 = netdev_priv(netdev);
3756 static int sky2_nway_reset(struct net_device *dev)
3758 struct sky2_port *sky2 = netdev_priv(dev);
3763 sky2_phy_reinit(sky2);
3764 sky2_set_multicast(dev);
3769 static void sky2_phy_stats(struct sky2_port *sky2, u64 *data, unsigned count)
3772 unsigned port = sky2->port;
3778 for (i = 2; i < count; i++)
3779 data[i] = get_stats32(hw, port, sky2_stats[i].offset);
3784 struct sky2_port *sky2 = netdev_priv(netdev);
3788 static int sky2_get_sset_count(struct net_device *dev, int sset)
3798 static void sky2_get_ethtool_stats(struct net_device *dev,
3801 struct sky2_port *sky2 = netdev_priv(dev);
3806 static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3810 switch (stringset) {
3819 static int sky2_set_mac_address(struct net_device *dev, void *p)
3821 struct sky2_port *sky2 = netdev_priv(dev);
3823 unsigned port = sky2->port;
3826 if (!is_valid_ether_addr(addr->sa_data))
3844 static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
3849 filter[bit >> 3] |= 1 << (bit & 7);
3852 static void sky2_set_multicast(struct net_device *dev)
3854 struct sky2_port *sky2 = netdev_priv(dev);
3856 unsigned port = sky2->port;
3861 static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
3864 memset(filter, 0, sizeof(filter));
3872 memset(filter, 0xff, sizeof(filter));
3879 sky2_add_filter(filter, pause_mc_addr);
3882 sky2_add_filter(filter, ha->addr);
3886 (u16) filter[0] | ((u16) filter[1] << 8));
3888 (u16) filter[2] | ((u16) filter[3] << 8));
3890 (u16) filter[4] | ((u16) filter[5] << 8));
3892 (u16) filter[6] | ((u16) filter[7] << 8));
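/* The 64-bit multicast hash filter built above is programmed as four 16-bit
 * writes, each packing two adjacent filter[] bytes little-endian. */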
3900 struct sky2_port *sky2 = netdev_priv(dev);
3902 unsigned port = sky2->port;
3904 u64 _bytes, _packets;
3907 start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
3910 } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
3912 stats->rx_packets = _packets;
3913 stats->rx_bytes = _bytes;
3916 start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
3919 } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
3921 stats->tx_packets = _packets;
3922 stats->tx_bytes = _bytes;
3927 stats->collisions = get_stats32(hw, port, GM_TXF_COL);
3931 stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
3935 stats->rx_dropped = dev->stats.rx_dropped;
3936 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
3937 stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
3948 unsigned port = sky2->port;
4002 static int sky2_set_phys_id(struct net_device *dev,
4005 struct sky2_port *sky2 = netdev_priv(dev);
4024 static void sky2_get_pauseparam(struct net_device *dev,
4027 struct sky2_port *sky2 = netdev_priv(dev);
4047 static int sky2_set_pauseparam(struct net_device *dev,
4050 struct sky2_port *sky2 = netdev_priv(dev);
4059 if (netif_running(dev))
4060 sky2_phy_reinit(sky2);
4065 static int sky2_get_coalesce(struct net_device *dev,
4068 struct sky2_port *sky2 = netdev_priv(dev);
4100 static int sky2_set_coalesce(struct net_device *dev,
4103 struct sky2_port *sky2 = netdev_priv(dev);
4105 const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
4153 static unsigned long roundup_ring_size(unsigned long pending)
4158 static void sky2_get_ringparam(struct net_device *dev,
4161 struct sky2_port *sky2 = netdev_priv(dev);
4170 static int sky2_set_ringparam(struct net_device *dev,
4173 struct sky2_port *sky2 = netdev_priv(dev);
4187 return sky2_reattach(dev);
4190 static int sky2_get_regs_len(struct net_device *dev)
4195 static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
4214 return hw->ports > 1;
4246 const struct sky2_port *sky2 = netdev_priv(dev);
4252 for (b = 0; b < 128; b++) {
4256 else if (sky2_reg_access_ok(sky2->hw, b))
4266 static int sky2_get_eeprom_len(struct net_device *dev)
4268 struct sky2_port *sky2 = netdev_priv(dev);
4276 static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
4278 unsigned long start = jiffies;
4292 static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
4293 u16 offset, size_t length)
4297 while (length > 0) {
4301 rc = sky2_vpd_wait(hw, cap, 0);
4307 memcpy(data, &val, min(sizeof(val), length));
4308 offset += sizeof(u32);
4309 data += sizeof(u32);
4310 length -= sizeof(u32);
4316 static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
4317 u16 offset, unsigned int length)
4322 for (i = 0; i < length; i += sizeof(u32)) {
4323 u32 val = *(u32 *)(data + i);
4326 sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
4328 rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
4338 struct sky2_port *sky2 = netdev_priv(dev);
4346 return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
4352 struct sky2_port *sky2 = netdev_priv(dev);
4362 if ((eeprom->offset & 3) || (eeprom->len & 3))
4365 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
4371 const struct sky2_port *sky2 = netdev_priv(dev);
4378 netdev_info(dev, "checksum offload not possible with jumbo frames\n");
4383 if ((features & NETIF_F_RXHASH) &&
4386 netdev_info(dev, "receive hashing forces receive checksum\n");
4395 struct sky2_port *sky2 = netdev_priv(dev);
4400 sky2_write32(sky2->hw,
4402 (features & NETIF_F_RXCSUM)
4406 if (changed & NETIF_F_RXHASH)
4407 rx_set_rss(dev, features);
4410 sky2_vlan_mode(dev, features);
4415 static const struct ethtool_ops sky2_ethtool_ops = {
4416 .get_settings = sky2_get_settings,
4417 .set_settings = sky2_set_settings,
4418 .get_drvinfo = sky2_get_drvinfo,
4419 .get_wol = sky2_get_wol,
4420 .set_wol = sky2_set_wol,
4421 .get_msglevel = sky2_get_msglevel,
4422 .set_msglevel = sky2_set_msglevel,
4423 .nway_reset = sky2_nway_reset,
4424 .get_regs_len = sky2_get_regs_len,
4425 .get_regs = sky2_get_regs,
4427 .get_eeprom_len = sky2_get_eeprom_len,
4428 .get_eeprom = sky2_get_eeprom,
4429 .set_eeprom = sky2_set_eeprom,
4430 .get_strings = sky2_get_strings,
4431 .get_coalesce = sky2_get_coalesce,
4432 .set_coalesce = sky2_set_coalesce,
4433 .get_ringparam = sky2_get_ringparam,
4434 .set_ringparam = sky2_set_ringparam,
4435 .get_pauseparam = sky2_get_pauseparam,
4436 .set_pauseparam = sky2_set_pauseparam,
4437 .set_phys_id = sky2_set_phys_id,
4438 .get_sset_count = sky2_get_sset_count,
4439 .get_ethtool_stats = sky2_get_ethtool_stats,
4442 #ifdef CONFIG_SKY2_DEBUG
4444 static struct dentry *sky2_debug;
4450 #define VPD_SIZE 128
4451 #define VPD_MAGIC 0x82
4453 static const struct vpd_tag {
4457 {
"PN",
"Part Number" },
4458 {
"EC",
"Engineering Level" },
4459 {
"MN",
"Manufacturer" },
4460 {
"SN",
"Serial Number" },
4461 {
"YA",
"Asset Tag" },
4462 {
"VL",
"First Error Log Message" },
4463 {
"VF",
"Second Error Log Message" },
4464 {
"VB",
"Boot Agent ROM Configuration" },
4465 {
"VE",
"EFI UNDI Configuration" },
4487 seq_puts(seq, "VPD read failed\n");
4491 if (buf[0] != VPD_MAGIC) {
4492 seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
4496 if (len == 0 || len > vpd_size - 4) {
4497 seq_printf(seq, "Invalid id length: %d\n", len);
4504 while (offs < vpd_size - 4) {
4507 if (!memcmp("RW", buf + offs, 2))
4509 len = buf[offs + 2];
4510 if (offs + len + 3 >= vpd_size)
4514 if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
4516 vpd_tags[i].label, len, buf + offs + 3);
4526 static int sky2_debug_show(struct seq_file *seq, void *v)
4529 const struct sky2_port *sky2 = netdev_priv(dev);
4531 unsigned port = sky2->port;
4535 sky2_show_vpd(seq, hw);
4537 seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
4542 if (!netif_running(dev)) {
4547 napi_disable(&hw->napi);
4552 seq_puts(seq, "Status ring (empty)\n");
4564 seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
4571 for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
4580 switch (le->opcode & ~HW_OWNER) {
4613 seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
4619 napi_enable(&hw->napi);
4630 .open = sky2_debug_open,
4644 struct sky2_port *sky2 = netdev_priv(dev);
4646 if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
4651 if (sky2->debugfs) {
4653 sky2_debug, dev->name);
4658 if (sky2->debugfs) {
4661 sky2->debugfs = NULL;
4669 if (IS_ERR(sky2->debugfs))
4670 sky2->debugfs = NULL;
4686 if (!ent || IS_ERR(ent))
4703 #define sky2_debug_init()
4704 #define sky2_debug_cleanup()
4711 .ndo_open = sky2_open,
4712 .ndo_stop = sky2_close,
4713 .ndo_start_xmit = sky2_xmit_frame,
4714 .ndo_do_ioctl = sky2_ioctl,
4716 .ndo_set_mac_address = sky2_set_mac_address,
4717 .ndo_set_rx_mode = sky2_set_multicast,
4718 .ndo_change_mtu = sky2_change_mtu,
4719 .ndo_fix_features = sky2_fix_features,
4720 .ndo_set_features = sky2_set_features,
4721 .ndo_tx_timeout = sky2_tx_timeout,
4722 .ndo_get_stats64 = sky2_get_stats,
4723 #ifdef CONFIG_NET_POLL_CONTROLLER
4724 .ndo_poll_controller = sky2_netpoll,
4728 .ndo_open = sky2_open,
4729 .ndo_stop = sky2_close,
4730 .ndo_start_xmit = sky2_xmit_frame,
4731 .ndo_do_ioctl = sky2_ioctl,
4733 .ndo_set_mac_address = sky2_set_mac_address,
4734 .ndo_set_rx_mode = sky2_set_multicast,
4735 .ndo_change_mtu = sky2_change_mtu,
4736 .ndo_fix_features = sky2_fix_features,
4737 .ndo_set_features = sky2_set_features,
4738 .ndo_tx_timeout = sky2_tx_timeout,
4739 .ndo_get_stats64 = sky2_get_stats,
4749 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
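/* The private area of this net_device is the per-port struct sky2_port that
 * netdev_priv() returns throughout the driver. */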
4760 sky2 = netdev_priv(dev);
4812 const struct sky2_port *sky2 = netdev_priv(dev);
4859 dev_info(&pdev->dev, "No interrupt generated using MSI, "
4860 "switching to INTx mode.\n");
4875 static const char *sky2_name(u8 chipid, char *buf, int sz)
4877 const char *name[] = {
4895 snprintf(buf, sz, "(chip %#x)", chipid);
4904 int err, using_dac = 0, wol_default;
4910 dev_err(&pdev->dev, "cannot enable PCI device\n");
4921 dev_err(&pdev->dev, "PCI read config failed\n");
4926 dev_err(&pdev->dev, "PCI configuration read error\n");
4933 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
4934 goto err_out_disable;
4942 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4944 dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
4945 "for consistent allocations\n");
4946 goto err_out_free_regions;
4951 dev_err(&pdev->dev, "no usable DMA configuration\n");
4952 goto err_out_free_regions;
4964 dev_err(&pdev->dev, "PCI write config failed\n");
4965 goto err_out_free_regions;
4969 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4976 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4977 goto err_out_free_regions;
4985 dev_err(&pdev->dev, "cannot map device registers\n");
4986 goto err_out_free_hw;
4989 err = sky2_init(hw);
4991 goto err_out_iounmap;
5002 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
5007 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
5010 goto err_out_free_pci;
5013 if (!disable_msi && pci_enable_msi(pdev) == 0) {
5014 err = sky2_test_msi(hw);
5018 goto err_out_free_netdev;
5023 dev_err(&pdev->dev, "cannot register net device\n");
5024 goto err_out_free_netdev;
5031 sky2_show_addr(dev);
5033 if (hw->ports > 1) {
5034 dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
5037 goto err_out_unregister;
5042 dev_err(&pdev->dev, "cannot register second net device\n");
5043 goto err_out_free_dev1;
5046 err = sky2_setup_irq(hw, hw->irq_name);
5048 goto err_out_unregister_dev1;
5050 sky2_show_addr(dev1);
5056 pci_set_drvdata(pdev, hw);
5061 err_out_unregister_dev1:
5069 err_out_free_netdev:
5080 err_out_free_regions:
5085 pci_set_drvdata(pdev, NULL);
5091 struct sky2_hw *hw = pci_get_drvdata(pdev);
5100 for (i = hw->ports-1; i >= 0; --i)
5111 if (hw->ports > 1) {
5112 napi_disable(&hw->napi);
5123 for (i = hw->ports-1; i >= 0; --i)
5129 pci_set_drvdata(pdev, NULL);
5132 static int sky2_suspend(struct device *dev)
5135 struct sky2_hw *hw = pci_get_drvdata(pdev);
5147 for (i = 0; i < hw->ports; i++) {
5149 struct sky2_port *sky2 = netdev_priv(dev);
5152 sky2_wol_init(sky2);
5161 #ifdef CONFIG_PM_SLEEP
5162 static int sky2_resume(struct device *dev)
5165 struct sky2_hw *hw = pci_get_drvdata(pdev);
5174 dev_err(&pdev->dev, "PCI write config failed\n");
5186 dev_err(&pdev->dev, "resume failed (%d)\n", err);
5192 #define SKY2_PM_OPS (&sky2_pm_ops)
5196 #define SKY2_PM_OPS NULL
5199 static void sky2_shutdown(struct pci_dev *pdev)
5201 sky2_suspend(&pdev->dev);
5208 .id_table = sky2_id_table,
5209 .probe = sky2_probe,
5211 .shutdown = sky2_shutdown,
5215 static int __init sky2_init_module(void)
5220 return pci_register_driver(&sky2_driver);
5223 static void __exit sky2_cleanup_module(void)