#include <linux/module.h>

#define DRV_VERSION		"1.01"

#define PCI_DEVICE_ID_INTEL_IOH1_GBE	0x8802
#define PCH_GBE_MAR_ENTRIES		16
#define PCH_GBE_SHORT_PKT		64
#define DSC_INIT16			0xC000
#define PCH_GBE_DMA_ALIGN		0
#define PCH_GBE_DMA_PADDING		2
#define PCH_GBE_WATCHDOG_PERIOD		(5 * HZ)	/* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT	256
#define PCH_GBE_PCI_BAR			1
#define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2 MB */

#define PCI_VENDOR_ID_ROHM		0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE	0x8013
#define PCI_DEVICE_ID_ROHM_ML7831_GBE	0x8802

#define PCH_GBE_TX_WEIGHT	64
#define PCH_GBE_RX_WEIGHT	64
#define PCH_GBE_RX_BUFFER_WRITE	16

/* Initialize the wake-on-LAN settings */
#define PCH_GBE_WL_INIT_SETTING	(PCH_GBE_WLC_MP)
#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
	PCH_GBE_CHIP_TYPE_INTERNAL | \
	PCH_GBE_RGMII_MODE_RGMII     \
	)
#define PCH_GBE_MAX_RX_BUFFER_SIZE	0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE	10318
#define PCH_GBE_FRAME_SIZE_2048		2048
#define PCH_GBE_FRAME_SIZE_4096		4096
#define PCH_GBE_FRAME_SIZE_8192		8192

#define PCH_GBE_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
#define PCH_GBE_RX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
#define PCH_GBE_TX_DESC(R, i)		PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
#define PCH_GBE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)
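/*
 * Worked example (illustrative, not from the original source): with a
 * ring of count = 8, next_to_use = 6 and next_to_clean = 2, the clean
 * pointer has wrapped behind the use pointer, so
 * PCH_GBE_DESC_UNUSED = 8 + 2 - 6 - 1 = 3 free descriptors.  One slot
 * is always left unused so a completely full ring can be told apart
 * from an empty one.
 */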
#define PCH_GBE_PAUSE_PKT1_VALUE	0x00C28001
#define PCH_GBE_PAUSE_PKT2_VALUE	0x00000100
#define PCH_GBE_PAUSE_PKT4_VALUE	0x01000888
#define PCH_GBE_PAUSE_PKT5_VALUE	0x0000FFFF
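/*
 * Interpretation (added, not stated in the original source): read as
 * little-endian byte streams these values spell out a standard 802.3x
 * PAUSE frame.  PKT1/PKT2 yield the reserved multicast destination
 * 01:80:C2:00:00:01, PKT4 gives EtherType 0x8808 with opcode 0x0001,
 * and PKT5 supplies the maximum pause quanta 0xFFFF.
 */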
#define PCH_GBE_INT_ENABLE_MASK ( \
	PCH_GBE_INT_RX_DMA_CMPLT | \
	PCH_GBE_INT_RX_DSC_EMP   | \
	PCH_GBE_INT_RX_FIFO_ERR  | \
	PCH_GBE_INT_WOL_DET      | \
	PCH_GBE_INT_TX_CMPLT     \
	)

#define PCH_GBE_INT_DISABLE_ALL		0
#ifdef CONFIG_PCH_PTP
/* Macros for IEEE 1588 mode selection */
#define MASTER_MODE		(1<<0)
#define SLAVE_MODE		(0)
#define V2_MODE			(1<<31)
#define CAP_MODE0		(0)
#define CAP_MODE2		(1<<17)

/* Flags for PTP timestamp snapshot locking */
#define TX_SNAPSHOT_LOCKED	(1<<0)
#define RX_SNAPSHOT_LOCKED	(1<<1)

#define PTP_L4_MULTICAST_SA	"01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA	"01:1b:19:00:00:00"
static void pch_gbe_set_multi(struct net_device *netdev);

#ifdef CONFIG_PCH_PTP
	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == *hi &&

	seq = (hi >> 16) & 0xffff;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
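	/*
	 * Note (added): skb_hwtstamps() points into the skb's shared info,
	 * which may still carry stale timestamp state from a previous
	 * owner, hence the memset before the nanosecond snapshot is stored
	 * as a ktime_t via ns_to_ktime().
	 */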
	shtx = skb_shinfo(skb);

	for (cnt = 0; cnt < 100; cnt++) {

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);

	switch (cfg.tx_type) {

	switch (cfg.rx_filter) {

		strcpy(station, PTP_L4_MULTICAST_SA);

		strcpy(station, PTP_L2_MULTICAST_SA);
	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
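	/*
	 * Worked example (illustrative): if the registers read back
	 * adr1a = 0x44332211 and adr1b = 0x00006655, the shifts above
	 * unpack little-endian into the station address 11:22:33:44:55:66.
	 */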
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)

	while ((ioread32(reg) & bit) && --tmp)

		pr_err("Error: busy bit is not cleared\n");
	u32 mar_low, mar_high, adrmask;

	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));

	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);

	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)

#ifdef PCH_GBE_MAC_IFOP_RGMII

	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)

static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)

static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)

	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);

	for (i = 1; i < mar_count; i++) {
static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
					    u8 *mc_addr_list, u32 mc_addr_count,
					    u32 mar_used_count, u32 mar_total_num)

	for (i = mar_used_count; i < mar_total_num; i++) {

		pch_gbe_mac_mar_set(hw, mc_addr_list, i);

		  &hw->reg->ADDR_MASK);
		pr_err("Flow control param set incorrectly\n");

	pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",

static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)

	pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
	for (i = 100; i; --i) {

		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);

		   dir | data), &hw->reg->MIIM);
	for (i = 0; i < 100; i++) {

	spin_unlock_irqrestore(&hw->miim_lock, flags);
	pr_debug("PHY %s: reg=%d, data=0x%04X\n",

	return (u16) data_out;
static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)

	unsigned long tmp2, tmp3;

	tmp2 = hw->mac.addr[1];
	tmp2 = (tmp2 << 8) | hw->mac.addr[0];

	tmp3 = hw->mac.addr[5];
	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
	tmp3 = (tmp3 << 8) | hw->mac.addr[2];

	pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
	bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
	stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
	/* BMSR bits are latched; read twice to get the current state */
	stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
	if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))

	adapter->hw.phy.addr = adapter->mii.phy_id;

		if (addr != adapter->mii.phy_id) {
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,

			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
			pch_gbe_mdio_write(netdev, addr, MII_BMCR,

	adapter->mii.phy_id_mask = 0x1F;
	adapter->mii.reg_num_mask = 0x1F;

	adapter->mii.mdio_read = pch_gbe_mdio_read;
	adapter->mii.mdio_write = pch_gbe_mdio_write;
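/*
 * With mii_if_info wired up as above, the generic MII library can be
 * used against this device.  A minimal sketch (assumes an initialized
 * adapter; not code from the original driver):
 *
 *	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
 *
 *	mii_ethtool_gset(&adapter->mii, &cmd);	// fills speed/duplex
 *	speed = ethtool_cmd_speed(&cmd);	// e.g. SPEED_1000
 *
 * The watchdog below follows exactly this pattern on link-up.
 */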
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)

static void pch_gbe_mdio_write(struct net_device *netdev,
			       int addr, int reg, int data)
	pch_gbe_mac_reset_hw(&adapter->hw);

	pch_gbe_set_multi(adapter->netdev);

		pr_err("Hardware Error\n");
	u32 tdba, tdlen, dctrl;

	pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
		 (unsigned long long)adapter->tx_ring->dma,

	tdlen = adapter->tx_ring->size - 0x10;
	u32 rdba, rdlen, rxdma;

	pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
		 (unsigned long long)adapter->rx_ring->dma,

	pch_gbe_disable_mac_rx(hw);

	pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",

	rdlen = adapter->rx_ring->size - 0x10;
static void pch_gbe_unmap_and_free_tx_resource(

	if (buffer_info->mapped) {

		buffer_info->mapped = false;

	if (buffer_info->skb) {

static void pch_gbe_unmap_and_free_rx_resource(

	if (buffer_info->mapped) {

		buffer_info->mapped = false;

	if (buffer_info->skb) {
	for (i = 0; i < tx_ring->count; i++) {

		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);

	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);

	for (i = 0; i < rx_ring->count; i++) {

		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);

	pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
	unsigned long rgmii = 0;

#ifdef PCH_GBE_MAC_IFOP_RGMII

	unsigned long mode = 0;
static void pch_gbe_watchdog(unsigned long data)

	pr_debug("right now = %ld\n", jiffies);

	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {

			pr_err("ethtool get setting Error\n");

		hw->mac.link_speed = ethtool_cmd_speed(&cmd);

		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);

		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);

			    "Link is Up %d Mbps %s-Duplex\n",

		netif_wake_queue(netdev);

		   (netif_carrier_ok(netdev))) {

		netif_stop_queue(netdev);
	unsigned int frame_ctrl;
	unsigned int ring_num;

		struct iphdr *iph = ip_hdr(skb);

		offset = skb_transport_offset(skb);

			tcp_hdr(skb)->check = 0;
				     skb->len - offset, 0);
			tcp_hdr(skb)->check =

			udp_hdr(skb)->check = 0;
				     skb->len - offset, 0);
			udp_hdr(skb)->check =
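			/*
			 * The elided assignments above follow the standard
			 * software checksum fallback: zero the L4 checksum
			 * field, sum the L4 segment, then fold in the IPv4
			 * pseudo-header.  Sketch of that shape for TCP (my
			 * reconstruction of the usual pattern, not the
			 * driver's exact lines):
			 *
			 *	sum = csum_partial(skb->data + offset,
			 *			   skb->len - offset, 0);
			 *	tcp_hdr(skb)->check =
			 *		csum_tcpudp_magic(iph->saddr,
			 *				  iph->daddr,
			 *				  skb->len - offset,
			 *				  IPPROTO_TCP, sum);
			 */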
	tmp_skb = buffer_info->skb;

		pr_err("TX DMA map failed\n");
		buffer_info->dma = 0;

	buffer_info->mapped = true;

		  &hw->reg->TX_DSC_SW_P);

#ifdef CONFIG_PCH_PTP
	pch_tx_timestamp(adapter, skb);
	unsigned long flags;

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)

static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
static irqreturn_t pch_gbe_intr(int irq, void *data)

	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);

		adapter->stats.intr_rx_frame_err_count++;

		adapter->stats.intr_rx_fifo_err_count++;

		iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),

		pch_gbe_disable_dma_rx(&adapter->hw);

		adapter->stats.intr_rx_dma_err_count++;

		adapter->stats.intr_tx_fifo_err_count++;

		adapter->stats.intr_tx_dma_err_count++;

		adapter->stats.intr_tcpip_err_count++;

		adapter->stats.intr_rx_dsc_empty_count++;
		pr_debug("Rx descriptor is empty\n");

		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
		if (hw->mac.tx_fc_enable) {

			pch_gbe_mac_set_pause_packet(hw);

	if (likely(napi_schedule_prep(&adapter->napi))) {

	pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
	while ((cleaned_count--)) {

		skb = netdev_alloc_skb(netdev, bufsz);

			adapter->stats.rx_alloc_buff_failed++;

			buffer_info->dma = 0;
			adapter->stats.rx_alloc_buff_failed++;

		buffer_info->mapped = true;

		pr_debug("i = %d buffer_info->dma = 0x%08llx buffer_info->length = 0x%x\n",
			 i, (unsigned long long)buffer_info->dma,

	i = (rx_ring->count - 1);

		  &hw->reg->RX_DSC_SW_P);
		pr_err("Unable to allocate memory for the receive pool buffer\n");

	for (i = 0; i < rx_ring->count; i++) {

		buffer_info->length = bufsz;

	for (i = 0; i < tx_ring->count; i++) {

		skb = netdev_alloc_skb(adapter->netdev, bufsz);
	unsigned int cleaned_count = 0;
	bool cleaned = false;

	pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",

		pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n",

		if (++k >= tx_ring->count)
			k = 0;

	if (j < PCH_GBE_TX_WEIGHT) {
		pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",

		skb = buffer_info->skb;

			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Abort Error\n");

			adapter->stats.tx_carrier_errors++;
			pr_err("Transfer Carrier Sense Error\n");

			adapter->stats.tx_aborted_errors++;
			pr_err("Transfer Collision Abort Error\n");

			adapter->stats.collisions++;
			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;

			adapter->stats.tx_packets++;
			adapter->stats.tx_bytes += skb->len;

		if (buffer_info->mapped) {
			pr_debug("unmap buffer_info->dma : %d\n", i);

			buffer_info->mapped = false;

		if (buffer_info->skb) {
			pr_debug("trim buffer_info->skb : %d\n", i);

		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {

	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",

	if (cleaned_count > 0) {

		netif_wake_queue(adapter->netdev);
		adapter->stats.tx_restart_count++;

	spin_unlock(&tx_ring->tx_lock);
			 int *work_done, int work_to_do)

	unsigned int cleaned_count = 0;
	bool cleaned = false;

	while (*work_done < work_to_do) {

		skb = buffer_info->skb;

		buffer_info->mapped = false;

		pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x] BufInf = 0x%p\n",
			 i, dma_status, gbec_status, tcp_ip_status,

			adapter->stats.rx_frame_errors++;
			pr_err("Receive Not Octal Error\n");

			adapter->stats.rx_frame_errors++;
			pr_err("Receive Nibble Error\n");

			adapter->stats.rx_crc_errors++;
			pr_err("Receive CRC Error\n");

			length = length - 4;

			adapter->stats.rx_packets++;

				adapter->stats.multicast++;

#ifdef CONFIG_PCH_PTP
			pch_rx_timestamp(adapter, skb);

			pr_debug("Receive skb->ip_summed: %d length: %d\n",

			pch_gbe_alloc_rx_buffers(adapter, rx_ring,

		if (++i == rx_ring->count)

	pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
	if (!tx_ring->desc) {
		pr_err("Unable to allocate memory for the transmit descriptor ring\n");

	for (desNo = 0; desNo < tx_ring->count; desNo++) {

	pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
		 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
		 tx_ring->desc, (unsigned long long)tx_ring->dma,

	if (!rx_ring->desc) {
		pr_err("Unable to allocate memory for the receive descriptor ring\n");

	for (desNo = 0; desNo < rx_ring->count; desNo++) {

	pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
		 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
		 rx_ring->desc, (unsigned long long)rx_ring->dma,
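/*
 * The NULL checks above guard a coherent DMA allocation of each
 * descriptor ring.  A minimal self-contained sketch of that allocation,
 * assuming the ring is sized as count * descriptor size (field names as
 * in the fragments above; the call shape is the generic kernel DMA API,
 * not necessarily this driver's exact line):
 */
static inline int example_alloc_tx_ring(struct pci_dev *pdev,
					struct pch_gbe_tx_ring *tx_ring)
{
	tx_ring->size = tx_ring->count * sizeof(struct pch_gbe_tx_desc);
	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	return tx_ring->desc ? 0 : -ENOMEM;	/* caller logs and bails */
}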
	pch_gbe_clean_tx_ring(adapter, tx_ring);

	pch_gbe_clean_rx_ring(adapter, rx_ring);
	err = pci_enable_msi(adapter->pdev);

		pr_debug("call pci_enable_msi - Error: %d\n", err);

			  flags, netdev->name, netdev);

		pr_err("Unable to allocate interrupt Error: %d\n", err);
	pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		pr_err("Error: Invalid MAC address\n");

	pch_gbe_set_multi(netdev);

	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);

		pr_err("Error: can't bring device up - irq request failed\n");

	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);

		pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");

	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);

	pch_gbe_enable_dma_rx(&adapter->hw);
	pch_gbe_enable_mac_rx(&adapter->hw);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	pch_gbe_free_irq(adapter);
	napi_disable(&adapter->napi);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	netif_stop_queue(netdev);

	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
		pr_err("Hardware Initialization Failure\n");

	if (pch_gbe_alloc_queues(adapter)) {
		pr_err("Unable to allocate memory for queues\n");

	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
		 hw->mac.min_frame_size, hw->mac.max_frame_size);
static int pch_gbe_open(struct net_device *netdev)

static int pch_gbe_stop(struct net_device *netdev)
	unsigned long flags;

		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY next_to_use : 0x%08x next_to_clean : 0x%08x\n",

	pch_gbe_tx_queue(adapter, tx_ring, skb);
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	return &netdev->stats;
static void pch_gbe_set_multi(struct net_device *netdev)

	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
static int pch_gbe_set_mac(struct net_device *netdev, void *addr)

	if (!is_valid_ether_addr(skaddr->sa_data)) {

		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	pr_debug("ret_val : 0x%08x\n", ret_val);

	pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
	pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||

		pr_err("Invalid MTU setting\n");

	if (netif_running(netdev)) {

		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;

		netdev->mtu = new_mtu;
		adapter->hw.mac.max_frame_size = max_frame;

	pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
		 adapter->hw.mac.max_frame_size);
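/*
 * Worked example (illustrative, assuming the usual Ethernet overhead
 * math of max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN): the default
 * MTU of 1500 gives max_frame = 1500 + 14 + 4 = 1518 bytes, comfortably
 * above the ETH_ZLEN + ETH_FCS_LEN = 64-byte floor checked above and
 * within the PCH_GBE_FRAME_SIZE_2048 receive-buffer class.
 */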
static int pch_gbe_set_features(struct net_device *netdev,

	if (netif_running(netdev))

static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
#ifdef CONFIG_PCH_PTP

		return hwtstamp_ioctl(netdev, ifr, cmd);

static void pch_gbe_tx_timeout(struct net_device *netdev)

	adapter->stats.tx_timeout_count++;
	bool poll_end_flag = false;
	bool cleaned = false;

	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

	if (work_done < budget)
		poll_end_flag = true;

	if (poll_end_flag) {

		pch_gbe_irq_enable(adapter);

		pch_gbe_enable_dma_rx(&adapter->hw);

	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
		 poll_end_flag, work_done, budget);
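	/*
	 * Note on the completion path above (summary, not original text):
	 * when less than the budget was consumed the poll is finished, so
	 * NAPI is taken off the schedule and device interrupts re-armed;
	 * RX DMA is re-enabled here because the ISR stops it on
	 * descriptor-empty and FIFO-overrun events.
	 */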
#ifdef CONFIG_NET_POLL_CONTROLLER

static void pch_gbe_netpoll(struct net_device *netdev)

	pch_gbe_intr(adapter->pdev->irq, netdev);
	.ndo_open = pch_gbe_open,
	.ndo_stop = pch_gbe_stop,
	.ndo_start_xmit = pch_gbe_xmit_frame,
	.ndo_get_stats = pch_gbe_get_stats,
	.ndo_set_mac_address = pch_gbe_set_mac,
	.ndo_tx_timeout = pch_gbe_tx_timeout,
	.ndo_change_mtu = pch_gbe_change_mtu,
	.ndo_set_features = pch_gbe_set_features,
	.ndo_do_ioctl = pch_gbe_ioctl,
	.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = pch_gbe_netpoll,
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev))

	struct net_device *netdev = pci_get_drvdata(pdev);

		pr_err("Cannot re-enable PCI device after reset\n");

	pci_enable_wake(pdev, PCI_D0, 0);

	pch_gbe_mac_set_wol_event(hw, 0);

static void pch_gbe_io_resume(struct pci_dev *pdev)

	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {

			pr_debug("can't bring device back up after reset\n");
static int __pch_gbe_suspend(struct pci_dev *pdev)

	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev))

		pch_gbe_set_multi(netdev);
		pch_gbe_setup_rctl(adapter);
		pch_gbe_configure_rx(adapter);
		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
				       hw->mac.link_duplex);
		pch_gbe_set_mode(adapter, hw->mac.link_speed,
				 hw->mac.link_duplex);
		pch_gbe_mac_set_wol_event(hw, wufc);

		pch_gbe_mac_set_wol_event(hw, wufc);

	return __pch_gbe_suspend(pdev);

	struct net_device *netdev = pci_get_drvdata(pdev);

		pr_err("Cannot enable PCI device from suspend\n");

	pch_gbe_mac_set_wol_event(hw, 0);

	if (netif_running(netdev))

static void pch_gbe_shutdown(struct pci_dev *pdev)

	__pch_gbe_suspend(pdev);
static void pch_gbe_remove(struct pci_dev *pdev)

	struct net_device *netdev = pci_get_drvdata(pdev);

static int pch_gbe_probe(struct pci_dev *pdev,

	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pdev,

				"configuration, aborting\n");
			goto err_disable_device;

			"ERR: Can't reserve PCI I/O and memory resources\n");
		goto err_disable_device;

		goto err_release_pci;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	if (!adapter->hw.reg) {

		goto err_free_netdev;

#ifdef CONFIG_PCH_PTP

	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
		pr_err("Bad ptp filter\n");

	pch_gbe_mac_reset_hw(&adapter->hw);

	ret = pch_gbe_sw_init(adapter);

	ret = pch_gbe_init_phy(adapter);

		dev_err(&pdev->dev, "PHY initialize error\n");
		goto err_free_adapter;

		dev_err(&pdev->dev, "MAC address Read Error\n");
		goto err_free_adapter;

	if (!is_valid_ether_addr(netdev->dev_addr)) {

			"interface disabled.\n");

		    (unsigned long)adapter);

		goto err_free_adapter;

	netif_stop_queue(netdev);

	dev_dbg(&pdev->dev, "PCH Network Connection\n");
	 .class_mask = (0xFFFF00)

	 .class_mask = (0xFFFF00)

	 .class_mask = (0xFFFF00)
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,

	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume

	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
	.driver.pm = &pch_gbe_pm_ops,
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
static int __init pch_gbe_init_module(void)

	ret = pci_register_driver(&pch_gbe_driver);

		pr_info("copybreak disabled\n");

		pr_info("copybreak enabled for packets <= %u bytes\n",

static void __exit pch_gbe_exit_module(void)

		 "Maximum size of packet that is copied to a new buffer on receive");