Linux Kernel  3.7.1
pch_gbe_main.c
1 /*
2  * Copyright (C) 1999 - 2010 Intel Corporation.
3  * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
4  *
5  * This code was derived from the Intel e1000e Linux driver.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; version 2 of the License.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19  */
20 
21 #include "pch_gbe.h"
22 #include "pch_gbe_api.h"
23 #include <linux/module.h>
24 #ifdef CONFIG_PCH_PTP
25 #include <linux/net_tstamp.h>
26 #include <linux/ptp_classify.h>
27 #endif
28 
29 #define DRV_VERSION "1.01"
30 const char pch_driver_version[] = DRV_VERSION;
31 
32 #define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
33 #define PCH_GBE_MAR_ENTRIES 16
34 #define PCH_GBE_SHORT_PKT 64
35 #define DSC_INIT16 0xC000
36 #define PCH_GBE_DMA_ALIGN 0
37 #define PCH_GBE_DMA_PADDING 2
38 #define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */
39 #define PCH_GBE_COPYBREAK_DEFAULT 256
40 #define PCH_GBE_PCI_BAR 1
41 #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
42 
43 /* Macros for ML7223 */
44 #define PCI_VENDOR_ID_ROHM 0x10db
45 #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
46 
47 /* Macros for ML7831 */
48 #define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
49 
50 #define PCH_GBE_TX_WEIGHT 64
51 #define PCH_GBE_RX_WEIGHT 64
52 #define PCH_GBE_RX_BUFFER_WRITE 16
53 
54 /* Initialize the wake-on-LAN settings */
55 #define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
56 
57 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
58  PCH_GBE_CHIP_TYPE_INTERNAL | \
59  PCH_GBE_RGMII_MODE_RGMII \
60  )
61 
62 /* Ethertype field values */
63 #define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
64 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
65 #define PCH_GBE_FRAME_SIZE_2048 2048
66 #define PCH_GBE_FRAME_SIZE_4096 4096
67 #define PCH_GBE_FRAME_SIZE_8192 8192
68 
69 #define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
70 #define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
71 #define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
72 #define PCH_GBE_DESC_UNUSED(R) \
73  ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
74  (R)->next_to_clean - (R)->next_to_use - 1)
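/* Worked example of the free-slot arithmetic above: with count = 8,
 * next_to_clean = 2 and next_to_use = 6, the clean index is not ahead of
 * the use index, so the first term contributes 8 and the macro yields
 * 8 + 2 - 6 - 1 = 3 unused descriptors; one slot is always held back so a
 * completely full ring can be distinguished from an empty one.
 */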
75 
76 /* Pause packet value */
77 #define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
78 #define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
79 #define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
80 #define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
81 
82 
83 /* This defines the bits that are set in the Interrupt Mask
84  * Set/Read Register. Each bit is documented below:
85  * o RX_DMA_CMPLT = Receive DMA Transfer Complete
86  * o RX_DSC_EMP = Receive Descriptor Empty
87  * o RX_FIFO_ERR = Receive FIFO Error
88  * o WOL_DET = Wake On LAN Detect
89  * o TX_CMPLT = Transmit Complete
90  */
91 #define PCH_GBE_INT_ENABLE_MASK ( \
92  PCH_GBE_INT_RX_DMA_CMPLT | \
93  PCH_GBE_INT_RX_DSC_EMP | \
94  PCH_GBE_INT_RX_FIFO_ERR | \
95  PCH_GBE_INT_WOL_DET | \
96  PCH_GBE_INT_TX_CMPLT \
97  )
98 
99 #define PCH_GBE_INT_DISABLE_ALL 0
100 
101 #ifdef CONFIG_PCH_PTP
102 /* Macros for ieee1588 */
103 /* 0x40 Time Synchronization Channel Control Register Bits */
104 #define MASTER_MODE (1<<0)
105 #define SLAVE_MODE (0)
106 #define V2_MODE (1<<31)
107 #define CAP_MODE0 (0)
108 #define CAP_MODE2 (1<<17)
109 
110 /* 0x44 Time Synchronization Channel Event Register Bits */
111 #define TX_SNAPSHOT_LOCKED (1<<0)
112 #define RX_SNAPSHOT_LOCKED (1<<1)
113 
114 #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
115 #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
116 #endif
117 
118 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
119 
120 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
121 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
122  int data);
123 static void pch_gbe_set_multi(struct net_device *netdev);
124 
125 #ifdef CONFIG_PCH_PTP
126 static struct sock_filter ptp_filter[] = {
127  PTP_FILTER
128 };
129 
130 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
131 {
132  u8 *data = skb->data;
133  unsigned int offset;
134  u16 *hi, *id;
135  u32 lo;
136 
137  if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
138  return 0;
139 
140  offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
141 
142  if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
143  return 0;
144 
145  hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
146  id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
147 
148  memcpy(&lo, &hi[1], sizeof(lo));
149 
150  return (uid_hi == *hi &&
151  uid_lo == lo &&
152  seqid == *id);
153 }
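/* Note on the memcpy above: the low 32 bits of the source UUID start at
 * hi[1], which is only 16-bit aligned within the packet, so they are
 * copied byte-wise into a local u32 rather than read with a direct
 * (potentially unaligned) 32-bit load.
 */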
154 
155 static void
156 pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
157 {
158  struct skb_shared_hwtstamps *shhwtstamps;
159  struct pci_dev *pdev;
160  u64 ns;
161  u32 hi, lo, val;
162  u16 uid, seq;
163 
164  if (!adapter->hwts_rx_en)
165  return;
166 
167  /* Get ieee1588's dev information */
168  pdev = adapter->ptp_pdev;
169 
170  val = pch_ch_event_read(pdev);
171 
172  if (!(val & RX_SNAPSHOT_LOCKED))
173  return;
174 
175  lo = pch_src_uuid_lo_read(pdev);
176  hi = pch_src_uuid_hi_read(pdev);
177 
178  uid = hi & 0xffff;
179  seq = (hi >> 16) & 0xffff;
180 
181  if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
182  goto out;
183 
184  ns = pch_rx_snap_read(pdev);
185 
186  shhwtstamps = skb_hwtstamps(skb);
187  memset(shhwtstamps, 0, sizeof(*shhwtstamps));
188  shhwtstamps->hwtstamp = ns_to_ktime(ns);
189 out:
190  pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
191 }
192 
193 static void
194 pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
195 {
196  struct skb_shared_hwtstamps shhwtstamps;
197  struct pci_dev *pdev;
198  struct skb_shared_info *shtx;
199  u64 ns;
200  u32 cnt, val;
201 
202  shtx = skb_shinfo(skb);
203  if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
204  return;
205 
206  shtx->tx_flags |= SKBTX_IN_PROGRESS;
207 
208  /* Get ieee1588's dev information */
209  pdev = adapter->ptp_pdev;
210 
211  /*
212  * This really stinks, but we have to poll for the Tx time stamp.
213  */
214  for (cnt = 0; cnt < 100; cnt++) {
215  val = pch_ch_event_read(pdev);
216  if (val & TX_SNAPSHOT_LOCKED)
217  break;
218  udelay(1);
219  }
220  if (!(val & TX_SNAPSHOT_LOCKED)) {
221  shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
222  return;
223  }
224 
225  ns = pch_tx_snap_read(pdev);
226 
227  memset(&shhwtstamps, 0, sizeof(shhwtstamps));
228  shhwtstamps.hwtstamp = ns_to_ktime(ns);
229  skb_tstamp_tx(skb, &shhwtstamps);
230 
231  pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
232 }
233 
234 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
235 {
236  struct hwtstamp_config cfg;
237  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
238  struct pci_dev *pdev;
239  u8 station[20];
240 
241  if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
242  return -EFAULT;
243 
244  if (cfg.flags) /* reserved for future extensions */
245  return -EINVAL;
246 
247  /* Get ieee1588's dev information */
248  pdev = adapter->ptp_pdev;
249 
250  switch (cfg.tx_type) {
251  case HWTSTAMP_TX_OFF:
252  adapter->hwts_tx_en = 0;
253  break;
254  case HWTSTAMP_TX_ON:
255  adapter->hwts_tx_en = 1;
256  break;
257  default:
258  return -ERANGE;
259  }
260 
261  switch (cfg.rx_filter) {
262  case HWTSTAMP_FILTER_NONE:
263  adapter->hwts_rx_en = 0;
264  break;
265  case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
266  adapter->hwts_rx_en = 0;
267  pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
268  break;
269  case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
270  adapter->hwts_rx_en = 1;
271  pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
272  break;
273  case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
274  adapter->hwts_rx_en = 1;
275  pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
276  strcpy(station, PTP_L4_MULTICAST_SA);
277  pch_set_station_address(station, pdev);
278  break;
279  case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
280  adapter->hwts_rx_en = 1;
281  pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
282  strcpy(station, PTP_L2_MULTICAST_SA);
283  pch_set_station_address(station, pdev);
284  break;
285  default:
286  return -ERANGE;
287  }
288 
289  /* Clear out any old time stamps. */
290  pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
291 
292  return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
293 }
294 #endif
295 
296 static void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
297 {
298  iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
299 }
300 
307 s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
308 {
309  u32 adr1a, adr1b;
310 
311  adr1a = ioread32(&hw->reg->mac_adr[0].high);
312  adr1b = ioread32(&hw->reg->mac_adr[0].low);
313 
314  hw->mac.addr[0] = (u8)(adr1a & 0xFF);
315  hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
316  hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
317  hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
318  hw->mac.addr[4] = (u8)(adr1b & 0xFF);
319  hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
320 
321  pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
322  return 0;
323 }
324 
330 static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
331 {
332  u32 tmp;
333  /* wait busy */
334  tmp = 1000;
335  while ((ioread32(reg) & bit) && --tmp)
336  cpu_relax();
337  if (!tmp)
338  pr_err("Error: busy bit is not cleared\n");
339 }
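/* The caller-supplied busy bit gets roughly 1000 polls to clear;
 * cpu_relax() keeps the busy-wait pipeline-friendly without sleeping, so
 * the helper stays usable from atomic context.
 */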
340 
347 static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
348 {
349  u32 mar_low, mar_high, adrmask;
350 
351  pr_debug("index : 0x%x\n", index);
352 
353  /*
354  * HW expects these in little endian so we reverse the byte order
355  * from network order (big endian) to little endian
356  */
357  mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
358  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
359  mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
360  /* Disable (mask off) the MAC address at this index. */
361  adrmask = ioread32(&hw->reg->ADDR_MASK);
362  iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
363  /* wait busy */
364  pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
365  /* Set the MAC address to the MAC address 1A/1B register */
366  iowrite32(mar_high, &hw->reg->mac_adr[index].high);
367  iowrite32(mar_low, &hw->reg->mac_adr[index].low);
368  /* Re-enable the MAC address at this index */
369  iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
370  /* wait busy */
371  pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
372 }
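/* The sequence above follows the hardware's rule for updating an address
 * entry: mask the entry off via ADDR_MASK, wait for the busy bit, program
 * the new address words, then clear the mask bit and wait again so the
 * filter resumes matching only once the entry is consistent.
 */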
373 
378 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
379 {
380  /* Read the MAC address and store it in the private data */
381  pch_gbe_mac_read_mac_addr(hw);
382  iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
383 #ifdef PCH_GBE_MAC_IFOP_RGMII
384  iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
385 #endif
386  pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
387  /* Setup the receive addresses */
388  pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
389  return;
390 }
391 
392 static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
393 {
394  u32 rctl;
395  /* Disables Receive MAC */
396  rctl = ioread32(&hw->reg->MAC_RX_EN);
397  iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
398 }
399 
400 static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
401 {
402  u32 rctl;
403  /* Enables Receive MAC */
404  rctl = ioread32(&hw->reg->MAC_RX_EN);
405  iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
406 }
407 
413 static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
414 {
415  u32 i;
416 
417  /* Setup the receive address */
418  pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
419 
420  /* Zero out the other receive addresses */
421  for (i = 1; i < mar_count; i++) {
422  iowrite32(0, &hw->reg->mac_adr[i].high);
423  iowrite32(0, &hw->reg->mac_adr[i].low);
424  }
425  iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
426  /* wait busy */
427  pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
428 }
429 
430 
439 static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
440  u8 *mc_addr_list, u32 mc_addr_count,
441  u32 mar_used_count, u32 mar_total_num)
442 {
443  u32 i, adrmask;
444 
445  /* Load the first set of multicast addresses into the exact
446  * filters (RAR). If there are not enough to fill the RAR
447  * array, clear the filters.
448  */
449  for (i = mar_used_count; i < mar_total_num; i++) {
450  if (mc_addr_count) {
451  pch_gbe_mac_mar_set(hw, mc_addr_list, i);
452  mc_addr_count--;
453  mc_addr_list += ETH_ALEN;
454  } else {
455  /* Clear MAC address mask */
456  adrmask = ioread32(&hw->reg->ADDR_MASK);
457  iowrite32((adrmask | (0x0001 << i)),
458  &hw->reg->ADDR_MASK);
459  /* wait busy */
460  pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
461  /* Clear MAC address */
462  iowrite32(0, &hw->reg->mac_adr[i].high);
463  iowrite32(0, &hw->reg->mac_adr[i].low);
464  }
465  }
466 }
467 
475 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
476 {
477  struct pch_gbe_mac_info *mac = &hw->mac;
478  u32 rx_fctrl;
479 
480  pr_debug("mac->fc = %u\n", mac->fc);
481 
482  rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
483 
484  switch (mac->fc) {
485  case PCH_GBE_FC_NONE:
486  rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
487  mac->tx_fc_enable = false;
488  break;
489  case PCH_GBE_FC_RX_PAUSE:
490  rx_fctrl |= PCH_GBE_FL_CTRL_EN;
491  mac->tx_fc_enable = false;
492  break;
493  case PCH_GBE_FC_TX_PAUSE:
494  rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
495  mac->tx_fc_enable = true;
496  break;
497  case PCH_GBE_FC_FULL:
498  rx_fctrl |= PCH_GBE_FL_CTRL_EN;
499  mac->tx_fc_enable = true;
500  break;
501  default:
502  pr_err("Flow control param set incorrectly\n");
503  return -EINVAL;
504  }
505  if (mac->link_duplex == DUPLEX_HALF)
506  rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
507  iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
508  pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
509  ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
510  return 0;
511 }
512 
518 static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
519 {
520  u32 addr_mask;
521 
522  pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
523  wu_evt, ioread32(&hw->reg->ADDR_MASK));
524 
525  if (wu_evt) {
526  /* Set Wake-On-Lan address mask */
527  addr_mask = ioread32(&hw->reg->ADDR_MASK);
528  iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
529  /* wait busy */
530  pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
531  iowrite32(0, &hw->reg->WOL_ST);
532  iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
533  iowrite32(0x02, &hw->reg->TCPIP_ACC);
534  iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
535  } else {
536  iowrite32(0, &hw->reg->WOL_CTRL);
537  iowrite32(0, &hw->reg->WOL_ST);
538  }
539  return;
540 }
541 
552 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
553  u16 data)
554 {
555  u32 data_out = 0;
556  unsigned int i;
557  unsigned long flags;
558 
559  spin_lock_irqsave(&hw->miim_lock, flags);
560 
561  for (i = 100; i; --i) {
562  if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
563  break;
564  udelay(20);
565  }
566  if (i == 0) {
567  pr_err("pch-gbe.miim won't go Ready\n");
568  spin_unlock_irqrestore(&hw->miim_lock, flags);
569  return 0; /* No way to indicate timeout error */
570  }
571  iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
572  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
573  dir | data), &hw->reg->MIIM);
574  for (i = 0; i < 100; i++) {
575  udelay(20);
576  data_out = ioread32(&hw->reg->MIIM);
577  if ((data_out & PCH_GBE_MIIM_OPER_READY))
578  break;
579  }
580  spin_unlock_irqrestore(&hw->miim_lock, flags);
581 
582  pr_debug("PHY %s: reg=%d, data=0x%04X\n",
583  dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
584  dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
585  return (u16) data_out;
586 }
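/* Usage sketch: the pch_gbe_mdio_read()/pch_gbe_mdio_write() wrappers
 * below drive this helper with the read/write opcode, the PHY address and
 * the MII register number. The second poll loop caps the wait at about
 * 100 * 20us; a timeout in the first loop can only be signalled by the
 * ambiguous return value of 0.
 */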
587 
592 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
593 {
594  unsigned long tmp2, tmp3;
595 
596  /* Set Pause packet */
597  tmp2 = hw->mac.addr[1];
598  tmp2 = (tmp2 << 8) | hw->mac.addr[0];
599  tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
600 
601  tmp3 = hw->mac.addr[5];
602  tmp3 = (tmp3 << 8) | hw->mac.addr[4];
603  tmp3 = (tmp3 << 8) | hw->mac.addr[3];
604  tmp3 = (tmp3 << 8) | hw->mac.addr[2];
605 
606  iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
607  iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
608  iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
609  iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
610  iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
611 
612  /* Transmit Pause Packet */
613  iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
614 
615  pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
616  ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
617  ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
618  ioread32(&hw->reg->PAUSE_PKT5));
619 
620  return;
621 }
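/* The five PAUSE_PKT registers spell out a complete 802.3x pause frame in
 * little-endian byte order: PKT1/PKT2 carry the reserved multicast
 * destination 01:80:c2:00:00:01 plus the first two source-MAC bytes, PKT3
 * the remaining four source bytes, PKT4 the 0x8808 MAC-control ethertype
 * with the 0x0001 pause opcode, and PKT5 the maximum pause time of 0xFFFF
 * quanta.
 */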
622 
623 
631 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
632 {
633  adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
634  if (!adapter->tx_ring)
635  return -ENOMEM;
636 
637  adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
638  if (!adapter->rx_ring) {
639  kfree(adapter->tx_ring);
640  return -ENOMEM;
641  }
642  return 0;
643 }
644 
649 static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
650 {
651  memset(&adapter->stats, 0, sizeof(adapter->stats));
652  return;
653 }
654 
662 static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
663 {
664  struct net_device *netdev = adapter->netdev;
665  u32 addr;
666  u16 bmcr, stat;
667 
668  /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
669  for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
670  adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
671  bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
672  stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
673  stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
674  if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
675  break;
676  }
677  adapter->hw.phy.addr = adapter->mii.phy_id;
678  pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
679  if (addr == 32)
680  return -EAGAIN;
681  /* Select the PHY and isolate the rest */
682  for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
683  if (addr != adapter->mii.phy_id) {
684  pch_gbe_mdio_write(netdev, addr, MII_BMCR,
685  BMCR_ISOLATE);
686  } else {
687  bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
688  pch_gbe_mdio_write(netdev, addr, MII_BMCR,
689  bmcr & ~BMCR_ISOLATE);
690  }
691  }
692 
693  /* MII setup */
694  adapter->mii.phy_id_mask = 0x1F;
695  adapter->mii.reg_num_mask = 0x1F;
696  adapter->mii.dev = adapter->netdev;
697  adapter->mii.mdio_read = pch_gbe_mdio_read;
698  adapter->mii.mdio_write = pch_gbe_mdio_write;
699  adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
700  return 0;
701 }
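/* The probe order {1, 0, 2, ..., 31} above simply tries address 1 first;
 * a PHY is treated as present as soon as BMCR/BMSR return something other
 * than the all-ones or all-zeros patterns of an empty bus address.
 */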
702 
712 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
713 {
714  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
715  struct pch_gbe_hw *hw = &adapter->hw;
716 
717  return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
718  (u16) 0);
719 }
720 
728 static void pch_gbe_mdio_write(struct net_device *netdev,
729  int addr, int reg, int data)
730 {
731  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
732  struct pch_gbe_hw *hw = &adapter->hw;
733 
734  pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
735 }
736 
741 static void pch_gbe_reset_task(struct work_struct *work)
742 {
743  struct pch_gbe_adapter *adapter;
744  adapter = container_of(work, struct pch_gbe_adapter, reset_task);
745 
746  rtnl_lock();
747  pch_gbe_reinit_locked(adapter);
748  rtnl_unlock();
749 }
750 
755 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
756 {
757  pch_gbe_down(adapter);
758  pch_gbe_up(adapter);
759 }
760 
765 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
766 {
767  pch_gbe_mac_reset_hw(&adapter->hw);
768  /* reprogram multicast address register after reset */
769  pch_gbe_set_multi(adapter->netdev);
770  /* Setup the receive address. */
771  pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
772  if (pch_gbe_hal_init_hw(&adapter->hw))
773  pr_err("Hardware Error\n");
774 }
775 
780 static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
781 {
782  struct net_device *netdev = adapter->netdev;
783 
784  free_irq(adapter->pdev->irq, netdev);
785  if (adapter->have_msi) {
786  pci_disable_msi(adapter->pdev);
787  pr_debug("call pci_disable_msi\n");
788  }
789 }
790 
795 static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
796 {
797  struct pch_gbe_hw *hw = &adapter->hw;
798 
799  atomic_inc(&adapter->irq_sem);
800  iowrite32(0, &hw->reg->INT_EN);
801  ioread32(&hw->reg->INT_ST);
802  synchronize_irq(adapter->pdev->irq);
803 
804  pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
805 }
806 
811 static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
812 {
813  struct pch_gbe_hw *hw = &adapter->hw;
814 
815  if (likely(atomic_dec_and_test(&adapter->irq_sem)))
816  iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
817  ioread32(&hw->reg->INT_ST);
818  pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
819 }
820 
821 
822 
827 static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
828 {
829  struct pch_gbe_hw *hw = &adapter->hw;
830  u32 tx_mode, tcpip;
831 
832  tx_mode = PCH_GBE_TM_LONG_PKT |
833  PCH_GBE_TM_ST_AND_FD |
834  PCH_GBE_TM_SHORT_PKT |
835  PCH_GBE_TM_TH_TX_STRT_8 |
836  PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
837 
838  iowrite32(tx_mode, &hw->reg->TX_MODE);
839 
840  tcpip = ioread32(&hw->reg->TCPIP_ACC);
841  tcpip |= PCH_GBE_TX_TCPIPACC_EN;
842  iowrite32(tcpip, &hw->reg->TCPIP_ACC);
843  return;
844 }
845 
850 static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
851 {
852  struct pch_gbe_hw *hw = &adapter->hw;
853  u32 tdba, tdlen, dctrl;
854 
855  pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
856  (unsigned long long)adapter->tx_ring->dma,
857  adapter->tx_ring->size);
858 
859  /* Setup the HW Tx Head and Tail descriptor pointers */
860  tdba = adapter->tx_ring->dma;
861  tdlen = adapter->tx_ring->size - 0x10;
862  iowrite32(tdba, &hw->reg->TX_DSC_BASE);
863  iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
864  iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
865 
866  /* Enables Transmission DMA */
867  dctrl = ioread32(&hw->reg->DMA_CTRL);
868  dctrl |= PCH_GBE_TX_DMA_EN;
869  iowrite32(dctrl, &hw->reg->DMA_CTRL);
870 }
871 
876 static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
877 {
878  struct pch_gbe_hw *hw = &adapter->hw;
879  u32 rx_mode, tcpip;
880 
881  rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
882  PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
883 
884  iowrite32(rx_mode, &hw->reg->RX_MODE);
885 
886  tcpip = ioread32(&hw->reg->TCPIP_ACC);
887 
888  tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
889  tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
890  iowrite32(tcpip, &hw->reg->TCPIP_ACC);
891  return;
892 }
893 
898 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
899 {
900  struct pch_gbe_hw *hw = &adapter->hw;
901  u32 rdba, rdlen, rxdma;
902 
903  pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
904  (unsigned long long)adapter->rx_ring->dma,
905  adapter->rx_ring->size);
906 
907  pch_gbe_mac_force_mac_fc(hw);
908 
909  pch_gbe_disable_mac_rx(hw);
910 
911  /* Disables Receive DMA */
912  rxdma = ioread32(&hw->reg->DMA_CTRL);
913  rxdma &= ~PCH_GBE_RX_DMA_EN;
914  iowrite32(rxdma, &hw->reg->DMA_CTRL);
915 
916  pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
917  ioread32(&hw->reg->MAC_RX_EN),
918  ioread32(&hw->reg->DMA_CTRL));
919 
920  /* Setup the HW Rx Head and Tail Descriptor Pointers and
921  * the Base and Length of the Rx Descriptor Ring */
922  rdba = adapter->rx_ring->dma;
923  rdlen = adapter->rx_ring->size - 0x10;
924  iowrite32(rdba, &hw->reg->RX_DSC_BASE);
925  iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
926  iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
927 }
928 
934 static void pch_gbe_unmap_and_free_tx_resource(
935  struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
936 {
937  if (buffer_info->mapped) {
938  dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
939  buffer_info->length, DMA_TO_DEVICE);
940  buffer_info->mapped = false;
941  }
942  if (buffer_info->skb) {
943  dev_kfree_skb_any(buffer_info->skb);
944  buffer_info->skb = NULL;
945  }
946 }
947 
953 static void pch_gbe_unmap_and_free_rx_resource(
954  struct pch_gbe_adapter *adapter,
955  struct pch_gbe_buffer *buffer_info)
956 {
957  if (buffer_info->mapped) {
958  dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
959  buffer_info->length, DMA_FROM_DEVICE);
960  buffer_info->mapped = false;
961  }
962  if (buffer_info->skb) {
963  dev_kfree_skb_any(buffer_info->skb);
964  buffer_info->skb = NULL;
965  }
966 }
967 
973 static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
974  struct pch_gbe_tx_ring *tx_ring)
975 {
976  struct pch_gbe_hw *hw = &adapter->hw;
977  struct pch_gbe_buffer *buffer_info;
978  unsigned long size;
979  unsigned int i;
980 
981  /* Free all the Tx ring sk_buffs */
982  for (i = 0; i < tx_ring->count; i++) {
983  buffer_info = &tx_ring->buffer_info[i];
984  pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
985  }
986  pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
987 
988  size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
989  memset(tx_ring->buffer_info, 0, size);
990 
991  /* Zero out the descriptor ring */
992  memset(tx_ring->desc, 0, tx_ring->size);
993  tx_ring->next_to_use = 0;
994  tx_ring->next_to_clean = 0;
995  iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
996  iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
997 }
998 
1004 static void
1005 pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
1006  struct pch_gbe_rx_ring *rx_ring)
1007 {
1008  struct pch_gbe_hw *hw = &adapter->hw;
1009  struct pch_gbe_buffer *buffer_info;
1010  unsigned long size;
1011  unsigned int i;
1012 
1013  /* Free all the Rx ring sk_buffs */
1014  for (i = 0; i < rx_ring->count; i++) {
1015  buffer_info = &rx_ring->buffer_info[i];
1016  pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
1017  }
1018  pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
1019  size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1020  memset(rx_ring->buffer_info, 0, size);
1021 
1022  /* Zero out the descriptor ring */
1023  memset(rx_ring->desc, 0, rx_ring->size);
1024  rx_ring->next_to_clean = 0;
1025  rx_ring->next_to_use = 0;
1026  iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
1027  iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
1028 }
1029 
1030 static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
1031  u16 duplex)
1032 {
1033  struct pch_gbe_hw *hw = &adapter->hw;
1034  unsigned long rgmii = 0;
1035 
1036  /* Set the RGMII control. */
1037 #ifdef PCH_GBE_MAC_IFOP_RGMII
1038  switch (speed) {
1039  case SPEED_10:
1040  rgmii = (PCH_GBE_RGMII_RATE_2_5M |
1041  PCH_GBE_MAC_RGMII_CTRL_SETTING);
1042  break;
1043  case SPEED_100:
1044  rgmii = (PCH_GBE_RGMII_RATE_25M |
1045  PCH_GBE_MAC_RGMII_CTRL_SETTING);
1046  break;
1047  case SPEED_1000:
1048  rgmii = (PCH_GBE_RGMII_RATE_125M |
1049  PCH_GBE_MAC_RGMII_CTRL_SETTING);
1050  break;
1051  }
1052  iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1053 #else /* GMII */
1054  rgmii = 0;
1055  iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1056 #endif
1057 }
1058 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1059  u16 duplex)
1060 {
1061  struct net_device *netdev = adapter->netdev;
1062  struct pch_gbe_hw *hw = &adapter->hw;
1063  unsigned long mode = 0;
1064 
1065  /* Set the communication mode */
1066  switch (speed) {
1067  case SPEED_10:
1068  mode = PCH_GBE_MODE_MII_ETHER;
1069  netdev->tx_queue_len = 10;
1070  break;
1071  case SPEED_100:
1072  mode = PCH_GBE_MODE_MII_ETHER;
1073  netdev->tx_queue_len = 100;
1074  break;
1075  case SPEED_1000:
1076  mode = PCH_GBE_MODE_GMII_ETHER;
1077  break;
1078  }
1079  if (duplex == DUPLEX_FULL)
1080  mode |= PCH_GBE_MODE_FULL_DUPLEX;
1081  else
1082  mode |= PCH_GBE_MODE_HALF_DUPLEX;
1083  iowrite32(mode, &hw->reg->MODE);
1084 }
1085 
1090 static void pch_gbe_watchdog(unsigned long data)
1091 {
1092  struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
1093  struct net_device *netdev = adapter->netdev;
1094  struct pch_gbe_hw *hw = &adapter->hw;
1095 
1096  pr_debug("right now = %ld\n", jiffies);
1097 
1098  pch_gbe_update_stats(adapter);
1099  if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
1100  struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1101  netdev->tx_queue_len = adapter->tx_queue_len;
1102  /* mii library handles link maintenance tasks */
1103  if (mii_ethtool_gset(&adapter->mii, &cmd)) {
1104  pr_err("ethtool get setting Error\n");
1105  mod_timer(&adapter->watchdog_timer,
1106  round_jiffies(jiffies +
1107  PCH_GBE_WATCHDOG_PERIOD));
1108  return;
1109  }
1110  hw->mac.link_speed = ethtool_cmd_speed(&cmd);
1111  hw->mac.link_duplex = cmd.duplex;
1112  /* Set the RGMII control. */
1113  pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
1114  hw->mac.link_duplex);
1115  /* Set the communication mode */
1116  pch_gbe_set_mode(adapter, hw->mac.link_speed,
1117  hw->mac.link_duplex);
1118  netdev_dbg(netdev,
1119  "Link is Up %d Mbps %s-Duplex\n",
1120  hw->mac.link_speed,
1121  cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1122  netif_carrier_on(netdev);
1123  netif_wake_queue(netdev);
1124  } else if ((!mii_link_ok(&adapter->mii)) &&
1125  (netif_carrier_ok(netdev))) {
1126  netdev_dbg(netdev, "NIC Link is Down\n");
1127  hw->mac.link_speed = SPEED_10;
1128  hw->mac.link_duplex = DUPLEX_HALF;
1129  netif_carrier_off(netdev);
1130  netif_stop_queue(netdev);
1131  }
1132  mod_timer(&adapter->watchdog_timer,
1133  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
1134 }
1135 
1142 static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1143  struct pch_gbe_tx_ring *tx_ring,
1144  struct sk_buff *skb)
1145 {
1146  struct pch_gbe_hw *hw = &adapter->hw;
1147  struct pch_gbe_tx_desc *tx_desc;
1148  struct pch_gbe_buffer *buffer_info;
1149  struct sk_buff *tmp_skb;
1150  unsigned int frame_ctrl;
1151  unsigned int ring_num;
1152 
1153  /*-- Set frame control --*/
1154  frame_ctrl = 0;
1155  if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
1156  frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
1157  if (skb->ip_summed == CHECKSUM_NONE)
1158  frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1159 
1160  /* Perform checksum processing */
1161  /*
1162  * The hardware accelerator cannot generate a checksum for
1163  * frames shorter than 64 bytes, so compute it in software here.
1164  */
1165  if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
1166  frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
1167  PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1168  if (skb->protocol == htons(ETH_P_IP)) {
1169  struct iphdr *iph = ip_hdr(skb);
1170  unsigned int offset;
1171  offset = skb_transport_offset(skb);
1172  if (iph->protocol == IPPROTO_TCP) {
1173  skb->csum = 0;
1174  tcp_hdr(skb)->check = 0;
1175  skb->csum = skb_checksum(skb, offset,
1176  skb->len - offset, 0);
1177  tcp_hdr(skb)->check =
1178  csum_tcpudp_magic(iph->saddr,
1179  iph->daddr,
1180  skb->len - offset,
1181  IPPROTO_TCP,
1182  skb->csum);
1183  } else if (iph->protocol == IPPROTO_UDP) {
1184  skb->csum = 0;
1185  udp_hdr(skb)->check = 0;
1186  skb->csum =
1187  skb_checksum(skb, offset,
1188  skb->len - offset, 0);
1189  udp_hdr(skb)->check =
1190  csum_tcpudp_magic(iph->saddr,
1191  iph->daddr,
1192  skb->len - offset,
1193  IPPROTO_UDP,
1194  skb->csum);
1195  }
1196  }
1197  }
1198 
1199  ring_num = tx_ring->next_to_use;
1200  if (unlikely((ring_num + 1) == tx_ring->count))
1201  tx_ring->next_to_use = 0;
1202  else
1203  tx_ring->next_to_use = ring_num + 1;
1204 
1205 
1206  buffer_info = &tx_ring->buffer_info[ring_num];
1207  tmp_skb = buffer_info->skb;
1208 
1209  /* [Header:14][payload] ---> [Header:14][padding:2][payload] */
1210  memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1211  tmp_skb->data[ETH_HLEN] = 0x00;
1212  tmp_skb->data[ETH_HLEN + 1] = 0x00;
1213  tmp_skb->len = skb->len;
1214  memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1215  (skb->len - ETH_HLEN));
1216  /*-- Set Buffer information --*/
1217  buffer_info->length = tmp_skb->len;
1218  buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1219  buffer_info->length,
1220  DMA_TO_DEVICE);
1221  if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1222  pr_err("TX DMA map failed\n");
1223  buffer_info->dma = 0;
1224  buffer_info->time_stamp = 0;
1225  tx_ring->next_to_use = ring_num;
1226  return;
1227  }
1228  buffer_info->mapped = true;
1229  buffer_info->time_stamp = jiffies;
1230 
1231  /*-- Set Tx descriptor --*/
1232  tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1233  tx_desc->buffer_addr = (buffer_info->dma);
1234  tx_desc->length = (tmp_skb->len);
1235  tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1236  tx_desc->tx_frame_ctrl = (frame_ctrl);
1237  tx_desc->gbec_status = (DSC_INIT16);
1238 
1239  if (unlikely(++ring_num == tx_ring->count))
1240  ring_num = 0;
1241 
1242  /* Update software pointer of TX descriptor */
1243  iowrite32(tx_ring->dma +
1244  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1245  &hw->reg->TX_DSC_SW_P);
1246 
1247 #ifdef CONFIG_PCH_PTP
1248  pch_tx_timestamp(adapter, skb);
1249 #endif
1250 
1251  dev_kfree_skb_any(skb);
1252 }
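/* Transmit path summary: the outgoing skb is copied into a preallocated
 * ring buffer with two zero bytes inserted after the 14-byte Ethernet
 * header, so the IP header handed to the DMA engine starts on a 4-byte
 * boundary; the original skb is freed as soon as the descriptor and the
 * software pointer have been handed to the hardware.
 */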
1253 
1258 void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1259 {
1260  struct net_device *netdev = adapter->netdev;
1261  struct pci_dev *pdev = adapter->pdev;
1262  struct pch_gbe_hw_stats *stats = &adapter->stats;
1263  unsigned long flags;
1264 
1265  /*
1266  * Prevent stats update while adapter is being reset, or if the pci
1267  * connection is down.
1268  */
1269  if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1270  return;
1271 
1272  spin_lock_irqsave(&adapter->stats_lock, flags);
1273 
1274  /* Update device status "adapter->stats" */
1275  stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1276  stats->tx_errors = stats->tx_length_errors +
1277  stats->tx_aborted_errors +
1278  stats->tx_carrier_errors + stats->tx_timeout_count;
1279 
1280  /* Update network device status "adapter->net_stats" */
1281  netdev->stats.rx_packets = stats->rx_packets;
1282  netdev->stats.rx_bytes = stats->rx_bytes;
1283  netdev->stats.rx_dropped = stats->rx_dropped;
1284  netdev->stats.tx_packets = stats->tx_packets;
1285  netdev->stats.tx_bytes = stats->tx_bytes;
1286  netdev->stats.tx_dropped = stats->tx_dropped;
1287  /* Fill out the OS statistics structure */
1288  netdev->stats.multicast = stats->multicast;
1289  netdev->stats.collisions = stats->collisions;
1290  /* Rx Errors */
1291  netdev->stats.rx_errors = stats->rx_errors;
1292  netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1293  netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1294  /* Tx Errors */
1295  netdev->stats.tx_errors = stats->tx_errors;
1296  netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1297  netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1298 
1299  spin_unlock_irqrestore(&adapter->stats_lock, flags);
1300 }
1301 
1302 static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
1303 {
1304  u32 rxdma;
1305 
1306  /* Disable Receive DMA */
1307  rxdma = ioread32(&hw->reg->DMA_CTRL);
1308  rxdma &= ~PCH_GBE_RX_DMA_EN;
1309  iowrite32(rxdma, &hw->reg->DMA_CTRL);
1310 }
1311 
1312 static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
1313 {
1314  u32 rxdma;
1315 
1316  /* Enables Receive DMA */
1317  rxdma = ioread32(&hw->reg->DMA_CTRL);
1318  rxdma |= PCH_GBE_RX_DMA_EN;
1319  iowrite32(rxdma, &hw->reg->DMA_CTRL);
1320 }
1321 
1330 static irqreturn_t pch_gbe_intr(int irq, void *data)
1331 {
1332  struct net_device *netdev = data;
1333  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1334  struct pch_gbe_hw *hw = &adapter->hw;
1335  u32 int_st;
1336  u32 int_en;
1337 
1338  /* Check request status */
1339  int_st = ioread32(&hw->reg->INT_ST);
1340  int_st = int_st & ioread32(&hw->reg->INT_EN);
1341  /* If no enabled interrupt factor is pending, it is not ours */
1342  if (unlikely(!int_st))
1343  return IRQ_NONE; /* Not our interrupt. End processing. */
1344  pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
1345  if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1346  adapter->stats.intr_rx_frame_err_count++;
1347  if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1348  if (!adapter->rx_stop_flag) {
1349  adapter->stats.intr_rx_fifo_err_count++;
1350  pr_debug("Rx fifo over run\n");
1351  adapter->rx_stop_flag = true;
1352  int_en = ioread32(&hw->reg->INT_EN);
1353  iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1354  &hw->reg->INT_EN);
1355  pch_gbe_disable_dma_rx(&adapter->hw);
1356  int_st |= ioread32(&hw->reg->INT_ST);
1357  int_st = int_st & ioread32(&hw->reg->INT_EN);
1358  }
1359  if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1360  adapter->stats.intr_rx_dma_err_count++;
1361  if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1362  adapter->stats.intr_tx_fifo_err_count++;
1363  if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1364  adapter->stats.intr_tx_dma_err_count++;
1365  if (int_st & PCH_GBE_INT_TCPIP_ERR)
1366  adapter->stats.intr_tcpip_err_count++;
1367  /* When Rx descriptor is empty */
1368  if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1369  adapter->stats.intr_rx_dsc_empty_count++;
1370  pr_debug("Rx descriptor is empty\n");
1371  int_en = ioread32(&hw->reg->INT_EN);
1372  iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1373  if (hw->mac.tx_fc_enable) {
1374  /* Set Pause packet */
1375  pch_gbe_mac_set_pause_packet(hw);
1376  }
1377  }
1378 
1379  /* When request status is Receive interruption */
1380  if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1381  (adapter->rx_stop_flag)) {
1382  if (likely(napi_schedule_prep(&adapter->napi))) {
1383  /* Enable only Rx Descriptor empty */
1384  atomic_inc(&adapter->irq_sem);
1385  int_en = ioread32(&hw->reg->INT_EN);
1386  int_en &=
1387  ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1388  iowrite32(int_en, &hw->reg->INT_EN);
1389  /* Start polling for NAPI */
1390  __napi_schedule(&adapter->napi);
1391  }
1392  }
1393  pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
1394  IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1395  return IRQ_HANDLED;
1396 }
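/* Interrupt/NAPI handshake: the Rx/Tx completion sources are masked above
 * before __napi_schedule(), and the driver's NAPI poll routine (defined
 * later in this file, outside this excerpt) is expected to re-enable them
 * once the rings have been drained within its budget.
 */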
1397 
1404 static void
1405 pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1406  struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1407 {
1408  struct net_device *netdev = adapter->netdev;
1409  struct pci_dev *pdev = adapter->pdev;
1410  struct pch_gbe_hw *hw = &adapter->hw;
1411  struct pch_gbe_rx_desc *rx_desc;
1412  struct pch_gbe_buffer *buffer_info;
1413  struct sk_buff *skb;
1414  unsigned int i;
1415  unsigned int bufsz;
1416 
1417  bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1418  i = rx_ring->next_to_use;
1419 
1420  while ((cleaned_count--)) {
1421  buffer_info = &rx_ring->buffer_info[i];
1422  skb = netdev_alloc_skb(netdev, bufsz);
1423  if (unlikely(!skb)) {
1424  /* Better luck next round */
1425  adapter->stats.rx_alloc_buff_failed++;
1426  break;
1427  }
1428  /* align */
1429  skb_reserve(skb, NET_IP_ALIGN);
1430  buffer_info->skb = skb;
1431 
1432  buffer_info->dma = dma_map_single(&pdev->dev,
1433  buffer_info->rx_buffer,
1434  buffer_info->length,
1435  DMA_FROM_DEVICE);
1436  if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1437  dev_kfree_skb(skb);
1438  buffer_info->skb = NULL;
1439  buffer_info->dma = 0;
1440  adapter->stats.rx_alloc_buff_failed++;
1441  break; /* while !buffer_info->skb */
1442  }
1443  buffer_info->mapped = true;
1444  rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1445  rx_desc->buffer_addr = (buffer_info->dma);
1446  rx_desc->gbec_status = DSC_INIT16;
1447 
1448  pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
1449  i, (unsigned long long)buffer_info->dma,
1450  buffer_info->length);
1451 
1452  if (unlikely(++i == rx_ring->count))
1453  i = 0;
1454  }
1455  if (likely(rx_ring->next_to_use != i)) {
1456  rx_ring->next_to_use = i;
1457  if (unlikely(i-- == 0))
1458  i = (rx_ring->count - 1);
1459  iowrite32(rx_ring->dma +
1460  (int)sizeof(struct pch_gbe_rx_desc) * i,
1461  &hw->reg->RX_DSC_SW_P);
1462  }
1463  return;
1464 }
1465 
1466 static int
1467 pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1468  struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1469 {
1470  struct pci_dev *pdev = adapter->pdev;
1471  struct pch_gbe_buffer *buffer_info;
1472  unsigned int i;
1473  unsigned int bufsz;
1474  unsigned int size;
1475 
1476  bufsz = adapter->rx_buffer_len;
1477 
1478  size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1479  rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1480  &rx_ring->rx_buff_pool_logic,
1481  GFP_KERNEL);
1482  if (!rx_ring->rx_buff_pool) {
1483  pr_err("Unable to allocate memory for the receive pool buffer\n");
1484  return -ENOMEM;
1485  }
1486  memset(rx_ring->rx_buff_pool, 0, size);
1487  rx_ring->rx_buff_pool_size = size;
1488  for (i = 0; i < rx_ring->count; i++) {
1489  buffer_info = &rx_ring->buffer_info[i];
1490  buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1491  buffer_info->length = bufsz;
1492  }
1493  return 0;
1494 }
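/* All receive payloads land in this one coherent pool; pch_gbe_clean_rx()
 * later memcpy's each received frame out of its pool slice into a fresh
 * skb, trading an extra copy for DMA buffers that never move or get
 * remapped.
 */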
1495 
1501 static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1502  struct pch_gbe_tx_ring *tx_ring)
1503 {
1504  struct pch_gbe_buffer *buffer_info;
1505  struct sk_buff *skb;
1506  unsigned int i;
1507  unsigned int bufsz;
1508  struct pch_gbe_tx_desc *tx_desc;
1509 
1510  bufsz =
1511  adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1512 
1513  for (i = 0; i < tx_ring->count; i++) {
1514  buffer_info = &tx_ring->buffer_info[i];
1515  skb = netdev_alloc_skb(adapter->netdev, bufsz);
1516  skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1517  buffer_info->skb = skb;
1518  tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1519  tx_desc->gbec_status = (DSC_INIT16);
1520  }
1521  return;
1522 }
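/* Caveat: netdev_alloc_skb() can return NULL and its result is used here
 * without a check; a defensive variant of this loop would skip or unwind
 * on allocation failure before skb_reserve() touches the buffer.
 */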
1523 
1532 static bool
1533 pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1534  struct pch_gbe_tx_ring *tx_ring)
1535 {
1536  struct pch_gbe_tx_desc *tx_desc;
1537  struct pch_gbe_buffer *buffer_info;
1538  struct sk_buff *skb;
1539  unsigned int i;
1540  unsigned int cleaned_count = 0;
1541  bool cleaned = false;
1542  int unused, thresh;
1543 
1544  pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1545 
1546  i = tx_ring->next_to_clean;
1547  tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1548  pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
1549  tx_desc->gbec_status, tx_desc->dma_status);
1550 
1551  unused = PCH_GBE_DESC_UNUSED(tx_ring);
1552  thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
1553  if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh))
1554  { /* current marked clean, tx queue filling up, do extra clean */
1555  int j, k;
1556  if (unused < 8) { /* tx queue nearly full */
1557  pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n",
1558  tx_ring->next_to_clean,tx_ring->next_to_use,unused);
1559  }
1560 
1561  /* current marked clean, scan for more that need cleaning. */
1562  k = i;
1563  for (j = 0; j < PCH_GBE_TX_WEIGHT; j++)
1564  {
1565  tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
1566  if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/
1567  if (++k >= tx_ring->count) k = 0; /*increment, wrap*/
1568  }
1569  if (j < PCH_GBE_TX_WEIGHT) {
1570  pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
1571  unused,j, i,k, tx_ring->next_to_use, tx_desc->gbec_status);
1572  i = k; /*found one to clean, usu gbec_status==2000.*/
1573  }
1574  }
1575 
1576  while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1577  pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1578  buffer_info = &tx_ring->buffer_info[i];
1579  skb = buffer_info->skb;
1580  cleaned = true;
1581 
1582  if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1583  adapter->stats.tx_aborted_errors++;
1584  pr_err("Transfer Abort Error\n");
1585  } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1586  ) {
1587  adapter->stats.tx_carrier_errors++;
1588  pr_err("Transfer Carrier Sense Error\n");
1589  } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1590  ) {
1591  adapter->stats.tx_aborted_errors++;
1592  pr_err("Transfer Collision Abort Error\n");
1593  } else if ((tx_desc->gbec_status &
1594  (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1595  PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1596  adapter->stats.collisions++;
1597  adapter->stats.tx_packets++;
1598  adapter->stats.tx_bytes += skb->len;
1599  pr_debug("Transfer Collision\n");
1600  } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1601  ) {
1602  adapter->stats.tx_packets++;
1603  adapter->stats.tx_bytes += skb->len;
1604  }
1605  if (buffer_info->mapped) {
1606  pr_debug("unmap buffer_info->dma : %d\n", i);
1607  dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1608  buffer_info->length, DMA_TO_DEVICE);
1609  buffer_info->mapped = false;
1610  }
1611  if (buffer_info->skb) {
1612  pr_debug("trim buffer_info->skb : %d\n", i);
1613  skb_trim(buffer_info->skb, 0);
1614  }
1615  tx_desc->gbec_status = DSC_INIT16;
1616  if (unlikely(++i == tx_ring->count))
1617  i = 0;
1618  tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1619 
1620  /* weight of a sort for tx, to avoid endless transmit cleanup */
1621  if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1622  cleaned = false;
1623  break;
1624  }
1625  }
1626  pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1627  cleaned_count);
1628  if (cleaned_count > 0) { /*skip this if nothing cleaned*/
1629  /* Recover from running out of Tx resources in xmit_frame */
1630  spin_lock(&tx_ring->tx_lock);
1631  if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
1632  {
1633  netif_wake_queue(adapter->netdev);
1634  adapter->stats.tx_restart_count++;
1635  pr_debug("Tx wake queue\n");
1636  }
1637 
1638  tx_ring->next_to_clean = i;
1639 
1640  pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1641  spin_unlock(&tx_ring->tx_lock);
1642  }
1643  return cleaned;
1644 }
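/* PCH_GBE_TX_WEIGHT bounds how many descriptors a single call may reap,
 * so a busy transmitter cannot monopolize the cleanup loop; returning
 * false in that case tells the caller that more Tx work remains.
 */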
1645 
1656 static bool
1657 pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1658  struct pch_gbe_rx_ring *rx_ring,
1659  int *work_done, int work_to_do)
1660 {
1661  struct net_device *netdev = adapter->netdev;
1662  struct pci_dev *pdev = adapter->pdev;
1663  struct pch_gbe_buffer *buffer_info;
1664  struct pch_gbe_rx_desc *rx_desc;
1665  u32 length;
1666  unsigned int i;
1667  unsigned int cleaned_count = 0;
1668  bool cleaned = false;
1669  struct sk_buff *skb;
1670  u8 dma_status;
1671  u16 gbec_status;
1672  u32 tcp_ip_status;
1673 
1674  i = rx_ring->next_to_clean;
1675 
1676  while (*work_done < work_to_do) {
1677  /* Check Rx descriptor status */
1678  rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1679  if (rx_desc->gbec_status == DSC_INIT16)
1680  break;
1681  cleaned = true;
1682  cleaned_count++;
1683 
1684  dma_status = rx_desc->dma_status;
1685  gbec_status = rx_desc->gbec_status;
1686  tcp_ip_status = rx_desc->tcp_ip_status;
1687  rx_desc->gbec_status = DSC_INIT16;
1688  buffer_info = &rx_ring->buffer_info[i];
1689  skb = buffer_info->skb;
1690  buffer_info->skb = NULL;
1691 
1692  /* unmap dma */
1693  dma_unmap_single(&pdev->dev, buffer_info->dma,
1694  buffer_info->length, DMA_FROM_DEVICE);
1695  buffer_info->mapped = false;
1696 
1697  pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
1698  "TCP:0x%08x] BufInf = 0x%p\n",
1699  i, dma_status, gbec_status, tcp_ip_status,
1700  buffer_info);
1701  /* Error check */
1702  if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1703  adapter->stats.rx_frame_errors++;
1704  pr_err("Receive Not Octal Error\n");
1705  } else if (unlikely(gbec_status &
1706  PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1707  adapter->stats.rx_frame_errors++;
1708  pr_err("Receive Nibble Error\n");
1709  } else if (unlikely(gbec_status &
1710  PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1711  adapter->stats.rx_crc_errors++;
1712  pr_err("Receive CRC Error\n");
1713  } else {
1714  /* get receive length */
1715  /* length conversion [-3]; the reported length also includes the FCS */
1716  length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1717  if (rx_desc->rx_words_eob & 0x02)
1718  length = length - 4;
1719  /*
1720  * buffer_info->rx_buffer: [Header:14][payload]
1721  * skb->data: [Reserve:2][Header:14][payload]
1722  */
1723  memcpy(skb->data, buffer_info->rx_buffer, length);
1724 
1725  /* update status of driver */
1726  adapter->stats.rx_bytes += length;
1727  adapter->stats.rx_packets++;
1728  if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1729  adapter->stats.multicast++;
1730  /* Write metadata of skb */
1731  skb_put(skb, length);
1732 
1733 #ifdef CONFIG_PCH_PTP
1734  pch_rx_timestamp(adapter, skb);
1735 #endif
1736 
1737  skb->protocol = eth_type_trans(skb, netdev);
1738  if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1739  skb->ip_summed = CHECKSUM_NONE;
1740  else
1741  skb->ip_summed = CHECKSUM_UNNECESSARY;
1742 
1743  napi_gro_receive(&adapter->napi, skb);
1744  (*work_done)++;
1745  pr_debug("Receive skb->ip_summed: %d length: %d\n",
1746  skb->ip_summed, length);
1747  }
1748  /* return some buffers to hardware, one at a time is too slow */
1749  if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1750  pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1751  cleaned_count);
1752  cleaned_count = 0;
1753  }
1754  if (++i == rx_ring->count)
1755  i = 0;
1756  }
1757  rx_ring->next_to_clean = i;
1758  if (cleaned_count)
1759  pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1760  return cleaned;
1761 }
1762 
1771 int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1772  struct pch_gbe_tx_ring *tx_ring)
1773 {
1774  struct pci_dev *pdev = adapter->pdev;
1775  struct pch_gbe_tx_desc *tx_desc;
1776  int size;
1777  int desNo;
1778 
1779  size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1780  tx_ring->buffer_info = vzalloc(size);
1781  if (!tx_ring->buffer_info)
1782  return -ENOMEM;
1783 
1784  tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1785 
1786  tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1787  &tx_ring->dma, GFP_KERNEL);
1788  if (!tx_ring->desc) {
1789  vfree(tx_ring->buffer_info);
1790  pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1791  return -ENOMEM;
1792  }
1793  memset(tx_ring->desc, 0, tx_ring->size);
1794 
1795  tx_ring->next_to_use = 0;
1796  tx_ring->next_to_clean = 0;
1797  spin_lock_init(&tx_ring->tx_lock);
1798 
1799  for (desNo = 0; desNo < tx_ring->count; desNo++) {
1800  tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1801  tx_desc->gbec_status = DSC_INIT16;
1802  }
1803  pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
1804  "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1805  tx_ring->desc, (unsigned long long)tx_ring->dma,
1806  tx_ring->next_to_clean, tx_ring->next_to_use);
1807  return 0;
1808 }
1809 
1818 int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1819  struct pch_gbe_rx_ring *rx_ring)
1820 {
1821  struct pci_dev *pdev = adapter->pdev;
1822  struct pch_gbe_rx_desc *rx_desc;
1823  int size;
1824  int desNo;
1825 
1826  size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1827  rx_ring->buffer_info = vzalloc(size);
1828  if (!rx_ring->buffer_info)
1829  return -ENOMEM;
1830 
1831  rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1832  rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1833  &rx_ring->dma, GFP_KERNEL);
1834 
1835  if (!rx_ring->desc) {
1836  pr_err("Unable to allocate memory for the receive descriptor ring\n");
1837  vfree(rx_ring->buffer_info);
1838  return -ENOMEM;
1839  }
1840  memset(rx_ring->desc, 0, rx_ring->size);
1841  rx_ring->next_to_clean = 0;
1842  rx_ring->next_to_use = 0;
1843  for (desNo = 0; desNo < rx_ring->count; desNo++) {
1844  rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1845  rx_desc->gbec_status = DSC_INIT16;
1846  }
1847  pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
1848  "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1849  rx_ring->desc, (unsigned long long)rx_ring->dma,
1850  rx_ring->next_to_clean, rx_ring->next_to_use);
1851  return 0;
1852 }
1853 
1859 void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1860  struct pch_gbe_tx_ring *tx_ring)
1861 {
1862  struct pci_dev *pdev = adapter->pdev;
1863 
1864  pch_gbe_clean_tx_ring(adapter, tx_ring);
1865  vfree(tx_ring->buffer_info);
1866  tx_ring->buffer_info = NULL;
1867  pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1868  tx_ring->desc = NULL;
1869 }
1870 
1876 void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1877  struct pch_gbe_rx_ring *rx_ring)
1878 {
1879  struct pci_dev *pdev = adapter->pdev;
1880 
1881  pch_gbe_clean_rx_ring(adapter, rx_ring);
1882  vfree(rx_ring->buffer_info);
1883  rx_ring->buffer_info = NULL;
1884  pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1885  rx_ring->desc = NULL;
1886 }
1887 
1895 static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1896 {
1897  struct net_device *netdev = adapter->netdev;
1898  int err;
1899  int flags;
1900 
1901  flags = IRQF_SHARED;
1902  adapter->have_msi = false;
1903  err = pci_enable_msi(adapter->pdev);
1904  pr_debug("call pci_enable_msi\n");
1905  if (err) {
1906  pr_debug("call pci_enable_msi - Error: %d\n", err);
1907  } else {
1908  flags = 0;
1909  adapter->have_msi = true;
1910  }
1911  err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1912  flags, netdev->name, netdev);
1913  if (err)
1914  pr_err("Unable to allocate interrupt Error: %d\n", err);
1915  pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
1916  adapter->have_msi, flags, err);
1917  return err;
1918 }
1919 
1920 
1928 int pch_gbe_up(struct pch_gbe_adapter *adapter)
1929 {
1930  struct net_device *netdev = adapter->netdev;
1931  struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1932  struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1933  int err = -EINVAL;
1934 
1935  /* Ensure we have a valid MAC */
1936  if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1937  pr_err("Error: Invalid MAC address\n");
1938  goto out;
1939  }
1940 
1941  /* hardware has been reset, we need to reload some things */
1942  pch_gbe_set_multi(netdev);
1943 
1944  pch_gbe_setup_tctl(adapter);
1945  pch_gbe_configure_tx(adapter);
1946  pch_gbe_setup_rctl(adapter);
1947  pch_gbe_configure_rx(adapter);
1948 
1949  err = pch_gbe_request_irq(adapter);
1950  if (err) {
1951  pr_err("Error: can't bring device up - irq request failed\n");
1952  goto out;
1953  }
1954  err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1955  if (err) {
1956  pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
1957  goto freeirq;
1958  }
1959  pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1960  pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1961  adapter->tx_queue_len = netdev->tx_queue_len;
1962  pch_gbe_enable_dma_rx(&adapter->hw);
1963  pch_gbe_enable_mac_rx(&adapter->hw);
1964 
1965  mod_timer(&adapter->watchdog_timer, jiffies);
1966 
1967  napi_enable(&adapter->napi);
1968  pch_gbe_irq_enable(adapter);
1969  netif_start_queue(adapter->netdev);
1970 
1971  return 0;
1972 
1973 freeirq:
1974  pch_gbe_free_irq(adapter);
1975 out:
1976  return err;
1977 }
1978 
1983 void pch_gbe_down(struct pch_gbe_adapter *adapter)
1984 {
1985  struct net_device *netdev = adapter->netdev;
1986  struct pci_dev *pdev = adapter->pdev;
1987  struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1988 
1989  /* signal that we're down so the interrupt handler does not
1990  * reschedule our watchdog timer */
1991  napi_disable(&adapter->napi);
1992  atomic_set(&adapter->irq_sem, 0);
1993 
1994  pch_gbe_irq_disable(adapter);
1995  pch_gbe_free_irq(adapter);
1996 
1997  del_timer_sync(&adapter->watchdog_timer);
1998 
1999  netdev->tx_queue_len = adapter->tx_queue_len;
2000  netif_carrier_off(netdev);
2001  netif_stop_queue(netdev);
2002 
2003  if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
2004  pch_gbe_reset(adapter);
2005  pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
2006  pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
2007 
2008  pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
2009  rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
2010  rx_ring->rx_buff_pool_logic = 0;
2011  rx_ring->rx_buff_pool_size = 0;
2012  rx_ring->rx_buff_pool = NULL;
2013 }
2014 
2022 static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2023 {
2024  struct pch_gbe_hw *hw = &adapter->hw;
2025  struct net_device *netdev = adapter->netdev;
2026 
2027  adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2028  hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2029  hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2030 
2031  /* Initialize the hardware-specific values */
2032  if (pch_gbe_hal_setup_init_funcs(hw)) {
2033  pr_err("Hardware Initialization Failure\n");
2034  return -EIO;
2035  }
2036  if (pch_gbe_alloc_queues(adapter)) {
2037  pr_err("Unable to allocate memory for queues\n");
2038  return -ENOMEM;
2039  }
2040  spin_lock_init(&adapter->hw.miim_lock);
2041  spin_lock_init(&adapter->stats_lock);
2042  spin_lock_init(&adapter->ethtool_lock);
2043  atomic_set(&adapter->irq_sem, 0);
2044  pch_gbe_irq_disable(adapter);
2045 
2046  pch_gbe_init_stats(adapter);
2047 
2048  pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
2049  (u32) adapter->rx_buffer_len,
2050  hw->mac.min_frame_size, hw->mac.max_frame_size);
2051  return 0;
2052 }
2053 
2054 /**
2055  * pch_gbe_open - Called when a network interface is made active
2056  * @netdev:  Network interface device structure
2057  * Returns
2058  *	0:		Successfully
2059  *	Negative value:	Failed
2060  */
2061 static int pch_gbe_open(struct net_device *netdev)
2062 {
2063  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2064  struct pch_gbe_hw *hw = &adapter->hw;
2065  int err;
2066 
2067  /* allocate transmit descriptors */
2068  err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
2069  if (err)
2070  goto err_setup_tx;
2071  /* allocate receive descriptors */
2072  err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
2073  if (err)
2074  goto err_setup_rx;
2075  pch_gbe_hal_power_up_phy(hw);
2076  err = pch_gbe_up(adapter);
2077  if (err)
2078  goto err_up;
2079  pr_debug("Success End\n");
2080  return 0;
2081 
2082 err_up:
2083  if (!adapter->wake_up_evt)
2084  pch_gbe_hal_power_down_phy(hw);
2085  pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2086 err_setup_rx:
2087  pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2088 err_setup_tx:
2089  pch_gbe_reset(adapter);
2090  pr_err("Error End\n");
2091  return err;
2092 }
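
The error path above is the usual kernel goto-unwind ladder: each label releases exactly what was acquired before the failure point, in reverse order. A minimal sketch of the pattern, with hypothetical acquire_*/release_* helpers standing in for the driver's setup and free routines:

    static int example_open(void)
    {
    	int err;

    	err = acquire_tx();	/* analogue of pch_gbe_setup_tx_resources() */
    	if (err)
    		goto err_tx;
    	err = acquire_rx();	/* analogue of pch_gbe_setup_rx_resources() */
    	if (err)
    		goto err_rx;
    	err = bring_up();	/* analogue of pch_gbe_up() */
    	if (err)
    		goto err_up;
    	return 0;

    err_up:
    	release_rx();		/* undo acquire_rx() */
    err_rx:
    	release_tx();		/* undo acquire_tx() */
    err_tx:
    	return err;
    }
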
2093 
2094 /**
2095  * pch_gbe_stop - Disables a network interface
2096  * @netdev:  Network interface device structure
2097  * Returns
2098  *	0: Successfully
2099  */
2100 static int pch_gbe_stop(struct net_device *netdev)
2101 {
2102  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2103  struct pch_gbe_hw *hw = &adapter->hw;
2104 
2105  pch_gbe_down(adapter);
2106  if (!adapter->wake_up_evt)
2107  pch_gbe_hal_power_down_phy(hw);
2108  pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2109  pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2110  return 0;
2111 }
2112 
2113 /**
2114  * pch_gbe_xmit_frame - Packet transmitting start
2115  * @skb:     Socket buffer structure
2116  * @netdev:  Network interface device structure
2117  * Returns
2118  *	- NETDEV_TX_OK:   Normal end
2119  *	- NETDEV_TX_BUSY: Error end
2120  */
2121 static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2122 {
2123  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2124  struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2125  unsigned long flags;
2126 
2127  if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
2128  /* Collision - tell upper layer to requeue */
2129  return NETDEV_TX_LOCKED;
2130  }
2131  if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2132  netif_stop_queue(netdev);
2133  spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2134  pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
2135  tx_ring->next_to_use, tx_ring->next_to_clean);
2136  return NETDEV_TX_BUSY;
2137  }
2138 
2139  /* CRC and ITAG are not supported */
2140  pch_gbe_tx_queue(adapter, tx_ring, skb);
2141  spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2142  return NETDEV_TX_OK;
2143 }
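
The return values above follow the ndo_start_xmit contract: NETDEV_TX_OK means the skb was consumed, NETDEV_TX_BUSY asks the stack to requeue and retry (so the queue must be stopped first, as done above), and NETDEV_TX_LOCKED reports a lost race on a driver-private transmit lock. A minimal sketch of that contract, with hypothetical example_ring_full()/example_queue_tx() helpers:

    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	if (example_ring_full(dev)) {
    		/* Stop the queue *before* returning BUSY so the stack
    		 * does not immediately call us again.
    		 */
    		netif_stop_queue(dev);
    		return NETDEV_TX_BUSY;
    	}
    	example_queue_tx(dev, skb);	/* consumes the skb */
    	return NETDEV_TX_OK;
    }
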
2144 
2145 /**
2146  * pch_gbe_get_stats - Get System Network Statistics
2147  * @netdev:  Network interface device structure
2148  * Returns:  The current stats
2149  */
2150 static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
2151 {
2152  /* only return the current stats */
2153  return &netdev->stats;
2154 }
2155 
2156 /**
2157  * pch_gbe_set_multi - Multicast and Promiscuous mode set
2158  * @netdev:   Network interface device structure
2159  */
2160 static void pch_gbe_set_multi(struct net_device *netdev)
2161 {
2162  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2163  struct pch_gbe_hw *hw = &adapter->hw;
2164  struct netdev_hw_addr *ha;
2165  u8 *mta_list;
2166  u32 rctl;
2167  int i;
2168  int mc_count;
2169 
2170  pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
2171 
2172  /* Check for Promiscuous and All Multicast modes */
2173  rctl = ioread32(&hw->reg->RX_MODE);
2174  mc_count = netdev_mc_count(netdev);
2175  if ((netdev->flags & IFF_PROMISC)) {
2176  rctl &= ~PCH_GBE_ADD_FIL_EN;
2177  rctl &= ~PCH_GBE_MLT_FIL_EN;
2178  } else if ((netdev->flags & IFF_ALLMULTI)) {
2179  /* accept all multicast frames */
2180  rctl |= PCH_GBE_ADD_FIL_EN;
2181  rctl &= ~PCH_GBE_MLT_FIL_EN;
2182  } else {
2183  if (mc_count >= PCH_GBE_MAR_ENTRIES) {
2184  /* accept all multicast frames */
2185  rctl |= PCH_GBE_ADD_FIL_EN;
2186  rctl &= ~PCH_GBE_MLT_FIL_EN;
2187  } else {
2188  rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
2189  }
2190  }
2191  iowrite32(rctl, &hw->reg->RX_MODE);
2192 
2193  if (mc_count >= PCH_GBE_MAR_ENTRIES)
2194  return;
2195  mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
2196  if (!mta_list)
2197  return;
2198 
2199  /* The shared function expects a packed array of only addresses. */
2200  i = 0;
2201  netdev_for_each_mc_addr(ha, netdev) {
2202  if (i == mc_count)
2203  break;
2204  memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
2205  }
2206  pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
2207  PCH_GBE_MAR_ENTRIES);
2208  kfree(mta_list);
2209 
2210  pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
2211  ioread32(&hw->reg->RX_MODE), mc_count);
2212 }
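
The RX_MODE decisions above can be summarized as a truth table:

    /* netdev state                          ADD_FIL_EN  MLT_FIL_EN  MAR entries
     * IFF_PROMISC                           off         off         untouched
     * IFF_ALLMULTI                          on          off         untouched
     * plain, >= PCH_GBE_MAR_ENTRIES addrs   on          off         untouched
     * plain, <  PCH_GBE_MAR_ENTRIES addrs   on          on          programmed
     */
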
2213 
2214 /**
2215  * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2216  * @netdev: Network interface device structure
2217  * @addr:   Pointer to an address structure
2218  * Returns
2219  *	0:		Successfully
2220  *	-EADDRNOTAVAIL:	Failed
2221  */
2222 static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2223 {
2224  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2225  struct sockaddr *skaddr = addr;
2226  int ret_val;
2227 
2228  if (!is_valid_ether_addr(skaddr->sa_data)) {
2229  ret_val = -EADDRNOTAVAIL;
2230  } else {
2231  memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2232  memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2233  pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2234  ret_val = 0;
2235  }
2236  pr_debug("ret_val : 0x%08x\n", ret_val);
2237  pr_debug("dev_addr : %pM\n", netdev->dev_addr);
2238  pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
2239  pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2240  ioread32(&adapter->hw.reg->mac_adr[0].high),
2241  ioread32(&adapter->hw.reg->mac_adr[0].low));
2242  return ret_val;
2243 }
2244 
2245 /**
2246  * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2247  * @netdev:   Network interface device structure
2248  * @new_mtu:  New value for maximum frame size
2249  * Returns
2250  *	0:		Successfully
2251  *	-EINVAL:	Failed
2252  */
2253 static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2254 {
2255  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2256  int max_frame;
2257  unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2258  int err;
2259 
2260  max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2261  if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2262  (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
2263  pr_err("Invalid MTU setting\n");
2264  return -EINVAL;
2265  }
2266  if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2267  adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2268  else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2269  adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2270  else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2271  adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2272  else
2273  adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2274 
2275  if (netif_running(netdev)) {
2276  pch_gbe_down(adapter);
2277  err = pch_gbe_up(adapter);
2278  if (err) {
2279  adapter->rx_buffer_len = old_rx_buffer_len;
2280  pch_gbe_up(adapter);
2281  return -ENOMEM;
2282  } else {
2283  netdev->mtu = new_mtu;
2284  adapter->hw.mac.max_frame_size = max_frame;
2285  }
2286  } else {
2287  pch_gbe_reset(adapter);
2288  netdev->mtu = new_mtu;
2289  adapter->hw.mac.max_frame_size = max_frame;
2290  }
2291 
2292  pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2293  max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2294  adapter->hw.mac.max_frame_size);
2295  return 0;
2296 }
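
Concretely, the bucketing above maps MTUs to receive buffer sizes as follows (max_frame = mtu + ETH_HLEN + ETH_FCS_LEN = mtu + 18):

    /*   new_mtu   max_frame   rx_buffer_len
     *      1500        1518   PCH_GBE_FRAME_SIZE_2048
     *      4000        4018   PCH_GBE_FRAME_SIZE_4096
     *      8000        8018   PCH_GBE_FRAME_SIZE_8192
     *     10300       10318   PCH_GBE_MAX_RX_BUFFER_SIZE (0x2880 = 10368)
     * 10318 is PCH_GBE_MAX_JUMBO_FRAME_SIZE, the largest accepted frame.
     */
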
2297 
2298 /**
2299  * pch_gbe_set_features - Reset device after features changed
2300  * @netdev:    Network interface device structure
2301  * @features:  New features
2302  * Returns
2303  *	0:		HW state updated successfully
2304  */
2305 static int pch_gbe_set_features(struct net_device *netdev,
2306  netdev_features_t features)
2307 {
2308  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2309  netdev_features_t changed = features ^ netdev->features;
2310 
2311  if (!(changed & NETIF_F_RXCSUM))
2312  return 0;
2313 
2314  if (netif_running(netdev))
2315  pch_gbe_reinit_locked(adapter);
2316  else
2317  pch_gbe_reset(adapter);
2318 
2319  return 0;
2320 }
2321 
2322 /**
2323  * pch_gbe_ioctl - Controls register through a MII interface
2324  * @netdev:   Network interface device structure
2325  * @ifr:      Pointer to ifr structure
2326  * @cmd:      Control command
2327  * Returns
2328  *	0:	Successfully
2329  *	Negative value:	Failed
2330  */
2331 static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2332 {
2333  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2334 
2335  pr_debug("cmd : 0x%04x\n", cmd);
2336 
2337 #ifdef CONFIG_PCH_PTP
2338  if (cmd == SIOCSHWTSTAMP)
2339  return hwtstamp_ioctl(netdev, ifr, cmd);
2340 #endif
2341 
2342  return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2343 }
2344 
2345 /**
2346  * pch_gbe_tx_timeout - Respond to a Tx Hang
2347  * @netdev:   Network interface device structure
2348  */
2349 static void pch_gbe_tx_timeout(struct net_device *netdev)
2350 {
2351  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2352 
2353  /* Do the reset outside of interrupt context */
2354  adapter->stats.tx_timeout_count++;
2355  schedule_work(&adapter->reset_task);
2356 }
2357 
2358 /**
2359  * pch_gbe_napi_poll - NAPI Rx polling callback
2360  * @napi:    Pointer to the NAPI structure
2361  * @budget:  Maximum number of packets to process in one poll
2362  *
2363  * Returns
2364  *	The number of packets processed
2365  */
2366 static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2367 {
2368  struct pch_gbe_adapter *adapter =
2369  container_of(napi, struct pch_gbe_adapter, napi);
2370  int work_done = 0;
2371  bool poll_end_flag = false;
2372  bool cleaned = false;
2373 
2374  pr_debug("budget : %d\n", budget);
2375 
2376  pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2377  cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2378 
2379  if (cleaned)
2380  work_done = budget;
2381  /* If no Tx and not enough Rx work done,
2382  * exit the polling mode
2383  */
2384  if (work_done < budget)
2385  poll_end_flag = true;
2386 
2387  if (poll_end_flag) {
2388  napi_complete(napi);
2389  pch_gbe_irq_enable(adapter);
2390  }
2391 
2392  if (adapter->rx_stop_flag) {
2393  adapter->rx_stop_flag = false;
2394  pch_gbe_enable_dma_rx(&adapter->hw);
2395  }
2396 
2397  pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
2398  poll_end_flag, work_done, budget);
2399 
2400  return work_done;
2401 }
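
For comparison, the canonical NAPI poll shape that pch_gbe_napi_poll follows, as a sketch with hypothetical example_clean_rx()/example_irq_enable() stand-ins:

    static int example_poll(struct napi_struct *napi, int budget)
    {
    	/* Process at most `budget` received packets. */
    	int work_done = example_clean_rx(napi, budget);

    	if (work_done < budget) {
    		/* All pending work done: leave polling mode and let
    		 * the device interrupt again.
    		 */
    		napi_complete(napi);
    		example_irq_enable(napi);
    	}
    	return work_done;
    }
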
2402 
2403 #ifdef CONFIG_NET_POLL_CONTROLLER
2404 
2405 /**
2406  * pch_gbe_netpoll - Polling 'interrupt' handler (netconsole and the like)
2407  */
2408 static void pch_gbe_netpoll(struct net_device *netdev)
2409 {
2410  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2411 
2412  disable_irq(adapter->pdev->irq);
2413  pch_gbe_intr(adapter->pdev->irq, netdev);
2414  enable_irq(adapter->pdev->irq);
2415 }
2416 #endif
2417 
2418 static const struct net_device_ops pch_gbe_netdev_ops = {
2419  .ndo_open = pch_gbe_open,
2420  .ndo_stop = pch_gbe_stop,
2421  .ndo_start_xmit = pch_gbe_xmit_frame,
2422  .ndo_get_stats = pch_gbe_get_stats,
2423  .ndo_set_mac_address = pch_gbe_set_mac,
2424  .ndo_tx_timeout = pch_gbe_tx_timeout,
2425  .ndo_change_mtu = pch_gbe_change_mtu,
2426  .ndo_set_features = pch_gbe_set_features,
2427  .ndo_do_ioctl = pch_gbe_ioctl,
2428  .ndo_set_rx_mode = pch_gbe_set_multi,
2429 #ifdef CONFIG_NET_POLL_CONTROLLER
2430  .ndo_poll_controller = pch_gbe_netpoll,
2431 #endif
2432 };
2433 
2434 static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2435  pci_channel_state_t state)
2436 {
2437  struct net_device *netdev = pci_get_drvdata(pdev);
2438  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2439 
2440  netif_device_detach(netdev);
2441  if (netif_running(netdev))
2442  pch_gbe_down(adapter);
2443  pci_disable_device(pdev);
2444  /* Request a slot reset. */
2445  return PCI_ERS_RESULT_NEED_RESET;
2446 }
2447 
2448 static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2449 {
2450  struct net_device *netdev = pci_get_drvdata(pdev);
2451  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2452  struct pch_gbe_hw *hw = &adapter->hw;
2453 
2454  if (pci_enable_device(pdev)) {
2455  pr_err("Cannot re-enable PCI device after reset\n");
2456  return PCI_ERS_RESULT_DISCONNECT;
2457  }
2458  pci_set_master(pdev);
2459  pci_enable_wake(pdev, PCI_D0, 0);
2460  pch_gbe_hal_power_up_phy(hw);
2461  pch_gbe_reset(adapter);
2462  /* Clear wake up status */
2463  pch_gbe_mac_set_wol_event(hw, 0);
2464 
2465  return PCI_ERS_RESULT_RECOVERED;
2466 }
2467 
2468 static void pch_gbe_io_resume(struct pci_dev *pdev)
2469 {
2470  struct net_device *netdev = pci_get_drvdata(pdev);
2471  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2472 
2473  if (netif_running(netdev)) {
2474  if (pch_gbe_up(adapter)) {
2475  pr_debug("can't bring device back up after reset\n");
2476  return;
2477  }
2478  }
2479  netif_device_attach(netdev);
2480 }
2481 
2482 static int __pch_gbe_suspend(struct pci_dev *pdev)
2483 {
2484  struct net_device *netdev = pci_get_drvdata(pdev);
2485  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2486  struct pch_gbe_hw *hw = &adapter->hw;
2487  u32 wufc = adapter->wake_up_evt;
2488  int retval = 0;
2489 
2490  netif_device_detach(netdev);
2491  if (netif_running(netdev))
2492  pch_gbe_down(adapter);
2493  if (wufc) {
2494  pch_gbe_set_multi(netdev);
2495  pch_gbe_setup_rctl(adapter);
2496  pch_gbe_configure_rx(adapter);
2497  pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2498  hw->mac.link_duplex);
2499  pch_gbe_set_mode(adapter, hw->mac.link_speed,
2500  hw->mac.link_duplex);
2501  pch_gbe_mac_set_wol_event(hw, wufc);
2502  pci_disable_device(pdev);
2503  } else {
2504  pch_gbe_hal_power_down_phy(hw);
2505  pch_gbe_mac_set_wol_event(hw, wufc);
2506  pci_disable_device(pdev);
2507  }
2508  return retval;
2509 }
2510 
2511 #ifdef CONFIG_PM
2512 static int pch_gbe_suspend(struct device *device)
2513 {
2514  struct pci_dev *pdev = to_pci_dev(device);
2515 
2516  return __pch_gbe_suspend(pdev);
2517 }
2518 
2519 static int pch_gbe_resume(struct device *device)
2520 {
2521  struct pci_dev *pdev = to_pci_dev(device);
2522  struct net_device *netdev = pci_get_drvdata(pdev);
2523  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2524  struct pch_gbe_hw *hw = &adapter->hw;
2525  int err;
2526 
2527  err = pci_enable_device(pdev);
2528  if (err) {
2529  pr_err("Cannot enable PCI device from suspend\n");
2530  return err;
2531  }
2532  pci_set_master(pdev);
2533  pch_gbe_hal_power_up_phy(hw);
2534  pch_gbe_reset(adapter);
2535  /* Clear wake on lan control and status */
2536  pch_gbe_mac_set_wol_event(hw, 0);
2537 
2538  if (netif_running(netdev))
2539  pch_gbe_up(adapter);
2540  netif_device_attach(netdev);
2541 
2542  return 0;
2543 }
2544 #endif /* CONFIG_PM */
2545 
2546 static void pch_gbe_shutdown(struct pci_dev *pdev)
2547 {
2548  __pch_gbe_suspend(pdev);
2549  if (system_state == SYSTEM_POWER_OFF) {
2550  pci_wake_from_d3(pdev, true);
2551  pci_set_power_state(pdev, PCI_D3hot);
2552  }
2553 }
2554 
2555 static void pch_gbe_remove(struct pci_dev *pdev)
2556 {
2557  struct net_device *netdev = pci_get_drvdata(pdev);
2558  struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2559 
2560  cancel_work_sync(&adapter->reset_task);
2561  unregister_netdev(netdev);
2562 
2563  pch_gbe_hal_phy_hw_reset(&adapter->hw);
2564 
2565  kfree(adapter->tx_ring);
2566  kfree(adapter->rx_ring);
2567 
2568  iounmap(adapter->hw.reg);
2569  pci_release_regions(pdev);
2570  free_netdev(netdev);
2571  pci_disable_device(pdev);
2572 }
2573 
2574 static int pch_gbe_probe(struct pci_dev *pdev,
2575  const struct pci_device_id *pci_id)
2576 {
2577  struct net_device *netdev;
2578  struct pch_gbe_adapter *adapter;
2579  int ret;
2580 
2581  ret = pci_enable_device(pdev);
2582  if (ret)
2583  return ret;
2584 
2585  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2586  || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2587  ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2588  if (ret) {
2589  ret = pci_set_consistent_dma_mask(pdev,
2590  DMA_BIT_MASK(32));
2591  if (ret) {
2592  dev_err(&pdev->dev, "ERR: No usable DMA "
2593  "configuration, aborting\n");
2594  goto err_disable_device;
2595  }
2596  }
2597  }
2598 
2599  ret = pci_request_regions(pdev, KBUILD_MODNAME);
2600  if (ret) {
2601  dev_err(&pdev->dev,
2602  "ERR: Can't reserve PCI I/O and memory resources\n");
2603  goto err_disable_device;
2604  }
2605  pci_set_master(pdev);
2606 
2607  netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2608  if (!netdev) {
2609  ret = -ENOMEM;
2610  goto err_release_pci;
2611  }
2612  SET_NETDEV_DEV(netdev, &pdev->dev);
2613 
2614  pci_set_drvdata(pdev, netdev);
2615  adapter = netdev_priv(netdev);
2616  adapter->netdev = netdev;
2617  adapter->pdev = pdev;
2618  adapter->hw.back = adapter;
2619  adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2620  if (!adapter->hw.reg) {
2621  ret = -EIO;
2622  dev_err(&pdev->dev, "Can't ioremap\n");
2623  goto err_free_netdev;
2624  }
2625 
2626 #ifdef CONFIG_PCH_PTP
2627  adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2628  PCI_DEVFN(12, 4));
2629  if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2630  pr_err("Bad ptp filter\n");
2631  ret = -EINVAL; goto err_iounmap; /* don't leak the mapped BAR */
2632  }
2633 #endif
2634 
2635  netdev->netdev_ops = &pch_gbe_netdev_ops;
2636  netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2637  netif_napi_add(netdev, &adapter->napi,
2638  pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2639  netdev->hw_features = NETIF_F_RXCSUM |
2640  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2641  netdev->features = netdev->hw_features;
2642  pch_gbe_set_ethtool_ops(netdev);
2643 
2644  pch_gbe_mac_load_mac_addr(&adapter->hw);
2645  pch_gbe_mac_reset_hw(&adapter->hw);
2646 
2647  /* setup the private structure */
2648  ret = pch_gbe_sw_init(adapter);
2649  if (ret)
2650  goto err_iounmap;
2651 
2652  /* Initialize PHY */
2653  ret = pch_gbe_init_phy(adapter);
2654  if (ret) {
2655  dev_err(&pdev->dev, "PHY initialize error\n");
2656  goto err_free_adapter;
2657  }
2658  pch_gbe_hal_get_bus_info(&adapter->hw);
2659 
2660  /* Read the MAC address and store it in the private data */
2661  ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2662  if (ret) {
2663  dev_err(&pdev->dev, "MAC address Read Error\n");
2664  goto err_free_adapter;
2665  }
2666 
2667  memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2668  if (!is_valid_ether_addr(netdev->dev_addr)) {
2669  /*
2670  * If the MAC is invalid (or just missing), display a warning
2671  * but do not abort setting up the device. pch_gbe_up will
2672  * prevent the interface from being brought up until a valid MAC
2673  * is set.
2674  */
2675  dev_err(&pdev->dev, "Invalid MAC address, "
2676  "interface disabled.\n");
2677  }
2678  setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2679  (unsigned long)adapter);
2680 
2681  INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2682 
2683  pch_gbe_check_options(adapter);
2684 
2685  /* initialize the wol settings based on the eeprom settings */
2686  adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2687  dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2688 
2689  /* reset the hardware with the new settings */
2690  pch_gbe_reset(adapter);
2691 
2692  ret = register_netdev(netdev);
2693  if (ret)
2694  goto err_free_adapter;
2695  /* tell the stack to leave us alone until pch_gbe_open() is called */
2696  netif_carrier_off(netdev);
2697  netif_stop_queue(netdev);
2698 
2699  dev_dbg(&pdev->dev, "PCH Network Connection\n");
2700 
2701  device_set_wakeup_enable(&pdev->dev, 1);
2702  return 0;
2703 
2704 err_free_adapter:
2705  pch_gbe_hal_phy_hw_reset(&adapter->hw);
2706  kfree(adapter->tx_ring);
2707  kfree(adapter->rx_ring);
2708 err_iounmap:
2709  iounmap(adapter->hw.reg);
2710 err_free_netdev:
2711  free_netdev(netdev);
2712 err_release_pci:
2713  pci_release_regions(pdev);
2714 err_disable_device:
2715  pci_disable_device(pdev);
2716  return ret;
2717 }
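
The 64-bit-then-32-bit DMA mask negotiation at the top of the probe routine can be written more compactly with the combined helper added in later kernels; a sketch (dma_set_mask_and_coherent() postdates this 3.7-era code):

    static int example_set_dma(struct pci_dev *pdev)
    {
    	int ret;

    	/* Try 64-bit DMA first, then fall back to a 32-bit mask. */
    	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    	if (ret)
    		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    	return ret;
    }
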
2718 
2719 static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
2720  {.vendor = PCI_VENDOR_ID_INTEL,
2721  .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2722  .subvendor = PCI_ANY_ID,
2723  .subdevice = PCI_ANY_ID,
2724  .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2725  .class_mask = (0xFFFF00)
2726  },
2727  {.vendor = PCI_VENDOR_ID_ROHM,
2728  .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
2729  .subvendor = PCI_ANY_ID,
2730  .subdevice = PCI_ANY_ID,
2731  .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2732  .class_mask = (0xFFFF00)
2733  },
2734  {.vendor = PCI_VENDOR_ID_ROHM,
2735  .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
2736  .subvendor = PCI_ANY_ID,
2737  .subdevice = PCI_ANY_ID,
2738  .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2739  .class_mask = (0xFFFF00)
2740  },
2741  /* required last entry */
2742  {0}
2743 };
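
Entries like these are often written with the PCI_DEVICE() convenience macro, which fills in the vendor/device IDs and wildcards the sub-IDs; the long form is kept above because the driver also matches on the Ethernet class code, which PCI_DEVICE() does not set:

    /* Equivalent vendor/device match, minus the class filter: */
    { PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ROHM_ML7223_GBE) },
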
2744 
2745 #ifdef CONFIG_PM
2746 static const struct dev_pm_ops pch_gbe_pm_ops = {
2747  .suspend = pch_gbe_suspend,
2748  .resume = pch_gbe_resume,
2749  .freeze = pch_gbe_suspend,
2750  .thaw = pch_gbe_resume,
2751  .poweroff = pch_gbe_suspend,
2752  .restore = pch_gbe_resume,
2753 };
2754 #endif
2755 
2756 static const struct pci_error_handlers pch_gbe_err_handler = {
2757  .error_detected = pch_gbe_io_error_detected,
2758  .slot_reset = pch_gbe_io_slot_reset,
2759  .resume = pch_gbe_io_resume
2760 };
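
These three callbacks implement the standard PCI error-recovery handshake driven by the PCI core:

    /* 1. error_detected()  quiesce the device, return NEED_RESET
     * 2. slot_reset()      after the link reset, re-enable and
     *                      reinitialize the device, return RECOVERED
     * 3. resume()          restart normal operation (pch_gbe_up)
     */
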
2761 
2762 static struct pci_driver pch_gbe_driver = {
2763  .name = KBUILD_MODNAME,
2764  .id_table = pch_gbe_pcidev_id,
2765  .probe = pch_gbe_probe,
2766  .remove = pch_gbe_remove,
2767 #ifdef CONFIG_PM
2768  .driver.pm = &pch_gbe_pm_ops,
2769 #endif
2770  .shutdown = pch_gbe_shutdown,
2771  .err_handler = &pch_gbe_err_handler
2772 };
2773 
2774 
2775 static int __init pch_gbe_init_module(void)
2776 {
2777  int ret;
2778 
2779  pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION);
2780  ret = pci_register_driver(&pch_gbe_driver);
2782  if (copybreak == 0) {
2783  pr_info("copybreak disabled\n");
2784  } else {
2785  pr_info("copybreak enabled for packets <= %u bytes\n",
2786  copybreak);
2787  }
2788  }
2789  return ret;
2790 }
2791 
2792 static void __exit pch_gbe_exit_module(void)
2793 {
2794  pci_unregister_driver(&pch_gbe_driver);
2795 }
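
Were it not for the version banner and copybreak message printed at load time, the init/exit pair above could collapse to the standard helper:

    /* Equivalent one-liner when no extra load-time work is needed: */
    module_pci_driver(pch_gbe_driver);
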
2796 
2797 module_init(pch_gbe_init_module);
2798 module_exit(pch_gbe_exit_module);
2799 
2800 MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
2801 MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <[email protected]>");
2802 MODULE_LICENSE("GPL");
2804 MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
2805 
2806 module_param(copybreak, uint, 0644);
2808  "Maximum size of packet that is copied to a new buffer on receive");
2809 
2810 /* pch_gbe_main.c */