Linux Kernel 3.7.1
ipg.c
1 /*
2  * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
3  *
4  * Copyright (C) 2003, 2007 IC Plus Corp
5  *
6  * Original Author:
7  *
8  * Craig Rich
9  * Sundance Technology, Inc.
10  * www.sundanceti.com
12  *
13  * Current Maintainer:
14  *
15  * Sorbica Shieh.
16  * http://www.icplus.com.tw
18  *
19  * Jesse Huang
20  * http://www.icplus.com.tw
22  */
23 
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25 
26 #include <linux/crc32.h>
27 #include <linux/ethtool.h>
28 #include <linux/interrupt.h>
29 #include <linux/gfp.h>
30 #include <linux/mii.h>
31 #include <linux/mutex.h>
32 
33 #include <asm/div64.h>
34 
35 #define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
36 #define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
37 #define IPG_RESET_MASK \
38  (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
39  IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
40  IPG_AC_AUTO_INIT)
41 
42 #define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
43 #define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
44 #define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
45 
46 #define ipg_r32(reg) ioread32(ioaddr + (reg))
47 #define ipg_r16(reg) ioread16(ioaddr + (reg))
48 #define ipg_r8(reg) ioread8(ioaddr + (reg))
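/* Example (illustrative, not part of the original source): with a local
 * "void __iomem *ioaddr" in scope, as every function below arranges, a
 * read-modify-write of a register looks like:
 *
 *	ipg_w32(ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE, MAC_CTRL);
 */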
49 
50 enum {
51  netdev_io_size = 128
52 };
53 
54 #include "ipg.h"
55 #define DRV_NAME "ipg"
56 
57 MODULE_AUTHOR("IC Plus Corp. 2003");
58 MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
59 MODULE_LICENSE("GPL");
60 
61 /*
62  * Defaults
63  */
64 #define IPG_MAX_RXFRAME_SIZE 0x0600
65 #define IPG_RXFRAG_SIZE 0x0600
66 #define IPG_RXSUPPORT_SIZE 0x0600
67 #define IPG_IS_JUMBO false
68 
69 /*
70  * Variable record -- index by leading revision/length
71  * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
72  */
73 static const unsigned short DefaultPhyParam[] = {
74  /* 11/12/03 IP1000A v1-3 rev=0x40 */
75  /*--------------------------------------------------------------------------
76  (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
77  27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
78  31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
79  --------------------------------------------------------------------------*/
80  /* 12/17/03 IP1000A v1-4 rev=0x40 */
81  (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
82  0x0000,
83  30, 0x005e, 9, 0x0700,
84  /* 01/09/04 IP1000A v1-5 rev=0x41 */
85  (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
86  0x0000,
87  30, 0x005e, 9, 0x0700,
88  0x0000
89 };
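/* Decoding the leading word of a record (illustrative): in
 * (0x4100 | (07 * 4)) the high byte 0x41 is the PHY revision the record
 * applies to and the low byte, 7 * 4 = 28, is the record length in
 * bytes, i.e. seven 4-byte (address, value) write pairs. This is the
 * layout ipg_set_phy_default_param() below walks, consuming two words
 * and subtracting 4 from the remaining length per pair.
 */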
90 
91 static const char * const ipg_brand_name[] = {
92  "IC PLUS IP1000 1000/100/10 based NIC",
93  "Sundance Technology ST2021 based NIC",
94  "Tamarack Microelectronics TC9020/9021 based NIC",
95  "D-Link NIC IP1000A"
96 };
97 
98 static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
99  { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
100  { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
101  { PCI_VDEVICE(DLINK, 0x9021), 2 },
102  { PCI_VDEVICE(DLINK, 0x4020), 3 },
103  { 0, }
104 };
105 
106 MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
107 
108 static inline void __iomem *ipg_ioaddr(struct net_device *dev)
109 {
110  struct ipg_nic_private *sp = netdev_priv(dev);
111  return sp->ioaddr;
112 }
113 
114 #ifdef IPG_DEBUG
115 static void ipg_dump_rfdlist(struct net_device *dev)
116 {
117  struct ipg_nic_private *sp = netdev_priv(dev);
118  void __iomem *ioaddr = sp->ioaddr;
119  unsigned int i;
120  u32 offset;
121 
122  IPG_DEBUG_MSG("_dump_rfdlist\n");
123 
124  netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
125  netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
126  netdev_info(dev, "RFDList start address = %016lx\n",
127  (unsigned long)sp->rxd_map);
128  netdev_info(dev, "RFDListPtr register = %08x%08x\n",
129  ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
130 
131  for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
132  offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
133  netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
134  i, offset, (unsigned long)sp->rxd[i].next_desc);
135  offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
136  netdev_info(dev, "%02x %04x RFS = %016lx\n",
137  i, offset, (unsigned long)sp->rxd[i].rfs);
138  offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
139  netdev_info(dev, "%02x %04x frag_info = %016lx\n",
140  i, offset, (unsigned long)sp->rxd[i].frag_info);
141  }
142 }
143 
144 static void ipg_dump_tfdlist(struct net_device *dev)
145 {
146  struct ipg_nic_private *sp = netdev_priv(dev);
147  void __iomem *ioaddr = sp->ioaddr;
148  unsigned int i;
149  u32 offset;
150 
151  IPG_DEBUG_MSG("_dump_tfdlist\n");
152 
153  netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
154  netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
155  netdev_info(dev, "TFDList start address = %016lx\n",
156  (unsigned long) sp->txd_map);
157  netdev_info(dev, "TFDListPtr register = %08x%08x\n",
158  ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
159 
160  for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
161  offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
162  netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
163  i, offset, (unsigned long)sp->txd[i].next_desc);
164 
165  offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
166  netdev_info(dev, "%02x %04x TFC = %016lx\n",
167  i, offset, (unsigned long) sp->txd[i].tfc);
168  offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
169  netdev_info(dev, "%02x %04x frag_info = %016lx\n",
170  i, offset, (unsigned long) sp->txd[i].frag_info);
171  }
172 }
173 #endif
174 
175 static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
176 {
177  ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
178  ndelay(IPG_PC_PHYCTRLWAIT_NS);
179 }
180 
181 static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
182 {
183  ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
184  ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
185 }
186 
187 static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
188 {
189  phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
190 
191  ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
192 }
193 
194 static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
195 {
196  ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
197  phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
198 }
199 
200 static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
201 {
202  u16 bit_data;
203 
204  ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
205 
206  bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
207 
208  ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
209 
210  return bit_data;
211 }
212 
213 /*
214  * Read a register from the Physical Layer device located
215  * on the IPG NIC, using the IPG PHYCTRL register.
216  */
217 static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
218 {
219  void __iomem *ioaddr = ipg_ioaddr(dev);
220  /*
221  * The GMII management frame structure for a read is as follows:
222  *
223  * |Preamble|st|op|phyad|regad|ta| data |idle|
224  * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
225  *
226  * <32 1s> = 32 consecutive logic 1 values
227  * A = bit of Physical Layer device address (MSB first)
228  * R = bit of register address (MSB first)
229  * z = High impedance state
230  * D = bit of read data (MSB first)
231  *
232  * Transmission order is 'Preamble' field first, bits transmitted
233  * left to right (first to last).
234  */
235  struct {
236  u32 field;
237  unsigned int len;
238  } p[] = {
239  { GMII_PREAMBLE, 32 }, /* Preamble */
240  { GMII_ST, 2 }, /* ST */
241  { GMII_READ, 2 }, /* OP */
242  { phy_id, 5 }, /* PHYAD */
243  { phy_reg, 5 }, /* REGAD */
244  { 0x0000, 2 }, /* TA */
245  { 0x0000, 16 }, /* DATA */
246  { 0x0000, 1 } /* IDLE */
247  };
248  unsigned int i, j;
249  u8 polarity, data;
250 
251  polarity = ipg_r8(PHY_CTRL);
252  polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
253 
254  /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
255  for (j = 0; j < 5; j++) {
256  for (i = 0; i < p[j].len; i++) {
257  /* For each variable length field, the MSB must be
258  * transmitted first. Rotate through the field bits,
259  * starting with the MSB, and move each bit into the
260  * 1st (2^1) bit position (this is the bit position
261  * corresponding to the MgmtData bit of the PhyCtrl
262  * register for the IPG).
263  *
264  * Example: ST = 01;
265  *
266  * First write a '0' to bit 1 of the PhyCtrl
267  * register, then write a '1' to bit 1 of the
268  * PhyCtrl register.
269  *
270  * To do this, right shift the MSB of ST by the value:
271  * [field length - 1 - #ST bits already written]
272  * then left shift this result by 1.
273  */
274  data = (p[j].field >> (p[j].len - 1 - i)) << 1;
275  data &= IPG_PC_MGMTDATA;
276  data |= polarity | IPG_PC_MGMTDIR;
277 
278  ipg_drive_phy_ctl_low_high(ioaddr, data);
279  }
280  }
281 
282  send_three_state(ioaddr, polarity);
283 
284  read_phy_bit(ioaddr, polarity);
285 
286  /*
287  * For a read cycle, the bits for the next two fields (TA and
288  * DATA) are driven by the PHY (the IPG reads these bits).
289  */
290  for (i = 0; i < p[6].len; i++) {
291  p[6].field |=
292  (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
293  }
294 
295  send_three_state(ioaddr, polarity);
296  send_three_state(ioaddr, polarity);
297  send_three_state(ioaddr, polarity);
298  send_end(ioaddr, polarity);
299 
300  /* Return the value of the DATA field. */
301  return p[6].field;
302 }
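/* Worked example of the serializing shift above (illustrative, assuming
 * GMII_ST encodes the "01" start pattern as 0x1): for the ST field,
 * field = 0x1 and len = 2. On iteration i = 0,
 * (0x1 >> (2 - 1 - 0)) << 1 = 0 drives MgmtData low; on i = 1,
 * (0x1 >> 0) << 1 = 0x2 lands on the IPG_PC_MGMTDATA bit and drives it
 * high, so each field is clocked out MSB first as GMII requires.
 */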
303 
304 /*
305  * Write to a register from the Physical Layer device located
306  * on the IPG NIC, using the IPG PHYCTRL register.
307  */
308 static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
309 {
310  void __iomem *ioaddr = ipg_ioaddr(dev);
311  /*
312  * The GMII management frame structure for a write is as follows:
313  *
314  * |Preamble|st|op|phyad|regad|ta| data |idle|
315  * |< 32 1s>|01|01|AAAAA|RRRRR|10|DDDDDDDDDDDDDDDD|z |
316  *
317  * <32 1s> = 32 consecutive logic 1 values
318  * A = bit of Physical Layer device address (MSB first)
319  * R = bit of register address (MSB first)
320  * z = High impedance state
321  * D = bit of write data (MSB first)
322  *
323  * Transmission order is 'Preamble' field first, bits transmitted
324  * left to right (first to last).
325  */
326  struct {
327  u32 field;
328  unsigned int len;
329  } p[] = {
330  { GMII_PREAMBLE, 32 }, /* Preamble */
331  { GMII_ST, 2 }, /* ST */
332  { GMII_WRITE, 2 }, /* OP */
333  { phy_id, 5 }, /* PHYAD */
334  { phy_reg, 5 }, /* REGAD */
335  { 0x0002, 2 }, /* TA */
336  { val & 0xffff, 16 }, /* DATA */
337  { 0x0000, 1 } /* IDLE */
338  };
339  unsigned int i, j;
340  u8 polarity, data;
341 
342  polarity = ipg_r8(PHY_CTRL);
343  polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
344 
345  /* Create the Preamble, ST, OP, PHYAD, REGAD, TA, and DATA fields. */
346  for (j = 0; j < 7; j++) {
347  for (i = 0; i < p[j].len; i++) {
348  /* For each variable length field, the MSB must be
349  * transmitted first. Rotate through the field bits,
350  * starting with the MSB, and move each bit into the
351  * 1st (2^1) bit position (this is the bit position
352  * corresponding to the MgmtData bit of the PhyCtrl
353  * register for the IPG).
354  *
355  * Example: ST = 01;
356  *
357  * First write a '0' to bit 1 of the PhyCtrl
358  * register, then write a '1' to bit 1 of the
359  * PhyCtrl register.
360  *
361  * To do this, right shift the MSB of ST by the value:
362  * [field length - 1 - #ST bits already written]
363  * then left shift this result by 1.
364  */
365  data = (p[j].field >> (p[j].len - 1 - i)) << 1;
366  data &= IPG_PC_MGMTDATA;
367  data |= polarity | IPG_PC_MGMTDIR;
368 
369  ipg_drive_phy_ctl_low_high(ioaddr, data);
370  }
371  }
372 
373  /* The last cycle is a tri-state, so read from the PHY. */
374  ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
375  ipg_r8(PHY_CTRL);
376  ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
377 }
378 
379 static void ipg_set_led_mode(struct net_device *dev)
380 {
381  struct ipg_nic_private *sp = netdev_priv(dev);
382  void __iomem *ioaddr = sp->ioaddr;
383  u32 mode;
384 
385  mode = ipg_r32(ASIC_CTRL);
386  mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
387 
388  if ((sp->led_mode & 0x03) > 1)
389  mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
390 
391  if ((sp->led_mode & 0x01) == 1)
392  mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
393 
394  if ((sp->led_mode & 0x08) == 8)
395  mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
396 
397  ipg_w32(mode, ASIC_CTRL);
398 }
399 
400 static void ipg_set_phy_set(struct net_device *dev)
401 {
402  struct ipg_nic_private *sp = netdev_priv(dev);
403  void __iomem *ioaddr = sp->ioaddr;
404  int physet;
405 
406  physet = ipg_r8(PHY_SET);
407  physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
408  physet |= ((sp->led_mode & 0x70) >> 4);
409  ipg_w8(physet, PHY_SET);
410 }
411 
412 static int ipg_reset(struct net_device *dev, u32 resetflags)
413 {
414  /* Assert functional resets via the IPG AsicCtrl
415  * register as specified by the 'resetflags' input
416  * parameter.
417  */
418  void __iomem *ioaddr = ipg_ioaddr(dev);
419  unsigned int timeout_count = 0;
420 
421  IPG_DEBUG_MSG("_reset\n");
422 
423  ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
424 
425  /* Delay added to account for problem with 10Mbps reset. */
426  mdelay(IPG_AC_RESETWAIT);
427 
428  while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
429  mdelay(IPG_AC_RESETWAIT);
430  if (++timeout_count > IPG_AC_RESET_TIMEOUT)
431  return -ETIME;
432  }
433  /* Set LED Mode in Asic Control */
434  ipg_set_led_mode(dev);
435 
436  /* Set PHYSet Register Value */
437  ipg_set_phy_set(dev);
438  return 0;
439 }
440 
441 /* Find the GMII PHY address. */
442 static int ipg_find_phyaddr(struct net_device *dev)
443 {
444  unsigned int phyaddr, i;
445 
446  for (i = 0; i < 32; i++) {
447  u32 status;
448 
449  /* Search for the correct PHY address among 32 possible. */
450  phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
451 
452  /* 10/22/03 Grace change verify from GMII_PHY_STATUS to
453  GMII_PHY_ID1
454  */
455 
456  status = mdio_read(dev, phyaddr, MII_BMSR);
457 
458  if ((status != 0xFFFF) && (status != 0))
459  return phyaddr;
460  }
461 
462  return 0x1f;
463 }
464 
465 /*
466  * Configure IPG based on result of IEEE 802.3 PHY
467  * auto-negotiation.
468  */
469 static int ipg_config_autoneg(struct net_device *dev)
470 {
471  struct ipg_nic_private *sp = netdev_priv(dev);
472  void __iomem *ioaddr = sp->ioaddr;
473  unsigned int txflowcontrol;
474  unsigned int rxflowcontrol;
475  unsigned int fullduplex;
476  u32 mac_ctrl_val;
477  u32 asicctrl;
478  u8 phyctrl;
479  const char *speed;
480  const char *duplex;
481  const char *tx_desc;
482  const char *rx_desc;
483 
484  IPG_DEBUG_MSG("_config_autoneg\n");
485 
486  asicctrl = ipg_r32(ASIC_CTRL);
487  phyctrl = ipg_r8(PHY_CTRL);
488  mac_ctrl_val = ipg_r32(MAC_CTRL);
489 
490  /* Set flags for use in resolving auto-negotiation, assuming
491  * non-1000Mbps, half duplex, no flow control.
492  */
493  fullduplex = 0;
494  txflowcontrol = 0;
495  rxflowcontrol = 0;
496 
497  /* To accommodate a problem in 10Mbps operation,
498  * set a global flag if the PHY is running in 10Mbps mode.
499  */
500  sp->tenmbpsmode = 0;
501 
502  /* Determine actual speed of operation. */
503  switch (phyctrl & IPG_PC_LINK_SPEED) {
504  case IPG_PC_LINK_SPEED_10MBPS:
505  speed = "10Mbps";
506  sp->tenmbpsmode = 1;
507  break;
508  case IPG_PC_LINK_SPEED_100MBPS:
509  speed = "100Mbps";
510  break;
511  case IPG_PC_LINK_SPEED_1000MBPS:
512  speed = "1000Mbps";
513  break;
514  default:
515  speed = "undefined!";
516  return 0;
517  }
518 
519  netdev_info(dev, "Link speed = %s\n", speed);
520  if (sp->tenmbpsmode == 1)
521  netdev_info(dev, "10Mbps operational mode enabled\n");
522 
523  if (phyctrl & IPG_PC_DUPLEX_STATUS) {
524  fullduplex = 1;
525  txflowcontrol = 1;
526  rxflowcontrol = 1;
527  }
528 
529  /* Configure full duplex, and flow control. */
530  if (fullduplex == 1) {
531 
532  /* Configure IPG for full duplex operation. */
533 
534  duplex = "full";
535 
536  mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
537 
538  if (txflowcontrol == 1) {
539  tx_desc = "";
540  mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
541  } else {
542  tx_desc = "no ";
543  mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
544  }
545 
546  if (rxflowcontrol == 1) {
547  rx_desc = "";
548  mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
549  } else {
550  rx_desc = "no ";
551  mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
552  }
553  } else {
554  duplex = "half";
555  tx_desc = "no ";
556  rx_desc = "no ";
557  mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
558  ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
559  ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
560  }
561 
562  netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
563  duplex, tx_desc, rx_desc);
564  ipg_w32(mac_ctrl_val, MAC_CTRL);
565 
566  return 0;
567 }
568 
569 /* Determine and configure multicast operation and set
570  * receive mode for IPG.
571  */
572 static void ipg_nic_set_multicast_list(struct net_device *dev)
573 {
574  void __iomem *ioaddr = ipg_ioaddr(dev);
575  struct netdev_hw_addr *ha;
576  unsigned int hashindex;
577  u32 hashtable[2];
578  u8 receivemode;
579 
580  IPG_DEBUG_MSG("_nic_set_multicast_list\n");
581 
582  receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
583 
584  if (dev->flags & IFF_PROMISC) {
585  /* NIC to be configured in promiscuous mode. */
586  receivemode = IPG_RM_RECEIVEALLFRAMES;
587  } else if ((dev->flags & IFF_ALLMULTI) ||
588  ((dev->flags & IFF_MULTICAST) &&
589  (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
590  /* NIC to be configured to receive all multicast
591  * frames. */
592  receivemode |= IPG_RM_RECEIVEMULTICAST;
593  } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
594  /* NIC to be configured to receive selected
595  * multicast addresses. */
596  receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
597  }
598 
599  /* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
600  * The IPG applies a cyclic-redundancy-check (the same CRC
601  * used to calculate the frame data FCS) to the destination
602  * address of all incoming multicast frames whose destination
603  * address has the multicast bit set. The least significant
604  * 6 bits of the CRC result are used as an addressing index
605  * into the hash table. If the value of the bit addressed by
606  * this index is a 1, the frame is passed to the host system.
607  */
608 
609  /* Clear hashtable. */
610  hashtable[0] = 0x00000000;
611  hashtable[1] = 0x00000000;
612 
613  /* Cycle through all multicast addresses to filter. */
614  netdev_for_each_mc_addr(ha, dev) {
615  /* Calculate CRC result for each multicast address. */
616  hashindex = crc32_le(0xffffffff, ha->addr,
617  ETH_ALEN);
618 
619  /* Use only the least significant 6 bits. */
620  hashindex = hashindex & 0x3F;
621 
622  /* Within "hashtable", set bit number "hashindex"
623  * to a logic 1.
624  */
625  set_bit(hashindex, (void *)hashtable);
626  }
627 
628  /* Write the value of the hashtable to the two 32-bit
629  * HASHTABLE registers.
630  */
631  ipg_w32(hashtable[0], HASHTABLE_0);
632  ipg_w32(hashtable[1], HASHTABLE_1);
633 
634  ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
635 
636  IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
637 }
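/* Illustrative sketch of the hash placement above: the low 6 CRC bits
 * select one of 64 filter bits, and on a little-endian CPU the
 * set_bit() call is equivalent to
 *
 *	unsigned int n = crc32_le(0xffffffff, ha->addr, ETH_ALEN) & 0x3F;
 *	hashtable[n / 32] |= 1 << (n % 32);
 *
 * which is then written out through HASHTABLE_0 and HASHTABLE_1.
 */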
638 
639 static int ipg_io_config(struct net_device *dev)
640 {
641  struct ipg_nic_private *sp = netdev_priv(dev);
642  void __iomem *ioaddr = ipg_ioaddr(dev);
643  u32 origmacctrl;
644  u32 restoremacctrl;
645 
646  IPG_DEBUG_MSG("_io_config\n");
647 
648  origmacctrl = ipg_r32(MAC_CTRL);
649 
650  restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
651 
652  /* Based on compilation option, determine if FCS is to be
653  * stripped on receive frames by IPG.
654  */
655  if (!IPG_STRIP_FCS_ON_RX)
656  restoremacctrl |= IPG_MC_RCV_FCS;
657 
658  /* Determine if transmitter and/or receiver are
659  * enabled so we may restore MACCTRL correctly.
660  */
661  if (origmacctrl & IPG_MC_TX_ENABLED)
662  restoremacctrl |= IPG_MC_TX_ENABLE;
663 
664  if (origmacctrl & IPG_MC_RX_ENABLED)
665  restoremacctrl |= IPG_MC_RX_ENABLE;
666 
667  /* Transmitter and receiver must be disabled before setting
668  * IFSSelect.
669  */
670  ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
671  IPG_MC_RSVD_MASK, MAC_CTRL);
672 
673  /* Now that transmitter and receiver are disabled, write
674  * to IFSSelect.
675  */
676  ipg_w8(IPG_IFS_96BIT_TIME, IFS_SELECT);
677 
678  /* Set RECEIVEMODE register. */
679  ipg_nic_set_multicast_list(dev);
680 
681  ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
682 
695 
696  /* IPG multi-frag frame bug workaround.
697  * Per silicon revision B3 errata.
698  */
699  ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
700 
701  /* IPG TX poll now bug workaround.
702  * Per silicon revision B3 errata.
703  */
704  ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
705 
706  /* IPG RX poll now bug workaround.
707  * Per silicon revision B3 errata.
708  */
709  ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
710 
711  /* Now restore MACCTRL to original setting. */
712  ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
713 
714  /* Disable unused RMON statistics. */
715  ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
716 
717  /* Disable unused MIB statistics. */
724 
725  return 0;
726 }
727 
728 /*
729  * Create a receive buffer within system memory and update
730  * NIC private structure appropriately.
731  */
732 static int ipg_get_rxbuff(struct net_device *dev, int entry)
733 {
734  struct ipg_nic_private *sp = netdev_priv(dev);
735  struct ipg_rx *rxfd = sp->rxd + entry;
736  struct sk_buff *skb;
737  u64 rxfragsize;
738 
739  IPG_DEBUG_MSG("_get_rxbuff\n");
740 
741  skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
742  if (!skb) {
743  sp->rx_buff[entry] = NULL;
744  return -ENOMEM;
745  }
746 
747  /* Save the address of the sk_buff structure. */
748  sp->rx_buff[entry] = skb;
749 
750  rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
751  sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
752 
753  /* Set the RFD fragment length. */
754  rxfragsize = sp->rxfrag_size;
755  rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
756 
757  return 0;
758 }
759 
760 static int init_rfdlist(struct net_device *dev)
761 {
762  struct ipg_nic_private *sp = netdev_priv(dev);
763  void __iomem *ioaddr = sp->ioaddr;
764  unsigned int i;
765 
766  IPG_DEBUG_MSG("_init_rfdlist\n");
767 
768  for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
769  struct ipg_rx *rxfd = sp->rxd + i;
770 
771  if (sp->rx_buff[i]) {
772  pci_unmap_single(sp->pdev,
773  le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
774  sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
775  dev_kfree_skb_irq(sp->rx_buff[i]);
776  sp->rx_buff[i] = NULL;
777  }
778 
779  /* Clear out the RFS field. */
780  rxfd->rfs = 0x0000000000000000;
781 
782  if (ipg_get_rxbuff(dev, i) < 0) {
783  /*
784  * A receive buffer was not ready, break the
785  * RFD list here.
786  */
787  IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
788 
789  /* Just in case we cannot allocate a single RFD.
790  * Should not occur.
791  */
792  if (i == 0) {
793  netdev_err(dev, "No memory available for RFD list\n");
794  return -ENOMEM;
795  }
796  }
797 
798  rxfd->next_desc = cpu_to_le64(sp->rxd_map +
799  sizeof(struct ipg_rx)*(i + 1));
800  }
801  sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
802 
803  sp->rx_current = 0;
804  sp->rx_dirty = 0;
805 
806  /* Write the location of the RFDList to the IPG. */
807  ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
808  ipg_w32(0x00000000, RFD_LIST_PTR_1);
809 
810  return 0;
811 }
812 
813 static void init_tfdlist(struct net_device *dev)
814 {
815  struct ipg_nic_private *sp = netdev_priv(dev);
816  void __iomem *ioaddr = sp->ioaddr;
817  unsigned int i;
818 
819  IPG_DEBUG_MSG("_init_tfdlist\n");
820 
821  for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
822  struct ipg_tx *txfd = sp->txd + i;
823 
824  txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
825 
826  if (sp->tx_buff[i]) {
827  dev_kfree_skb_irq(sp->tx_buff[i]);
828  sp->tx_buff[i] = NULL;
829  }
830 
831  txfd->next_desc = cpu_to_le64(sp->txd_map +
832  sizeof(struct ipg_tx)*(i + 1));
833  }
834  sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
835 
836  sp->tx_current = 0;
837  sp->tx_dirty = 0;
838 
839  /* Write the location of the TFDList to the IPG. */
840  IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
841  (u32) sp->txd_map);
842  ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
843  ipg_w32(0x00000000, TFD_LIST_PTR_1);
844 
845  sp->reset_current_tfd = 1;
846 }
847 
848 /*
849  * Free all transmit buffers which have already been transferred
850  * via DMA to the IPG.
851  */
852 static void ipg_nic_txfree(struct net_device *dev)
853 {
854  struct ipg_nic_private *sp = netdev_priv(dev);
855  unsigned int released, pending, dirty;
856 
857  IPG_DEBUG_MSG("_nic_txfree\n");
858 
859  pending = sp->tx_current - sp->tx_dirty;
860  dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
861 
862  for (released = 0; released < pending; released++) {
863  struct sk_buff *skb = sp->tx_buff[dirty];
864  struct ipg_tx *txfd = sp->txd + dirty;
865 
866  IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
867 
868  /* Look at each TFD's TFC field beginning
869  * at the last freed TFD up to the current TFD.
870  * If the TFDDone bit is set, free the associated
871  * buffer.
872  */
873  if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
874  break;
875 
876  /* Free the transmit buffer. */
877  if (skb) {
878  pci_unmap_single(sp->pdev,
879  le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
880  skb->len, PCI_DMA_TODEVICE);
881 
882  dev_kfree_skb_irq(skb);
883 
884  sp->tx_buff[dirty] = NULL;
885  }
886  dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
887  }
888 
889  sp->tx_dirty += released;
890 
891  if (netif_queue_stopped(dev) &&
892  (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
893  netif_wake_queue(dev);
894  }
895 }
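/* The accounting above relies on tx_current and tx_dirty being
 * free-running counters: "tx_current - tx_dirty" is the number of
 * in-flight TFDs even across unsigned wraparound, and a counter is
 * reduced modulo the ring size only when used as an index, e.g.
 * (illustrative)
 *
 *	struct ipg_tx *txfd = sp->txd + (sp->tx_dirty % IPG_TFDLIST_LENGTH);
 */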
896 
897 static void ipg_tx_timeout(struct net_device *dev)
898 {
899  struct ipg_nic_private *sp = netdev_priv(dev);
900  void __iomem *ioaddr = sp->ioaddr;
901 
902  ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
903  IPG_AC_FIFO);
904 
905  spin_lock_irq(&sp->lock);
906 
907  /* Re-configure after DMA reset. */
908  if (ipg_io_config(dev) < 0)
909  netdev_info(dev, "Error during re-configuration\n");
910 
911  init_tfdlist(dev);
912 
913  spin_unlock_irq(&sp->lock);
914 
915  ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
916  MAC_CTRL);
917 }
918 
919 /*
920  * For TxComplete interrupts, free all transmit
921  * buffers which have already been transferred via DMA
922  * to the IPG.
923  */
924 static void ipg_nic_txcleanup(struct net_device *dev)
925 {
926  struct ipg_nic_private *sp = netdev_priv(dev);
927  void __iomem *ioaddr = sp->ioaddr;
928  unsigned int i;
929 
930  IPG_DEBUG_MSG("_nic_txcleanup\n");
931 
932  for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
933  /* Reading the TXSTATUS register clears the
934  * TX_COMPLETE interrupt.
935  */
936  u32 txstatusdword = ipg_r32(TX_STATUS);
937 
938  IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
939 
940  /* Check for Transmit errors. Error bits only valid if
941  * TX_COMPLETE bit in the TXSTATUS register is a 1.
942  */
943  if (!(txstatusdword & IPG_TS_TX_COMPLETE))
944  break;
945 
946  /* If in 10Mbps mode, indicate transmit is ready. */
947  if (sp->tenmbpsmode) {
948  netif_wake_queue(dev);
949  }
950 
951  /* Transmit error, increment stat counters. */
952  if (txstatusdword & IPG_TS_TX_ERROR) {
953  IPG_DEBUG_MSG("Transmit error\n");
954  sp->stats.tx_errors++;
955  }
956 
957  /* Late collision, re-enable transmitter. */
958  if (txstatusdword & IPG_TS_LATE_COLLISION) {
959  IPG_DEBUG_MSG("Late collision on transmit\n");
960  ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
961  IPG_MC_RSVD_MASK, MAC_CTRL);
962  }
963 
964  /* Maximum collisions, re-enable transmitter. */
965  if (txstatusdword & IPG_TS_TX_MAX_COLL) {
966  IPG_DEBUG_MSG("Maximum collisions on transmit\n");
967  ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
968  IPG_MC_RSVD_MASK, MAC_CTRL);
969  }
970 
971  /* Transmit underrun, reset and re-enable
972  * transmitter.
973  */
974  if (txstatusdword & IPG_TS_TX_UNDERRUN) {
975  IPG_DEBUG_MSG("Transmitter underrun\n");
976  sp->stats.tx_fifo_errors++;
977  ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
978  IPG_AC_NETWORK | IPG_AC_FIFO);
979 
980  /* Re-configure after DMA reset. */
981  if (ipg_io_config(dev) < 0) {
982  netdev_info(dev, "Error during re-configuration\n");
983  }
984  init_tfdlist(dev);
985 
986  ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
987  IPG_MC_RSVD_MASK, MAC_CTRL);
988  }
989  }
990 
991  ipg_nic_txfree(dev);
992 }
993 
994 /* Provides statistical information about the IPG NIC. */
995 static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
996 {
997  struct ipg_nic_private *sp = netdev_priv(dev);
998  void __iomem *ioaddr = sp->ioaddr;
999  u16 temp1;
1000  u16 temp2;
1001 
1002  IPG_DEBUG_MSG("_nic_get_stats\n");
1003 
1004  /* Check to see if the NIC has been initialized via nic_open,
1005  * before trying to read statistic registers.
1006  */
1007  if (!test_bit(__LINK_STATE_START, &dev->state))
1008  return &sp->stats;
1009 
1010  sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
1011  sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
1012  sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
1013  sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
1014  temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
1015  sp->stats.rx_errors += temp1;
1016  sp->stats.rx_missed_errors += temp1;
1017  temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
1018  ipg_r32(IPG_LATECOLLISIONS);
1019  temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
1020  sp->stats.collisions += temp1;
1021  sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
1022  sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
1023  ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
1024  sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
1025 
1026  /* detailed tx_errors */
1027  sp->stats.tx_carrier_errors += temp2;
1028 
1029  /* detailed rx_errors */
1030  sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
1031  ipg_r16(IPG_FRAMETOOLONGERRRORS);
1032  sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
1033 
1034  /* Unutilized IPG statistic registers. */
1035  ipg_r32(IPG_MCSTFRAMESRCVDOK);
1036 
1037  return &sp->stats;
1038 }
1039 
1040 /* Restore used receive buffers. */
1041 static int ipg_nic_rxrestore(struct net_device *dev)
1042 {
1043  struct ipg_nic_private *sp = netdev_priv(dev);
1044  const unsigned int curr = sp->rx_current;
1045  unsigned int dirty = sp->rx_dirty;
1046 
1047  IPG_DEBUG_MSG("_nic_rxrestore\n");
1048 
1049  for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
1050  unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
1051 
1052  /* rx_copybreak may poke hole here and there. */
1053  if (sp->rx_buff[entry])
1054  continue;
1055 
1056  /* Generate a new receive buffer to replace the
1057  * current buffer (which will be released by the
1058  * Linux system).
1059  */
1060  if (ipg_get_rxbuff(dev, entry) < 0) {
1061  IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
1062 
1063  break;
1064  }
1065 
1066  /* Reset the RFS field. */
1067  sp->rxd[entry].rfs = 0x0000000000000000;
1068  }
1069  sp->rx_dirty = dirty;
1070 
1071  return 0;
1072 }
1073 
1074 /* Use jumboindex and jumbosize to track jumbo frame status.
1075  * The initial state is jumboindex = -1 and jumbosize = 0.
1076  * 1. jumboindex = -1 and jumbosize = 0: the previous jumbo frame has completed.
1077  * 2. jumboindex != -1 and jumbosize != 0: a jumbo frame is being received and is not oversized.
1078  * 3. jumboindex = -1 and jumbosize != 0: the jumbo frame is oversized; what was already
1079  * received has been dropped, and the rest of the frame must be dropped as well.
1080  */
1081 enum {
1082  NORMAL_PACKET,
1083  ERROR_PACKET
1084 };
1085 
1086 enum {
1087  FRAME_NO_START_NO_END = 0,
1088  FRAME_WITH_START = 1,
1089  FRAME_WITH_END = 10,
1090  FRAME_WITH_START_WITH_END = 11
1091 };
1092 
1093 static void ipg_nic_rx_free_skb(struct net_device *dev)
1094 {
1095  struct ipg_nic_private *sp = netdev_priv(dev);
1096  unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1097 
1098  if (sp->rx_buff[entry]) {
1099  struct ipg_rx *rxfd = sp->rxd + entry;
1100 
1101  pci_unmap_single(sp->pdev,
1102  le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1103  sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1104  dev_kfree_skb_irq(sp->rx_buff[entry]);
1105  sp->rx_buff[entry] = NULL;
1106  }
1107 }
1108 
1109 static int ipg_nic_rx_check_frame_type(struct net_device *dev)
1110 {
1111  struct ipg_nic_private *sp = netdev_priv(dev);
1112  struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
1113  int type = FRAME_NO_START_NO_END;
1114 
1115  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
1116  type += FRAME_WITH_START;
1117  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
1118  type += FRAME_WITH_END;
1119  return type;
1120 }
1121 
1122 static int ipg_nic_rx_check_error(struct net_device *dev)
1123 {
1124  struct ipg_nic_private *sp = netdev_priv(dev);
1125  unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1126  struct ipg_rx *rxfd = sp->rxd + entry;
1127 
1128  if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1129  (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1130  IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1131  IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
1132  IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1133  (unsigned long) rxfd->rfs);
1134 
1135  /* Increment general receive error statistic. */
1136  sp->stats.rx_errors++;
1137 
1138  /* Increment detailed receive error statistics. */
1139  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1140  IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1141 
1142  sp->stats.rx_fifo_errors++;
1143  }
1144 
1145  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1146  IPG_DEBUG_MSG("RX runt occurred\n");
1147  sp->stats.rx_length_errors++;
1148  }
1149 
1150  /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
1151  * error count handled by an IPG statistic register.
1152  */
1153 
1154  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1155  IPG_DEBUG_MSG("RX alignment error occurred\n");
1156  sp->stats.rx_frame_errors++;
1157  }
1158 
1159  /* Do nothing for IPG_RFS_RXFCSERROR, error count
1160  * handled by an IPG statistic register.
1161  */
1162 
1163  /* Free the memory associated with the RX
1164  * buffer since it is erroneous and we will
1165  * not pass it to higher layer processes.
1166  */
1167  if (sp->rx_buff[entry]) {
1168  pci_unmap_single(sp->pdev,
1169  le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1170  sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1171 
1172  dev_kfree_skb_irq(sp->rx_buff[entry]);
1173  sp->rx_buff[entry] = NULL;
1174  }
1175  return ERROR_PACKET;
1176  }
1177  return NORMAL_PACKET;
1178 }
1179 
1180 static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1181  struct ipg_nic_private *sp,
1182  struct ipg_rx *rxfd, unsigned entry)
1183 {
1184  struct ipg_jumbo *jumbo = &sp->jumbo;
1185  struct sk_buff *skb;
1186  int framelen;
1187 
1188  if (jumbo->found_start) {
1189  dev_kfree_skb_irq(jumbo->skb);
1190  jumbo->found_start = 0;
1191  jumbo->current_size = 0;
1192  jumbo->skb = NULL;
1193  }
1194 
1195  /* 1: found error, 0 no error */
1196  if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1197  return;
1198 
1199  skb = sp->rx_buff[entry];
1200  if (!skb)
1201  return;
1202 
1203  /* accept this frame and send to upper layer */
1204  framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1205  if (framelen > sp->rxfrag_size)
1206  framelen = sp->rxfrag_size;
1207 
1208  skb_put(skb, framelen);
1209  skb->protocol = eth_type_trans(skb, dev);
1210  skb_checksum_none_assert(skb);
1211  netif_rx(skb);
1212  sp->rx_buff[entry] = NULL;
1213 }
1214 
1215 static void ipg_nic_rx_with_start(struct net_device *dev,
1216  struct ipg_nic_private *sp,
1217  struct ipg_rx *rxfd, unsigned entry)
1218 {
1219  struct ipg_jumbo *jumbo = &sp->jumbo;
1220  struct pci_dev *pdev = sp->pdev;
1221  struct sk_buff *skb;
1222 
1223  /* 1: found error, 0 no error */
1224  if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1225  return;
1226 
1227  /* accept this frame and send to upper layer */
1228  skb = sp->rx_buff[entry];
1229  if (!skb)
1230  return;
1231 
1232  if (jumbo->found_start)
1233  dev_kfree_skb_irq(jumbo->skb);
1234 
1235  pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1236  sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1237 
1238  skb_put(skb, sp->rxfrag_size);
1239 
1240  jumbo->found_start = 1;
1241  jumbo->current_size = sp->rxfrag_size;
1242  jumbo->skb = skb;
1243 
1244  sp->rx_buff[entry] = NULL;
1245 }
1246 
1247 static void ipg_nic_rx_with_end(struct net_device *dev,
1248  struct ipg_nic_private *sp,
1249  struct ipg_rx *rxfd, unsigned entry)
1250 {
1251  struct ipg_jumbo *jumbo = &sp->jumbo;
1252 
1253  /* 1: found error, 0 no error */
1254  if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1255  struct sk_buff *skb = sp->rx_buff[entry];
1256 
1257  if (!skb)
1258  return;
1259 
1260  if (jumbo->found_start) {
1261  int framelen, endframelen;
1262 
1263  framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1264 
1265  endframelen = framelen - jumbo->current_size;
1266  if (framelen > sp->rxsupport_size)
1267  dev_kfree_skb_irq(jumbo->skb);
1268  else {
1269  memcpy(skb_put(jumbo->skb, endframelen),
1270  skb->data, endframelen);
1271 
1272  jumbo->skb->protocol =
1273  eth_type_trans(jumbo->skb, dev);
1274 
1275  skb_checksum_none_assert(jumbo->skb);
1276  netif_rx(jumbo->skb);
1277  }
1278  }
1279 
1280  jumbo->found_start = 0;
1281  jumbo->current_size = 0;
1282  jumbo->skb = NULL;
1283 
1284  ipg_nic_rx_free_skb(dev);
1285  } else {
1286  dev_kfree_skb_irq(jumbo->skb);
1287  jumbo->found_start = 0;
1288  jumbo->current_size = 0;
1289  jumbo->skb = NULL;
1290  }
1291 }
1292 
1293 static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
1294  struct ipg_nic_private *sp,
1295  struct ipg_rx *rxfd, unsigned entry)
1296 {
1297  struct ipg_jumbo *jumbo = &sp->jumbo;
1298 
1299  /* 1: found error, 0 no error */
1300  if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1301  struct sk_buff *skb = sp->rx_buff[entry];
1302 
1303  if (skb) {
1304  if (jumbo->found_start) {
1305  jumbo->current_size += sp->rxfrag_size;
1306  if (jumbo->current_size <= sp->rxsupport_size) {
1307  memcpy(skb_put(jumbo->skb,
1308  sp->rxfrag_size),
1309  skb->data, sp->rxfrag_size);
1310  }
1311  }
1312  ipg_nic_rx_free_skb(dev);
1313  }
1314  } else {
1315  dev_kfree_skb_irq(jumbo->skb);
1316  jumbo->found_start = 0;
1317  jumbo->current_size = 0;
1318  jumbo->skb = NULL;
1319  }
1320 }
1321 
1322 static int ipg_nic_rx_jumbo(struct net_device *dev)
1323 {
1324  struct ipg_nic_private *sp = netdev_priv(dev);
1325  unsigned int curr = sp->rx_current;
1326  void __iomem *ioaddr = sp->ioaddr;
1327  unsigned int i;
1328 
1329  IPG_DEBUG_MSG("_nic_rx\n");
1330 
1331  for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1332  unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1333  struct ipg_rx *rxfd = sp->rxd + entry;
1334 
1335  if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1336  break;
1337 
1338  switch (ipg_nic_rx_check_frame_type(dev)) {
1339  case FRAME_WITH_START_WITH_END:
1340  ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
1341  break;
1342  case FRAME_WITH_START:
1343  ipg_nic_rx_with_start(dev, sp, rxfd, entry);
1344  break;
1345  case FRAME_WITH_END:
1346  ipg_nic_rx_with_end(dev, sp, rxfd, entry);
1347  break;
1348  case FRAME_NO_START_NO_END:
1349  ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
1350  break;
1351  }
1352  }
1353 
1354  sp->rx_current = curr;
1355 
1356  if (i == IPG_MAXRFDPROCESS_COUNT) {
1357  /* There are more RFDs to process, however the
1358  * allocated amount of RFD processing time has
1359  * expired. Assert Interrupt Requested to make
1360  * sure we come back to process the remaining RFDs.
1361  */
1362  ipg_w32(IPG_AC_INT_REQUEST, ASIC_CTRL);
1363  }
1364 
1365  ipg_nic_rxrestore(dev);
1366 
1367  return 0;
1368 }
1369 
1370 static int ipg_nic_rx(struct net_device *dev)
1371 {
1372  /* Transfer received Ethernet frames to higher network layers. */
1373  struct ipg_nic_private *sp = netdev_priv(dev);
1374  unsigned int curr = sp->rx_current;
1375  void __iomem *ioaddr = sp->ioaddr;
1376  struct ipg_rx *rxfd;
1377  unsigned int i;
1378 
1379  IPG_DEBUG_MSG("_nic_rx\n");
1380 
1381 #define __RFS_MASK \
1382  cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
1383 
1384  for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1385  unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1386  struct sk_buff *skb = sp->rx_buff[entry];
1387  unsigned int framelen;
1388 
1389  rxfd = sp->rxd + entry;
1390 
1391  if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
1392  break;
1393 
1394  /* Get received frame length. */
1395  framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1396 
1397  /* Check for jumbo frame arrival with too small
1398  * RXFRAG_SIZE.
1399  */
1400  if (framelen > sp->rxfrag_size) {
1401  IPG_DEBUG_MSG
1402  ("RFS FrameLen > allocated fragment size\n");
1403 
1404  framelen = sp->rxfrag_size;
1405  }
1406 
1407  if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1408  (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1409  IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1410  IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
1411 
1412  IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1413  (unsigned long int) rxfd->rfs);
1414 
1415  /* Increment general receive error statistic. */
1416  sp->stats.rx_errors++;
1417 
1418  /* Increment detailed receive error statistics. */
1419  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1420  IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1421  sp->stats.rx_fifo_errors++;
1422  }
1423 
1424  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1425  IPG_DEBUG_MSG("RX runt occurred\n");
1426  sp->stats.rx_length_errors++;
1427  }
1428 
1429  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ;
1430  /* Do nothing, error count handled by an IPG
1431  * statistic register.
1432  */
1433 
1434  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1435  IPG_DEBUG_MSG("RX alignment error occurred\n");
1436  sp->stats.rx_frame_errors++;
1437  }
1438 
1439  if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ;
1440  /* Do nothing, error count handled by an IPG
1441  * statistic register.
1442  */
1443 
1444  /* Free the memory associated with the RX
1445  * buffer since it is erroneous and we will
1446  * not pass it to higher layer processes.
1447  */
1448  if (skb) {
1449  __le64 info = rxfd->frag_info;
1450 
1451  pci_unmap_single(sp->pdev,
1452  le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
1453  sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1454 
1455  dev_kfree_skb_irq(skb);
1456  }
1457  } else {
1458 
1459  /* Adjust the new buffer length to accommodate the size
1460  * of the received frame.
1461  */
1462  skb_put(skb, framelen);
1463 
1464  /* Set the buffer's protocol field to Ethernet. */
1465  skb->protocol = eth_type_trans(skb, dev);
1466 
1467  /* The IPG encountered an error with (or
1468  * there were no) IP/TCP/UDP checksums.
1469  * This may or may not indicate an invalid
1470  * IP/TCP/UDP frame was received. Let the
1471  * upper layer decide.
1472  */
1473  skb_checksum_none_assert(skb);
1474 
1475  /* Hand off frame for higher layer processing.
1476  * The function netif_rx() releases the sk_buff
1477  * when processing completes.
1478  */
1479  netif_rx(skb);
1480  }
1481 
1482  /* Assure RX buffer is not reused by IPG. */
1483  sp->rx_buff[entry] = NULL;
1484  }
1485 
1486  /*
1487  * If there are more RFDs to process and the allocated amount of RFD
1488  * processing time has expired, assert Interrupt Requested to make
1489  * sure we come back to process the remaining RFDs.
1490  */
1491  if (i == IPG_MAXRFDPROCESS_COUNT)
1492  ipg_w32(IPG_AC_INT_REQUEST, ASIC_CTRL);
1493 
1494 #ifdef IPG_DEBUG
1495  /* Check if the RFD list contained no receive frame data. */
1496  if (!i)
1497  sp->EmptyRFDListCount++;
1498 #endif
1499  while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
1500  !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
1501  (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
1502  unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
1503 
1504  rxfd = sp->rxd + entry;
1505 
1506  IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
1507 
1508  /* An unexpected event; additional code is needed to handle
1509  * it properly. So for the time being, just disregard the
1510  * frame.
1511  */
1512 
1513  /* Free the memory associated with the RX
1514  * buffer since it is erroneous and we will
1515  * not pass it to higher layer processes.
1516  */
1517  if (sp->rx_buff[entry]) {
1518  pci_unmap_single(sp->pdev,
1519  le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1520  sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1521  dev_kfree_skb_irq(sp->rx_buff[entry]);
1522  }
1523 
1524  /* Assure RX buffer is not reused by IPG. */
1525  sp->rx_buff[entry] = NULL;
1526  }
1527 
1528  sp->rx_current = curr;
1529 
1530  /* Check to see if there are a minimum number of used
1531  * RFDs before restoring any (should improve performance.)
1532  */
1533  if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1534  ipg_nic_rxrestore(dev);
1535 
1536  return 0;
1537 }
1538 
1539 static void ipg_reset_after_host_error(struct work_struct *work)
1540 {
1541  struct ipg_nic_private *sp =
1542  container_of(work, struct ipg_nic_private, task.work);
1543  struct net_device *dev = sp->dev;
1544 
1545  /*
1546  * Acknowledge HostError interrupt by resetting
1547  * IPG DMA and HOST.
1548  */
1549  ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1550 
1551  init_rfdlist(dev);
1552  init_tfdlist(dev);
1553 
1554  if (ipg_io_config(dev) < 0) {
1555  netdev_info(dev, "Cannot recover from PCI error\n");
1556  schedule_delayed_work(&sp->task, HZ);
1557  }
1558 }
1559 
1560 static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
1561 {
1562  struct net_device *dev = dev_inst;
1563  struct ipg_nic_private *sp = netdev_priv(dev);
1564  void __iomem *ioaddr = sp->ioaddr;
1565  unsigned int handled = 0;
1566  u16 status;
1567 
1568  IPG_DEBUG_MSG("_interrupt_handler\n");
1569 
1570  if (sp->is_jumbo)
1571  ipg_nic_rxrestore(dev);
1572 
1573  spin_lock(&sp->lock);
1574 
1575  /* Get interrupt source information, and acknowledge
1576  * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
1577  * IntRequested, MacControlFrame, LinkEvent) interrupts
1578  * if issued. Also, all IPG interrupts are disabled by
1579  * reading IntStatusAck.
1580  */
1581  status = ipg_r16(INT_STATUS_ACK);
1582 
1583  IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
1584 
1585  /* Shared IRQ or remove event. */
1586  if (!(status & IPG_IS_RSVD_MASK))
1587  goto out_enable;
1588 
1589  handled = 1;
1590 
1591  if (unlikely(!netif_running(dev)))
1592  goto out_unlock;
1593 
1594  /* If RFDListEnd interrupt, restore all used RFDs. */
1595  if (status & IPG_IS_RFD_LIST_END) {
1596  IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
1597 
1598  /* The RFD list end indicates an RFD was encountered
1599  * with a 0 NextPtr, or with an RFDDone bit set to 1
1600  * (indicating the RFD is not ready for use by the
1601  * IPG.) Try to restore all RFDs.
1602  */
1603  ipg_nic_rxrestore(dev);
1604 
1605 #ifdef IPG_DEBUG
1606  /* Increment the RFDlistendCount counter. */
1607  sp->RFDlistendCount++;
1608 #endif
1609  }
1610 
1611  /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
1612  * IntRequested interrupt, process received frames. */
1613  if ((status & IPG_IS_RX_DMA_PRIORITY) ||
1614  (status & IPG_IS_RFD_LIST_END) ||
1615  (status & IPG_IS_RX_DMA_COMPLETE) ||
1616  (status & IPG_IS_INT_REQUESTED)) {
1617 #ifdef IPG_DEBUG
1618  /* Increment the RFD list checked counter if interrupted
1619  * only to check the RFD list. */
1620  if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
1621  IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
1622  (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
1623  IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
1624  IPG_IS_UPDATE_STATS)))
1625  sp->RFDListCheckedCount++;
1626 #endif
1627 
1628  if (sp->is_jumbo)
1629  ipg_nic_rx_jumbo(dev);
1630  else
1631  ipg_nic_rx(dev);
1632  }
1633 
1634  /* If TxDMAComplete interrupt, free used TFDs. */
1635  if (status & IPG_IS_TX_DMA_COMPLETE)
1636  ipg_nic_txfree(dev);
1637 
1638  /* TxComplete interrupts indicate one of numerous actions.
1639  * Determine what action to take based on TXSTATUS register.
1640  */
1641  if (status & IPG_IS_TX_COMPLETE)
1642  ipg_nic_txcleanup(dev);
1643 
1644  /* If UpdateStats interrupt, update Linux Ethernet statistics */
1645  if (status & IPG_IS_UPDATE_STATS)
1646  ipg_nic_get_stats(dev);
1647 
1648  /* If HostError interrupt, reset IPG. */
1649  if (status & IPG_IS_HOST_ERROR) {
1650  IPG_DDEBUG_MSG("HostError Interrupt\n");
1651 
1652  schedule_delayed_work(&sp->task, 0);
1653  }
1654 
1655  /* If LinkEvent interrupt, resolve autonegotiation. */
1656  if (status & IPG_IS_LINK_EVENT) {
1657  if (ipg_config_autoneg(dev) < 0)
1658  netdev_info(dev, "Auto-negotiation error\n");
1659  }
1660 
1661  /* If MACCtrlFrame interrupt, do nothing. */
1662  if (status & IPG_IS_MAC_CTRL_FRAME)
1663  IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
1664 
1665  /* If RxComplete interrupt, do nothing. */
1666  if (status & IPG_IS_RX_COMPLETE)
1667  IPG_DEBUG_MSG("RxComplete interrupt\n");
1668 
1669  /* If RxEarly interrupt, do nothing. */
1670  if (status & IPG_IS_RX_EARLY)
1671  IPG_DEBUG_MSG("RxEarly interrupt\n");
1672 
1673 out_enable:
1674  /* Re-enable IPG interrupts. */
1675  ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
1676  IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
1677  IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
1678 out_unlock:
1679  spin_unlock(&sp->lock);
1680 
1681  return IRQ_RETVAL(handled);
1682 }
1683 
1684 static void ipg_rx_clear(struct ipg_nic_private *sp)
1685 {
1686  unsigned int i;
1687 
1688  for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
1689  if (sp->rx_buff[i]) {
1690  struct ipg_rx *rxfd = sp->rxd + i;
1691 
1692  dev_kfree_skb_irq(sp->rx_buff[i]);
1693  sp->rx_buff[i] = NULL;
1694  pci_unmap_single(sp->pdev,
1695  le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1696  sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1697  }
1698  }
1699 }
1700 
1701 static void ipg_tx_clear(struct ipg_nic_private *sp)
1702 {
1703  unsigned int i;
1704 
1705  for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
1706  if (sp->tx_buff[i]) {
1707  struct ipg_tx *txfd = sp->txd + i;
1708 
1709  pci_unmap_single(sp->pdev,
1710  le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
1711  sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1712 
1713  dev_kfree_skb_irq(sp->tx_buff[i]);
1714 
1715  sp->tx_buff[i] = NULL;
1716  }
1717  }
1718 }
1719 
1720 static int ipg_nic_open(struct net_device *dev)
1721 {
1722  struct ipg_nic_private *sp = netdev_priv(dev);
1723  void __iomem *ioaddr = sp->ioaddr;
1724  struct pci_dev *pdev = sp->pdev;
1725  int rc;
1726 
1727  IPG_DEBUG_MSG("_nic_open\n");
1728 
1729  sp->rx_buf_sz = sp->rxsupport_size;
1730 
1731  /* Check for interrupt line conflicts, and request interrupt
1732  * line for IPG.
1733  *
1734  * IMPORTANT: Disable IPG interrupts prior to registering
1735  * IRQ.
1736  */
1737  ipg_w16(0x0000, INT_ENABLE);
1738 
1739  /* Register the interrupt line to be used by the IPG within
1740  * the Linux system.
1741  */
1742  rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1743  dev->name, dev);
1744  if (rc < 0) {
1745  netdev_info(dev, "Error when requesting interrupt\n");
1746  goto out;
1747  }
1748 
1749  dev->irq = pdev->irq;
1750 
1751  rc = -ENOMEM;
1752 
1753  sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1754  &sp->rxd_map, GFP_KERNEL);
1755  if (!sp->rxd)
1756  goto err_free_irq_0;
1757 
1758  sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1759  &sp->txd_map, GFP_KERNEL);
1760  if (!sp->txd)
1761  goto err_free_rx_1;
1762 
1763  rc = init_rfdlist(dev);
1764  if (rc < 0) {
1765  netdev_info(dev, "Error during configuration\n");
1766  goto err_free_tx_2;
1767  }
1768 
1769  init_tfdlist(dev);
1770 
1771  rc = ipg_io_config(dev);
1772  if (rc < 0) {
1773  netdev_info(dev, "Error during configuration\n");
1774  goto err_release_tfdlist_3;
1775  }
1776 
1777  /* Resolve autonegotiation. */
1778  if (ipg_config_autoneg(dev) < 0)
1779  netdev_info(dev, "Auto-negotiation error\n");
1780 
1781  /* initialize JUMBO Frame control variable */
1782  sp->jumbo.found_start = 0;
1783  sp->jumbo.current_size = 0;
1784  sp->jumbo.skb = NULL;
1785 
1786  /* Enable transmit and receive operation of the IPG. */
1787  ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
1788  IPG_MC_RSVD_MASK, MAC_CTRL);
1789 
1790  netif_start_queue(dev);
1791 out:
1792  return rc;
1793 
1794 err_release_tfdlist_3:
1795  ipg_tx_clear(sp);
1796  ipg_rx_clear(sp);
1797 err_free_tx_2:
1798  dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1799 err_free_rx_1:
1800  dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1801 err_free_irq_0:
1802  free_irq(pdev->irq, dev);
1803  goto out;
1804 }
1805 
1806 static int ipg_nic_stop(struct net_device *dev)
1807 {
1808  struct ipg_nic_private *sp = netdev_priv(dev);
1809  void __iomem *ioaddr = sp->ioaddr;
1810  struct pci_dev *pdev = sp->pdev;
1811 
1812  IPG_DEBUG_MSG("_nic_stop\n");
1813 
1814  netif_stop_queue(dev);
1815 
1816  IPG_DUMPTFDLIST(dev);
1817 
1818  do {
1819  (void) ipg_r16(INT_STATUS_ACK);
1820 
1821  ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1822 
1823  synchronize_irq(pdev->irq);
1824  } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
1825 
1826  ipg_rx_clear(sp);
1827 
1828  ipg_tx_clear(sp);
1829 
1830  dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1831  dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1832 
1833  free_irq(pdev->irq, dev);
1834 
1835  return 0;
1836 }
1837 
1838 static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
1839  struct net_device *dev)
1840 {
1841  struct ipg_nic_private *sp = netdev_priv(dev);
1842  void __iomem *ioaddr = sp->ioaddr;
1843  unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1844  unsigned long flags;
1845  struct ipg_tx *txfd;
1846 
1847  IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
1848 
1849  /* If in 10Mbps mode, stop the transmit queue so
1850  * no more transmit frames are accepted.
1851  */
1852  if (sp->tenmbpsmode)
1853  netif_stop_queue(dev);
1854 
1855  if (sp->reset_current_tfd) {
1856  sp->reset_current_tfd = 0;
1857  entry = 0;
1858  }
1859 
1860  txfd = sp->txd + entry;
1861 
1862  sp->tx_buff[entry] = skb;
1863 
1864  /* Clear all TFC fields, except TFDDONE. */
1865  txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
1866 
1867  /* Specify the TFC field within the TFD. */
1868  txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
1869  (IPG_TFC_FRAMEID & sp->tx_current) |
1870  (IPG_TFC_FRAGCOUNT & (1 << 24)));
1871  /*
1872  * 16--17 (WordAlign) <- 3 (disable),
1873  * 0--15 (FrameId) <- sp->tx_current,
1874  * 24--27 (FragCount) <- 1
1875  */
1876 
1877  /* Request TxComplete interrupts at an interval defined
1878  * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
1879  * Request TxComplete interrupt for every frame
1880  * if in 10Mbps mode to accommodate problem with 10Mbps
1881  * processing.
1882  */
1883  if (sp->tenmbpsmode)
1884  txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
1885  txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
1886  /* Based on compilation option, determine if FCS is to be
1887  * appended to transmit frame by IPG.
1888  */
1889  if (!(IPG_APPEND_FCS_ON_TX))
1890  txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
1891 
1892  /* Based on compilation option, determine if IP, TCP and/or
1893  * UDP checksums are to be added to transmit frame by IPG.
1894  */
1895  if (IPG_ADD_IPCHECKSUM_ON_TX)
1896  txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
1897 
1898  if (IPG_ADD_TCPCHECKSUM_ON_TX)
1899  txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
1900 
1901  if (IPG_ADD_UDPCHECKSUM_ON_TX)
1902  txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
1903 
1904  /* Based on compilation option, determine if VLAN tag info is to be
1905  * inserted into transmit frame by IPG.
1906  */
1907  if (IPG_INSERT_MANUAL_VLAN_TAG) {
1908  txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
1909  ((u64) IPG_MANUAL_VLAN_VID << 32) |
1910  ((u64) IPG_MANUAL_VLAN_CFI << 44) |
1911  ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
1912  }
1913 
1914  /* The fragment start location within system memory is defined
1915  * by the sk_buff structure's data field. The physical address
1916  * of this location within the system's virtual memory space
1917  * is determined using the IPG_HOST2BUS_MAP function.
1918  */
1919  txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1920  skb->len, PCI_DMA_TODEVICE));
1921 
1922  /* The length of the fragment within system memory is defined by
1923  * the sk_buff structure's len field.
1924  */
1925  txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
1926  ((u64) (skb->len & 0xffff) << 48));
1927 
1928  /* Clear the TFDDone bit last to indicate the TFD is ready
1929  * for transfer to the IPG.
1930  */
1931  txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
1932 
1933  spin_lock_irqsave(&sp->lock, flags);
1934 
1935  sp->tx_current++;
1936 
1937  mmiowb();
1938 
1939  ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
1940 
1941  if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1942  netif_stop_queue(dev);
1943 
1944  spin_unlock_irqrestore(&sp->lock, flags);
1945 
1946  return NETDEV_TX_OK;
1947 }
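/* Worked example of the TFC packing above (illustrative): for
 * tx_current = 5 and a single-fragment skb, bits 0-15 carry
 * FrameId = 5, bits 16-17 carry WordAlign = 3 (disabled) and bits 24-27
 * carry FragCount = 1, via (1 << 24); the fragment length itself
 * travels in bits 48-63 of frag_info, masked by IPG_TFI_FRAGLEN.
 */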
1948 
1949 static void ipg_set_phy_default_param(unsigned char rev,
1950  struct net_device *dev, int phy_address)
1951 {
1952  unsigned short length;
1953  unsigned char revision;
1954  const unsigned short *phy_param;
1955  unsigned short address, value;
1956 
1957  phy_param = &DefaultPhyParam[0];
1958  length = *phy_param & 0x00FF;
1959  revision = (unsigned char)((*phy_param) >> 8);
1960  phy_param++;
1961  while (length != 0) {
1962  if (rev == revision) {
1963  while (length > 1) {
1964  address = *phy_param;
1965  value = *(phy_param + 1);
1966  phy_param += 2;
1967  mdio_write(dev, phy_address, address, value);
1968  length -= 4;
1969  }
1970  break;
1971  } else {
1972  phy_param += length / 2;
1973  length = *phy_param & 0x00FF;
1974  revision = (unsigned char)((*phy_param) >> 8);
1975  phy_param++;
1976  }
1977  }
1978 }
1979 
1980 static int read_eeprom(struct net_device *dev, int eep_addr)
1981 {
1982  void __iomem *ioaddr = ipg_ioaddr(dev);
1983  unsigned int i;
1984  int ret = 0;
1985  u16 value;
1986 
1987  value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
1988  ipg_w16(value, EEPROM_CTRL);
1989 
1990  for (i = 0; i < 1000; i++) {
1991  u16 data;
1992 
1993  mdelay(10);
1994  data = ipg_r16(EEPROM_CTRL);
1995  if (!(data & IPG_EC_EEPROM_BUSY)) {
1996  ret = ipg_r16(EEPROM_DATA);
1997  break;
1998  }
1999  }
2000  return ret;
2001 }
2002 
2003 static void ipg_init_mii(struct net_device *dev)
2004 {
2005  struct ipg_nic_private *sp = netdev_priv(dev);
2006  struct mii_if_info *mii_if = &sp->mii_if;
2007  int phyaddr;
2008 
2009  mii_if->dev = dev;
2010  mii_if->mdio_read = mdio_read;
2011  mii_if->mdio_write = mdio_write;
2012  mii_if->phy_id_mask = 0x1f;
2013  mii_if->reg_num_mask = 0x1f;
2014 
2015  mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
2016 
2017  if (phyaddr != 0x1f) {
2018  u16 mii_phyctrl, mii_1000cr;
2019 
2020  mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2021  mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
2022  GMII_PHY_1000BASETCONTROL_PreferMaster;
2023  mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
2024 
2025  mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2026 
2027  /* Set default phyparam */
2028  ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2029 
2030  /* Reset PHY */
2031  mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
2032  mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
2033 
2034  }
2035 }
2036 
2037 static int ipg_hw_init(struct net_device *dev)
2038 {
2039  struct ipg_nic_private *sp = netdev_priv(dev);
2040  void __iomem *ioaddr = sp->ioaddr;
2041  unsigned int i;
2042  int rc;
2043 
2044  /* Read/Write and Reset EEPROM Value */
2045  /* Read LED Mode Configuration from EEPROM */
2046  sp->led_mode = read_eeprom(dev, 6);
2047 
2048  /* Reset all functions within the IPG. Do not assert
2049  * RST_OUT as not compatible with some PHYs.
2050  */
2051  rc = ipg_reset(dev, IPG_RESET_MASK);
2052  if (rc < 0)
2053  goto out;
2054 
2055  ipg_init_mii(dev);
2056 
2057  /* Read MAC Address from EEPROM */
2058  for (i = 0; i < 3; i++)
2059  sp->station_addr[i] = read_eeprom(dev, 16 + i);
2060 
2061  for (i = 0; i < 3; i++)
2062  ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2063 
2064  /* Set station address in ethernet_device structure. */
2065  dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
2066  dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
2067  dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
2068  dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
2069  dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
2070  dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
2071 out:
2072  return rc;
2073 }
2074 
2075 static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2076 {
2077  struct ipg_nic_private *sp = netdev_priv(dev);
2078  int rc;
2079 
2080  mutex_lock(&sp->mii_mutex);
2081  rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2082  mutex_unlock(&sp->mii_mutex);
2083 
2084  return rc;
2085 }
2086 
2087 static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
2088 {
2089  struct ipg_nic_private *sp = netdev_priv(dev);
2090  int err;
2091 
2092  /* Function to accommodate changes to Maximum Transmission Unit
2093  * (or MTU) of IPG NIC. Cannot use default function since
2094  * the default will not allow for MTU > 1500 bytes.
2095  */
2096 
2097  IPG_DEBUG_MSG("_nic_change_mtu\n");
2098 
2099  /*
2100  * Check that the new MTU value is between 68 (the minimum IPv4 MTU)
2101  * and 10 KB, which is the largest frame size this NIC supports.
2102  */
2103  if (new_mtu < 68 || new_mtu > 10240)
2104  return -EINVAL;
2105 
2106  err = ipg_nic_stop(dev);
2107  if (err)
2108  return err;
2109 
2110  dev->mtu = new_mtu;
2111 
2112  sp->max_rxframe_size = new_mtu;
2113 
2114  sp->rxfrag_size = new_mtu;
2115  if (sp->rxfrag_size > 4088)
2116  sp->rxfrag_size = 4088;
2117 
2118  sp->rxsupport_size = sp->max_rxframe_size;
2119 
2120  if (new_mtu > 0x0600)
2121  sp->is_jumbo = true;
2122  else
2123  sp->is_jumbo = false;
2124 
2125  return ipg_nic_open(dev);
2126 }
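/* Sizing arithmetic above, by example (illustrative): for new_mtu =
 * 9000, max_rxframe_size and rxsupport_size become 9000 while
 * rxfrag_size is capped at 4088, so is_jumbo is set and one frame spans
 * up to three RFD fragments (9000 / 4088 rounded up), which the
 * ipg_nic_rx_with_* handlers reassemble.
 */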
2127 
2128 static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2129 {
2130  struct ipg_nic_private *sp = netdev_priv(dev);
2131  int rc;
2132 
2133  mutex_lock(&sp->mii_mutex);
2134  rc = mii_ethtool_gset(&sp->mii_if, cmd);
2135  mutex_unlock(&sp->mii_mutex);
2136 
2137  return rc;
2138 }
2139 
2140 static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2141 {
2142  struct ipg_nic_private *sp = netdev_priv(dev);
2143  int rc;
2144 
2145  mutex_lock(&sp->mii_mutex);
2146  rc = mii_ethtool_sset(&sp->mii_if, cmd);
2147  mutex_unlock(&sp->mii_mutex);
2148 
2149  return rc;
2150 }
2151 
2152 static int ipg_nway_reset(struct net_device *dev)
2153 {
2154  struct ipg_nic_private *sp = netdev_priv(dev);
2155  int rc;
2156 
2157  mutex_lock(&sp->mii_mutex);
2158  rc = mii_nway_restart(&sp->mii_if);
2159  mutex_unlock(&sp->mii_mutex);
2160 
2161  return rc;
2162 }
2163 
2164 static const struct ethtool_ops ipg_ethtool_ops = {
2165  .get_settings = ipg_get_settings,
2166  .set_settings = ipg_set_settings,
2167  .nway_reset = ipg_nway_reset,
2168 };
2169 
2170 static void __devexit ipg_remove(struct pci_dev *pdev)
2171 {
2172  struct net_device *dev = pci_get_drvdata(pdev);
2173  struct ipg_nic_private *sp = netdev_priv(dev);
2174 
2175  IPG_DEBUG_MSG("_remove\n");
2176 
2177  /* Un-register Ethernet device. */
2178  unregister_netdev(dev);
2179 
2180  pci_iounmap(pdev, sp->ioaddr);
2181 
2182  pci_release_regions(pdev);
2183 
2184  free_netdev(dev);
2185  pci_disable_device(pdev);
2186  pci_set_drvdata(pdev, NULL);
2187 }
2188 
2189 static const struct net_device_ops ipg_netdev_ops = {
2190  .ndo_open = ipg_nic_open,
2191  .ndo_stop = ipg_nic_stop,
2192  .ndo_start_xmit = ipg_nic_hard_start_xmit,
2193  .ndo_get_stats = ipg_nic_get_stats,
2194  .ndo_set_rx_mode = ipg_nic_set_multicast_list,
2195  .ndo_do_ioctl = ipg_ioctl,
2196  .ndo_tx_timeout = ipg_tx_timeout,
2197  .ndo_change_mtu = ipg_nic_change_mtu,
2198  .ndo_set_mac_address = eth_mac_addr,
2199  .ndo_validate_addr = eth_validate_addr,
2200 };
2201 
2202 static int __devinit ipg_probe(struct pci_dev *pdev,
2203  const struct pci_device_id *id)
2204 {
2205  unsigned int i = id->driver_data;
2206  struct ipg_nic_private *sp;
2207  struct net_device *dev;
2208  void __iomem *ioaddr;
2209  int rc;
2210 
2211  rc = pci_enable_device(pdev);
2212  if (rc < 0)
2213  goto out;
2214 
2215  pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
2216 
2217  pci_set_master(pdev);
2218 
2219  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2220  if (rc < 0) {
2221  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2222  if (rc < 0) {
2223  pr_err("%s: DMA config failed\n", pci_name(pdev));
2224  goto err_disable_0;
2225  }
2226  }
2227 
2228  /*
2229  * Initialize net device.
2230  */
2231  dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2232  if (!dev) {
2233  rc = -ENOMEM;
2234  goto err_disable_0;
2235  }
2236 
2237  sp = netdev_priv(dev);
2238  spin_lock_init(&sp->lock);
2239  mutex_init(&sp->mii_mutex);
2240 
2241  sp->is_jumbo = IPG_IS_JUMBO;
2242  sp->rxfrag_size = IPG_RXFRAG_SIZE;
2243  sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2244  sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2245 
2246  /* Declare IPG NIC functions for Ethernet device methods.
2247  */
2248  dev->netdev_ops = &ipg_netdev_ops;
2249  SET_NETDEV_DEV(dev, &pdev->dev);
2250  SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
2251 
2252  rc = pci_request_regions(pdev, DRV_NAME);
2253  if (rc)
2254  goto err_free_dev_1;
2255 
2256  ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
2257  if (!ioaddr) {
2258  pr_err("%s: cannot map MMIO\n", pci_name(pdev));
2259  rc = -EIO;
2260  goto err_release_regions_2;
2261  }
2262 
2263  /* Save the pointer to the PCI device information. */
2264  sp->ioaddr = ioaddr;
2265  sp->pdev = pdev;
2266  sp->dev = dev;
2267 
2268  INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
2269 
2270  pci_set_drvdata(pdev, dev);
2271 
2272  rc = ipg_hw_init(dev);
2273  if (rc < 0)
2274  goto err_unmap_3;
2275 
2276  rc = register_netdev(dev);
2277  if (rc < 0)
2278  goto err_unmap_3;
2279 
2280  netdev_info(dev, "Ethernet device registered\n");
2281 out:
2282  return rc;
2283 
2284 err_unmap_3:
2285  pci_iounmap(pdev, ioaddr);
2286 err_release_regions_2:
2287  pci_release_regions(pdev);
2288 err_free_dev_1:
2289  free_netdev(dev);
2290 err_disable_0:
2291  pci_disable_device(pdev);
2292  goto out;
2293 }
2294 
2295 static struct pci_driver ipg_pci_driver = {
2296  .name = IPG_DRIVER_NAME,
2297  .id_table = ipg_pci_tbl,
2298  .probe = ipg_probe,
2299  .remove = __devexit_p(ipg_remove),
2300 };
2301 
2302 static int __init ipg_init_module(void)
2303 {
2304  return pci_register_driver(&ipg_pci_driver);
2305 }
2306 
2307 static void __exit ipg_exit_module(void)
2308 {
2309  pci_unregister_driver(&ipg_pci_driver);
2310 }
2311 
2312 module_init(ipg_init_module);
2313 module_exit(ipg_exit_module);