/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <[email protected]>
 * Copyright(c) 2006 - 2008 Jay Cliburn <[email protected]>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <[email protected]>
 * Jie Yang <[email protected]>
 * Chris Snook <[email protected]>
 * Jay Cliburn <[email protected]>
 *
 * This version is adapted from the Attansic reference driver.
 *
 * TODO:
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 *
 * NEEDS TESTING:
 * VLAN
 * multicast
 * promiscuous mode
 * interrupt coalescing
 * SMP torture testing
 */

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include "atl1.h"

#define ATLX_DRIVER_VERSION "2.1.3"
MODULE_AUTHOR("Xiong Huang <[email protected]>, "
              "Chris Snook <[email protected]>, "
              "Jay Cliburn <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATLX_DRIVER_VERSION);

/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"

static const struct ethtool_ops atl1_ethtool_ops;

/*
 * This is the only thing that needs to be changed to adjust the
 * maximum number of ports that the driver can manage.
 */
#define ATL1_MAX_NIC 4

#define OPTION_UNSET    -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED  1

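/*
 * [0 ... ATL1_MAX_NIC] below is a GCC designated-range initializer:
 * every slot of the module-parameter array starts out as OPTION_UNSET.
 */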
#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }

/*
 * Interrupt Moderate Timer in units of 2 us
 *
 * Valid Range: 10-65535
 *
 * Default Value: 100 (200us)
 */
static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
static unsigned int num_int_mod_timer;
module_param_array_named(int_mod_timer, int_mod_timer, int,
        &num_int_mod_timer, 0);
MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");

#define DEFAULT_INT_MOD_CNT     100     /* 200us */
#define MAX_INT_MOD_CNT         65000
#define MIN_INT_MOD_CNT         50
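/*
 * Illustration (hypothetical values): loading the module with
 *   modprobe atl1 int_mod_timer=100,250
 * would leave board 0 at the 200us default and set board 1 to 500us,
 * since each count is worth 2us.
 */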

struct atl1_option {
        enum { enable_option, range_option, list_option } type;
        char *name;
        char *err;
        int def;
        union {
                struct {        /* range_option info */
                        int min;
                        int max;
                } r;
                struct {        /* list_option info */
                        int nr;
                        struct atl1_opt_list {
                                int i;
                                char *str;
                        } *p;
                } l;
        } arg;
};

static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
        struct pci_dev *pdev)
{
        if (*value == OPTION_UNSET) {
                *value = opt->def;
                return 0;
        }

        switch (opt->type) {
        case enable_option:
                switch (*value) {
                case OPTION_ENABLED:
                        dev_info(&pdev->dev, "%s enabled\n", opt->name);
                        return 0;
                case OPTION_DISABLED:
                        dev_info(&pdev->dev, "%s disabled\n", opt->name);
                        return 0;
                }
                break;
        case range_option:
                if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
                        dev_info(&pdev->dev, "%s set to %i\n", opt->name,
                                *value);
                        return 0;
                }
                break;
        case list_option:{
                        int i;
                        struct atl1_opt_list *ent;

                        for (i = 0; i < opt->arg.l.nr; i++) {
                                ent = &opt->arg.l.p[i];
                                if (*value == ent->i) {
                                        if (ent->str[0] != '\0')
                                                dev_info(&pdev->dev, "%s\n",
                                                        ent->str);
                                        return 0;
                                }
                        }
                }
                break;

        default:
                break;
        }

        dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
                opt->name, *value, opt->err);
        *value = opt->def;
        return -1;
}

/*
 * atl1_check_options - Range Checking for Command Line Parameters
 * @adapter: board private structure
 *
 * This routine checks all command line parameters for valid user
 * input.  If an invalid value is given, or if no user specified
 * value exists, a default value is used.  The final value is stored
 * in a variable in the adapter structure.
 */
static void __devinit atl1_check_options(struct atl1_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int bd = adapter->bd_number;
        if (bd >= ATL1_MAX_NIC) {
                dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
                dev_notice(&pdev->dev, "using defaults for all values\n");
        }
        {       /* Interrupt Moderate Timer */
                struct atl1_option opt = {
                        .type = range_option,
                        .name = "Interrupt Moderator Timer",
                        .err = "using default of "
                                __MODULE_STRING(DEFAULT_INT_MOD_CNT),
                        .def = DEFAULT_INT_MOD_CNT,
                        .arg = {.r = {.min = MIN_INT_MOD_CNT,
                                        .max = MAX_INT_MOD_CNT} }
                };
                int val;
                if (num_int_mod_timer > bd) {
                        val = int_mod_timer[bd];
                        atl1_validate_option(&val, &opt, pdev);
                        adapter->imt = (u16) val;
                } else
                        adapter->imt = (u16) (opt.def);
        }
}

/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
        /* required last entry */
        {0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);

static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
        NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");

/*
 * Reset the transmit and receive units; mask and clear all interrupts.
 * hw - Struct containing variables accessed by shared code
 * return : 0 or idle status (if error)
 */
static s32 atl1_reset_hw(struct atl1_hw *hw)
{
        struct pci_dev *pdev = hw->back->pdev;
        struct atl1_adapter *adapter = hw->back;
        u32 icr;
        int i;

        /*
         * Clear Interrupt mask to stop board from generating
         * interrupts & Clear any pending interrupt events
         */
        /*
         * atlx_irq_disable(adapter);
         * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
         */

        /*
         * Issue Soft Reset to the MAC.  This will reset the chip's
         * transmit, receive, DMA.  It will not affect
         * the current PCI configuration.  The global reset bit is self-
         * clearing, and should clear within a microsecond.
         */
        iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
        ioread32(hw->hw_addr + REG_MASTER_CTRL);

        iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
        ioread16(hw->hw_addr + REG_PHY_ENABLE);

        /* delay about 1ms */
        msleep(1);

        /* Wait at least 10ms for all modules to be idle */
        for (i = 0; i < 10; i++) {
                icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
                if (!icr)
                        break;
                /* delay 1 ms */
                msleep(1);
                /* FIXME: still the right way to do this? */
                cpu_relax();
        }

        if (icr) {
                if (netif_msg_hw(adapter))
                        dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
                return icr;
        }

        return 0;
}


/* functions relating to the EEPROM
 *
 * check_eeprom_exist
 * return 0 if the EEPROM exists
 */
static int atl1_check_eeprom_exist(struct atl1_hw *hw)
{
        u32 value;
        value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
        if (value & SPI_FLASH_CTRL_EN_VPD) {
                value &= ~SPI_FLASH_CTRL_EN_VPD;
                iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
        }

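        /*
         * Apparently vendor magic: when EEPROM-backed VPD is present,
         * the upper byte of the PCIe capability-list register reads
         * back as 0x6C.
         */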
        value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
        return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}

static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
{
        int i;
        u32 control;

        if (offset & 3)
                /* address is not dword-aligned */
                return false;

        iowrite32(0, hw->hw_addr + REG_VPD_DATA);
        control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
        iowrite32(control, hw->hw_addr + REG_VPD_CAP);
        ioread32(hw->hw_addr + REG_VPD_CAP);

        for (i = 0; i < 10; i++) {
                msleep(2);
                control = ioread32(hw->hw_addr + REG_VPD_CAP);
                if (control & VPD_CAP_VPD_FLAG)
                        break;
        }
        if (control & VPD_CAP_VPD_FLAG) {
                *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
                return true;
        }
        /* timeout */
        return false;
}

/*
 * Reads the value from a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 */
static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
        u32 val;
        int i;

        val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
                MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
                MDIO_CLK_SEL_SHIFT;
        iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
        ioread32(hw->hw_addr + REG_MDIO_CTRL);

        for (i = 0; i < MDIO_WAIT_TIMES; i++) {
                udelay(2);
                val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
                if (!(val & (MDIO_START | MDIO_BUSY)))
                        break;
        }
        if (!(val & (MDIO_START | MDIO_BUSY))) {
                *phy_data = (u16) val;
                return 0;
        }
        return ATLX_ERR_PHY;
}

#define CUSTOM_SPI_CS_SETUP     2
#define CUSTOM_SPI_CLK_HI       2
#define CUSTOM_SPI_CLK_LO       2
#define CUSTOM_SPI_CS_HOLD      2
#define CUSTOM_SPI_CS_HI        3

static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
{
        int i;
        u32 value;

        iowrite32(0, hw->hw_addr + REG_SPI_DATA);
        iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);

        value = SPI_FLASH_CTRL_WAIT_READY |
            (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
            SPI_FLASH_CTRL_CS_SETUP_SHIFT |
            (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
            SPI_FLASH_CTRL_CLK_HI_SHIFT |
            (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
            SPI_FLASH_CTRL_CLK_LO_SHIFT |
            (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
            SPI_FLASH_CTRL_CS_HOLD_SHIFT |
            (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
            SPI_FLASH_CTRL_CS_HI_SHIFT;

        iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);

        value |= SPI_FLASH_CTRL_START;
        iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
        ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);

        for (i = 0; i < 10; i++) {
                msleep(1);
                value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
                if (!(value & SPI_FLASH_CTRL_START))
                        break;
        }

        if (value & SPI_FLASH_CTRL_START)
                return false;

        *buf = ioread32(hw->hw_addr + REG_SPI_DATA);

        return true;
}

/*
 * get_permanent_address
 * return 0 if a valid MAC address is found
 */
static int atl1_get_permanent_address(struct atl1_hw *hw)
{
        u32 addr[2];
        u32 i, control;
        u16 reg;
        u8 eth_addr[ETH_ALEN];
        bool key_valid;

        if (is_valid_ether_addr(hw->perm_mac_addr))
                return 0;

        /* init */
        addr[0] = addr[1] = 0;

        if (!atl1_check_eeprom_exist(hw)) {
                reg = 0;
                key_valid = false;
                /* Read out all EEPROM content */
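                /*
                 * Each EEPROM record is a 32-bit word: the low byte is the
                 * 0x5A signature, the high 16 bits name a register offset,
                 * and the following word carries that register's value.
                 */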
                i = 0;
                while (1) {
                        if (atl1_read_eeprom(hw, i + 0x100, &control)) {
                                if (key_valid) {
                                        if (reg == REG_MAC_STA_ADDR)
                                                addr[0] = control;
                                        else if (reg == (REG_MAC_STA_ADDR + 4))
                                                addr[1] = control;
                                        key_valid = false;
                                } else if ((control & 0xff) == 0x5A) {
                                        key_valid = true;
                                        reg = (u16) (control >> 16);
                                } else
                                        break;
                        } else
                                /* read error */
                                break;
                        i += 4;
                }

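                /*
                 * The EEPROM words hold the address byte-swapped relative
                 * to eth_addr[]; swab32/swab16 put the most significant
                 * octet first.
                 */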
                *(u32 *) &eth_addr[2] = swab32(addr[0]);
                *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
                if (is_valid_ether_addr(eth_addr)) {
                        memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
                        return 0;
                }
        }

        /* see if the SPI flash holds it instead */
        addr[0] = addr[1] = 0;
        reg = 0;
        key_valid = false;
        i = 0;
        while (1) {
                if (atl1_spi_read(hw, i + 0x1f000, &control)) {
                        if (key_valid) {
                                if (reg == REG_MAC_STA_ADDR)
                                        addr[0] = control;
                                else if (reg == (REG_MAC_STA_ADDR + 4))
                                        addr[1] = control;
                                key_valid = false;
                        } else if ((control & 0xff) == 0x5A) {
                                key_valid = true;
                                reg = (u16) (control >> 16);
                        } else
                                /* data end */
                                break;
                } else
                        /* read error */
                        break;
                i += 4;
        }

        *(u32 *) &eth_addr[2] = swab32(addr[0]);
        *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
        if (is_valid_ether_addr(eth_addr)) {
                memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
                return 0;
        }

        /*
         * On some motherboards, the MAC address is written by the
         * BIOS directly to the MAC register during POST, and is
         * not stored in eeprom.  If all else thus far has failed
         * to fetch the permanent MAC address, try reading it directly.
         */
        addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
        addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
        *(u32 *) &eth_addr[2] = swab32(addr[0]);
        *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
        if (is_valid_ether_addr(eth_addr)) {
                memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
                return 0;
        }

        return 1;
}

/*
 * Reads the adapter's MAC address from the EEPROM
 * hw - Struct containing variables accessed by shared code
 */
static s32 atl1_read_mac_addr(struct atl1_hw *hw)
{
        s32 ret = 0;
        u16 i;

        if (atl1_get_permanent_address(hw)) {
                eth_random_addr(hw->perm_mac_addr);
                ret = 1;
        }

        for (i = 0; i < ETH_ALEN; i++)
                hw->mac_addr[i] = hw->perm_mac_addr[i];
        return ret;
}

/*
 * Hashes an address to determine its location in the multicast table
 * hw - Struct containing variables accessed by shared code
 * mc_addr - the multicast address to hash
 *
 * atl1_hash_mc_addr
 * purpose
 *      set hash value for a multicast address
 * hash calculation:
 *      1. calculate a 32-bit CRC for the multicast address
 *      2. reflect the CRC, MSB to LSB
 */
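/*
 * Illustration: if ether_crc_le() returns 0x00000001 (bit 0 set), the
 * reflected value is 0x80000000 (bit 31 set).
 */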
static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
        u32 crc32, value = 0;
        int i;

        crc32 = ether_crc_le(6, mc_addr);
        for (i = 0; i < 32; i++)
                value |= (((crc32 >> i) & 1) << (31 - i));

        return value;
}

/*
 * Sets the bit in the multicast table corresponding to the hash value.
 * hw - Struct containing variables accessed by shared code
 * hash_value - Multicast address hash value
 */
static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
        u32 hash_bit, hash_reg;
        u32 mta;

        /*
         * The HASH Table is a register array of 2 32-bit registers.
         * It is treated like an array of 64 bits.  We want to set
         * bit BitArray[hash_value].  So we figure out what register
         * the bit is in, read it, OR in the new bit, then write
         * back the new value.  The register is selected by the most
         * significant bit of the hash value, and the bit within that
         * register by the next five bits (30:26).
         */
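        /* e.g. hash_value 0xC0000000 selects register 1, bit 16 */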
        hash_reg = (hash_value >> 31) & 0x1;
        hash_bit = (hash_value >> 26) & 0x1F;
        mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
        mta |= (1 << hash_bit);
        iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}

/*
 * Writes a value to a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to write
 * data - data to write to the PHY
 */
static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
{
        int i;
        u32 val;

        val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
            (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
            MDIO_SUP_PREAMBLE |
            MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
        iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
        ioread32(hw->hw_addr + REG_MDIO_CTRL);

        for (i = 0; i < MDIO_WAIT_TIMES; i++) {
                udelay(2);
                val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
                if (!(val & (MDIO_START | MDIO_BUSY)))
                        break;
        }

        if (!(val & (MDIO_START | MDIO_BUSY)))
                return 0;

        return ATLX_ERR_PHY;
}

/*
 * Force the L001's PHY out of its power-saving state (hardware bug):
 * at power-on the PHY always comes up in the power-saving state, in
 * which gigabit link is forbidden.
 * hw - Struct containing variables accessed by shared code
 */
static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
{
        s32 ret;
        ret = atl1_write_phy_reg(hw, 29, 0x0029);
        if (ret)
                return ret;
        return atl1_write_phy_reg(hw, 30, 0);
}

/*
 * Resets the PHY and makes all configuration take effect
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
 */
static s32 atl1_phy_reset(struct atl1_hw *hw)
{
        struct pci_dev *pdev = hw->back->pdev;
        struct atl1_adapter *adapter = hw->back;
        s32 ret_val;
        u16 phy_data;

        if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
            hw->media_type == MEDIA_TYPE_1000M_FULL)
                phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
        else {
                switch (hw->media_type) {
                case MEDIA_TYPE_100M_FULL:
                        phy_data =
                            MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
                            MII_CR_RESET;
                        break;
                case MEDIA_TYPE_100M_HALF:
                        phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
                        break;
                case MEDIA_TYPE_10M_FULL:
                        phy_data =
                            MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 |
                            MII_CR_RESET;
                        break;
                default:
                        /* MEDIA_TYPE_10M_HALF: */
                        phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
                        break;
                }
        }

        ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
        if (ret_val) {
                u32 val;
                int i;
                /* pcie serdes link may be down! */
                if (netif_msg_hw(adapter))
                        dev_dbg(&pdev->dev, "pcie phy link down\n");

                for (i = 0; i < 25; i++) {
                        msleep(1);
                        val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
                        if (!(val & (MDIO_START | MDIO_BUSY)))
                                break;
                }

                if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
                        if (netif_msg_hw(adapter))
                                dev_warn(&pdev->dev,
                                        "pcie link down at least 25ms\n");
                        return ret_val;
                }
        }
        return 0;
}

/*
 * Configures PHY autoneg and flow control advertisement settings
 * hw - Struct containing variables accessed by shared code
 */
static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
{
        s32 ret_val;
        s16 mii_autoneg_adv_reg;
        s16 mii_1000t_ctrl_reg;

        /* Read the MII Auto-Neg Advertisement Register (Address 4). */
        mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;

        /* Read the MII 1000Base-T Control Register (Address 9). */
        mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;

        /*
         * First we clear all the 10/100 mb speed bits in the Auto-Neg
         * Advertisement Register (Address 4) and the 1000 mb speed bits in
         * the 1000Base-T Control Register (Address 9).
         */
        mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
        mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;

        /*
         * Need to parse media_type and set up
         * the appropriate PHY registers.
         */
        switch (hw->media_type) {
        case MEDIA_TYPE_AUTO_SENSOR:
                mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
                                        MII_AR_10T_FD_CAPS |
                                        MII_AR_100TX_HD_CAPS |
                                        MII_AR_100TX_FD_CAPS);
                mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
                break;

        case MEDIA_TYPE_1000M_FULL:
                mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
                break;

        case MEDIA_TYPE_100M_FULL:
                mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
                break;

        case MEDIA_TYPE_100M_HALF:
                mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
                break;

        case MEDIA_TYPE_10M_FULL:
                mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
                break;

        default:
                mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
                break;
        }

        /* flow control fixed to enable all */
        mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);

        hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
        hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;

        ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
        if (ret_val)
                return ret_val;

        ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
        if (ret_val)
                return ret_val;

        return 0;
}

/*
 * Configures link settings.
 * hw - Struct containing variables accessed by shared code
 * Assumes the hardware has previously been reset and the
 * transmitter and receiver are not enabled.
 */
static s32 atl1_setup_link(struct atl1_hw *hw)
{
        struct pci_dev *pdev = hw->back->pdev;
        struct atl1_adapter *adapter = hw->back;
        s32 ret_val;

        /*
         * Options:
         * PHY will advertise value(s) parsed from
         * autoneg_advertised and fc.
         * Whatever the autoneg setting, we will not wait for the
         * link result.
         */
        ret_val = atl1_phy_setup_autoneg_adv(hw);
        if (ret_val) {
                if (netif_msg_link(adapter))
                        dev_dbg(&pdev->dev,
                                "error setting up autonegotiation\n");
                return ret_val;
        }
        /* SW reset, re-enable auto-negotiation if needed */
        ret_val = atl1_phy_reset(hw);
        if (ret_val) {
                if (netif_msg_link(adapter))
                        dev_dbg(&pdev->dev, "error resetting phy\n");
                return ret_val;
        }
        hw->phy_configured = true;
        return ret_val;
}

static void atl1_init_flash_opcode(struct atl1_hw *hw)
{
        if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
                /* Atmel */
                hw->flash_vendor = 0;

        /* Init OP table */
        iowrite8(flash_table[hw->flash_vendor].cmd_program,
                hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
        iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
                hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
        iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
                hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
        iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
                hw->hw_addr + REG_SPI_FLASH_OP_RDID);
        iowrite8(flash_table[hw->flash_vendor].cmd_wren,
                hw->hw_addr + REG_SPI_FLASH_OP_WREN);
        iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
                hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
        iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
                hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
        iowrite8(flash_table[hw->flash_vendor].cmd_read,
                hw->hw_addr + REG_SPI_FLASH_OP_READ);
}

/*
 * Performs basic configuration of the adapter.
 * hw - Struct containing variables accessed by shared code
 * Assumes that the controller has previously been reset and is in a
 * post-reset uninitialized state.  Initializes the multicast table
 * and calls routines to set up the link.
 * Leaves the transmit and receive units disabled and uninitialized.
 */
static s32 atl1_init_hw(struct atl1_hw *hw)
{
        u32 ret_val = 0;

        /* Zero out the Multicast HASH table */
        iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
        /* clear the old settings from the multicast hash table */
        iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));

        atl1_init_flash_opcode(hw);

        if (!hw->phy_configured) {
                /* enable GPHY LinkChange Interrupt */
                ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
                if (ret_val)
                        return ret_val;
                /* make PHY out of power-saving state */
                ret_val = atl1_phy_leave_power_saving(hw);
                if (ret_val)
                        return ret_val;
                /* Call a subroutine to configure the link */
                ret_val = atl1_setup_link(hw);
        }
        return ret_val;
}

/*
 * Detects the current speed and duplex settings of the hardware.
 * hw - Struct containing variables accessed by shared code
 * speed - Speed of the connection
 * duplex - Duplex setting of the connection
 */
static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
{
        struct pci_dev *pdev = hw->back->pdev;
        struct atl1_adapter *adapter = hw->back;
        s32 ret_val;
        u16 phy_data;

        /* Read PHY Specific Status Register (17) */
        ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
        if (ret_val)
                return ret_val;

        if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
                return ATLX_ERR_PHY_RES;

        switch (phy_data & MII_ATLX_PSSR_SPEED) {
        case MII_ATLX_PSSR_1000MBS:
                *speed = SPEED_1000;
                break;
        case MII_ATLX_PSSR_100MBS:
                *speed = SPEED_100;
                break;
        case MII_ATLX_PSSR_10MBS:
                *speed = SPEED_10;
                break;
        default:
                if (netif_msg_hw(adapter))
                        dev_dbg(&pdev->dev, "error getting speed\n");
                return ATLX_ERR_PHY_SPEED;
        }
        if (phy_data & MII_ATLX_PSSR_DPLX)
                *duplex = FULL_DUPLEX;
        else
                *duplex = HALF_DUPLEX;

        return 0;
}

static void atl1_set_mac_addr(struct atl1_hw *hw)
{
        u32 value;
        /*
         * 00-0B-6A-F6-00-DC
         * 0:  6AF600DC   1: 000B
         * low dword
         */
        value = (((u32) hw->mac_addr[2]) << 24) |
            (((u32) hw->mac_addr[3]) << 16) |
            (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
        iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
        /* high dword */
        value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
        iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}

/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

        adapter->wol = 0;
        device_set_wakeup_enable(&adapter->pdev->dev, false);
        adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
        adapter->ict = 50000;           /* 100ms */
        adapter->link_speed = SPEED_0;  /* hardware init */
        adapter->link_duplex = FULL_DUPLEX;

        hw->phy_configured = false;
        hw->preamble_len = 7;
        hw->ipgt = 0x60;
        hw->min_ifg = 0x50;
        hw->ipgr1 = 0x40;
        hw->ipgr2 = 0x60;
        hw->max_retry = 0xf;
        hw->lcol = 0x37;
        hw->jam_ipg = 7;
        hw->rfd_burst = 8;
        hw->rrd_burst = 8;
        hw->rfd_fetch_gap = 1;
        hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
        hw->rx_jumbo_lkah = 1;
        hw->rrd_ret_timer = 16;
        hw->tpd_burst = 4;
        hw->tpd_fetch_th = 16;
        hw->txf_burst = 0x100;
        hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
        hw->tpd_fetch_gap = 1;
        hw->rcb_value = atl1_rcb_64;
        hw->dma_ord = atl1_dma_ord_enh;
        hw->dmar_block = atl1_dma_req_256;
        hw->dmaw_block = atl1_dma_req_256;
        hw->cmb_rrd = 4;
        hw->cmb_tpd = 4;
        hw->cmb_rx_timer = 1;   /* about 2us */
        hw->cmb_tx_timer = 1;   /* about 2us */
        hw->smb_timer = 100000; /* about 200ms */

        spin_lock_init(&adapter->lock);
        spin_lock_init(&adapter->mb_lock);

        return 0;
}

static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        u16 result;

        atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);

        return result;
}

static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
        int val)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);

        atl1_write_phy_reg(&adapter->hw, reg_num, val);
}

static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
        int retval;

        if (!netif_running(netdev))
                return -EINVAL;

        spin_lock_irqsave(&adapter->lock, flags);
        retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
        spin_unlock_irqrestore(&adapter->lock, flags);

        return retval;
}

/*
 * atl1_setup_ring_resources - allocate Tx / Rx descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_ring_header *ring_header = &adapter->ring_header;
        struct pci_dev *pdev = adapter->pdev;
        int size;
        u8 offset = 0;

        size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
        tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
        if (unlikely(!tpd_ring->buffer_info)) {
                if (netif_msg_drv(adapter))
                        dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
                                size);
                goto err_nomem;
        }
        rfd_ring->buffer_info =
                (tpd_ring->buffer_info + tpd_ring->count);

        /*
         * real ring DMA buffer
         * each of the five rings/blocks (TPD, RFD, RRD, CMB, SMB) may need
         * up to 8 bytes for alignment, hence the additional 40 bytes tacked
         * onto the end.
         */
        ring_header->size = size =
                sizeof(struct tx_packet_desc) * tpd_ring->count
                + sizeof(struct rx_free_desc) * rfd_ring->count
                + sizeof(struct rx_return_desc) * rrd_ring->count
                + sizeof(struct coals_msg_block)
                + sizeof(struct stats_msg_block)
                + 40;

        ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
                &ring_header->dma);
        if (unlikely(!ring_header->desc)) {
                if (netif_msg_drv(adapter))
                        dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
                goto err_nomem;
        }

        memset(ring_header->desc, 0, ring_header->size);

        /* init TPD ring */
        tpd_ring->dma = ring_header->dma;
        offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
        tpd_ring->dma += offset;
        tpd_ring->desc = (u8 *) ring_header->desc + offset;
        tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

        /* init RFD ring */
        rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
        offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
        rfd_ring->dma += offset;
        rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
        rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;

        /* init RRD ring */
        rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
        offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
        rrd_ring->dma += offset;
        rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
        rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;

        /* init CMB */
        adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
        offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
        adapter->cmb.dma += offset;
        adapter->cmb.cmb = (struct coals_msg_block *)
                ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

        /* init SMB */
        adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
        offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
        adapter->smb.dma += offset;
        adapter->smb.smb = (struct stats_msg_block *)
                ((u8 *) adapter->cmb.cmb +
                (sizeof(struct coals_msg_block) + offset));

        return 0;

err_nomem:
        kfree(tpd_ring->buffer_info);
        return -ENOMEM;
}

static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

        atomic_set(&tpd_ring->next_to_use, 0);
        atomic_set(&tpd_ring->next_to_clean, 0);

        rfd_ring->next_to_clean = 0;
        atomic_set(&rfd_ring->next_to_use, 0);

        rrd_ring->next_to_use = 0;
        atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rfd_ring->count; i++) {
                buffer_info = &rfd_ring->buffer_info[i];
                if (buffer_info->dma) {
                        pci_unmap_page(pdev, buffer_info->dma,
                                buffer_info->length, PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
                }
                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct atl1_buffer) * rfd_ring->count;
        memset(rfd_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rfd_ring->desc, 0, rfd_ring->size);

        rfd_ring->next_to_clean = 0;
        atomic_set(&rfd_ring->next_to_use, 0);

        rrd_ring->next_to_use = 0;
        atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tpd_ring->count; i++) {
                buffer_info = &tpd_ring->buffer_info[i];
                if (buffer_info->dma) {
                        pci_unmap_page(pdev, buffer_info->dma,
                                buffer_info->length, PCI_DMA_TODEVICE);
                        buffer_info->dma = 0;
                }
        }

        for (i = 0; i < tpd_ring->count; i++) {
                buffer_info = &tpd_ring->buffer_info[i];
                if (buffer_info->skb) {
                        dev_kfree_skb_any(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct atl1_buffer) * tpd_ring->count;
        memset(tpd_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(tpd_ring->desc, 0, tpd_ring->size);

        atomic_set(&tpd_ring->next_to_use, 0);
        atomic_set(&tpd_ring->next_to_clean, 0);
}

/*
 * atl1_free_ring_resources - Free Tx / Rx descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit and receive software resources
 */
static void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_ring_header *ring_header = &adapter->ring_header;

        atl1_clean_tx_ring(adapter);
        atl1_clean_rx_ring(adapter);

        kfree(tpd_ring->buffer_info);
        pci_free_consistent(pdev, ring_header->size, ring_header->desc,
                ring_header->dma);

        tpd_ring->buffer_info = NULL;
        tpd_ring->desc = NULL;
        tpd_ring->dma = 0;

        rfd_ring->buffer_info = NULL;
        rfd_ring->desc = NULL;
        rfd_ring->dma = 0;

        rrd_ring->desc = NULL;
        rrd_ring->dma = 0;

        adapter->cmb.dma = 0;
        adapter->cmb.cmb = NULL;

        adapter->smb.dma = 0;
        adapter->smb.smb = NULL;
}

static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
        u32 value;
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        /* Config MAC CTRL Register */
        value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
        /* duplex */
        if (FULL_DUPLEX == adapter->link_duplex)
                value |= MAC_CTRL_DUPLX;
        /* speed */
        value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
                MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
                MAC_CTRL_SPEED_SHIFT);
        /* flow control */
        value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
        /* PAD & CRC */
        value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
        /* preamble length */
        value |= (((u32) adapter->hw.preamble_len
                & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
        /* vlan */
        __atlx_vlan_mode(netdev->features, &value);
        /* rx checksum
        if (adapter->rx_csum)
                value |= MAC_CTRL_RX_CHKSUM_EN;
        */
        /* filter mode */
        value |= MAC_CTRL_BC_EN;
        if (netdev->flags & IFF_PROMISC)
                value |= MAC_CTRL_PROMIS_EN;
        else if (netdev->flags & IFF_ALLMULTI)
                value |= MAC_CTRL_MC_ALL_EN;
        /* value |= MAC_CTRL_LOOPBACK; */
        iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}

static u32 atl1_check_link(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 ret_val;
        u16 speed, duplex, phy_data;
        int reconfig = 0;

        /* MII_BMSR must be read twice; link status is latched */
        atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
        atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
        if (!(phy_data & BMSR_LSTATUS)) {
                /* link down */
                if (netif_carrier_ok(netdev)) {
                        /* old link state: Up */
                        if (netif_msg_link(adapter))
                                dev_info(&adapter->pdev->dev, "link is down\n");
                        adapter->link_speed = SPEED_0;
                        netif_carrier_off(netdev);
                }
                return 0;
        }

        /* Link Up */
        ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
        if (ret_val)
                return ret_val;

        switch (hw->media_type) {
        case MEDIA_TYPE_1000M_FULL:
                if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_100M_FULL:
                if (speed != SPEED_100 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_100M_HALF:
                if (speed != SPEED_100 || duplex != HALF_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_10M_FULL:
                if (speed != SPEED_10 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_10M_HALF:
                if (speed != SPEED_10 || duplex != HALF_DUPLEX)
                        reconfig = 1;
                break;
        }

        /* link result is our setting */
        if (!reconfig) {
                if (adapter->link_speed != speed ||
                    adapter->link_duplex != duplex) {
                        adapter->link_speed = speed;
                        adapter->link_duplex = duplex;
                        atl1_setup_mac_ctrl(adapter);
                        if (netif_msg_link(adapter))
                                dev_info(&adapter->pdev->dev,
                                        "%s link is up %d Mbps %s\n",
                                        netdev->name, adapter->link_speed,
                                        adapter->link_duplex == FULL_DUPLEX ?
                                        "full duplex" : "half duplex");
                }
                if (!netif_carrier_ok(netdev)) {
                        /* Link down -> Up */
                        netif_carrier_on(netdev);
                }
                return 0;
        }

        /* change original link status */
        if (netif_carrier_ok(netdev)) {
                adapter->link_speed = SPEED_0;
                netif_carrier_off(netdev);
                netif_stop_queue(netdev);
        }

        if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
            hw->media_type != MEDIA_TYPE_1000M_FULL) {
                switch (hw->media_type) {
                case MEDIA_TYPE_100M_FULL:
                        phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
                                MII_CR_RESET;
                        break;
                case MEDIA_TYPE_100M_HALF:
                        phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
                        break;
                case MEDIA_TYPE_10M_FULL:
                        phy_data =
                                MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 |
                                MII_CR_RESET;
                        break;
                default:
                        /* MEDIA_TYPE_10M_HALF: */
                        phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
                        break;
                }
                atl1_write_phy_reg(hw, MII_BMCR, phy_data);
                return 0;
        }

        /* auto-neg, insert timer to re-config phy */
        if (!adapter->phy_timer_pending) {
                adapter->phy_timer_pending = true;
                mod_timer(&adapter->phy_config_timer,
                        round_jiffies(jiffies + 3 * HZ));
        }

        return 0;
}

static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
        u32 hi, lo, value;

        /* RFD Flow Control */
        value = adapter->rfd_ring.count;
        hi = value / 16;
        if (hi < 2)
                hi = 2;
        lo = value * 7 / 8;

        value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

        /* RRD Flow Control */
        value = adapter->rrd_ring.count;
        lo = value / 16;
        hi = value * 7 / 8;
        if (lo < 2)
                lo = 2;
        value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

static void set_flow_ctrl_new(struct atl1_hw *hw)
{
        u32 hi, lo, value;

        /* RXF Flow Control */
        value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
        lo = value / 16;
        if (lo < 192)
                lo = 192;
        hi = value * 7 / 8;
        if (hi < lo)
                hi = lo + 16;
        value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

        /* RRD Flow Control */
        value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
        lo = value / 8;
        hi = value * 7 / 8;
        if (lo < 2)
                lo = 2;
        if (hi < lo)
                hi = lo + 3;
        value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

/*
 * atl1_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx / Rx unit of the MAC after a reset.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        u32 value;

        /* clear interrupt status */
        iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

        /* set MAC Address */
        value = (((u32) hw->mac_addr[2]) << 24) |
                (((u32) hw->mac_addr[3]) << 16) |
                (((u32) hw->mac_addr[4]) << 8) |
                (((u32) hw->mac_addr[5]));
        iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
        value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
        iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

        /* tx / rx ring */

        /* HI base address */
        iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
                hw->hw_addr + REG_DESC_BASE_ADDR_HI);
        /* LO base address */
        iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_RFD_ADDR_LO);
        iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_RRD_ADDR_LO);
        iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_TPD_ADDR_LO);
        iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_CMB_ADDR_LO);
        iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_SMB_ADDR_LO);

        /* element count */
        value = adapter->rrd_ring.count;
        value <<= 16;
        value += adapter->rfd_ring.count;
        iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
        iowrite32(adapter->tpd_ring.count, hw->hw_addr +
                REG_DESC_TPD_RING_SIZE);

        /* Load Ptr */
        iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

        /* config Mailbox */
        value = ((atomic_read(&adapter->tpd_ring.next_to_use)
                & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
                ((atomic_read(&adapter->rrd_ring.next_to_clean)
                & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
                ((atomic_read(&adapter->rfd_ring.next_to_use)
                & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAILBOX);

        /* config IPG/IFG */
        value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
                << MAC_IPG_IFG_IPGT_SHIFT) |
                (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
                << MAC_IPG_IFG_MIFG_SHIFT) |
                (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
                << MAC_IPG_IFG_IPGR1_SHIFT) |
                (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
                << MAC_IPG_IFG_IPGR2_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

        /* config Half-Duplex Control */
        value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
                (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
                << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
                MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
                (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
                (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
                << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

        /* set Interrupt Moderator Timer */
        iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
        ioread16(hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);

        /* set Interrupt Clear Timer */
        iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

        /* set max frame size hw will accept */
        iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

        /* jumbo size & rrd retirement timer */
        value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
                << RXQ_JMBOSZ_TH_SHIFT) |
                (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
                << RXQ_JMBO_LKAH_SHIFT) |
                (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
                << RXQ_RRD_TIMER_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

        /* Flow Control */
        switch (hw->dev_rev) {
        case 0x8001:
        case 0x9001:
        case 0x9002:
        case 0x9003:
                set_flow_ctrl_old(adapter);
                break;
        default:
                set_flow_ctrl_new(hw);
                break;
        }

        /* config TXQ */
        value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
                << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
                (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
                << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
                (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
                << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
                TXQ_CTRL_EN;
        iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

        /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
        value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
                << TX_JUMBO_TASK_TH_SHIFT) |
                (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
                << TX_TPD_MIN_IPG_SHIFT);
        iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

        /* config RXQ */
        value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
                << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
                (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
                << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
                (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
                << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
                RXQ_CTRL_EN;
        iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

        /* config DMA Engine */
        value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
                << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
                ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
                << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
                DMA_CTRL_DMAW_EN;
        value |= (u32) hw->dma_ord;
        if (atl1_rcb_128 == hw->rcb_value)
                value |= DMA_CTRL_RCB_VALUE;
        iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

        /* config CMB / SMB */
        value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
                hw->cmb_tpd : adapter->tpd_ring.count;
        value <<= 16;
        value |= hw->cmb_rrd;
        iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
        value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
        iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
        iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

        /* --- enable CMB / SMB */
        value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
        iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

        value = ioread32(adapter->hw.hw_addr + REG_ISR);
        if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
                value = 1;      /* config failed */
        else
                value = 0;

        /* clear all interrupt status */
        iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
        iowrite32(0, adapter->hw.hw_addr + REG_ISR);
        return value;
}

/*
 * atl1_pcie_patch - Patch for PCIE module
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
        u32 value;

        /* much vendor magic here */
        value = 0x6500;
        iowrite32(value, adapter->hw.hw_addr + 0x12FC);
        /* pcie flow control mode change */
        value = ioread32(adapter->hw.hw_addr + 0x1008);
        value |= 0x8000;
        iowrite32(value, adapter->hw.hw_addr + 0x1008);
}

/*
 * After ACPI resume, some VIA motherboards are left with the Interrupt
 * Disable bit (0x400) set in the PCI Command register.  This function
 * clears that bit so INTx interrupts work again.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
        unsigned long value;

        value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
        if (value & PCI_COMMAND_INTX_DISABLE)
                value &= ~PCI_COMMAND_INTX_DISABLE;
        iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
}

static void atl1_inc_smb(struct atl1_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct stats_msg_block *smb = adapter->smb.smb;

        /* Fill out the OS statistics structure */
        adapter->soft_stats.rx_packets += smb->rx_ok;
        adapter->soft_stats.tx_packets += smb->tx_ok;
        adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
        adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
        adapter->soft_stats.multicast += smb->rx_mcast;
        adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
                smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

        /* Rx Errors */
        adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
                smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
                smb->rx_rrd_ov + smb->rx_align_err);
        adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
        adapter->soft_stats.rx_length_errors += smb->rx_len_err;
        adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
        adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
        adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
                smb->rx_rxf_ov);

        adapter->soft_stats.rx_pause += smb->rx_pause;
        adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
        adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

        /* Tx Errors */
        adapter->soft_stats.tx_errors += (smb->tx_late_col +
                smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
        adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
        adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
        adapter->soft_stats.tx_window_errors += smb->tx_late_col;

        adapter->soft_stats.excecol += smb->tx_abort_col;
        adapter->soft_stats.deffer += smb->tx_defer;
        adapter->soft_stats.scc += smb->tx_1_col;
        adapter->soft_stats.mcc += smb->tx_2_col;
        adapter->soft_stats.latecol += smb->tx_late_col;
        adapter->soft_stats.tx_underun += smb->tx_underrun;
        adapter->soft_stats.tx_trunc += smb->tx_trunc;
        adapter->soft_stats.tx_pause += smb->tx_pause;

        netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
        netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
        netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
        netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
        netdev->stats.multicast = adapter->soft_stats.multicast;
        netdev->stats.collisions = adapter->soft_stats.collisions;
        netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
        netdev->stats.rx_over_errors =
                adapter->soft_stats.rx_missed_errors;
        netdev->stats.rx_length_errors =
                adapter->soft_stats.rx_length_errors;
        netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
        netdev->stats.rx_frame_errors =
                adapter->soft_stats.rx_frame_errors;
        netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
        netdev->stats.rx_missed_errors =
                adapter->soft_stats.rx_missed_errors;
        netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
        netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
        netdev->stats.tx_aborted_errors =
                adapter->soft_stats.tx_aborted_errors;
        netdev->stats.tx_window_errors =
                adapter->soft_stats.tx_window_errors;
        netdev->stats.tx_carrier_errors =
                adapter->soft_stats.tx_carrier_errors;
}

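/*
 * The mailbox register packs all three software ring indices (RFD
 * producer, RRD consumer, TPD producer) into a single 32-bit write,
 * so the hardware always sees a consistent snapshot of the rings.
 */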
static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
        unsigned long flags;
        u32 tpd_next_to_use;
        u32 rfd_next_to_use;
        u32 rrd_next_to_clean;
        u32 value;

        spin_lock_irqsave(&adapter->mb_lock, flags);

        tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
        rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
        rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

        value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
                MB_RFD_PROD_INDX_SHIFT) |
                ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
                MB_RRD_CONS_INDX_SHIFT) |
                ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
                MB_TPD_PROD_INDX_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

        spin_unlock_irqrestore(&adapter->mb_lock, flags);
}

static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd, u16 offset)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

        while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
                rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
                if (++rfd_ring->next_to_clean == rfd_ring->count) {
                        rfd_ring->next_to_clean = 0;
                }
        }
}

static void atl1_update_rfd_index(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd)
{
        u16 num_buf;

        num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
                adapter->rx_buffer_len;
        if (rrd->num_buf == num_buf)
                /* clean alloc flag for bad rrd */
                atl1_clean_alloc_flag(adapter, rrd, num_buf);
}

static void atl1_rx_checksum(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd, struct sk_buff *skb)
{
        struct pci_dev *pdev = adapter->pdev;

        /*
         * The L1 hardware contains a bug that erroneously sets the
         * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
         * fragmented IP packet is received, even though the packet
         * is perfectly valid and its checksum is correct.  There's
         * no way to distinguish between one of these good packets
         * and a packet that actually contains a TCP/UDP checksum
         * error, so all we can do is allow it to be handed up to
         * the higher layers and let it be sorted out there.
         */

        skb_checksum_none_assert(skb);

        if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
                                        ERR_FLAG_CODE | ERR_FLAG_OV)) {
                        adapter->hw_csum_err++;
                        if (netif_msg_rx_err(adapter))
                                dev_printk(KERN_DEBUG, &pdev->dev,
                                        "rx checksum error\n");
                        return;
                }
        }

        /* not IPv4 */
        if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
                /* checksum is invalid, but it's not an IPv4 pkt, so ok */
                return;

        /* IPv4 packet */
        if (likely(!(rrd->err_flg &
                (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                adapter->hw_csum_good++;
                return;
        }
}

/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct pci_dev *pdev = adapter->pdev;
        struct page *page;
        unsigned long offset;
        struct atl1_buffer *buffer_info, *next_info;
        struct sk_buff *skb;
        u16 num_alloc = 0;
        u16 rfd_next_to_use, next_next;
        struct rx_free_desc *rfd_desc;

        next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
        if (++next_next == rfd_ring->count)
                next_next = 0;
        buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
        next_info = &rfd_ring->buffer_info[next_next];
        while (!buffer_info->alloced && !next_info->alloced) {
                if (buffer_info->skb) {
                        buffer_info->alloced = 1;
                        goto next;
                }

                rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

                skb = netdev_alloc_skb_ip_align(adapter->netdev,
                        adapter->rx_buffer_len);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->netdev->stats.rx_dropped++;
                        break;
                }

                buffer_info->alloced = 1;
                buffer_info->skb = skb;
                buffer_info->length = (u16) adapter->rx_buffer_len;
                page = virt_to_page(skb->data);
                offset = (unsigned long)skb->data & ~PAGE_MASK;
                buffer_info->dma = pci_map_page(pdev, page, offset,
                        adapter->rx_buffer_len,
                        PCI_DMA_FROMDEVICE);
                rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
                rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
                rfd_desc->coalese = 0;

next:
                rfd_next_to_use = next_next;
                if (unlikely(++next_next == rfd_ring->count))
                        next_next = 0;

                buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
                next_info = &rfd_ring->buffer_info[next_next];
                num_alloc++;
        }

        if (num_alloc) {
                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
        }
        return num_alloc;
}

static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
{
        int i, count;
        u16 length;
        u16 rrd_next_to_clean;
        u32 value;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_buffer *buffer_info;
        struct rx_return_desc *rrd;
        struct sk_buff *skb;

        count = 0;

        rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

        while (count < budget) {
                rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
                i = 1;
                if (likely(rrd->xsz.valid)) {   /* packet valid */
chk_rrd:
                        /* check rrd status */
                        if (likely(rrd->num_buf == 1))
                                goto rrd_ok;
                        else if (netif_msg_rx_err(adapter)) {
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "unexpected RRD buffer count\n");
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "rx_buf_len = %d\n",
                                        adapter->rx_buffer_len);
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "RRD num_buf = %d\n",
                                        rrd->num_buf);
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "RRD pkt_len = %d\n",
                                        rrd->xsz.xsum_sz.pkt_size);
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "RRD pkt_flg = 0x%08X\n",
                                        rrd->pkt_flg);
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "RRD err_flg = 0x%08X\n",
                                        rrd->err_flg);
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "RRD vlan_tag = 0x%08X\n",
                                        rrd->vlan_tag);
                        }

                        /* rrd seems to be bad */
                        if (unlikely(i-- > 0)) {
                                /* rrd may not be DMAed completely */
                                udelay(1);
                                goto chk_rrd;
                        }
                        /* bad rrd */
                        if (netif_msg_rx_err(adapter))
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "bad RRD\n");
                        /* see if update RFD index */
                        if (rrd->num_buf > 1)
                                atl1_update_rfd_index(adapter, rrd);

                        /* update rrd */
                        rrd->xsz.valid = 0;
                        if (++rrd_next_to_clean == rrd_ring->count)
                                rrd_next_to_clean = 0;
                        count++;
                        continue;
                } else {        /* current rrd has not been updated yet */

                        break;
                }
rrd_ok:
                /* clean alloc flag for bad rrd */
                atl1_clean_alloc_flag(adapter, rrd, 0);

                buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
                if (++rfd_ring->next_to_clean == rfd_ring->count)
                        rfd_ring->next_to_clean = 0;

                /* update rrd next to clean */
                if (++rrd_next_to_clean == rrd_ring->count)
                        rrd_next_to_clean = 0;
                count++;

                if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                        if (!(rrd->err_flg &
                                (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
                                | ERR_FLAG_LEN))) {
                                /* packet error, don't need upstream */
                                buffer_info->alloced = 0;
                                rrd->xsz.valid = 0;
                                continue;
                        }
                }

                /* Good Receive */
                pci_unmap_page(adapter->pdev, buffer_info->dma,
                        buffer_info->length, PCI_DMA_FROMDEVICE);
                buffer_info->dma = 0;
                skb = buffer_info->skb;
                length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

                skb_put(skb, length - ETH_FCS_LEN);

                /* Receive Checksum Offload */
                atl1_rx_checksum(adapter, rrd, skb);
                skb->protocol = eth_type_trans(skb, adapter->netdev);

                if (rrd->pkt_flg & PACKET_FLAG_VLAN_INS) {
2023  u16 vlan_tag = (rrd->vlan_tag >> 4) |
2024  ((rrd->vlan_tag & 7) << 13) |
2025  ((rrd->vlan_tag & 8) << 9);
2026 
2027  __vlan_hwaccel_put_tag(skb, vlan_tag);
2028  }
2029  netif_receive_skb(skb);
2030 
2031  /* let protocol layer free skb */
2032  buffer_info->skb = NULL;
2033  buffer_info->alloced = 0;
2034  rrd->xsz.valid = 0;
2035  }
2036 
2037  atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
2038 
2039  atl1_alloc_rx_buffers(adapter);
2040 
2041  /* update mailbox */
2042  if (count) {
2043  u32 tpd_next_to_use;
2044  u32 rfd_next_to_use;
2045 
2046  spin_lock(&adapter->mb_lock);
2047 
2048  tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
2049  rfd_next_to_use =
2050  atomic_read(&adapter->rfd_ring.next_to_use);
2051  rrd_next_to_clean =
2052  atomic_read(&adapter->rrd_ring.next_to_clean);
2053  value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
2054  MB_RFD_PROD_INDX_SHIFT) |
2055  ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
2056  MB_RRD_CONS_INDX_SHIFT) |
2057  ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
2058  MB_TPD_PROD_INDX_SHIFT);
2059  iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
2060  spin_unlock(&adapter->mb_lock);
2061  }
2062 
2063  return count;
2064 }
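/*
 * Illustrative note (field widths assumed; the authoritative MB_*
 * mask/shift values live in atl1.h): the mailbox write above packs all
 * three ring indices into one 32-bit register, so a single MMIO write
 * refreshes them. With 11-bit fields at bit offsets 0, 11 and 22, an
 * RFD producer of 100, an RRD consumer of 50 and a TPD producer of 7
 * would pack as
 *
 *	value = (100 << 0) | (50 << 11) | (7 << 22)
 *	      = 0x64 | 0x19000 | 0x1c00000 = 0x1c19064
 */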
2065 
2066 static int atl1_intr_tx(struct atl1_adapter *adapter)
2067 {
2068  struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
2069  struct atl1_buffer *buffer_info;
2070  u16 sw_tpd_next_to_clean;
2071  u16 cmb_tpd_next_to_clean;
2072  int count = 0;
2073 
2074  sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
2075  cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
2076 
2077  while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
2078  buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
2079  if (buffer_info->dma) {
2080  pci_unmap_page(adapter->pdev, buffer_info->dma,
2081  buffer_info->length, PCI_DMA_TODEVICE);
2082  buffer_info->dma = 0;
2083  }
2084 
2085  if (buffer_info->skb) {
2086  dev_kfree_skb_irq(buffer_info->skb);
2087  buffer_info->skb = NULL;
2088  }
2089 
2090  if (++sw_tpd_next_to_clean == tpd_ring->count)
2091  sw_tpd_next_to_clean = 0;
2092 
2093  count++;
2094  }
2095  atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
2096 
2097  if (netif_queue_stopped(adapter->netdev) &&
2098  netif_carrier_ok(adapter->netdev))
2099  netif_wake_queue(adapter->netdev);
2100 
2101  return count;
2102 }
2103 
2104 static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
2105 {
2106  u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
2107  u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
2108  return (next_to_clean > next_to_use) ?
2109  next_to_clean - next_to_use - 1 :
2110  tpd_ring->count + next_to_clean - next_to_use - 1;
2111 }
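/*
 * Worked example: with count = 256, next_to_clean = 10 and
 * next_to_use = 250, the clean index is behind the use index, so the
 * second branch applies and 256 + 10 - 250 - 1 = 15 descriptors are
 * free. The "- 1" keeps one slot permanently unused so that a
 * completely full ring is distinguishable from an empty one.
 */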
2112 
2113 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
2114  struct tx_packet_desc *ptpd)
2115 {
2116  u8 hdr_len, ip_off;
2117  u32 real_len;
2118  int err;
2119 
2120  if (skb_shinfo(skb)->gso_size) {
2121  if (skb_header_cloned(skb)) {
2122  err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2123  if (unlikely(err))
2124  return -1;
2125  }
2126 
2127  if (skb->protocol == htons(ETH_P_IP)) {
2128  struct iphdr *iph = ip_hdr(skb);
2129 
2130  real_len = (((unsigned char *)iph - skb->data) +
2131  ntohs(iph->tot_len));
2132  if (real_len < skb->len)
2133  pskb_trim(skb, real_len);
2134  hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
2135  if (skb->len == hdr_len) {
2136  iph->check = 0;
2137  tcp_hdr(skb)->check =
2138  ~csum_tcpudp_magic(iph->saddr,
2139  iph->daddr, tcp_hdrlen(skb),
2140  IPPROTO_TCP, 0);
2141  ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
2142  TPD_IPHL_SHIFT;
2143  ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
2144  TPD_TCPHDRLEN_MASK) <<
2145  TPD_TCPHDRLEN_SHIFT;
2146  ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
2147  ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
2148  return 1;
2149  }
2150 
2151  iph->check = 0;
2152  tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2153  iph->daddr, 0, IPPROTO_TCP, 0);
2154  ip_off = (unsigned char *)iph -
2155  (unsigned char *) skb_network_header(skb);
2156  if (ip_off == 8) /* 802.3-SNAP frame */
2157  ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
2158  else if (ip_off != 0)
2159  return -2;
2160 
2161  ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
2162  TPD_IPHL_SHIFT;
2163  ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
2164  TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
2165  ptpd->word3 |= (skb_shinfo(skb)->gso_size &
2166  TPD_MSS_MASK) << TPD_MSS_SHIFT;
2167  ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
2168  return 3;
2169  }
2170  }
2171  return false;
2172 }
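/*
 * Return-value convention, as used by atl1_xmit_frame() below: a
 * negative value tells the caller to drop the skb, a positive value
 * means TSO descriptor bits were set up in ptpd, and 0 falls through
 * to the plain checksum-offload path in atl1_tx_csum().
 */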
2173 
2174 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
2175  struct tx_packet_desc *ptpd)
2176 {
2177  u8 css, cso;
2178 
2179  if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2180  css = skb_checksum_start_offset(skb);
2181  cso = css + (u8) skb->csum_offset;
2182  if (unlikely(css & 0x1)) {
2183  /* L1 hardware requires an even number here */
2184  if (netif_msg_tx_err(adapter))
2185  dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2186  "payload offset not an even number\n");
2187  return -1;
2188  }
2189  ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
2190  TPD_PLOADOFFSET_SHIFT;
2191  ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
2192  TPD_CCSUMOFFSET_SHIFT;
2193  ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
2194  return true;
2195  }
2196  return 0;
2197 }
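/*
 * Example of the even-offset rule above: for an IPv4/TCP frame without
 * options, checksumming starts at the TCP header, so
 *	css = 14 (Ethernet header) + 20 (IPv4 header) = 34
 * and the TCP checksum field sits 16 bytes into that header, so
 *	cso = 34 + 16 = 50.
 * Both offsets are even, which satisfies the L1 hardware constraint.
 */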
2198 
2199 static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
2200  struct tx_packet_desc *ptpd)
2201 {
2202  struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
2203  struct atl1_buffer *buffer_info;
2204  u16 buf_len = skb->len;
2205  struct page *page;
2206  unsigned long offset;
2207  unsigned int nr_frags;
2208  unsigned int f;
2209  int retval;
2210  u16 next_to_use;
2211  u16 data_len;
2212  u8 hdr_len;
2213 
2214  buf_len -= skb->data_len;
2215  nr_frags = skb_shinfo(skb)->nr_frags;
2216  next_to_use = atomic_read(&tpd_ring->next_to_use);
2217  buffer_info = &tpd_ring->buffer_info[next_to_use];
2218  BUG_ON(buffer_info->skb);
2219  /* put skb in last TPD */
2220  buffer_info->skb = NULL;
2221 
2222  retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
2223  if (retval) {
2224  /* TSO */
2225  hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2226  buffer_info->length = hdr_len;
2227  page = virt_to_page(skb->data);
2228  offset = (unsigned long)skb->data & ~PAGE_MASK;
2229  buffer_info->dma = pci_map_page(adapter->pdev, page,
2230  offset, hdr_len,
2231  PCI_DMA_TODEVICE);
2232 
2233  if (++next_to_use == tpd_ring->count)
2234  next_to_use = 0;
2235 
2236  if (buf_len > hdr_len) {
2237  int i, nseg;
2238 
2239  data_len = buf_len - hdr_len;
2240  nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
2241  ATL1_MAX_TX_BUF_LEN;
2242  for (i = 0; i < nseg; i++) {
2243  buffer_info =
2244  &tpd_ring->buffer_info[next_to_use];
2245  buffer_info->skb = NULL;
2246  buffer_info->length =
2247  (ATL1_MAX_TX_BUF_LEN >=
2248  data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
2249  data_len -= buffer_info->length;
2250  page = virt_to_page(skb->data +
2251  (hdr_len + i * ATL1_MAX_TX_BUF_LEN));
2252  offset = (unsigned long)(skb->data +
2253  (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
2254  ~PAGE_MASK;
2255  buffer_info->dma = pci_map_page(adapter->pdev,
2256  page, offset, buffer_info->length,
2257  PCI_DMA_TODEVICE);
2258  if (++next_to_use == tpd_ring->count)
2259  next_to_use = 0;
2260  }
2261  }
2262  } else {
2263  /* not TSO */
2264  buffer_info->length = buf_len;
2265  page = virt_to_page(skb->data);
2266  offset = (unsigned long)skb->data & ~PAGE_MASK;
2267  buffer_info->dma = pci_map_page(adapter->pdev, page,
2268  offset, buf_len, PCI_DMA_TODEVICE);
2269  if (++next_to_use == tpd_ring->count)
2270  next_to_use = 0;
2271  }
2272 
2273  for (f = 0; f < nr_frags; f++) {
2274  const struct skb_frag_struct *frag;
2275  u16 i, nseg;
2276 
2277  frag = &skb_shinfo(skb)->frags[f];
2278  buf_len = skb_frag_size(frag);
2279 
2280  nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
2281  ATL1_MAX_TX_BUF_LEN;
2282  for (i = 0; i < nseg; i++) {
2283  buffer_info = &tpd_ring->buffer_info[next_to_use];
2284  BUG_ON(buffer_info->skb);
2285 
2286  buffer_info->skb = NULL;
2287  buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
2288  ATL1_MAX_TX_BUF_LEN : buf_len;
2289  buf_len -= buffer_info->length;
2290  buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
2291  frag, i * ATL1_MAX_TX_BUF_LEN,
2292  buffer_info->length, DMA_TO_DEVICE);
2293 
2294  if (++next_to_use == tpd_ring->count)
2295  next_to_use = 0;
2296  }
2297  }
2298 
2299  /* last tpd's buffer-info */
2300  buffer_info->skb = skb;
2301 }
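/*
 * Segmentation example (assuming ATL1_MAX_TX_BUF_LEN is 8192 bytes; the
 * authoritative value is defined in atl1.h): a 20000-byte linear TSO
 * payload needs
 *	nseg = (20000 + 8192 - 1) / 8192 = 3
 * TPDs, mapped as two 8192-byte segments plus one 3616-byte remainder.
 */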
2302 
2303 static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
2304  struct tx_packet_desc *ptpd)
2305 {
2306  struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
2307  struct atl1_buffer *buffer_info;
2308  struct tx_packet_desc *tpd;
2309  u16 j;
2310  u32 val;
2311  u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);
2312 
2313  for (j = 0; j < count; j++) {
2314  buffer_info = &tpd_ring->buffer_info[next_to_use];
2315  tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
2316  if (tpd != ptpd)
2317  memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
2318  tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2319  tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
2320  tpd->word2 |= (cpu_to_le16(buffer_info->length) &
2321  TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
2322 
2323  /*
2324  * if this is the first packet in a TSO chain, set
2325  * TPD_HDRFLAG, otherwise, clear it.
2326  */
2327  val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
2328  TPD_SEGMENT_EN_MASK;
2329  if (val) {
2330  if (!j)
2331  tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
2332  else
2333  tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
2334  }
2335 
2336  if (j == (count - 1))
2337  tpd->word3 |= 1 << TPD_EOP_SHIFT;
2338 
2339  if (++next_to_use == tpd_ring->count)
2340  next_to_use = 0;
2341  }
2342  /*
2343  * Force memory writes to complete before letting h/w
2344  * know there are new descriptors to fetch. (Only
2345  * applicable for weak-ordered memory model archs,
2346  * such as IA-64).
2347  */
2348  wmb();
2349 
2350  atomic_set(&tpd_ring->next_to_use, next_to_use);
2351 }
2352 
2353 static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2354  struct net_device *netdev)
2355 {
2356  struct atl1_adapter *adapter = netdev_priv(netdev);
2357  struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
2358  int len;
2359  int tso;
2360  int count = 1;
2361  int ret_val;
2362  struct tx_packet_desc *ptpd;
2363  u16 vlan_tag;
2364  unsigned int nr_frags = 0;
2365  unsigned int mss = 0;
2366  unsigned int f;
2367  unsigned int proto_hdr_len;
2368 
2369  len = skb_headlen(skb);
2370 
2371  if (unlikely(skb->len <= 0)) {
2372  dev_kfree_skb_any(skb);
2373  return NETDEV_TX_OK;
2374  }
2375 
2376  nr_frags = skb_shinfo(skb)->nr_frags;
2377  for (f = 0; f < nr_frags; f++) {
2378  unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
2379  count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
2380  ATL1_MAX_TX_BUF_LEN;
2381  }
2382 
2383  mss = skb_shinfo(skb)->gso_size;
2384  if (mss) {
2385  if (skb->protocol == htons(ETH_P_IP)) {
2386  proto_hdr_len = (skb_transport_offset(skb) +
2387  tcp_hdrlen(skb));
2388  if (unlikely(proto_hdr_len > len)) {
2389  dev_kfree_skb_any(skb);
2390  return NETDEV_TX_OK;
2391  }
2392  /* need additional TPD ? */
2393  if (proto_hdr_len != len)
2394  count += (len - proto_hdr_len +
2395  ATL1_MAX_TX_BUF_LEN - 1) /
2396  ATL1_MAX_TX_BUF_LEN;
2397  }
2398  }
2399 
2400  if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
2401  /* not enough descriptors */
2402  netif_stop_queue(netdev);
2403  if (netif_msg_tx_queued(adapter))
2404  dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2405  "tx busy\n");
2406  return NETDEV_TX_BUSY;
2407  }
2408 
2409  ptpd = ATL1_TPD_DESC(tpd_ring,
2410  (u16) atomic_read(&tpd_ring->next_to_use));
2411  memset(ptpd, 0, sizeof(struct tx_packet_desc));
2412 
2413  if (vlan_tx_tag_present(skb)) {
2414  vlan_tag = vlan_tx_tag_get(skb);
2415  vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
2416  ((vlan_tag >> 9) & 0x8);
2417  ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
2418  ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
2419  TPD_VLANTAG_SHIFT;
2420  }
2421 
2422  tso = atl1_tso(adapter, skb, ptpd);
2423  if (tso < 0) {
2424  dev_kfree_skb_any(skb);
2425  return NETDEV_TX_OK;
2426  }
2427 
2428  if (!tso) {
2429  ret_val = atl1_tx_csum(adapter, skb, ptpd);
2430  if (ret_val < 0) {
2431  dev_kfree_skb_any(skb);
2432  return NETDEV_TX_OK;
2433  }
2434  }
2435 
2436  atl1_tx_map(adapter, skb, ptpd);
2437  atl1_tx_queue(adapter, count, ptpd);
2438  atl1_update_mailbox(adapter);
2439  mmiowb();
2440  return NETDEV_TX_OK;
2441 }
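/*
 * Worked example of the VLAN tag swizzle above (u16 arithmetic, so the
 * left shift truncates to 16 bits): the hardware expects the 802.1Q TCI
 * re-packed as VID in bits 15:4, CFI in bit 3 and priority in bits 2:0.
 * For TCI = 0xa064 (priority 5, CFI 0, VID 0x064):
 *
 *	tx: ((0xa064 << 4) & 0xffff) | (0xa064 >> 13)
 *	    | ((0xa064 >> 9) & 0x8) = 0x0645
 *	rx: (0x0645 >> 4) | ((0x0645 & 7) << 13)
 *	    | ((0x0645 & 8) << 9) = 0xa064
 *
 * so the receive-side expression in atl1_intr_rx() is the exact inverse.
 */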
2442 
2443 static int atl1_rings_clean(struct napi_struct *napi, int budget)
2444 {
2445  struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi);
2446  int work_done = atl1_intr_rx(adapter, budget);
2447 
2448  if (atl1_intr_tx(adapter))
2449  work_done = budget;
2450 
2451  /* poll again later to process more packets */
2452  if (work_done >= budget)
2453  return work_done;
2454 
2455  napi_complete(napi);
2456  /* re-enable Interrupt */
2457  if (likely(adapter->int_enabled))
2458  atlx_imr_set(adapter, IMR_NORMAL_MASK);
2459  return work_done;
2460 }
2461 
2462 static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
2463 {
2464  if (!napi_schedule_prep(&adapter->napi))
2465  /* Even when the RX/TX interrupts are masked via the IMR
2466  * register, the ISR bits can still be set (without raising
2467  * an IRQ), so the NAPI helpers are used to check whether a
2468  * poll is already scheduled.
2469  */
2470  return 0;
2471 
2472  __napi_schedule(&adapter->napi);
2473 
2474  /*
2475  * Disable the RX/TX interrupts via the IMR register if
2476  * allowed; the NAPI handler must re-enable them in the
2477  * same way.
2478  */
2479  if (!adapter->int_enabled)
2480  return 1;
2481 
2482  atlx_imr_set(adapter, IMR_NORXTX_MASK);
2483  return 1;
2484 }
2485 
2485 
2486 /*
2487  * atl1_intr - Interrupt Handler
2488  * @irq: interrupt number
2489  * @data: pointer to a network interface device structure
2490  */
2491 static irqreturn_t atl1_intr(int irq, void *data)
2492 {
2493  struct atl1_adapter *adapter = netdev_priv(data);
2494  u32 status;
2495 
2496  status = adapter->cmb.cmb->int_stats;
2497  if (!status)
2498  return IRQ_NONE;
2499 
2500  /* Clear the CMB interrupt status right away, but leave the
2501  * RX/TX interrupt status bits set: they may be cleared only
2502  * once RX/TX processing has actually been queued, otherwise
2503  * an interrupt could be lost.
2504  */
2505  adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX);
2506 
2507  if (status & ISR_GPHY) /* clear phy status */
2508  atlx_clear_phy_int(adapter);
2509 
2510  /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
2511  iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
2512 
2513  /* check if SMB intr */
2514  if (status & ISR_SMB)
2515  atl1_inc_smb(adapter);
2516 
2517  /* check if PCIE PHY Link down */
2518  if (status & ISR_PHY_LINKDOWN) {
2519  if (netif_msg_intr(adapter))
2520  dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2521  "pcie phy link down %x\n", status);
2522  if (netif_running(adapter->netdev)) { /* reset MAC */
2523  atlx_irq_disable(adapter);
2524  schedule_work(&adapter->reset_dev_task);
2525  return IRQ_HANDLED;
2526  }
2527  }
2528 
2529  /* check for DMA read/write error */
2530  if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
2531  if (netif_msg_intr(adapter))
2532  dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2533  "pcie DMA r/w error (status = 0x%x)\n",
2534  status);
2535  atlx_irq_disable(adapter);
2536  schedule_work(&adapter->reset_dev_task);
2537  return IRQ_HANDLED;
2538  }
2539 
2540  /* link event */
2541  if (status & ISR_GPHY) {
2542  adapter->soft_stats.tx_carrier_errors++;
2543  atl1_check_for_link(adapter);
2544  }
2545 
2546  /* transmit or receive event */
2547  if (status & (ISR_CMB_TX | ISR_CMB_RX) &&
2548  atl1_sched_rings_clean(adapter))
2549  adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats &
2550  ~(ISR_CMB_TX | ISR_CMB_RX);
2551 
2552  /* rx exception */
2553  if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2554  ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2555  ISR_HOST_RRD_OV))) {
2556  if (netif_msg_intr(adapter))
2557  dev_printk(KERN_DEBUG,
2558  &adapter->pdev->dev,
2559  "rx exception, ISR = 0x%x\n",
2560  status);
2561  atl1_sched_rings_clean(adapter);
2562  }
2563 
2564  /* re-enable Interrupt */
2565  iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
2566  return IRQ_HANDLED;
2567 }
2568 
2569 
2569 /*
2570  * atl1_phy_config - Timer Call-back
2571  * @data: pointer to netdev cast into an unsigned long
2572  *
2573  */
2574 static void atl1_phy_config(unsigned long data)
2575 {
2576  struct atl1_adapter *adapter = (struct atl1_adapter *)data;
2577  struct atl1_hw *hw = &adapter->hw;
2578  unsigned long flags;
2579 
2580  spin_lock_irqsave(&adapter->lock, flags);
2581  adapter->phy_timer_pending = false;
2582  atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
2583  atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
2584  atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
2585  spin_unlock_irqrestore(&adapter->lock, flags);
2586 }
2587 
2588 /*
2589  * Orphaned vendor comment left intact here:
2590  * <vendor comment>
2591  * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
2592  * will assert. We do soft reset <0x1400=1> according
2593  * with the SPEC. BUT, it seemes that PCIE or DMA
2594  * state-machine will not be reset. DMAR_TO_INT will
2595  * assert again and again.
2596  * </vendor comment>
2597  */
2598 
2599 static int atl1_reset(struct atl1_adapter *adapter)
2600 {
2601  int ret;
2602  ret = atl1_reset_hw(&adapter->hw);
2603  if (ret)
2604  return ret;
2605  return atl1_init_hw(&adapter->hw);
2606 }
2607 
2608 static s32 atl1_up(struct atl1_adapter *adapter)
2609 {
2610  struct net_device *netdev = adapter->netdev;
2611  int err;
2612  int irq_flags = 0;
2613 
2614  /* hardware has been reset, we need to reload some things */
2615  atlx_set_multi(netdev);
2616  atl1_init_ring_ptrs(adapter);
2617  atlx_restore_vlan(adapter);
2618  err = atl1_alloc_rx_buffers(adapter);
2619  if (unlikely(!err))
2620  /* no RX BUFFER allocated */
2621  return -ENOMEM;
2622 
2623  if (unlikely(atl1_configure(adapter))) {
2624  err = -EIO;
2625  goto err_up;
2626  }
2627 
2628  err = pci_enable_msi(adapter->pdev);
2629  if (err) {
2630  if (netif_msg_ifup(adapter))
2631  dev_info(&adapter->pdev->dev,
2632  "Unable to enable MSI: %d\n", err);
2633  irq_flags |= IRQF_SHARED;
2634  }
2635 
2636  err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags,
2637  netdev->name, netdev);
2638  if (unlikely(err))
2639  goto err_up;
2640 
2641  napi_enable(&adapter->napi);
2642  atlx_irq_enable(adapter);
2643  atl1_check_link(adapter);
2644  netif_start_queue(netdev);
2645  return 0;
2646 
2647 err_up:
2648  pci_disable_msi(adapter->pdev);
2649  /* free rx_buffers */
2650  atl1_clean_rx_ring(adapter);
2651  return err;
2652 }
2653 
2654 static void atl1_down(struct atl1_adapter *adapter)
2655 {
2656  struct net_device *netdev = adapter->netdev;
2657 
2658  napi_disable(&adapter->napi);
2659  netif_stop_queue(netdev);
2660  del_timer_sync(&adapter->phy_config_timer);
2661  adapter->phy_timer_pending = false;
2662 
2663  atlx_irq_disable(adapter);
2664  free_irq(adapter->pdev->irq, netdev);
2665  pci_disable_msi(adapter->pdev);
2666  atl1_reset_hw(&adapter->hw);
2667  adapter->cmb.cmb->int_stats = 0;
2668 
2669  adapter->link_speed = SPEED_0;
2670  adapter->link_duplex = -1;
2671  netif_carrier_off(netdev);
2672 
2673  atl1_clean_tx_ring(adapter);
2674  atl1_clean_rx_ring(adapter);
2675 }
2676 
2677 static void atl1_reset_dev_task(struct work_struct *work)
2678 {
2679  struct atl1_adapter *adapter =
2680  container_of(work, struct atl1_adapter, reset_dev_task);
2681  struct net_device *netdev = adapter->netdev;
2682 
2683  netif_device_detach(netdev);
2684  atl1_down(adapter);
2685  atl1_up(adapter);
2686  netif_device_attach(netdev);
2687 }
2688 
2689 /*
2690  * atl1_change_mtu - Change the Maximum Transfer Unit
2691  * @netdev: network interface device structure
2692  * @new_mtu: new value for maximum frame size
2693  *
2694  * Returns 0 on success, negative on failure
2695  */
2696 static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
2697 {
2698  struct atl1_adapter *adapter = netdev_priv(netdev);
2699  int old_mtu = netdev->mtu;
2700  int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2701 
2702  if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2703  (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2704  if (netif_msg_link(adapter))
2705  dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
2706  return -EINVAL;
2707  }
2708 
2709  adapter->hw.max_frame_size = max_frame;
2710  adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
2711  adapter->rx_buffer_len = (max_frame + 7) & ~7;
2712  adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
2713 
2714  netdev->mtu = new_mtu;
2715  if ((old_mtu != new_mtu) && netif_running(netdev)) {
2716  atl1_down(adapter);
2717  atl1_up(adapter);
2718  }
2719 
2720  return 0;
2721 }
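/*
 * Example: for the default MTU of 1500, max_frame = 1500 + 14 (Ethernet
 * header) + 4 (FCS) + 4 (VLAN tag) = 1522 bytes. rx_buffer_len is then
 * rounded up to the next multiple of 8, (1522 + 7) & ~7 = 1528, giving
 * tx_jumbo_task_th = 1529 >> 3 = 191 and rx_jumbo_th = 1528 / 8 = 191.
 */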
2722 
2723 /*
2724  * atl1_open - Called when a network interface is made active
2725  * @netdev: network interface device structure
2726  *
2727  * Returns 0 on success, negative value on failure
2728  *
2729  * The open entry point is called when a network interface is made
2730  * active by the system (IP is up). At this point all resources needed
2731  * for transmit and receive operations are allocated, the interrupt
2732  * handler is registered with the OS, the watchdog timer is started,
2733  * and the stack is notified that the interface is ready.
2734  */
2735 static int atl1_open(struct net_device *netdev)
2736 {
2737  struct atl1_adapter *adapter = netdev_priv(netdev);
2738  int err;
2739 
2740  netif_carrier_off(netdev);
2741 
2742  /* allocate transmit descriptors */
2743  err = atl1_setup_ring_resources(adapter);
2744  if (err)
2745  return err;
2746 
2747  err = atl1_up(adapter);
2748  if (err)
2749  goto err_up;
2750 
2751  return 0;
2752 
2753 err_up:
2754  atl1_reset(adapter);
2755  return err;
2756 }
2757 
2758 /*
2759  * atl1_close - Disables a network interface
2760  * @netdev: network interface device structure
2761  *
2762  * Returns 0, this is not allowed to fail
2763  *
2764  * The close entry point is called when an interface is de-activated
2765  * by the OS. The hardware is still under the driver's control, but
2766  * needs to be disabled. A global MAC reset is issued to stop the
2767  * hardware, and all transmit and receive resources are freed.
2768  */
2769 static int atl1_close(struct net_device *netdev)
2770 {
2771  struct atl1_adapter *adapter = netdev_priv(netdev);
2772  atl1_down(adapter);
2773  atl1_free_ring_resources(adapter);
2774  return 0;
2775 }
2776 
2777 #ifdef CONFIG_PM
2778 static int atl1_suspend(struct device *dev)
2779 {
2780  struct pci_dev *pdev = to_pci_dev(dev);
2781  struct net_device *netdev = pci_get_drvdata(pdev);
2782  struct atl1_adapter *adapter = netdev_priv(netdev);
2783  struct atl1_hw *hw = &adapter->hw;
2784  u32 ctrl = 0;
2785  u32 wufc = adapter->wol;
2786  u32 val;
2787  u16 speed;
2788  u16 duplex;
2789 
2790  netif_device_detach(netdev);
2791  if (netif_running(netdev))
2792  atl1_down(adapter);
2793 
2794  atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2795  atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2796  val = ctrl & BMSR_LSTATUS;
2797  if (val)
2798  wufc &= ~ATLX_WUFC_LNKC;
2799  if (!wufc)
2800  goto disable_wol;
2801 
2802  if (val) {
2803  val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
2804  if (val) {
2805  if (netif_msg_ifdown(adapter))
2806  dev_printk(KERN_DEBUG, &pdev->dev,
2807  "error getting speed/duplex\n");
2808  goto disable_wol;
2809  }
2810 
2811  ctrl = 0;
2812 
2813  /* enable magic packet WOL */
2814  if (wufc & ATLX_WUFC_MAG)
2815  ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
2816  iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
2817  ioread32(hw->hw_addr + REG_WOL_CTRL);
2818 
2819  /* configure the mac */
2820  ctrl = MAC_CTRL_RX_EN;
2821  ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
2822  MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
2823  if (duplex == FULL_DUPLEX)
2824  ctrl |= MAC_CTRL_DUPLX;
2825  ctrl |= (((u32)adapter->hw.preamble_len &
2826  MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
2827  __atlx_vlan_mode(netdev->features, &ctrl);
2828  if (wufc & ATLX_WUFC_MAG)
2829  ctrl |= MAC_CTRL_BC_EN;
2830  iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
2831  ioread32(hw->hw_addr + REG_MAC_CTRL);
2832 
2833  /* poke the PHY */
2834  ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
2835  ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2836  iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
2837  ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
2838  } else {
2839  ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
2840  iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
2841  ioread32(hw->hw_addr + REG_WOL_CTRL);
2842  iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
2843  ioread32(hw->hw_addr + REG_MAC_CTRL);
2844  hw->phy_configured = false;
2845  }
2846 
2847  return 0;
2848 
2849  disable_wol:
2850  iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
2851  ioread32(hw->hw_addr + REG_WOL_CTRL);
2852  ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
2853  ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2854  iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
2855  ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
2856  hw->phy_configured = false;
2857 
2858  return 0;
2859 }
2860 
2861 static int atl1_resume(struct device *dev)
2862 {
2863  struct pci_dev *pdev = to_pci_dev(dev);
2864  struct net_device *netdev = pci_get_drvdata(pdev);
2865  struct atl1_adapter *adapter = netdev_priv(netdev);
2866 
2867  iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
2868 
2869  atl1_reset_hw(&adapter->hw);
2870 
2871  if (netif_running(netdev)) {
2872  adapter->cmb.cmb->int_stats = 0;
2873  atl1_up(adapter);
2874  }
2875  netif_device_attach(netdev);
2876 
2877  return 0;
2878 }
2879 
2880 static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
2881 #define ATL1_PM_OPS (&atl1_pm_ops)
2882 
2883 #else
2884 
2885 static int atl1_suspend(struct device *dev) { return 0; }
2886 
2887 #define ATL1_PM_OPS NULL
2888 #endif
2889 
2890 static void atl1_shutdown(struct pci_dev *pdev)
2891 {
2892  struct net_device *netdev = pci_get_drvdata(pdev);
2893  struct atl1_adapter *adapter = netdev_priv(netdev);
2894 
2895  atl1_suspend(&pdev->dev);
2896  pci_wake_from_d3(pdev, adapter->wol);
2897  pci_set_power_state(pdev, PCI_D3hot);
2898 }
2899 
2900 #ifdef CONFIG_NET_POLL_CONTROLLER
2901 static void atl1_poll_controller(struct net_device *netdev)
2902 {
2903  disable_irq(netdev->irq);
2904  atl1_intr(netdev->irq, netdev);
2905  enable_irq(netdev->irq);
2906 }
2907 #endif
2908 
2909 static const struct net_device_ops atl1_netdev_ops = {
2910  .ndo_open = atl1_open,
2911  .ndo_stop = atl1_close,
2912  .ndo_start_xmit = atl1_xmit_frame,
2913  .ndo_set_rx_mode = atlx_set_multi,
2914  .ndo_validate_addr = eth_validate_addr,
2915  .ndo_set_mac_address = atl1_set_mac,
2916  .ndo_change_mtu = atl1_change_mtu,
2917  .ndo_fix_features = atlx_fix_features,
2918  .ndo_set_features = atlx_set_features,
2919  .ndo_do_ioctl = atlx_ioctl,
2920  .ndo_tx_timeout = atlx_tx_timeout,
2921 #ifdef CONFIG_NET_POLL_CONTROLLER
2922  .ndo_poll_controller = atl1_poll_controller,
2923 #endif
2924 };
2925 
2926 /*
2927  * atl1_probe - Device Initialization Routine
2928  * @pdev: PCI device information struct
2929  * @ent: entry in atl1_pci_tbl
2930  *
2931  * Returns 0 on success, negative on failure
2932  *
2933  * atl1_probe initializes an adapter identified by a pci_dev structure.
2934  * The OS initialization, configuring of the adapter private structure,
2935  * and a hardware reset occur.
2936  */
2937 static int __devinit atl1_probe(struct pci_dev *pdev,
2938  const struct pci_device_id *ent)
2939 {
2940  struct net_device *netdev;
2941  struct atl1_adapter *adapter;
2942  static int cards_found = 0;
2943  int err;
2944 
2945  err = pci_enable_device(pdev);
2946  if (err)
2947  return err;
2948 
2949  /*
2950  * The atl1 chip can DMA to 64-bit addresses, but it uses a single
2951  * shared register for the high 32 bits, so only a single, aligned,
2952  * 4 GB physical address range can be used at a time.
2953  *
2954  * Supporting 64-bit DMA on this hardware is more trouble than it's
2955  * worth. It is far easier to limit to 32-bit DMA than update
2956  * various kernel subsystems to support the mechanics required by a
2957  * fixed-high-32-bit system.
2958  */
2959  err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2960  if (err) {
2961  dev_err(&pdev->dev, "no usable DMA configuration\n");
2962  goto err_dma;
2963  }
2964  /*
2965  * Mark all PCI regions associated with PCI device
2966  * pdev as being reserved by owner atl1_driver_name
2967  */
2968  err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
2969  if (err)
2970  goto err_request_regions;
2971 
2972  /*
2973  * Enables bus-mastering on the device and calls
2974  * pcibios_set_master to do the needed arch specific settings
2975  */
2976  pci_set_master(pdev);
2977 
2978  netdev = alloc_etherdev(sizeof(struct atl1_adapter));
2979  if (!netdev) {
2980  err = -ENOMEM;
2981  goto err_alloc_etherdev;
2982  }
2983  SET_NETDEV_DEV(netdev, &pdev->dev);
2984 
2985  pci_set_drvdata(pdev, netdev);
2986  adapter = netdev_priv(netdev);
2987  adapter->netdev = netdev;
2988  adapter->pdev = pdev;
2989  adapter->hw.back = adapter;
2990  adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);
2991 
2992  adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
2993  if (!adapter->hw.hw_addr) {
2994  err = -EIO;
2995  goto err_pci_iomap;
2996  }
2997  /* get device revision number */
2998  adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
2999  (REG_MASTER_CTRL + 2));
3000  if (netif_msg_probe(adapter))
3001  dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
3002 
3003  /* set default ring resource counts */
3004  adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
3005  adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
3006 
3007  adapter->mii.dev = netdev;
3008  adapter->mii.mdio_read = mdio_read;
3009  adapter->mii.mdio_write = mdio_write;
3010  adapter->mii.phy_id_mask = 0x1f;
3011  adapter->mii.reg_num_mask = 0x1f;
3012 
3013  netdev->netdev_ops = &atl1_netdev_ops;
3014  netdev->watchdog_timeo = 5 * HZ;
3015  netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
3016 
3017  netdev->ethtool_ops = &atl1_ethtool_ops;
3018  adapter->bd_number = cards_found;
3019 
3020  /* setup the private structure */
3021  err = atl1_sw_init(adapter);
3022  if (err)
3023  goto err_common;
3024 
3025  netdev->features = NETIF_F_HW_CSUM;
3026  netdev->features |= NETIF_F_SG;
3027  netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
3028 
3029  netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
3030  NETIF_F_HW_VLAN_RX;
3031 
3032  /* is this valid? see atl1_setup_mac_ctrl() */
3033  netdev->features |= NETIF_F_RXCSUM;
3034 
3035  /*
3036  * patch for some early-revision L1 chips;
3037  * the final version of the L1 may not need
3038  * these patches
3039  */
3040  /* atl1_pcie_patch(adapter); */
3041 
3042  /* really reset GPHY core */
3043  iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
3044 
3045  /*
3046  * reset the controller to
3047  * put the device in a known good starting state
3048  */
3049  if (atl1_reset_hw(&adapter->hw)) {
3050  err = -EIO;
3051  goto err_common;
3052  }
3053 
3054  /* copy the MAC address out of the EEPROM */
3055  if (atl1_read_mac_addr(&adapter->hw)) {
3056  /* mark random mac */
3057  netdev->addr_assign_type |= NET_ADDR_RANDOM;
3058  }
3059  memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
3060 
3061  if (!is_valid_ether_addr(netdev->dev_addr)) {
3062  err = -EIO;
3063  goto err_common;
3064  }
3065 
3066  atl1_check_options(adapter);
3067 
3068  /* pre-init the MAC, and setup link */
3069  err = atl1_init_hw(&adapter->hw);
3070  if (err) {
3071  err = -EIO;
3072  goto err_common;
3073  }
3074 
3075  atl1_pcie_patch(adapter);
3076  /* assume we have no link for now */
3077  netif_carrier_off(netdev);
3078 
3079  setup_timer(&adapter->phy_config_timer, atl1_phy_config,
3080  (unsigned long)adapter);
3081  adapter->phy_timer_pending = false;
3082 
3083  INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
3084 
3085  INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
3086 
3087  err = register_netdev(netdev);
3088  if (err)
3089  goto err_common;
3090 
3091  cards_found++;
3092  atl1_via_workaround(adapter);
3093  return 0;
3094 
3095 err_common:
3096  pci_iounmap(pdev, adapter->hw.hw_addr);
3097 err_pci_iomap:
3098  free_netdev(netdev);
3099 err_alloc_etherdev:
3100  pci_release_regions(pdev);
3101 err_dma:
3102 err_request_regions:
3103  pci_disable_device(pdev);
3104  return err;
3105 }
3106 
3106 
3107 /*
3108  * atl1_remove - Device Removal Routine
3109  * @pdev: PCI device information struct
3110  *
3111  * atl1_remove is called by the PCI subsystem to alert the driver
3112  * that it should release a PCI device. This could be caused by a
3113  * Hot-Plug event, or because the driver is going to be removed from
3114  * memory.
3115  */
3116 static void __devexit atl1_remove(struct pci_dev *pdev)
3117 {
3118  struct net_device *netdev = pci_get_drvdata(pdev);
3119  struct atl1_adapter *adapter;
3120  /* Device not available. Return. */
3121  if (!netdev)
3122  return;
3123 
3124  adapter = netdev_priv(netdev);
3125 
3126  /*
3127  * Some atl1 boards lack persistent storage for their MAC, and get it
3128  * from the BIOS during POST. If we've been messing with the MAC
3129  * address, we need to save the permanent one.
3130  */
3131  if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
3132  memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
3133  ETH_ALEN);
3134  atl1_set_mac_addr(&adapter->hw);
3135  }
3136 
3137  iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
3138  unregister_netdev(netdev);
3139  pci_iounmap(pdev, adapter->hw.hw_addr);
3140  pci_release_regions(pdev);
3141  free_netdev(netdev);
3142  pci_disable_device(pdev);
3143 }
3144 
3145 static struct pci_driver atl1_driver = {
3146  .name = ATLX_DRIVER_NAME,
3147  .id_table = atl1_pci_tbl,
3148  .probe = atl1_probe,
3149  .remove = __devexit_p(atl1_remove),
3150  .shutdown = atl1_shutdown,
3151  .driver.pm = ATL1_PM_OPS,
3152 };
3153 
3154 /*
3155  * atl1_exit_module - Driver Exit Cleanup Routine
3156  *
3157  * atl1_exit_module is called just before the driver is removed
3158  * from memory.
3159  */
3160 static void __exit atl1_exit_module(void)
3161 {
3162  pci_unregister_driver(&atl1_driver);
3163 }
3164 
3165 /*
3166  * atl1_init_module - Driver Registration Routine
3167  *
3168  * atl1_init_module is the first routine called when the driver is
3169  * loaded. All it does is register with the PCI subsystem.
3170  */
3171 static int __init atl1_init_module(void)
3172 {
3173  return pci_register_driver(&atl1_driver);
3174 }
3175 
3176 module_init(atl1_init_module);
3177 module_exit(atl1_exit_module);
3178 
3179 struct atl1_stats {
3180  char stat_string[ETH_GSTRING_LEN];
3181  int sizeof_stat;
3182  int stat_offset;
3183 };
3184 
3185 #define ATL1_STAT(m) \
3186  sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
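/*
 * Example expansion: ATL1_STAT(soft_stats.rx_packets) becomes
 *	sizeof(((struct atl1_adapter *)0)->soft_stats.rx_packets),
 *	offsetof(struct atl1_adapter, soft_stats.rx_packets)
 * filling the sizeof_stat and stat_offset members of struct atl1_stats;
 * atl1_get_ethtool_stats() below adds stat_offset to the adapter
 * pointer and reads a u32 or u64 depending on sizeof_stat.
 */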
3187 
3188 static struct atl1_stats atl1_gstrings_stats[] = {
3189  {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
3190  {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
3191  {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
3192  {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
3193  {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
3194  {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
3195  {"multicast", ATL1_STAT(soft_stats.multicast)},
3196  {"collisions", ATL1_STAT(soft_stats.collisions)},
3197  {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
3198  {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
3199  {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
3200  {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
3201  {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
3202  {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
3203  {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
3204  {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
3205  {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
3206  {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
3207  {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
3208  {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
3209  {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
3210  {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
3211  {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
3212  {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
3213  {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
3214  {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
3215  {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
3216  {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
3217  {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
3218 };
3219 
3220 static void atl1_get_ethtool_stats(struct net_device *netdev,
3221  struct ethtool_stats *stats, u64 *data)
3222 {
3223  struct atl1_adapter *adapter = netdev_priv(netdev);
3224  int i;
3225  char *p;
3226 
3227  for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
3228  p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
3229  data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
3230  sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
3231  }
3232 
3233 }
3234 
3235 static int atl1_get_sset_count(struct net_device *netdev, int sset)
3236 {
3237  switch (sset) {
3238  case ETH_SS_STATS:
3239  return ARRAY_SIZE(atl1_gstrings_stats);
3240  default:
3241  return -EOPNOTSUPP;
3242  }
3243 }
3244 
3245 static int atl1_get_settings(struct net_device *netdev,
3246  struct ethtool_cmd *ecmd)
3247 {
3248  struct atl1_adapter *adapter = netdev_priv(netdev);
3249  struct atl1_hw *hw = &adapter->hw;
3250 
3251  ecmd->supported = (SUPPORTED_10baseT_Half |
3252  SUPPORTED_10baseT_Full |
3253  SUPPORTED_100baseT_Half |
3254  SUPPORTED_100baseT_Full |
3255  SUPPORTED_1000baseT_Full |
3256  SUPPORTED_Autoneg | SUPPORTED_TP);
3257  ecmd->advertising = ADVERTISED_TP;
3258  if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3259  hw->media_type == MEDIA_TYPE_1000M_FULL) {
3260  ecmd->advertising |= ADVERTISED_Autoneg;
3261  if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
3262  ecmd->advertising |= ADVERTISED_Autoneg;
3263  ecmd->advertising |=
3264  (ADVERTISED_10baseT_Half |
3265  ADVERTISED_10baseT_Full |
3266  ADVERTISED_100baseT_Half |
3267  ADVERTISED_100baseT_Full |
3268  ADVERTISED_1000baseT_Full);
3269  } else
3270  ecmd->advertising |= (ADVERTISED_1000baseT_Full);
3271  }
3272  ecmd->port = PORT_TP;
3273  ecmd->phy_address = 0;
3274  ecmd->transceiver = XCVR_INTERNAL;
3275 
3276  if (netif_carrier_ok(adapter->netdev)) {
3277  u16 link_speed, link_duplex;
3278  atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
3279  ethtool_cmd_speed_set(ecmd, link_speed);
3280  if (link_duplex == FULL_DUPLEX)
3281  ecmd->duplex = DUPLEX_FULL;
3282  else
3283  ecmd->duplex = DUPLEX_HALF;
3284  } else {
3285  ethtool_cmd_speed_set(ecmd, -1);
3286  ecmd->duplex = -1;
3287  }
3288  if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3289  hw->media_type == MEDIA_TYPE_1000M_FULL)
3290  ecmd->autoneg = AUTONEG_ENABLE;
3291  else
3292  ecmd->autoneg = AUTONEG_DISABLE;
3293 
3294  return 0;
3295 }
3296 
3297 static int atl1_set_settings(struct net_device *netdev,
3298  struct ethtool_cmd *ecmd)
3299 {
3300  struct atl1_adapter *adapter = netdev_priv(netdev);
3301  struct atl1_hw *hw = &adapter->hw;
3302  u16 phy_data;
3303  int ret_val = 0;
3304  u16 old_media_type = hw->media_type;
3305 
3306  if (netif_running(adapter->netdev)) {
3307  if (netif_msg_link(adapter))
3308  dev_dbg(&adapter->pdev->dev,
3309  "ethtool shutting down adapter\n");
3310  atl1_down(adapter);
3311  }
3312 
3313  if (ecmd->autoneg == AUTONEG_ENABLE)
3314  hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
3315  else {
3316  u32 speed = ethtool_cmd_speed(ecmd);
3317  if (speed == SPEED_1000) {
3318  if (ecmd->duplex != DUPLEX_FULL) {
3319  if (netif_msg_link(adapter))
3320  dev_warn(&adapter->pdev->dev,
3321  "1000M half is invalid\n");
3322  ret_val = -EINVAL;
3323  goto exit_sset;
3324  }
3325  hw->media_type = MEDIA_TYPE_1000M_FULL;
3326  } else if (speed == SPEED_100) {
3327  if (ecmd->duplex == DUPLEX_FULL)
3328  hw->media_type = MEDIA_TYPE_100M_FULL;
3329  else
3330  hw->media_type = MEDIA_TYPE_100M_HALF;
3331  } else {
3332  if (ecmd->duplex == DUPLEX_FULL)
3333  hw->media_type = MEDIA_TYPE_10M_FULL;
3334  else
3335  hw->media_type = MEDIA_TYPE_10M_HALF;
3336  }
3337  }
3338  switch (hw->media_type) {
3339  case MEDIA_TYPE_AUTO_SENSOR:
3340  ecmd->advertising =
3341  ADVERTISED_10baseT_Half |
3342  ADVERTISED_10baseT_Full |
3343  ADVERTISED_100baseT_Half |
3344  ADVERTISED_100baseT_Full |
3345  ADVERTISED_1000baseT_Full |
3346  ADVERTISED_Autoneg | ADVERTISED_TP;
3347  break;
3348  case MEDIA_TYPE_1000M_FULL:
3349  ecmd->advertising =
3350  ADVERTISED_1000baseT_Full |
3351  ADVERTISED_Autoneg | ADVERTISED_TP;
3352  break;
3353  default:
3354  ecmd->advertising = 0;
3355  break;
3356  }
3357  if (atl1_phy_setup_autoneg_adv(hw)) {
3358  ret_val = -EINVAL;
3359  if (netif_msg_link(adapter))
3360  dev_warn(&adapter->pdev->dev,
3361  "invalid ethtool speed/duplex setting\n");
3362  goto exit_sset;
3363  }
3364  if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3365  hw->media_type == MEDIA_TYPE_1000M_FULL)
3366  phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3367  else {
3368  switch (hw->media_type) {
3369  case MEDIA_TYPE_100M_FULL:
3370  phy_data =
3371  MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
3372  MII_CR_RESET;
3373  break;
3374  case MEDIA_TYPE_100M_HALF:
3375  phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3376  break;
3377  case MEDIA_TYPE_10M_FULL:
3378  phy_data =
3379  MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
3380  break;
3381  default:
3382  /* MEDIA_TYPE_10M_HALF: */
3383  phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3384  break;
3385  }
3386  }
3387  atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3388 exit_sset:
3389  if (ret_val)
3390  hw->media_type = old_media_type;
3391 
3392  if (netif_running(adapter->netdev)) {
3393  if (netif_msg_link(adapter))
3394  dev_dbg(&adapter->pdev->dev,
3395  "ethtool starting adapter\n");
3396  atl1_up(adapter);
3397  } else if (!ret_val) {
3398  if (netif_msg_link(adapter))
3399  dev_dbg(&adapter->pdev->dev,
3400  "ethtool resetting adapter\n");
3401  atl1_reset(adapter);
3402  }
3403  return ret_val;
3404 }
3405 
3406 static void atl1_get_drvinfo(struct net_device *netdev,
3407  struct ethtool_drvinfo *drvinfo)
3408 {
3409  struct atl1_adapter *adapter = netdev_priv(netdev);
3410 
3411  strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
3412  strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
3413  sizeof(drvinfo->version));
3414  strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
3415  sizeof(drvinfo->bus_info));
3416  drvinfo->eedump_len = ATL1_EEDUMP_LEN;
3417 }
3418 
3419 static void atl1_get_wol(struct net_device *netdev,
3420  struct ethtool_wolinfo *wol)
3421 {
3422  struct atl1_adapter *adapter = netdev_priv(netdev);
3423 
3424  wol->supported = WAKE_MAGIC;
3425  wol->wolopts = 0;
3426  if (adapter->wol & ATLX_WUFC_MAG)
3427  wol->wolopts |= WAKE_MAGIC;
3428 }
3429 
3430 static int atl1_set_wol(struct net_device *netdev,
3431  struct ethtool_wolinfo *wol)
3432 {
3433  struct atl1_adapter *adapter = netdev_priv(netdev);
3434 
3435  if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
3436  WAKE_ARP | WAKE_MAGICSECURE))
3437  return -EOPNOTSUPP;
3438  adapter->wol = 0;
3439  if (wol->wolopts & WAKE_MAGIC)
3440  adapter->wol |= ATLX_WUFC_MAG;
3441 
3442  device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
3443 
3444  return 0;
3445 }
3446 
3447 static u32 atl1_get_msglevel(struct net_device *netdev)
3448 {
3449  struct atl1_adapter *adapter = netdev_priv(netdev);
3450  return adapter->msg_enable;
3451 }
3452 
3453 static void atl1_set_msglevel(struct net_device *netdev, u32 value)
3454 {
3455  struct atl1_adapter *adapter = netdev_priv(netdev);
3456  adapter->msg_enable = value;
3457 }
3458 
3459 static int atl1_get_regs_len(struct net_device *netdev)
3460 {
3461  return ATL1_REG_COUNT * sizeof(u32);
3462 }
3463 
3464 static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
3465  void *p)
3466 {
3467  struct atl1_adapter *adapter = netdev_priv(netdev);
3468  struct atl1_hw *hw = &adapter->hw;
3469  unsigned int i;
3470  u32 *regbuf = p;
3471 
3472  for (i = 0; i < ATL1_REG_COUNT; i++) {
3473  /*
3474  * This switch statement avoids reserved regions
3475  * of register space.
3476  */
3477  switch (i) {
3478  case 6 ... 9:
3479  case 14:
3480  case 29 ... 31:
3481  case 34 ... 63:
3482  case 75 ... 127:
3483  case 136 ... 1023:
3484  case 1027 ... 1087:
3485  case 1091 ... 1151:
3486  case 1194 ... 1195:
3487  case 1200 ... 1201:
3488  case 1206 ... 1213:
3489  case 1216 ... 1279:
3490  case 1290 ... 1311:
3491  case 1323 ... 1343:
3492  case 1358 ... 1359:
3493  case 1368 ... 1375:
3494  case 1378 ... 1383:
3495  case 1388 ... 1391:
3496  case 1393 ... 1395:
3497  case 1402 ... 1403:
3498  case 1410 ... 1471:
3499  case 1522 ... 1535:
3500  /* reserved region; don't read it */
3501  regbuf[i] = 0;
3502  break;
3503  default:
3504  /* unreserved region */
3505  regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
3506  }
3507  }
3508 }
3509 
3510 static void atl1_get_ringparam(struct net_device *netdev,
3511  struct ethtool_ringparam *ring)
3512 {
3513  struct atl1_adapter *adapter = netdev_priv(netdev);
3514  struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
3515  struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
3516 
3517  ring->rx_max_pending = ATL1_MAX_RFD;
3518  ring->tx_max_pending = ATL1_MAX_TPD;
3519  ring->rx_pending = rxdr->count;
3520  ring->tx_pending = txdr->count;
3521 }
3522 
3523 static int atl1_set_ringparam(struct net_device *netdev,
3524  struct ethtool_ringparam *ring)
3525 {
3526  struct atl1_adapter *adapter = netdev_priv(netdev);
3527  struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
3528  struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
3529  struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
3530 
3531  struct atl1_tpd_ring tpd_old, tpd_new;
3532  struct atl1_rfd_ring rfd_old, rfd_new;
3533  struct atl1_rrd_ring rrd_old, rrd_new;
3534  struct atl1_ring_header rhdr_old, rhdr_new;
3535  struct atl1_smb smb;
3536  struct atl1_cmb cmb;
3537  int err;
3538 
3539  tpd_old = adapter->tpd_ring;
3540  rfd_old = adapter->rfd_ring;
3541  rrd_old = adapter->rrd_ring;
3542  rhdr_old = adapter->ring_header;
3543 
3544  if (netif_running(adapter->netdev))
3545  atl1_down(adapter);
3546 
3547  rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
3548  rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
3549  rfdr->count;
3550  rfdr->count = (rfdr->count + 3) & ~3;
3551  rrdr->count = rfdr->count;
3552 
3553  tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
3554  tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
3555  tpdr->count;
3556  tpdr->count = (tpdr->count + 3) & ~3;
3557 
3558  if (netif_running(adapter->netdev)) {
3559  /* try to get new resources before deleting old */
3560  err = atl1_setup_ring_resources(adapter);
3561  if (err)
3562  goto err_setup_ring;
3563 
3564  /*
3565  * save the new, restore the old in order to free it,
3566  * then restore the new back again
3567  */
3568 
3569  rfd_new = adapter->rfd_ring;
3570  rrd_new = adapter->rrd_ring;
3571  tpd_new = adapter->tpd_ring;
3572  rhdr_new = adapter->ring_header;
3573  adapter->rfd_ring = rfd_old;
3574  adapter->rrd_ring = rrd_old;
3575  adapter->tpd_ring = tpd_old;
3576  adapter->ring_header = rhdr_old;
3577  /*
3578  * Save SMB and CMB, since atl1_free_ring_resources
3579  * will clear them.
3580  */
3581  smb = adapter->smb;
3582  cmb = adapter->cmb;
3583  atl1_free_ring_resources(adapter);
3584  adapter->rfd_ring = rfd_new;
3585  adapter->rrd_ring = rrd_new;
3586  adapter->tpd_ring = tpd_new;
3587  adapter->ring_header = rhdr_new;
3588  adapter->smb = smb;
3589  adapter->cmb = cmb;
3590 
3591  err = atl1_up(adapter);
3592  if (err)
3593  return err;
3594  }
3595  return 0;
3596 
3597 err_setup_ring:
3598  adapter->rfd_ring = rfd_old;
3599  adapter->rrd_ring = rrd_old;
3600  adapter->tpd_ring = tpd_old;
3601  adapter->ring_header = rhdr_old;
3602  atl1_up(adapter);
3603  return err;
3604 }
3605 
3606 static void atl1_get_pauseparam(struct net_device *netdev,
3607  struct ethtool_pauseparam *epause)
3608 {
3609  struct atl1_adapter *adapter = netdev_priv(netdev);
3610  struct atl1_hw *hw = &adapter->hw;
3611 
3612  if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3613  hw->media_type == MEDIA_TYPE_1000M_FULL) {
3614  epause->autoneg = AUTONEG_ENABLE;
3615  } else {
3616  epause->autoneg = AUTONEG_DISABLE;
3617  }
3618  epause->rx_pause = 1;
3619  epause->tx_pause = 1;
3620 }
3621 
3622 static int atl1_set_pauseparam(struct net_device *netdev,
3623  struct ethtool_pauseparam *epause)
3624 {
3625  struct atl1_adapter *adapter = netdev_priv(netdev);
3626  struct atl1_hw *hw = &adapter->hw;
3627 
3628  if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3629  hw->media_type == MEDIA_TYPE_1000M_FULL) {
3630  epause->autoneg = AUTONEG_ENABLE;
3631  } else {
3632  epause->autoneg = AUTONEG_DISABLE;
3633  }
3634 
3635  epause->rx_pause = 1;
3636  epause->tx_pause = 1;
3637 
3638  return 0;
3639 }
3640 
3641 static void atl1_get_strings(struct net_device *netdev, u32 stringset,
3642  u8 *data)
3643 {
3644  u8 *p = data;
3645  int i;
3646 
3647  switch (stringset) {
3648  case ETH_SS_STATS:
3649  for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
3650  memcpy(p, atl1_gstrings_stats[i].stat_string,
3651  ETH_GSTRING_LEN);
3652  p += ETH_GSTRING_LEN;
3653  }
3654  break;
3655  }
3656 }
3657 
3658 static int atl1_nway_reset(struct net_device *netdev)
3659 {
3660  struct atl1_adapter *adapter = netdev_priv(netdev);
3661  struct atl1_hw *hw = &adapter->hw;
3662 
3663  if (netif_running(netdev)) {
3664  u16 phy_data;
3665  atl1_down(adapter);
3666 
3667  if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3668  hw->media_type == MEDIA_TYPE_1000M_FULL) {
3669  phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3670  } else {
3671  switch (hw->media_type) {
3672  case MEDIA_TYPE_100M_FULL:
3673  phy_data = MII_CR_FULL_DUPLEX |
3674  MII_CR_SPEED_100 | MII_CR_RESET;
3675  break;
3676  case MEDIA_TYPE_100M_HALF:
3677  phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3678  break;
3679  case MEDIA_TYPE_10M_FULL:
3680  phy_data = MII_CR_FULL_DUPLEX |
3681  MII_CR_SPEED_10 | MII_CR_RESET;
3682  break;
3683  default:
3684  /* MEDIA_TYPE_10M_HALF */
3685  phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3686  }
3687  }
3688  atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3689  atl1_up(adapter);
3690  }
3691  return 0;
3692 }
3693 
3694 static const struct ethtool_ops atl1_ethtool_ops = {
3695  .get_settings = atl1_get_settings,
3696  .set_settings = atl1_set_settings,
3697  .get_drvinfo = atl1_get_drvinfo,
3698  .get_wol = atl1_get_wol,
3699  .set_wol = atl1_set_wol,
3700  .get_msglevel = atl1_get_msglevel,
3701  .set_msglevel = atl1_set_msglevel,
3702  .get_regs_len = atl1_get_regs_len,
3703  .get_regs = atl1_get_regs,
3704  .get_ringparam = atl1_get_ringparam,
3705  .set_ringparam = atl1_set_ringparam,
3706  .get_pauseparam = atl1_get_pauseparam,
3707  .set_pauseparam = atl1_set_pauseparam,
3708  .get_link = ethtool_op_get_link,
3709  .get_strings = atl1_get_strings,
3710  .nway_reset = atl1_nway_reset,
3711  .get_ethtool_stats = atl1_get_ethtool_stats,
3712  .get_sset_count = atl1_get_sset_count,
3713 };