Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
r8169.c
Go to the documentation of this file.
1 /*
2  * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3  *
4  * Copyright (c) 2002 ShuChen <[email protected]>
5  * Copyright (c) 2003 - 2007 Francois Romieu <[email protected]>
6  * Copyright (c) a lot of people too. Please respect their work.
7  *
8  * See MAINTAINERS file for support contact information.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
31 
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
38 
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
51 
52 #ifdef RTL8169_DEBUG
53 #define assert(expr) \
54  if (!(expr)) { \
55  printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56  #expr,__FILE__,__func__,__LINE__); \
57  }
58 #define dprintk(fmt, args...) \
59  do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
60 #else
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
64 
65 #define R8169_MSG_DEFAULT \
66  (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
67 
68 #define TX_SLOTS_AVAIL(tp) \
69  (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
70 
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73  (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
74 
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76  The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
78 
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 
84 #define R8169_REGS_SIZE 256
85 #define R8169_NAPI_WEIGHT 64
86 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
87 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
88 #define RX_BUF_SIZE 1536 /* Rx Buffer size */
89 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
90 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
91 
92 #define RTL8169_TX_TIMEOUT (6*HZ)
93 #define RTL8169_PHY_TIMEOUT (10*HZ)
94 
95 #define RTL_EEPROM_SIG cpu_to_le32(0x8129)
96 #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
97 #define RTL_EEPROM_SIG_ADDR 0x0000
98 
99 /* write/read MMIO register */
100 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
101 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
102 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
103 #define RTL_R8(reg) readb (ioaddr + (reg))
104 #define RTL_R16(reg) readw (ioaddr + (reg))
105 #define RTL_R32(reg) readl (ioaddr + (reg))
106 
150 };
151 
153  RTL_TD_0 = 0,
154  RTL_TD_1 = 1,
155 };
156 
157 #define JUMBO_1K ETH_DATA_LEN
158 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
159 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
160 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
161 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
162 
163 #define _R(NAME,TD,FW,SZ,B) { \
164  .name = NAME, \
165  .txd_version = TD, \
166  .fw_name = FW, \
167  .jumbo_max = SZ, \
168  .jumbo_tx_csum = B \
169 }
170 
171 static const struct {
172  const char *name;
174  const char *fw_name;
177 } rtl_chip_infos[] = {
178  /* PCI devices. */
180  _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
182  _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
184  _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
186  _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
188  _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
190  _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
191  /* PCI-E devices. */
193  _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
195  _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
197  _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
199  _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
201  _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
203  _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
205  _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
207  _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
209  _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
211  _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
213  _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
215  _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
217  _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
219  _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
221  _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
223  _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
225  _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
227  _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
229  _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
230  JUMBO_9K, false),
232  _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
233  JUMBO_9K, false),
235  _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
237  _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
239  _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
240  JUMBO_1K, true),
242  _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
243  JUMBO_1K, true),
245  _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
247  _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
248  JUMBO_9K, false),
250  _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
251  JUMBO_9K, false),
253  _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
254  JUMBO_9K, false),
256  _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
257  JUMBO_9K, false),
259  _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
260  JUMBO_9K, false),
262  _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
263  JUMBO_1K, true),
265  _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
266  JUMBO_9K, false),
268  _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
269  JUMBO_1K, true),
271  _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
272  JUMBO_9K, false),
274  _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
275 };
276 #undef _R
277 
279  RTL_CFG_0 = 0x00,
282 };
283 
/*
 * PCI device IDs claimed by this driver.  The driver_data field carries an
 * RTL_CFG_* configuration index used at probe time.  Some entries match on
 * subsystem IDs as well (D-Link, Linksys, and the 0x0001:0x8168 rebrand).
 */
static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
	{ PCI_VENDOR_ID_DLINK,			0x4300,
		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
	{ 0x0001,				0x8168,
		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
	{0,},	/* terminator */
};
302 
303 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
304 
305 static int rx_buf_sz = 16383;
306 static int use_dac;
307 static struct {
309 } debug = { -1 };
310 
312  MAC0 = 0, /* Ethernet hardware address. */
313  MAC4 = 4,
314  MAR0 = 8, /* Multicast filter. */
321  FLASH = 0x30,
322  ERSR = 0x36,
323  ChipCmd = 0x37,
324  TxPoll = 0x38,
325  IntrMask = 0x3c,
326  IntrStatus = 0x3e,
327 
328  TxConfig = 0x40,
329 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
330 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
331 
332  RxConfig = 0x44,
333 #define RX128_INT_EN (1 << 15) /* 8111c and later */
334 #define RX_MULTI_EN (1 << 14) /* 8111c only */
335 #define RXCFG_FIFO_SHIFT 13
336  /* No threshold before first PCI xfer */
337 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
338 #define RXCFG_DMA_SHIFT 8
339  /* Unlimited maximum PCI burst. */
340 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341 
342  RxMissed = 0x4c,
343  Cfg9346 = 0x50,
344  Config0 = 0x51,
345  Config1 = 0x52,
346  Config2 = 0x53,
347 #define PME_SIGNAL (1 << 5) /* 8168c and later */
348 
349  Config3 = 0x54,
350  Config4 = 0x55,
351  Config5 = 0x56,
352  MultiIntr = 0x5c,
353  PHYAR = 0x60,
354  PHYstatus = 0x6c,
355  RxMaxSize = 0xda,
356  CPlusCmd = 0xe0,
357  IntrMitigate = 0xe2,
360  EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
361 
362 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
363 
364  MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
365 
366 #define TxPacketMax (8064 >> 7)
367 #define EarlySize 0x27
368 
369  FuncEvent = 0xf0,
373 };
374 
376  TBICSR = 0x64,
377  TBI_ANAR = 0x68,
378  TBI_LPAR = 0x6a,
379 };
380 
382  CSIDR = 0x64,
383  CSIAR = 0x68,
384 #define CSIAR_FLAG 0x80000000
385 #define CSIAR_WRITE_CMD 0x80000000
386 #define CSIAR_BYTE_ENABLE 0x0f
387 #define CSIAR_BYTE_ENABLE_SHIFT 12
388 #define CSIAR_ADDR_MASK 0x0fff
389 #define CSIAR_FUNC_CARD 0x00000000
390 #define CSIAR_FUNC_SDIO 0x00010000
391 #define CSIAR_FUNC_NIC 0x00020000
392  PMCH = 0x6f,
393  EPHYAR = 0x80,
394 #define EPHYAR_FLAG 0x80000000
395 #define EPHYAR_WRITE_CMD 0x80000000
396 #define EPHYAR_REG_MASK 0x1f
397 #define EPHYAR_REG_SHIFT 16
398 #define EPHYAR_DATA_MASK 0xffff
399  DLLPR = 0xd0,
400 #define PFM_EN (1 << 6)
401  DBG_REG = 0xd1,
402 #define FIX_NAK_1 (1 << 4)
403 #define FIX_NAK_2 (1 << 3)
404  TWSI = 0xd2,
405  MCU = 0xd3,
406 #define NOW_IS_OOB (1 << 7)
407 #define TX_EMPTY (1 << 5)
408 #define RX_EMPTY (1 << 4)
409 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
410 #define EN_NDP (1 << 3)
411 #define EN_OOB_RESET (1 << 2)
412 #define LINK_LIST_RDY (1 << 1)
413  EFUSEAR = 0xdc,
414 #define EFUSEAR_FLAG 0x80000000
415 #define EFUSEAR_WRITE_CMD 0x80000000
416 #define EFUSEAR_READ_CMD 0x00000000
417 #define EFUSEAR_REG_MASK 0x03ff
418 #define EFUSEAR_REG_SHIFT 8
419 #define EFUSEAR_DATA_MASK 0xff
420 };
421 
423  LED_FREQ = 0x1a,
424  EEE_LED = 0x1b,
425  ERIDR = 0x70,
426  ERIAR = 0x74,
427 #define ERIAR_FLAG 0x80000000
428 #define ERIAR_WRITE_CMD 0x80000000
429 #define ERIAR_READ_CMD 0x00000000
430 #define ERIAR_ADDR_BYTE_ALIGN 4
431 #define ERIAR_TYPE_SHIFT 16
432 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
433 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
434 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
435 #define ERIAR_MASK_SHIFT 12
436 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
437 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
438 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
439 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
441  OCPDR = 0xb0, /* OCP GPHY access */
442 #define OCPDR_WRITE_CMD 0x80000000
443 #define OCPDR_READ_CMD 0x00000000
444 #define OCPDR_REG_MASK 0x7f
445 #define OCPDR_GPHY_REG_SHIFT 16
446 #define OCPDR_DATA_MASK 0xffff
447  OCPAR = 0xb4,
448 #define OCPAR_FLAG 0x80000000
449 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
450 #define OCPAR_GPHY_READ_CMD 0x0000f060
451  GPHY_OCP = 0xb8,
452  RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
453  MISC = 0xf0, /* 8168e only. */
454 #define TXPLA_RST (1 << 29)
455 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
456 #define PWM_EN (1 << 22)
457 #define RXDV_GATED_EN (1 << 19)
458 #define EARLY_TALLY_EN (1 << 16)
459 };
460 
462  /* InterruptStatusBits */
463  SYSErr = 0x8000,
464  PCSTimeout = 0x4000,
465  SWInt = 0x0100,
466  TxDescUnavail = 0x0080,
467  RxFIFOOver = 0x0040,
468  LinkChg = 0x0020,
469  RxOverflow = 0x0010,
470  TxErr = 0x0008,
471  TxOK = 0x0004,
472  RxErr = 0x0002,
473  RxOK = 0x0001,
474 
475  /* RxStatusDesc */
476  RxBOVF = (1 << 24),
477  RxFOVF = (1 << 23),
478  RxRWT = (1 << 22),
479  RxRES = (1 << 21),
480  RxRUNT = (1 << 20),
481  RxCRC = (1 << 19),
482 
483  /* ChipCmdBits */
484  StopReq = 0x80,
485  CmdReset = 0x10,
486  CmdRxEnb = 0x08,
487  CmdTxEnb = 0x04,
488  RxBufEmpty = 0x01,
489 
490  /* TXPoll register p.5 */
491  HPQ = 0x80, /* Poll cmd on the high prio queue */
492  NPQ = 0x40, /* Poll cmd on the low prio queue */
493  FSWInt = 0x01, /* Forced software interrupt */
494 
495  /* Cfg9346Bits */
496  Cfg9346_Lock = 0x00,
498 
499  /* rx_mode_bits */
500  AcceptErr = 0x20,
501  AcceptRunt = 0x10,
504  AcceptMyPhys = 0x02,
506 #define RX_CONFIG_ACCEPT_MASK 0x3f
507 
508  /* TxConfigBits */
510  TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
511 
512  /* Config1 register p.24 */
513  LEDS1 = (1 << 7),
514  LEDS0 = (1 << 6),
515  Speed_down = (1 << 4),
516  MEMMAP = (1 << 3),
517  IOMAP = (1 << 2),
518  VPD = (1 << 1),
519  PMEnable = (1 << 0), /* Power Management Enable */
520 
521  /* Config2 register p. 25 */
522  MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
525 
526  /* Config3 register p.25 */
527  MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
528  LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
529  Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
530  Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
531 
532  /* Config4 register */
533  Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
534 
535  /* Config5 register p.27 */
536  BWF = (1 << 6), /* Accept Broadcast wakeup frame */
537  MWF = (1 << 5), /* Accept Multicast wakeup frame */
538  UWF = (1 << 4), /* Accept Unicast wakeup frame */
539  Spi_en = (1 << 3),
540  LanWake = (1 << 1), /* LanWake enable/disable */
541  PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
542 
543  /* TBICSR p.28 */
544  TBIReset = 0x80000000,
545  TBILoopback = 0x40000000,
546  TBINwEnable = 0x20000000,
547  TBINwRestart = 0x10000000,
548  TBILinkOk = 0x02000000,
549  TBINwComplete = 0x01000000,
550 
551  /* CPlusCmd p.31 */
552  EnableBist = (1 << 15), // 8168 8101
553  Mac_dbgo_oe = (1 << 14), // 8168 8101
554  Normal_mode = (1 << 13), // unused
555  Force_half_dup = (1 << 12), // 8168 8101
556  Force_rxflow_en = (1 << 11), // 8168 8101
557  Force_txflow_en = (1 << 10), // 8168 8101
558  Cxpl_dbg_sel = (1 << 9), // 8168 8101
559  ASF = (1 << 8), // 8168 8101
560  PktCntrDisable = (1 << 7), // 8168 8101
561  Mac_dbgo_sel = 0x001c, // 8168
562  RxVlan = (1 << 6),
563  RxChkSum = (1 << 5),
564  PCIDAC = (1 << 4),
565  PCIMulRW = (1 << 3),
566  INTT_0 = 0x0000, // 8168
567  INTT_1 = 0x0001, // 8168
568  INTT_2 = 0x0002, // 8168
569  INTT_3 = 0x0003, // 8168
570 
571  /* rtl8169_PHYstatus */
572  TBI_Enable = 0x80,
573  TxFlowCtrl = 0x40,
574  RxFlowCtrl = 0x20,
575  _1000bpsF = 0x10,
576  _100bps = 0x08,
577  _10bps = 0x04,
578  LinkStatus = 0x02,
579  FullDup = 0x01,
580 
581  /* _TBICSRBit */
582  TBILinkOK = 0x02000000,
583 
584  /* DumpCounterCommand */
585  CounterDump = 0x8,
586 };
587 
589  /* First doubleword. */
590  DescOwn = (1 << 31), /* Descriptor is owned by NIC */
591  RingEnd = (1 << 30), /* End of descriptor ring */
592  FirstFrag = (1 << 29), /* First segment of a packet */
593  LastFrag = (1 << 28), /* Final segment of a packet */
594 };
595 
596 /* Generic case. */
598  /* First doubleword. */
599  TD_LSO = (1 << 27), /* Large Send Offload */
600 #define TD_MSS_MAX 0x07ffu /* MSS value */
601 
602  /* Second doubleword. */
603  TxVlanTag = (1 << 17), /* Add VLAN tag */
604 };
605 
606 /* 8169, 8168b and 810x except 8102e. */
608  /* First doubleword. */
609 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
610  TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
611  TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
612  TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
613 };
614 
615 /* 8102e, 8168c and beyond. */
617  /* Second doubleword. */
618 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
619  TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
620  TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
621  TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
622 };
623 
/*
 * Per-Tx-descriptor-format layout table, indexed by RTL_TD_0 / RTL_TD_1:
 * which descriptor bits request UDP/TCP checksum offload, where the MSS
 * field sits, and which opts dword (0 or 1) carries those bits.
 */
static const struct rtl_tx_desc_info {
	struct {
		u32 udp;	/* bits to set for UDP/IP checksum offload */
		u32 tcp;	/* bits to set for TCP/IP checksum offload */
	} checksum;
	u16 mss_shift;		/* bit position of the MSS value */
	u16 opts_offset;	/* opts dword index holding csum/MSS bits */
} tx_desc_info [] = {
	[RTL_TD_0] = {
		.checksum = {
			.udp	= TD0_IP_CS | TD0_UDP_CS,
			.tcp	= TD0_IP_CS | TD0_TCP_CS
		},
		.mss_shift	= TD0_MSS_SHIFT,
		.opts_offset	= 0
	},
	[RTL_TD_1] = {
		.checksum = {
			.udp	= TD1_IP_CS | TD1_UDP_CS,
			.tcp	= TD1_IP_CS | TD1_TCP_CS
		},
		.mss_shift	= TD1_MSS_SHIFT,
		.opts_offset	= 1
	}
};
649 
651  /* Rx private */
652  PID1 = (1 << 18), /* Protocol ID bit 1/2 */
653  PID0 = (1 << 17), /* Protocol ID bit 2/2 */
654 
655 #define RxProtoUDP (PID1)
656 #define RxProtoTCP (PID0)
657 #define RxProtoIP (PID1 | PID0)
658 #define RxProtoMask RxProtoIP
659 
660  IPFail = (1 << 16), /* IP checksum failed */
661  UDPFail = (1 << 15), /* UDP/IP checksum failed */
662  TCPFail = (1 << 14), /* TCP/IP checksum failed */
663  RxVlanTag = (1 << 16), /* VLAN tag available */
664 };
665 
666 #define RsvdMask 0x3fffc000
667 
668 struct TxDesc {
672 };
673 
674 struct RxDesc {
678 };
679 
680 struct ring_info {
681  struct sk_buff *skb;
683  u8 __pad[sizeof(void *) - sizeof(u32)];
684 };
685 
/* Driver feature flag bits (kept as a bitmask in the private struct). */
enum features {
	RTL_FEATURE_WOL		= (1 << 0),	/* Wake-on-LAN */
	RTL_FEATURE_MSI		= (1 << 1),	/* MSI interrupts */
	RTL_FEATURE_GMII	= (1 << 2),	/* GMII PHY interface */
};
691 
706 };
707 
708 enum rtl_flag {
714 };
715 
720 };
721 
723  void __iomem *mmio_addr; /* memory map physical address */
724  struct pci_dev *pci_dev;
725  struct net_device *dev;
730  u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
731  u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
736  struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
737  struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
740  void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
741  struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
744 
746 
747  struct mdio_ops {
748  void (*write)(struct rtl8169_private *, int, int);
749  int (*read)(struct rtl8169_private *, int);
750  } mdio_ops;
751 
752  struct pll_power_ops {
753  void (*down)(struct rtl8169_private *);
754  void (*up)(struct rtl8169_private *);
755  } pll_power_ops;
756 
757  struct jumbo_ops {
760  } jumbo_ops;
761 
762  struct csi_ops {
763  void (*write)(struct rtl8169_private *, int, int);
764  u32 (*read)(struct rtl8169_private *, int);
765  } csi_ops;
766 
767  int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
768  int (*get_settings)(struct net_device *, struct ethtool_cmd *);
770  void (*hw_start)(struct net_device *);
771  unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
772  unsigned int (*link_ok)(void __iomem *);
773  int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
774 
775  struct {
777  struct mutex mutex;
779  } wk;
780 
781  unsigned features;
782 
783  struct mii_if_info mii;
787 
788  struct rtl_fw {
789  const struct firmware *fw;
790 
791 #define RTL_VER_SIZE 32
792 
794 
797  size_t size;
798  } phy_action;
799  } *rtl_fw;
800 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
801 
803 };
804 
805 MODULE_AUTHOR("Realtek and the Linux r8169 crew <[email protected]>");
806 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
807 module_param(use_dac, int, 0);
808 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
809 module_param_named(debug, debug.msg_enable, int, 0);
810 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
811 MODULE_LICENSE("GPL");
825 
826 static void rtl_lock_work(struct rtl8169_private *tp)
827 {
828  mutex_lock(&tp->wk.mutex);
829 }
830 
831 static void rtl_unlock_work(struct rtl8169_private *tp)
832 {
833  mutex_unlock(&tp->wk.mutex);
834 }
835 
836 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
837 {
839  PCI_EXP_DEVCTL_READRQ, force);
840 }
841 
/*
 * A pollable hardware condition: @check samples device state and @msg names
 * the condition for the timeout diagnostic printed by rtl_loop_wait().
 */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};
846 
/* udelay() adapter matching the delay-callback signature of rtl_loop_wait(). */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
851 
852 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
853  void (*delay)(unsigned int), unsigned int d, int n,
854  bool high)
855 {
856  int i;
857 
858  for (i = 0; i < n; i++) {
859  delay(d);
860  if (c->check(tp) == high)
861  return true;
862  }
863  netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
864  c->msg, !high, n, d);
865  return false;
866 }
867 
/* Busy-wait (udelay) for @c to become true; @d us per try, @n tries. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}
874 
/* Busy-wait (udelay) for @c to become false; @d us per try, @n tries. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}
881 
/* Sleep-wait (msleep) for @c to become true; @d ms per try, @n tries. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}
888 
/* Sleep-wait (msleep) for @c to become false; @d ms per try, @n tries. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
895 
/*
 * Declare a named rtl_cond plus the prototype of its _check function;
 * the macro invocation is immediately followed by the _check body.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
/* OCPAR busy flag: high while an OCP access is pending completion. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
912 
/*
 * Read one dword through the OCP window: program OCPAR with the byte-enable
 * mask (bits 15:12) and register address (bits 11:0), poll for the flag to
 * go high, then fetch the result from OCPDR.  Returns ~0 on timeout.
 */
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
		RTL_R32(OCPDR) : ~0;
}
922 
/*
 * Write one dword through the OCP window: stage @data in OCPDR, then kick
 * the transfer via OCPAR (flag + byte-enable mask + address) and wait for
 * the busy flag to clear.  A timeout is silently ignored.
 */
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
932 
/* ERIAR busy flag: high while an ERI access is pending completion. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
939 
/*
 * Send an OOB_CMD_* command to the out-of-band (DASH) management firmware:
 * stage @cmd in ERIDR, trigger the magic ERIAR address 0x800010e8, wait for
 * completion, then ring the firmware doorbell via OCP register 0x30.
 * Bails out (no doorbell) if the ERI transfer does not complete.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ERIDR, cmd);
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);

	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
		return;

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
953 
954 #define OOB_CMD_RESET 0x00
955 #define OOB_CMD_DRIVER_START 0x05
956 #define OOB_CMD_DRIVER_STOP 0x06
957 
958 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
959 {
960  return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
961 }
962 
963 DECLARE_RTL_COND(rtl_ocp_read_cond)
964 {
965  u16 reg;
966 
967  reg = rtl8168_get_ocp_reg(tp);
968 
969  return ocp_read(tp, 0x0f, reg) & 0x00000800;
970 }
971 
/*
 * Tell the DASH firmware the driver is taking over, then wait (10 x 10 ms)
 * for its acknowledge bit to rise in the OCP handshake register.
 */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}
978 
/*
 * Tell the DASH firmware the driver is releasing the device, then wait
 * (10 x 10 ms) for its acknowledge bit to drop again.
 */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
985 
986 static int r8168dp_check_dash(struct rtl8169_private *tp)
987 {
988  u16 reg = rtl8168_get_ocp_reg(tp);
989 
990  return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
991 }
992 
993 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
994 {
995  if (reg & 0xffff0001) {
996  netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
997  return true;
998  }
999  return false;
1000 }
1001 
/* GPHY_OCP busy flag: high while a PHY OCP access is pending completion. */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
1008 
/*
 * Write a PHY register via the GPHY OCP window (8168g family): one shot of
 * flag + (reg << 15) + data, then wait for the busy flag to clear.
 * Invalid register addresses are rejected (with a log) before touching hw.
 */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
1020 
/*
 * Read a PHY register via the GPHY OCP window (8168g family): start the
 * access with (reg << 15), wait for the flag, then take the low 16 bits.
 * Returns 0 for an invalid register address, ~0 (truncated) on timeout.
 */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1033 
/* Read-modify-write a PHY OCP register: set bits @p, then clear bits @m. */
static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
{
	int cur = r8168_phy_ocp_read(tp, reg);

	r8168_phy_ocp_write(tp, reg, (cur | p) & ~m);
}
1041 
/*
 * Write a MAC OCP register through OCPDR (8168g family): single-shot write
 * of flag + (reg << 15) + data; unlike the PHY path there is no completion
 * poll in this function.  Invalid addresses are rejected with a log.
 */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
1051 
/*
 * Read a MAC OCP register through OCPDR (8168g family): write the address,
 * then read back immediately (no completion poll on this path).  Returns 0
 * for an invalid register address.
 */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	return RTL_R32(OCPDR);
}
1063 
1064 #define OCP_STD_PHY_BASE 0xa400
1065 
/*
 * MDIO write for the 8168g, emulated over PHY OCP.  A write to page-select
 * register 0x1f only switches tp->ocp_base (page 0 maps back to the
 * standard base) and touches no hardware.  For other registers, non-default
 * pages are offset by -0x10 and each MDIO register occupies two OCP bytes.
 */
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
1078 
1079 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1080 {
1081  if (tp->ocp_base != OCP_STD_PHY_BASE)
1082  reg -= 0x10;
1083 
1084  return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1085 }
1086 
/* PHYAR busy/valid flag (bit 31) used to sequence MDIO accesses. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1093 
/*
 * Classic 8169 MDIO write via PHYAR: bit 31 = write command, bits 20:16 =
 * register, bits 15:0 = value.  Waits for bit 31 to clear, then honours
 * the mandatory post-command settling delay.
 */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1107 
/*
 * Classic 8169 MDIO read via PHYAR: issue the read command (bit 31 clear),
 * wait for bit 31 to rise, then take the low 16 bits.  Returns ~0 on
 * timeout.  A settling delay follows the access per hardware specs.
 */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1126 
1127 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1128 {
1129  void __iomem *ioaddr = tp->mmio_addr;
1130 
1131  RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1133  RTL_W32(EPHY_RXER_NUM, 0);
1134 
1135  rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1136 }
1137 
/* MDIO write for the first 8168dp flavour: OCP mailbox write command. */
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	r8168dp_1_mdio_access(tp, reg,
			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
1143 
1144 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1145 {
1146  void __iomem *ioaddr = tp->mmio_addr;
1147 
1148  r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1149 
1150  mdelay(1);
1152  RTL_W32(EPHY_RXER_NUM, 0);
1153 
1154  return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1155  RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
1156 }
1157 
1158 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1159 
/* Grant the host MDIO access on the second 8168dp flavour (clear the
 * access-control bit in register 0xd0 around each PHYAR transaction). */
static void r8168dp_2_mdio_start(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}
1164 
/* Counterpart of r8168dp_2_mdio_start(): set the access-control bit back. */
static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
1169 
1170 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1171 {
1172  void __iomem *ioaddr = tp->mmio_addr;
1173 
1174  r8168dp_2_mdio_start(ioaddr);
1175 
1176  r8169_mdio_write(tp, reg, value);
1177 
1178  r8168dp_2_mdio_stop(ioaddr);
1179 }
1180 
1181 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1182 {
1183  void __iomem *ioaddr = tp->mmio_addr;
1184  int value;
1185 
1186  r8168dp_2_mdio_start(ioaddr);
1187 
1188  value = r8169_mdio_read(tp, reg);
1189 
1190  r8168dp_2_mdio_stop(ioaddr);
1191 
1192  return value;
1193 }
1194 
/* Write PHY register @location through the chip-specific MDIO ops. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}
1199 
/* Read PHY register @location through the chip-specific MDIO ops. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}
1204 
/* OR @value into a PHY register (read-modify-write, set-bits only). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
1209 
/* Read-modify-write a PHY register: set bits @p, then clear bits @m. */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	rtl_writephy(tp, reg_addr, (rtl_readphy(tp, reg_addr) | p) & ~m);
}
1217 
1218 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
1219  int val)
1220 {
1221  struct rtl8169_private *tp = netdev_priv(dev);
1222 
1223  rtl_writephy(tp, location, val);
1224 }
1225 
1226 static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1227 {
1228  struct rtl8169_private *tp = netdev_priv(dev);
1229 
1230  return rtl_readphy(tp, location);
1231 }
1232 
/* EPHYAR busy flag: high while a PCIe PHY (ephy) access is pending. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1239 
1240 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1241 {
1242  void __iomem *ioaddr = tp->mmio_addr;
1243 
1245  (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1246 
1247  rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1248 
1249  udelay(10);
1250 }
1251 
1252 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1253 {
1254  void __iomem *ioaddr = tp->mmio_addr;
1255 
1256  RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1257 
1258  return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1260 }
1261 
1262 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1263  u32 val, int type)
1264 {
1265  void __iomem *ioaddr = tp->mmio_addr;
1266 
1267  BUG_ON((addr & 3) || (mask == 0));
1268  RTL_W32(ERIDR, val);
1269  RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1270 
1271  rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1272 }
1273 
1274 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1275 {
1276  void __iomem *ioaddr = tp->mmio_addr;
1277 
1278  RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1279 
1280  return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1281  RTL_R32(ERIDR) : ~0;
1282 }
1283 
1284 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1285  u32 m, int type)
1286 {
1287  u32 val;
1288 
1289  val = rtl_eri_read(tp, addr, type);
1290  rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1291 }
1292 
1293 struct exgmac_reg {
1297 };
1298 
1299 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1300  const struct exgmac_reg *r, int len)
1301 {
1302  while (len-- > 0) {
1303  rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1304  r++;
1305  }
1306 }
1307 
1308 DECLARE_RTL_COND(rtl_efusear_cond)
1309 {
1310  void __iomem *ioaddr = tp->mmio_addr;
1311 
1312  return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1313 }
1314 
1315 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1316 {
1317  void __iomem *ioaddr = tp->mmio_addr;
1318 
1320 
1321  return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1323 }
1324 
1325 static u16 rtl_get_events(struct rtl8169_private *tp)
1326 {
1327  void __iomem *ioaddr = tp->mmio_addr;
1328 
1329  return RTL_R16(IntrStatus);
1330 }
1331 
1332 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1333 {
1334  void __iomem *ioaddr = tp->mmio_addr;
1335 
1336  RTL_W16(IntrStatus, bits);
1337  mmiowb();
1338 }
1339 
1340 static void rtl_irq_disable(struct rtl8169_private *tp)
1341 {
1342  void __iomem *ioaddr = tp->mmio_addr;
1343 
1344  RTL_W16(IntrMask, 0);
1345  mmiowb();
1346 }
1347 
1348 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1349 {
1350  void __iomem *ioaddr = tp->mmio_addr;
1351 
1352  RTL_W16(IntrMask, bits);
1353 }
1354 
1355 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1356 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1357 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1358 
1359 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1360 {
1361  rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1362 }
1363 
1364 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1365 {
1366  void __iomem *ioaddr = tp->mmio_addr;
1367 
1368  rtl_irq_disable(tp);
1369  rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
1370  RTL_R8(ChipCmd);
1371 }
1372 
1373 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1374 {
1375  void __iomem *ioaddr = tp->mmio_addr;
1376 
1377  return RTL_R32(TBICSR) & TBIReset;
1378 }
1379 
1380 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1381 {
1382  return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1383 }
1384 
1385 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1386 {
1387  return RTL_R32(TBICSR) & TBILinkOk;
1388 }
1389 
1390 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1391 {
1392  return RTL_R8(PHYstatus) & LinkStatus;
1393 }
1394 
1395 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1396 {
1397  void __iomem *ioaddr = tp->mmio_addr;
1398 
1400 }
1401 
1402 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1403 {
1404  unsigned int val;
1405 
1406  val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1407  rtl_writephy(tp, MII_BMCR, val & 0xffff);
1408 }
1409 
1410 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1411 {
1412  void __iomem *ioaddr = tp->mmio_addr;
1413  struct net_device *dev = tp->dev;
1414 
1415  if (!netif_running(dev))
1416  return;
1417 
1418  if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1420  if (RTL_R8(PHYstatus) & _1000bpsF) {
1421  rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1422  ERIAR_EXGMAC);
1423  rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1424  ERIAR_EXGMAC);
1425  } else if (RTL_R8(PHYstatus) & _100bps) {
1426  rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1427  ERIAR_EXGMAC);
1428  rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1429  ERIAR_EXGMAC);
1430  } else {
1431  rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1432  ERIAR_EXGMAC);
1433  rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1434  ERIAR_EXGMAC);
1435  }
1436  /* Reset packet filter */
1437  rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1438  ERIAR_EXGMAC);
1439  rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1440  ERIAR_EXGMAC);
1441  } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1443  if (RTL_R8(PHYstatus) & _1000bpsF) {
1444  rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1445  ERIAR_EXGMAC);
1446  rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1447  ERIAR_EXGMAC);
1448  } else {
1449  rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1450  ERIAR_EXGMAC);
1451  rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1452  ERIAR_EXGMAC);
1453  }
1454  } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1455  if (RTL_R8(PHYstatus) & _10bps) {
1456  rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1457  ERIAR_EXGMAC);
1458  rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1459  ERIAR_EXGMAC);
1460  } else {
1461  rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
1462  ERIAR_EXGMAC);
1463  }
1464  }
1465 }
1466 
1467 static void __rtl8169_check_link_status(struct net_device *dev,
1468  struct rtl8169_private *tp,
1469  void __iomem *ioaddr, bool pm)
1470 {
1471  if (tp->link_ok(ioaddr)) {
1472  rtl_link_chg_patch(tp);
1473  /* This is to cancel a scheduled suspend if there's one. */
1474  if (pm)
1475  pm_request_resume(&tp->pci_dev->dev);
1476  netif_carrier_on(dev);
1477  if (net_ratelimit())
1478  netif_info(tp, ifup, dev, "link up\n");
1479  } else {
1480  netif_carrier_off(dev);
1481  netif_info(tp, ifdown, dev, "link down\n");
1482  if (pm)
1483  pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1484  }
1485 }
1486 
1487 static void rtl8169_check_link_status(struct net_device *dev,
1488  struct rtl8169_private *tp,
1489  void __iomem *ioaddr)
1490 {
1491  __rtl8169_check_link_status(dev, tp, ioaddr, false);
1492 }
1493 
1494 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1495 
1496 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1497 {
1498  void __iomem *ioaddr = tp->mmio_addr;
1499  u8 options;
1500  u32 wolopts = 0;
1501 
1502  options = RTL_R8(Config1);
1503  if (!(options & PMEnable))
1504  return 0;
1505 
1506  options = RTL_R8(Config3);
1507  if (options & LinkUp)
1508  wolopts |= WAKE_PHY;
1509  if (options & MagicPacket)
1510  wolopts |= WAKE_MAGIC;
1511 
1512  options = RTL_R8(Config5);
1513  if (options & UWF)
1514  wolopts |= WAKE_UCAST;
1515  if (options & BWF)
1516  wolopts |= WAKE_BCAST;
1517  if (options & MWF)
1518  wolopts |= WAKE_MCAST;
1519 
1520  return wolopts;
1521 }
1522 
1523 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1524 {
1525  struct rtl8169_private *tp = netdev_priv(dev);
1526 
1527  rtl_lock_work(tp);
1528 
1529  wol->supported = WAKE_ANY;
1530  wol->wolopts = __rtl8169_get_wol(tp);
1531 
1532  rtl_unlock_work(tp);
1533 }
1534 
1535 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1536 {
1537  void __iomem *ioaddr = tp->mmio_addr;
1538  unsigned int i;
1539  static const struct {
1540  u32 opt;
1541  u16 reg;
1542  u8 mask;
1543  } cfg[] = {
1544  { WAKE_PHY, Config3, LinkUp },
1546  { WAKE_UCAST, Config5, UWF },
1547  { WAKE_BCAST, Config5, BWF },
1548  { WAKE_MCAST, Config5, MWF },
1549  { WAKE_ANY, Config5, LanWake }
1550  };
1551  u8 options;
1552 
1554 
1555  for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1556  options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1557  if (wolopts & cfg[i].opt)
1558  options |= cfg[i].mask;
1559  RTL_W8(cfg[i].reg, options);
1560  }
1561 
1562  switch (tp->mac_version) {
1564  options = RTL_R8(Config1) & ~PMEnable;
1565  if (wolopts)
1566  options |= PMEnable;
1567  RTL_W8(Config1, options);
1568  break;
1569  default:
1570  options = RTL_R8(Config2) & ~PME_SIGNAL;
1571  if (wolopts)
1572  options |= PME_SIGNAL;
1573  RTL_W8(Config2, options);
1574  break;
1575  }
1576 
1578 }
1579 
1580 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1581 {
1582  struct rtl8169_private *tp = netdev_priv(dev);
1583 
1584  rtl_lock_work(tp);
1585 
1586  if (wol->wolopts)
1587  tp->features |= RTL_FEATURE_WOL;
1588  else
1589  tp->features &= ~RTL_FEATURE_WOL;
1590  __rtl8169_set_wol(tp, wol->wolopts);
1591 
1592  rtl_unlock_work(tp);
1593 
1594  device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1595 
1596  return 0;
1597 }
1598 
1599 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1600 {
1601  return rtl_chip_infos[tp->mac_version].fw_name;
1602 }
1603 
1604 static void rtl8169_get_drvinfo(struct net_device *dev,
1605  struct ethtool_drvinfo *info)
1606 {
1607  struct rtl8169_private *tp = netdev_priv(dev);
1608  struct rtl_fw *rtl_fw = tp->rtl_fw;
1609 
1610  strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1611  strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1612  strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1613  BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1614  if (!IS_ERR_OR_NULL(rtl_fw))
1615  strlcpy(info->fw_version, rtl_fw->version,
1616  sizeof(info->fw_version));
1617 }
1618 
1619 static int rtl8169_get_regs_len(struct net_device *dev)
1620 {
1621  return R8169_REGS_SIZE;
1622 }
1623 
1624 static int rtl8169_set_speed_tbi(struct net_device *dev,
1625  u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1626 {
1627  struct rtl8169_private *tp = netdev_priv(dev);
1628  void __iomem *ioaddr = tp->mmio_addr;
1629  int ret = 0;
1630  u32 reg;
1631 
1632  reg = RTL_R32(TBICSR);
1633  if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1634  (duplex == DUPLEX_FULL)) {
1635  RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1636  } else if (autoneg == AUTONEG_ENABLE)
1638  else {
1639  netif_warn(tp, link, dev,
1640  "incorrect speed setting refused in TBI mode\n");
1641  ret = -EOPNOTSUPP;
1642  }
1643 
1644  return ret;
1645 }
1646 
1647 static int rtl8169_set_speed_xmii(struct net_device *dev,
1648  u8 autoneg, u16 speed, u8 duplex, u32 adv)
1649 {
1650  struct rtl8169_private *tp = netdev_priv(dev);
1651  int giga_ctrl, bmcr;
1652  int rc = -EINVAL;
1653 
1654  rtl_writephy(tp, 0x1f, 0x0000);
1655 
1656  if (autoneg == AUTONEG_ENABLE) {
1657  int auto_nego;
1658 
1659  auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1660  auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1662 
1663  if (adv & ADVERTISED_10baseT_Half)
1664  auto_nego |= ADVERTISE_10HALF;
1665  if (adv & ADVERTISED_10baseT_Full)
1666  auto_nego |= ADVERTISE_10FULL;
1667  if (adv & ADVERTISED_100baseT_Half)
1668  auto_nego |= ADVERTISE_100HALF;
1669  if (adv & ADVERTISED_100baseT_Full)
1670  auto_nego |= ADVERTISE_100FULL;
1671 
1673 
1674  giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1675  giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1676 
1677  /* The 8100e/8101e/8102e do Fast Ethernet only. */
1678  if (tp->mii.supports_gmii) {
1679  if (adv & ADVERTISED_1000baseT_Half)
1680  giga_ctrl |= ADVERTISE_1000HALF;
1681  if (adv & ADVERTISED_1000baseT_Full)
1682  giga_ctrl |= ADVERTISE_1000FULL;
1683  } else if (adv & (ADVERTISED_1000baseT_Half |
1685  netif_info(tp, link, dev,
1686  "PHY does not support 1000Mbps\n");
1687  goto out;
1688  }
1689 
1690  bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1691 
1692  rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1693  rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1694  } else {
1695  giga_ctrl = 0;
1696 
1697  if (speed == SPEED_10)
1698  bmcr = 0;
1699  else if (speed == SPEED_100)
1700  bmcr = BMCR_SPEED100;
1701  else
1702  goto out;
1703 
1704  if (duplex == DUPLEX_FULL)
1705  bmcr |= BMCR_FULLDPLX;
1706  }
1707 
1708  rtl_writephy(tp, MII_BMCR, bmcr);
1709 
1710  if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1712  if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1713  rtl_writephy(tp, 0x17, 0x2138);
1714  rtl_writephy(tp, 0x0e, 0x0260);
1715  } else {
1716  rtl_writephy(tp, 0x17, 0x2108);
1717  rtl_writephy(tp, 0x0e, 0x0000);
1718  }
1719  }
1720 
1721  rc = 0;
1722 out:
1723  return rc;
1724 }
1725 
1726 static int rtl8169_set_speed(struct net_device *dev,
1727  u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1728 {
1729  struct rtl8169_private *tp = netdev_priv(dev);
1730  int ret;
1731 
1732  ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1733  if (ret < 0)
1734  goto out;
1735 
1736  if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1737  (advertising & ADVERTISED_1000baseT_Full)) {
1738  mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1739  }
1740 out:
1741  return ret;
1742 }
1743 
1744 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1745 {
1746  struct rtl8169_private *tp = netdev_priv(dev);
1747  int ret;
1748 
1749  del_timer_sync(&tp->timer);
1750 
1751  rtl_lock_work(tp);
1752  ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1753  cmd->duplex, cmd->advertising);
1754  rtl_unlock_work(tp);
1755 
1756  return ret;
1757 }
1758 
1759 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1761 {
1762  struct rtl8169_private *tp = netdev_priv(dev);
1763 
1764  if (dev->mtu > TD_MSS_MAX)
1765  features &= ~NETIF_F_ALL_TSO;
1766 
1767  if (dev->mtu > JUMBO_1K &&
1768  !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1769  features &= ~NETIF_F_IP_CSUM;
1770 
1771  return features;
1772 }
1773 
1774 static void __rtl8169_set_features(struct net_device *dev,
1775  netdev_features_t features)
1776 {
1777  struct rtl8169_private *tp = netdev_priv(dev);
1778  netdev_features_t changed = features ^ dev->features;
1779  void __iomem *ioaddr = tp->mmio_addr;
1780 
1781  if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1782  return;
1783 
1784  if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1785  if (features & NETIF_F_RXCSUM)
1786  tp->cp_cmd |= RxChkSum;
1787  else
1788  tp->cp_cmd &= ~RxChkSum;
1789 
1790  if (dev->features & NETIF_F_HW_VLAN_RX)
1791  tp->cp_cmd |= RxVlan;
1792  else
1793  tp->cp_cmd &= ~RxVlan;
1794 
1795  RTL_W16(CPlusCmd, tp->cp_cmd);
1796  RTL_R16(CPlusCmd);
1797  }
1798  if (changed & NETIF_F_RXALL) {
1799  int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1800  if (features & NETIF_F_RXALL)
1801  tmp |= (AcceptErr | AcceptRunt);
1802  RTL_W32(RxConfig, tmp);
1803  }
1804 }
1805 
1806 static int rtl8169_set_features(struct net_device *dev,
1807  netdev_features_t features)
1808 {
1809  struct rtl8169_private *tp = netdev_priv(dev);
1810 
1811  rtl_lock_work(tp);
1812  __rtl8169_set_features(dev, features);
1813  rtl_unlock_work(tp);
1814 
1815  return 0;
1816 }
1817 
1818 
1819 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1820  struct sk_buff *skb)
1821 {
1822  return (vlan_tx_tag_present(skb)) ?
1823  TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1824 }
1825 
1826 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1827 {
1828  u32 opts2 = le32_to_cpu(desc->opts2);
1829 
1830  if (opts2 & RxVlanTag)
1831  __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1832 
1833  desc->opts2 = 0;
1834 }
1835 
1836 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1837 {
1838  struct rtl8169_private *tp = netdev_priv(dev);
1839  void __iomem *ioaddr = tp->mmio_addr;
1840  u32 status;
1841 
1842  cmd->supported =
1844  cmd->port = PORT_FIBRE;
1845  cmd->transceiver = XCVR_INTERNAL;
1846 
1847  status = RTL_R32(TBICSR);
1848  cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1849  cmd->autoneg = !!(status & TBINwEnable);
1850 
1851  ethtool_cmd_speed_set(cmd, SPEED_1000);
1852  cmd->duplex = DUPLEX_FULL; /* Always set */
1853 
1854  return 0;
1855 }
1856 
1857 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1858 {
1859  struct rtl8169_private *tp = netdev_priv(dev);
1860 
1861  return mii_ethtool_gset(&tp->mii, cmd);
1862 }
1863 
1864 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1865 {
1866  struct rtl8169_private *tp = netdev_priv(dev);
1867  int rc;
1868 
1869  rtl_lock_work(tp);
1870  rc = tp->get_settings(dev, cmd);
1871  rtl_unlock_work(tp);
1872 
1873  return rc;
1874 }
1875 
1876 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1877  void *p)
1878 {
1879  struct rtl8169_private *tp = netdev_priv(dev);
1880 
1881  if (regs->len > R8169_REGS_SIZE)
1882  regs->len = R8169_REGS_SIZE;
1883 
1884  rtl_lock_work(tp);
1885  memcpy_fromio(p, tp->mmio_addr, regs->len);
1886  rtl_unlock_work(tp);
1887 }
1888 
1889 static u32 rtl8169_get_msglevel(struct net_device *dev)
1890 {
1891  struct rtl8169_private *tp = netdev_priv(dev);
1892 
1893  return tp->msg_enable;
1894 }
1895 
1896 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1897 {
1898  struct rtl8169_private *tp = netdev_priv(dev);
1899 
1900  tp->msg_enable = value;
1901 }
1902 
/* ethtool statistics names, index-matched to the data[] array filled in
 * by rtl8169_get_ethtool_stats() — do not reorder.
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1918 
1919 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1920 {
1921  switch (sset) {
1922  case ETH_SS_STATS:
1923  return ARRAY_SIZE(rtl8169_gstrings);
1924  default:
1925  return -EOPNOTSUPP;
1926  }
1927 }
1928 
1929 DECLARE_RTL_COND(rtl_counters_cond)
1930 {
1931  void __iomem *ioaddr = tp->mmio_addr;
1932 
1934 }
1935 
1936 static void rtl8169_update_counters(struct net_device *dev)
1937 {
1938  struct rtl8169_private *tp = netdev_priv(dev);
1939  void __iomem *ioaddr = tp->mmio_addr;
1940  struct device *d = &tp->pci_dev->dev;
1941  struct rtl8169_counters *counters;
1942  dma_addr_t paddr;
1943  u32 cmd;
1944 
1945  /*
1946  * Some chips are unable to dump tally counters when the receiver
1947  * is disabled.
1948  */
1949  if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1950  return;
1951 
1952  counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
1953  if (!counters)
1954  return;
1955 
1956  RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1957  cmd = (u64)paddr & DMA_BIT_MASK(32);
1958  RTL_W32(CounterAddrLow, cmd);
1960 
1961  if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1962  memcpy(&tp->counters, counters, sizeof(*counters));
1963 
1964  RTL_W32(CounterAddrLow, 0);
1966 
1967  dma_free_coherent(d, sizeof(*counters), counters, paddr);
1968 }
1969 
/* ethtool .get_ethtool_stats: refresh the hardware tally counters and
 * copy them out, index-matched to rtl8169_gstrings[].  Note the struct
 * field is spelled "tx_underun" (historic typo in the struct, kept).
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
1993 
1994 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1995 {
1996  switch(stringset) {
1997  case ETH_SS_STATS:
1998  memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1999  break;
2000  }
2001 }
2002 
/* ethtool operations table for the r8169 family. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2019 
/* Identify the chip variant from the TxConfig hardware version bits.
 * The table is scanned in order; entries with a narrower mask come
 * first, and the all-zero catch-all entry guarantees termination.
 * Falls back to @default_version when no entry matches.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168G family. */
		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	}
}
2131 
2132 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2133 {
2134  dprintk("mac_version = 0x%02x\n", tp->mac_version);
2135 }
2136 
2137 struct phy_reg {
2140 };
2141 
2142 static void rtl_writephy_batch(struct rtl8169_private *tp,
2143  const struct phy_reg *regs, int len)
2144 {
2145  while (len-- > 0) {
2146  rtl_writephy(tp, regs->reg, regs->val);
2147  regs++;
2148  }
2149 }
2150 
2151 #define PHY_READ 0x00000000
2152 #define PHY_DATA_OR 0x10000000
2153 #define PHY_DATA_AND 0x20000000
2154 #define PHY_BJMPN 0x30000000
2155 #define PHY_READ_EFUSE 0x40000000
2156 #define PHY_READ_MAC_BYTE 0x50000000
2157 #define PHY_WRITE_MAC_BYTE 0x60000000
2158 #define PHY_CLEAR_READCOUNT 0x70000000
2159 #define PHY_WRITE 0x80000000
2160 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2161 #define PHY_COMP_EQ_SKIPN 0xa0000000
2162 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2163 #define PHY_WRITE_PREVIOUS 0xc0000000
2164 #define PHY_SKIPN 0xd0000000
2165 #define PHY_DELAY_MS 0xe0000000
2166 #define PHY_WRITE_ERI_WORD 0xf0000000
2167 
2168 struct fw_info {
2174 } __packed;
2175 
2176 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2177 
/* Validate the firmware blob layout and locate the PHY opcode array.
 * Two formats are accepted:
 *   - header format (fw_info.magic == 0): whole-file additive checksum
 *     must be zero, fw_start/fw_len must lie within the blob, and the
 *     version string comes from the header;
 *   - legacy format: the whole file is opcodes (size must be a multiple
 *     of the opcode width), version taken from the chip's firmware name.
 * Returns true and fills rtl_fw->phy_action / ->version on success.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* Additive checksum over the whole file must cancel out. */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2228 
/* Statically verify the firmware's PHY opcode stream before running it:
 * every opcode must be known and every branch/skip target must stay
 * inside the array.  Returns true when the program is safe to execute
 * by rtl_phy_write_fw().
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		/* Opcodes with no range constraints. */
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_READ_EFUSE:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump of @regno must not underflow. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skips exactly two opcodes on match. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* Skips @regno opcodes forward. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		/* Known but unimplemented opcodes are rejected too. */
		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2287 
2288 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2289 {
2290  struct net_device *dev = tp->dev;
2291  int rc = -EINVAL;
2292 
2293  if (!rtl_fw_format_ok(tp, rtl_fw)) {
2294  netif_err(tp, ifup, dev, "invalid firwmare\n");
2295  goto out;
2296  }
2297 
2298  if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2299  rc = 0;
2300 out:
2301  return rc;
2302 }
2303 
/* Execute the firmware's PHY opcode stream (already vetted by
 * rtl_fw_data_ok()).  Interpreter state: @predata holds the last value
 * read (PHY or e-fuse) as modified by OR/AND opcodes; @count tallies
 * PHY_READs since the last PHY_CLEAR_READCOUNT.  An all-zero opcode
 * terminates the program early.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump by @regno opcodes. */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next opcode when count == data. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		/* Rejected by rtl_fw_data_ok(); unreachable here. */
		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			BUG();
		}
	}
}
2382 
2383 static void rtl_release_firmware(struct rtl8169_private *tp)
2384 {
2385  if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2386  release_firmware(tp->rtl_fw->fw);
2387  kfree(tp->rtl_fw);
2388  }
2390 }
2391 
2392 static void rtl_apply_firmware(struct rtl8169_private *tp)
2393 {
2394  struct rtl_fw *rtl_fw = tp->rtl_fw;
2395 
2396  /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2397  if (!IS_ERR_OR_NULL(rtl_fw))
2398  rtl_phy_write_fw(tp, rtl_fw);
2399 }
2400 
2401 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2402 {
2403  if (rtl_readphy(tp, reg) != val)
2404  netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2405  else
2406  rtl_apply_firmware(tp);
2407 }
2408 
/*
 * PHY setup for the RTL8169S.  The table is a fixed vendor-specified
 * register sequence; writes to register 0x1f appear to select a PHY
 * register page (standard Realtek convention — TODO confirm against
 * vendor docs), so each { 0x1f, ... } entry brackets a group.
 * Order and values must not be changed.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2475 
2476 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2477 {
2478  static const struct phy_reg phy_reg_init[] = {
2479  { 0x1f, 0x0002 },
2480  { 0x01, 0x90d0 },
2481  { 0x1f, 0x0000 }
2482  };
2483 
2484  rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2485 }
2486 
2487 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2488 {
2489  struct pci_dev *pdev = tp->pci_dev;
2490 
2491  if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2492  (pdev->subsystem_device != 0xe000))
2493  return;
2494 
2495  rtl_writephy(tp, 0x1f, 0x0001);
2496  rtl_writephy(tp, 0x10, 0xf01b);
2497  rtl_writephy(tp, 0x1f, 0x0000);
2498 }
2499 
/*
 * PHY setup for the RTL8169SCd: vendor-specified register sequence,
 * followed by a Gigabyte board quirk.  Order and values are fixed.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Extra tweak for specific Gigabyte boards (no-op elsewhere). */
	rtl8169scd_hw_phy_config_quirk(tp);
}
2546 
/*
 * PHY setup for the RTL8169SCe: fixed vendor-specified register
 * sequence.  Order and values must not be changed.
 */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2599 
2600 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2601 {
2602  static const struct phy_reg phy_reg_init[] = {
2603  { 0x10, 0xf41b },
2604  { 0x1f, 0x0000 }
2605  };
2606 
2607  rtl_writephy(tp, 0x1f, 0x0001);
2608  rtl_patchphy(tp, 0x16, 1 << 0);
2609 
2610  rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2611 }
2612 
2613 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2614 {
2615  static const struct phy_reg phy_reg_init[] = {
2616  { 0x1f, 0x0001 },
2617  { 0x10, 0xf41b },
2618  { 0x1f, 0x0000 }
2619  };
2620 
2621  rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2622 }
2623 
2624 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2625 {
2626  static const struct phy_reg phy_reg_init[] = {
2627  { 0x1f, 0x0000 },
2628  { 0x1d, 0x0f00 },
2629  { 0x1f, 0x0002 },
2630  { 0x0c, 0x1ec8 },
2631  { 0x1f, 0x0000 }
2632  };
2633 
2634  rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2635 }
2636 
2637 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2638 {
2639  static const struct phy_reg phy_reg_init[] = {
2640  { 0x1f, 0x0001 },
2641  { 0x1d, 0x3d98 },
2642  { 0x1f, 0x0000 }
2643  };
2644 
2645  rtl_writephy(tp, 0x1f, 0x0000);
2646  rtl_patchphy(tp, 0x14, 1 << 5);
2647  rtl_patchphy(tp, 0x0d, 1 << 5);
2648 
2649  rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2650 }
2651 
/*
 * PHY setup for the RTL8168C rev.1: vendor-specified register table,
 * then single-bit patches on registers 0x14 and 0x0d.  Note the patches
 * run after the batch left the PHY on page 0 (last 0x1f write is 0x0000).
 */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2680 
/*
 * PHY setup for the RTL8168C rev.2: vendor-specified register table,
 * then single-bit patches on registers 0x16, 0x14 and 0x0d.
 */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2708 
/*
 * PHY setup for the RTL8168C rev.3 (also reused verbatim by rev.4):
 * vendor-specified register table, then single-bit patches.
 */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2730 
/* The RTL8168C rev.4 uses exactly the same PHY setup as rev.3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2735 
/*
 * PHY setup for the RTL8168D rev.1: channel estimation, Tx/Rx error
 * workarounds, regulator tuning (branching on an efuse revision byte),
 * PLL fine tuning and a conditional firmware upload.  Statement order
 * is part of the hardware contract; do not reorder.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	/* Regulator tuning differs per chip revision read from efuse. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/* Ramp the low byte of reg 0x0d up to 0x6c in steps. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Upload firmware only if MII_EXPANSION reads back 0xbf00. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2846 
/*
 * PHY setup for the RTL8168D rev.2: same channel-estimation and error
 * workarounds as rev.1, but different regulator values, a slew-rate
 * tweak instead of the RSET step, and a different firmware key word.
 * Statement order is part of the hardware contract; do not reorder.
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/* Regulator tuning differs per chip revision read from efuse. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		/* Ramp the low byte of reg 0x0d up to 0x6c in steps. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Upload firmware only if MII_EXPANSION reads back 0xb300. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2948 
/*
 * PHY setup for the RTL8168D rev.3: a single fixed vendor-specified
 * register sequence (no firmware, no efuse branch).
 */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3009 
3010 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3011 {
3012  static const struct phy_reg phy_reg_init[] = {
3013  { 0x1f, 0x0001 },
3014  { 0x17, 0x0cc0 },
3015 
3016  { 0x1f, 0x0007 },
3017  { 0x1e, 0x002d },
3018  { 0x18, 0x0040 },
3019  { 0x1f, 0x0000 }
3020  };
3021 
3022  rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3023  rtl_patchphy(tp, 0x0d, 1 << 5);
3024 }
3025 
/*
 * PHY setup for the RTL8168E rev.1: upload firmware first, then apply
 * the vendor register table and a series of read-modify-write tweaks.
 * Statement order is part of the hardware contract; do not reorder.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE-related sequence, finishing with MMD-style accesses
	 * through registers 0x0d/0x0e (presumably MMD access control
	 * and data - TODO confirm against PHY documentation).
	 */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
3098 
/*
 * PHY setup for the RTL8168E rev.2: firmware upload, vendor register
 * table, then performance / speed-down / EEE / green-feature tweaks.
 * Statement order is part of the hardware contract; do not reorder.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting: touch the MAC-side ERI register first, then the
	 * PHY, ending with 0x0d/0x0e accesses (presumably MMD access
	 * control/data - TODO confirm against PHY documentation).
	 */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3182 
/*
 * PHY tweaks common to the 8168F family (also called from the 8411
 * path): 4-corner performance, automatic speed-down and 10M EEE
 * waveform adjustments.
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3204 
/*
 * PHY setup for the RTL8168F rev.1: firmware, vendor register table,
 * the common 8168F tweaks, then a 2-pair detection improvement.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Tweaks shared by the whole 8168F family. */
	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3254 
/*
 * PHY setup for the RTL8168F rev.2: firmware plus the common 8168F
 * tweaks only - no extra register table.
 */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3261 
/*
 * PHY setup for the RTL8411: firmware, the common 8168F tweaks, a
 * 2-pair detection improvement, then the vendor register table and
 * green-table / EEE adjustments.  Note the batch deliberately runs
 * AFTER the common tweaks here, unlike the 8168F rev.1 path.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};


	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3358 
/*
 * PHY setup for the RTL8168G rev.1: download a MAC OCP patch (for
 * GPHY reset), upload firmware, then apply conditional OCP tweaks.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const u16 mac_ocp_patch[] = {
		0xe008, 0xe01b, 0xe01d, 0xe01f,
		0xe021, 0xe023, 0xe025, 0xe027,
		0x49d2, 0xf10d, 0x766c, 0x49e2,
		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,

		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
		0xc707, 0x8ee1, 0x9d6c, 0xc603,
		0xbe00, 0xb416, 0x0076, 0xe86c,
		0xc602, 0xbe00, 0x0000, 0xc602,

		0xbe00, 0x0000, 0xc602, 0xbe00,
		0x0000, 0xc602, 0xbe00, 0x0000,
		0xc602, 0xbe00, 0x0000, 0xc602,
		0xbe00, 0x0000, 0xc602, 0xbe00,

		0x0000, 0x0000, 0x0000, 0x0000
	};
	u32 i;

	/* Patch code for GPHY reset: one 16-bit word every 2 bytes
	 * starting at OCP address 0xf800.
	 */
	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);

	rtl_apply_firmware(tp);

	/* Mirror bit 8 of OCP reg 0xa460 into bit 15 of 0xbcc4. */
	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);

	/* NOTE(review): the else branch writes 0xbcc4 while the if
	 * branch writes 0xc41a - the asymmetry looks suspicious but
	 * matches the shipped code; confirm against Realtek docs.
	 */
	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);

	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);

	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);

	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
}
3407 
3408 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3409 {
3410  static const struct phy_reg phy_reg_init[] = {
3411  { 0x1f, 0x0003 },
3412  { 0x08, 0x441d },
3413  { 0x01, 0x9100 },
3414  { 0x1f, 0x0000 }
3415  };
3416 
3417  rtl_writephy(tp, 0x1f, 0x0000);
3418  rtl_patchphy(tp, 0x11, 1 << 12);
3419  rtl_patchphy(tp, 0x19, 1 << 13);
3420  rtl_patchphy(tp, 0x10, 1 << 15);
3421 
3422  rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3423 }
3424 
/*
 * PHY setup for the RTL8105E: ALDPS (power-save) must be disabled
 * before the firmware ram code is uploaded, then the vendor table
 * is applied.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);	/* let the PHY leave ALDPS before the upload */

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3450 
/*
 * PHY setup for the RTL8402: disable ALDPS, upload firmware, then
 * configure EEE via an ERI write and three PHY register writes.
 */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);	/* shorter settle time than the 8105E/8106E paths */

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3467 
/*
 * PHY setup for the RTL8106E: disable ALDPS, upload firmware, then
 * apply the vendor table bracketed by two ERI writes.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);	/* let the PHY leave ALDPS before the upload */

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
3489 
3490 static void rtl_hw_phy_config(struct net_device *dev)
3491 {
3492  struct rtl8169_private *tp = netdev_priv(dev);
3493 
3494  rtl8169_print_mac_version(tp);
3495 
3496  switch (tp->mac_version) {
3497  case RTL_GIGA_MAC_VER_01:
3498  break;
3499  case RTL_GIGA_MAC_VER_02:
3500  case RTL_GIGA_MAC_VER_03:
3501  rtl8169s_hw_phy_config(tp);
3502  break;
3503  case RTL_GIGA_MAC_VER_04:
3504  rtl8169sb_hw_phy_config(tp);
3505  break;
3506  case RTL_GIGA_MAC_VER_05:
3507  rtl8169scd_hw_phy_config(tp);
3508  break;
3509  case RTL_GIGA_MAC_VER_06:
3510  rtl8169sce_hw_phy_config(tp);
3511  break;
3512  case RTL_GIGA_MAC_VER_07:
3513  case RTL_GIGA_MAC_VER_08:
3514  case RTL_GIGA_MAC_VER_09:
3515  rtl8102e_hw_phy_config(tp);
3516  break;
3517  case RTL_GIGA_MAC_VER_11:
3518  rtl8168bb_hw_phy_config(tp);
3519  break;
3520  case RTL_GIGA_MAC_VER_12:
3521  rtl8168bef_hw_phy_config(tp);
3522  break;
3523  case RTL_GIGA_MAC_VER_17:
3524  rtl8168bef_hw_phy_config(tp);
3525  break;
3526  case RTL_GIGA_MAC_VER_18:
3527  rtl8168cp_1_hw_phy_config(tp);
3528  break;
3529  case RTL_GIGA_MAC_VER_19:
3530  rtl8168c_1_hw_phy_config(tp);
3531  break;
3532  case RTL_GIGA_MAC_VER_20:
3533  rtl8168c_2_hw_phy_config(tp);
3534  break;
3535  case RTL_GIGA_MAC_VER_21:
3536  rtl8168c_3_hw_phy_config(tp);
3537  break;
3538  case RTL_GIGA_MAC_VER_22:
3539  rtl8168c_4_hw_phy_config(tp);
3540  break;
3541  case RTL_GIGA_MAC_VER_23:
3542  case RTL_GIGA_MAC_VER_24:
3543  rtl8168cp_2_hw_phy_config(tp);
3544  break;
3545  case RTL_GIGA_MAC_VER_25:
3546  rtl8168d_1_hw_phy_config(tp);
3547  break;
3548  case RTL_GIGA_MAC_VER_26:
3549  rtl8168d_2_hw_phy_config(tp);
3550  break;
3551  case RTL_GIGA_MAC_VER_27:
3552  rtl8168d_3_hw_phy_config(tp);
3553  break;
3554  case RTL_GIGA_MAC_VER_28:
3555  rtl8168d_4_hw_phy_config(tp);
3556  break;
3557  case RTL_GIGA_MAC_VER_29:
3558  case RTL_GIGA_MAC_VER_30:
3559  rtl8105e_hw_phy_config(tp);
3560  break;
3561  case RTL_GIGA_MAC_VER_31:
3562  /* None. */
3563  break;
3564  case RTL_GIGA_MAC_VER_32:
3565  case RTL_GIGA_MAC_VER_33:
3566  rtl8168e_1_hw_phy_config(tp);
3567  break;
3568  case RTL_GIGA_MAC_VER_34:
3569  rtl8168e_2_hw_phy_config(tp);
3570  break;
3571  case RTL_GIGA_MAC_VER_35:
3572  rtl8168f_1_hw_phy_config(tp);
3573  break;
3574  case RTL_GIGA_MAC_VER_36:
3575  rtl8168f_2_hw_phy_config(tp);
3576  break;
3577 
3578  case RTL_GIGA_MAC_VER_37:
3579  rtl8402_hw_phy_config(tp);
3580  break;
3581 
3582  case RTL_GIGA_MAC_VER_38:
3583  rtl8411_hw_phy_config(tp);
3584  break;
3585 
3586  case RTL_GIGA_MAC_VER_39:
3587  rtl8106e_hw_phy_config(tp);
3588  break;
3589 
3590  case RTL_GIGA_MAC_VER_40:
3591  rtl8168g_1_hw_phy_config(tp);
3592  break;
3593 
3594  case RTL_GIGA_MAC_VER_41:
3595  default:
3596  break;
3597  }
3598 }
3599 
/*
 * Periodic PHY monitor: while the link stays down, keep resetting the
 * PHY and re-arm the timer.  Returns (without re-arming) once the link
 * comes up.
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	/* Link is up: stop polling until something re-arms the timer. */
	if (tp->link_ok(ioaddr))
		return;

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3627 
3628 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3629 {
3630  if (!test_and_set_bit(flag, tp->wk.flags))
3631  schedule_work(&tp->wk.work);
3632 }
3633 
3634 static void rtl8169_phy_timer(unsigned long __opaque)
3635 {
3636  struct net_device *dev = (struct net_device *)__opaque;
3637  struct rtl8169_private *tp = netdev_priv(dev);
3638 
3639  rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3640 }
3641 
/*
 * Tear down everything rtl_init_one() set up, in reverse acquisition
 * order: unmap MMIO, release the PCI BARs, undo MWI, disable the
 * device, then free the netdev (which frees tp as well).
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3651 
/* Poll condition: true while a PHY reset is still in progress. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
3656 
/* Trigger a PHY reset and wait (up to 100 x 1ms) for it to complete. */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
3663 
/*
 * True when the TBI (fiber) transceiver interface is in use.  Only the
 * original 8169 (VER_01) can have TBI enabled.
 */
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
}
3671 
/*
 * One-time PHY bring-up: apply per-revision PHY quirks, fix up PCI
 * latency/cache-line settings on early chips, reset the PHY and start
 * autonegotiation advertising all speeds the PHY supports.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise gigabit only when the PHY actually supports GMII. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3707 
/*
 * Program the unicast MAC address into the MAC0/MAC4 registers.
 * The high half is written first; each write is read back to flush
 * PCI posting.  VER_34 (8168evl) additionally mirrors the address
 * into the EXGMAC register block.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 high;
	u32 low;

	low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
	high = addr[4] | (addr[5] << 8);

	rtl_lock_work(tp);

	RTL_W32(MAC4, high);
	RTL_R32(MAC4);

	RTL_W32(MAC0, low);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
		const struct exgmac_reg e[] = {
			{ .addr = 0xe0, ERIAR_MASK_1111, .val = low },
			{ .addr = 0xe4, ERIAR_MASK_1111, .val = high },
			{ .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
			{ .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
								low  >> 16 },
		};

		rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
	}

	rtl_unlock_work(tp);
}
3743 
3744 static int rtl_set_mac_address(struct net_device *dev, void *p)
3745 {
3746  struct rtl8169_private *tp = netdev_priv(dev);
3747  struct sockaddr *addr = p;
3748 
3749  if (!is_valid_ether_addr(addr->sa_data))
3750  return -EADDRNOTAVAIL;
3751 
3752  memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3753 
3754  rtl_rar_set(tp, dev->dev_addr);
3755 
3756  return 0;
3757 }
3758 
3759 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3760 {
3761  struct rtl8169_private *tp = netdev_priv(dev);
3762  struct mii_ioctl_data *data = if_mii(ifr);
3763 
3764  return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3765 }
3766 
3767 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3768  struct mii_ioctl_data *data, int cmd)
3769 {
3770  switch (cmd) {
3771  case SIOCGMIIPHY:
3772  data->phy_id = 32; /* Internal PHY */
3773  return 0;
3774 
3775  case SIOCGMIIREG:
3776  data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3777  return 0;
3778 
3779  case SIOCSMIIREG:
3780  rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3781  return 0;
3782  }
3783  return -EOPNOTSUPP;
3784 }
3785 
/* MII ioctls are meaningless on TBI (fiber) boards: always refuse. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3790 
3791 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3792 {
3793  if (tp->features & RTL_FEATURE_MSI) {
3794  pci_disable_msi(pdev);
3795  tp->features &= ~RTL_FEATURE_MSI;
3796  }
3797 }
3798 
/*
 * Select the MDIO (PHY register) accessors for the detected chip.
 * The DP (DASH) and G-series parts use indirect access schemes; all
 * other revisions use the plain 8169 PHYAR interface.
 */
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write	= r8168dp_1_mdio_write;
		ops->read	= r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write	= r8168dp_2_mdio_write;
		ops->read	= r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->write	= r8168g_mdio_write;
		ops->read	= r8168g_mdio_read;
		break;
	default:
		ops->write	= r8169_mdio_write;
		ops->read	= r8169_mdio_read;
		break;
	}
}
3824 
/*
 * Rx-filter quirk applied before a Wake-on-LAN suspend on the chip
 * revisions listed below; other revisions need no adjustment.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		break;
	default:
		break;
	}
}
3849 
3850 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3851 {
3852  if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3853  return false;
3854 
3855  rtl_writephy(tp, 0x1f, 0x0000);
3856  rtl_writephy(tp, MII_BMCR, 0x0000);
3857 
3858  rtl_wol_suspend_quirk(tp);
3859 
3860  return true;
3861 }
3862 
/* Power down the 810x PHY (select page 0, set BMCR power-down bit). */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3868 
/* Power up the 810x PHY and re-enable autonegotiation. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3874 
/*
 * PLL power-down for 810x-class chips.  Skipped entirely when WoL is
 * armed; the chips listed in the switch have no PMCH PLL control bit.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3897 
/* Inverse of r810x_pll_power_down(): restore the PLL, then the PHY. */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		/* No PMCH PLL control on these revisions. */
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
3917 
/*
 * Power up the 8168 PHY.  The b/c/d-series revisions additionally need
 * register 0x0e cleared before BMCR autonegotiation is re-enabled.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3944 
/*
 * Power down the 8168 PHY.  VER_32/33 must keep autonegotiation
 * enabled while powered down; the b/c/d-series revisions first write
 * 0x0200 to register 0x0e and then fall through to the common
 * BMCR power-down write.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - BMCR_PDOWN applies to these too */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
3975 
/*
 * PLL power-down for 8168-class chips.  Bails out when DASH management
 * (DP parts) or ASF is active, or when WoL keeps the PHY alive.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* DASH firmware owns the NIC: do not touch power state. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	/* ASF active: leave the chip powered. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4014 
/* Restore the PLL (where controllable), then power the PHY back up. */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}

	r8168_phy_power_up(tp);
}
4033 
/* Invoke an optional per-chip operation; a NULL op means "no-op". */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (!op)
		return;

	op(tp);
}
4040 
/* Run the chip-specific PLL power-down hook, if any. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4045 
/* Run the chip-specific PLL power-up hook, if any. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
4050 
/*
 * Bind the PLL power hooks for the detected chip family: 810x-class,
 * 8168-class, or none (8169 parts have no PLL power management).
 */
static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
		ops->down	= r810x_pll_power_down;
		ops->up		= r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		ops->down	= r8168_pll_power_down;
		ops->up		= r8168_pll_power_up;
		break;

	default:
		ops->down	= NULL;
		ops->up		= NULL;
		break;
	}
}
4102 
/*
 * Program the baseline RxConfig (DMA burst / FIFO threshold) value for
 * the detected chip revision.
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
		break;
	default:
		break;
	}
}
4139 
4140 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4141 {
4142  tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
4143 }
4144 
/* Enable jumbo frame support via the chip-specific hook, if any. */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_generic_op(tp, tp->jumbo_ops.enable);
}
4153 
/* Disable jumbo frame support via the chip-specific hook, if any. */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_generic_op(tp, tp->jumbo_ops.disable);
}
4162 
/* 8168c jumbo on: shrink the PCIe max read request for large frames. */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4171 
/* 8168c jumbo off: restore the normal PCIe max read request size. */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4180 
/* 8168dp jumbo on. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

}
4187 
/* 8168dp jumbo off. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

}
4194 
/*
 * 8168e jumbo on: raise the max Tx packet size, set the jumbo bit in
 * Config4 and shrink the PCIe max read request.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4204 
/* 8168e jumbo off: undo exactly what r8168e_hw_jumbo_enable() set. */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4214 
/* 8168b rev0 jumbo on: only the PCIe read-request tweak is needed. */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
}
4220 
/* 8168b rev0 jumbo off. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
}
4226 
4227 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4228 {
4229  void __iomem *ioaddr = tp->mmio_addr;
4230 
4231  r8168b_0_hw_jumbo_enable(tp);
4232 
4233  RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
4234 }
4235 
4236 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4237 {
4238  void __iomem *ioaddr = tp->mmio_addr;
4239 
4240  r8168b_0_hw_jumbo_disable(tp);
4241 
4242  RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
4243 }
4244 
/*
 * Bind the jumbo-frame enable/disable hooks for the detected chip.
 * NULL hooks mean the chip either needs nothing (8169) or does not
 * support jumbo frames at all (810x, G-series).
 */
static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable	= r8168b_0_hw_jumbo_disable;
		ops->enable	= r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable	= r8168b_1_hw_jumbo_disable;
		ops->enable	= r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable	= r8168c_hw_jumbo_disable;
		ops->enable	= r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable	= r8168dp_hw_jumbo_disable;
		ops->enable	= r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable	= r8168e_hw_jumbo_disable;
		ops->enable	= r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	default:
		ops->disable	= NULL;
		ops->enable	= NULL;
		break;
	}
}
4296 
/* Poll condition: true while the chip-level soft reset is still active. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
4303 
/* Issue a chip soft reset and wait (100 x 100us) for it to self-clear. */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
4312 
/*
 * Load and validate the PHY firmware patch for this chip, storing the
 * result in tp->rtl_fw.  On any failure a warning is logged and
 * tp->rtl_fw is set to NULL (the driver then runs unpatched).
 * Classic goto-chain cleanup: each label undoes one acquisition.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;

	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;	/* chip needs no firmware */

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
4350 
/*
 * Fetch firmware only on the first request; tp->rtl_fw starts life as
 * an ERR_PTR sentinel and becomes either a valid patch or NULL.
 */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	if (IS_ERR(tp->rtl_fw))
		rtl_request_uncached_firmware(tp);
}
4356 
/* Stop packet reception (clears the accept bits in RxConfig). */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

}
4363 
/* Poll condition: true while the normal-priority Tx queue is busy. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
4370 
/* Poll condition: true once the transmitter FIFO reports empty. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
4377 
/*
 * Orderly shutdown before a chip reset: mask IRQs, close the receiver,
 * drain the transmitter in a chip-specific way, then soft-reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4407 
/* Program TxConfig (DMA burst size and interframe gap). */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
}
4416 
/*
 * Bring the hardware up via the per-chip hw_start hook, then unmask
 * all interrupt sources.
 */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
4425 
/* Point the chip at the DMA addresses of the Tx and Rx rings. */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
}
4439 
4440 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4441 {
4442  u16 cmd;
4443 
4444  cmd = RTL_R16(CPlusCmd);
4445  RTL_W16(CPlusCmd, cmd);
4446  return cmd;
4447 }
4448 
/* Set the hardware Rx size filter just above the buffer size. */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4454 
4455 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4456 {
4457  static const struct rtl_cfg2_info {
4458  u32 mac_version;
4459  u32 clk;
4460  u32 val;
4461  } cfg2_info [] = {
4462  { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4463  { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4464  { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4465  { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4466  };
4467  const struct rtl_cfg2_info *p = cfg2_info;
4468  unsigned int i;
4469  u32 clk;
4470 
4471  clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4472  for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4473  if ((p->mac_version == mac_version) && (p->clk == clk)) {
4474  RTL_W32(0x7c, p->val);
4475  break;
4476  }
4477  }
4478 }
4479 
/*
 * ndo_set_rx_mode hook: translate the netdev flags and multicast list
 * into RxConfig accept bits and the 64-bit multicast hash filter.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Top 6 CRC bits select one of 64 hash buckets. */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		/* Newer chips interpret the filter words byte-swapped. */
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
		mc_filter[1] = mc_filter[0] = 0xffffffff;

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
4532 
/*
 * hw_start implementation for the original 8169 family: PCI quirks,
 * Rx config, C+ command setup, interrupt mitigation off, descriptor
 * ring registers, Rx mode and early-Rx interrupt masking.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4604 
4605 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4606 {
4607  if (tp->csi_ops.write)
4608  tp->csi_ops.write(tp, addr, value);
4609 }
4610 
4611 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4612 {
4613  return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4614 }
4615 
/*
 * Read-modify-write the top byte of CSI register 0x070c, preserving
 * the lower 24 bits.
 */
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
{
	u32 csi;

	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
	rtl_csi_write(tp, 0x070c, csi | bits);
}
4623 
/* CSI access enable, variant 1 (top byte 0x17). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
4628 
/* CSI access enable, variant 2 (top byte 0x27). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
4633 
/* Poll condition: state of the CSI address-register busy/done flag. */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
4640 
/*
 * CSI write, standard variant: load CSIDR, kick CSIAR, then poll for
 * completion (10us x 100).
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4651 
/* CSI read, standard variant; returns ~0 when the poll times out. */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4662 
/* CSI write, 8402/8411 variant (targets the NIC PCI function). */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4674 
/* CSI read, 8402/8411 variant; returns ~0 when the poll times out. */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4685 
/*
 * Bind the CSI accessors: none for pre-PCIe chips, the 8402 variant
 * for 8402/8411, and the standard variant for everything else.
 */
static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
{
	struct csi_ops *ops = &tp->csi_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		ops->write	= NULL;
		ops->read	= NULL;
		break;

	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
		ops->write	= r8402_csi_write;
		ops->read	= r8402_csi_read;
		break;

	default:
		ops->write	= r8169_csi_write;
		ops->read	= r8169_csi_read;
		break;
	}
}
4721 
/* One EPHY patch entry: clear `mask` then set `bits` at `offset`. */
struct ephy_info {
	unsigned int offset;
};
4727 
4728 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4729  int len)
4730 {
4731  u16 w;
4732 
4733  while (len-- > 0) {
4734  w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4735  rtl_ephy_write(tp, e->offset, w);
4736  e++;
4737  }
4738 }
4739 
/* Clear the PCIe link-control clock-request (CLKREQ) capability bit. */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
}
4745 
/* Set the PCIe link-control clock-request (CLKREQ) capability bit. */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
}
4751 
/* CPlusCmd debug/ASF bits that must be cleared on 8168 bring-up. */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4762 
/* Chip bring-up for the 8168bb. */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_tx_performance_tweak(pdev,
}
4775 
/* Chip bring-up for the 8168b/e/f: bb sequence plus Config4 bit 0 off. */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4786 
/* Common tail of the 8168c/cp bring-up sequences. */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

}
4802 
/* Chip bring-up for the 8168cp rev1: EPHY patch table + common tail. */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	/* { offset, clear-mask, set-bits } - values from Realtek. */
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
4819 
/* Bring-up for 8168CP rev 2.
 * NOTE(review): two register writes are elided in this extract. */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

}
4833 
/* Bring-up for 8168CP rev 3.
 * NOTE(review): some register writes are elided in this extract. */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

}
4852 
4853 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4854 {
4855  void __iomem *ioaddr = tp->mmio_addr;
4856  static const struct ephy_info e_info_8168c_1[] = {
4857  { 0x02, 0x0800, 0x1000 },
4858  { 0x03, 0, 0x0002 },
4859  { 0x06, 0x0080, 0x0000 }
4860  };
4861 
4862  rtl_csi_access_enable_2(tp);
4863 
4864  RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4865 
4866  rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4867 
4868  __rtl_hw_start_8168cp(tp);
4869 }
4870 
4871 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4872 {
4873  static const struct ephy_info e_info_8168c_2[] = {
4874  { 0x01, 0, 0x0001 },
4875  { 0x03, 0x0400, 0x0220 }
4876  };
4877 
4878  rtl_csi_access_enable_2(tp);
4879 
4880  rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4881 
4882  __rtl_hw_start_8168cp(tp);
4883 }
4884 
/* 8168C rev 3 uses exactly the same bring-up as rev 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
4889 
/* 8168C rev 4: no ePHY fixups needed — just CSI enable plus the common
 * 8168cp init. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
4896 
/* Bring-up for 8168D (MAC_VER_25/26/27).
 * NOTE(review): two register writes are elided in this extract. */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

}
4912 
/* Bring-up for 8168DP (MAC_VER_31).
 * NOTE(review): one register write is elided in this extract. */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);
}
4926 
/* Bring-up for 8168D rev 4 (MAC_VER_28).
 * NOTE(review): one register write is elided in this extract. */
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168d_4[] = {
		{ 0x0b, ~0, 0x48 },
		{ 0x19, 0x20, 0x50 },
		{ 0x0c, ~0, 0x20 }
	};
	int i;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Unlike rtl_ephy_init(), this writes every fixup to ePHY reg 0x03
	 * and keeps (rather than clears) the masked bits.
	 * NOTE(review): looks like a copy-paste oddity inherited from the
	 * vendor driver — verify against Realtek reference code before
	 * changing, as it may be intentional chip magic. */
	for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
		const struct ephy_info *e = e_info_8168d_4 + i;
		u16 w;

		w = rtl_ephy_read(tp, e->offset);
		rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
	}

	rtl_enable_clock_request(pdev);
}
4954 
/* Bring-up for 8168E rev 1 (MAC_VER_32/33).
 * NOTE(review): several register writes (including the TX FIFO pointer
 * reset) are elided in this extract. */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200, 0x0100 },
		{ 0x00, 0x0000, 0x0004 },
		{ 0x06, 0x0002, 0x0001 },
		{ 0x06, 0x0000, 0x0030 },
		{ 0x07, 0x0000, 0x2000 },
		{ 0x00, 0x0000, 0x0020 },
		{ 0x03, 0x5800, 0x2000 },
		{ 0x03, 0x0000, 0x0001 },
		{ 0x01, 0x0800, 0x1000 },
		{ 0x07, 0x0000, 0x4000 },
		{ 0x1e, 0x0000, 0x2000 },
		{ 0x19, 0xffff, 0xfe6c },
		{ 0x0a, 0x0000, 0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */

}
4991 
/* Bring-up for 8168E rev 2 (MAC_VER_34): ePHY fixups plus a series of
 * ERI (extended register interface) writes configuring the EXGMAC.
 * NOTE(review): several register writes are elided in this extract. */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000, 0x0080 },
		{ 0x19, 0x0000, 0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	rtl_disable_clock_request(pdev);

	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

}
5030 
/* Common bring-up shared by 8168F variants and the 8411: CSI enable,
 * read-request tweak, and EXGMAC ERI configuration.
 * NOTE(review): several register writes are elided in this extract. */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	/* Pulse bit 0 of ERI reg 0xdc (clear then set). */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	rtl_disable_clock_request(pdev);

	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
}
5061 
/* Bring-up for 8168F (MAC_VER_35/36): common 8168f init, chip-specific
 * ePHY fixups, one ERI tweak, and the EEE LED frequency adjustment. */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0, 0x0020 },
		{ 0x08, 0x0001, 0x0002 },
		{ 0x09, 0x0000, 0x0080 },
		{ 0x19, 0x0000, 0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
5081 
5082 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5083 {
5084  static const struct ephy_info e_info_8168f_1[] = {
5085  { 0x06, 0x00c0, 0x0020 },
5086  { 0x0f, 0xffff, 0x5200 },
5087  { 0x1e, 0x0000, 0x4000 },
5088  { 0x19, 0x0000, 0x0224 }
5089  };
5090 
5091  rtl_hw_start_8168f(tp);
5092 
5093  rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5094 
5095  rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
5096 }
5097 
/* Bring-up for 8168G (MAC_VER_40/41).
 * NOTE(review): two register writes are elided in this extract. */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Pulse bit 0 of ERI reg 0xdc (clear then set). */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
5127 
/* Top-level hardware start for all 8168-family chips: program the common
 * registers, then dispatch to the per-revision bring-up routine.
 * NOTE(review): several common register writes are elided in this
 * extract — confirm against the full source. */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	/* Read flushes prior posted MMIO writes. */
	RTL_R8(IntrMask);

	/* Per-revision bring-up. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
5246 
/* CPlusCmd bits masked off on 810x chips — same debug/test features as
 * R8168_CPCMD_QUIRK_MASK, kept separate per chip family. */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5257 
/* Bring-up for 8102E rev 1: Config1 setup (with LED sharing quirk) and
 * ePHY fixups.  NOTE(review): two register writes are elided in this
 * extract. */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01, 0, 0x6e65 },
		{ 0x02, 0, 0x091f },
		{ 0x03, 0, 0xc2f9 },
		{ 0x06, 0, 0xafb5 },
		{ 0x07, 0, 0x0e00 },
		{ 0x19, 0, 0xec80 },
		{ 0x01, 0, 0x2e65 },
		{ 0x01, 0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);

	/* If both LED bits stuck, drop LEDS0. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
5290 
/* Bring-up for 8102E rev 2.
 * NOTE(review): the trailing register writes are elided in this
 * extract. */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

}
5303 
/* 8102E rev 3: rev 2 bring-up plus one extra ePHY write. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5310 
/* Bring-up for 8105E rev 1.
 * NOTE(review): two register writes are elided in this extract. */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07, 0, 0x4000 },
		{ 0x19, 0, 0x0200 },
		{ 0x19, 0, 0x0020 },
		{ 0x1e, 0, 0x2000 },
		{ 0x03, 0, 0x0001 },
		{ 0x19, 0, 0x0100 },
		{ 0x19, 0, 0x0004 },
		{ 0x0a, 0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
5336 
/* 8105E rev 2: rev 1 bring-up plus setting bit 15 of ePHY reg 0x1e. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5342 
/* Bring-up for the RTL8402 (MAC_VER_37).
 * NOTE(review): one register write is elided in this extract. */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19, 0xffff, 0xff64 },
		{ 0x1e, 0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	/* Pulse bit 0 of ERI reg 0xdc (clear then set). */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
5371 
/* Bring-up for the RTL8106 (MAC_VER_39).
 * NOTE(review): two register writes are elided in this extract. */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
5383 
/* Top-level hardware start for the 8101/810x family: per-revision
 * bring-up dispatch, then the common register programming.
 * NOTE(review): several statements (including part of the MAC_VER_13
 * condition and its NOERR argument) are elided in this extract. */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Newer 810x chips do not report RxFIFO overflow reliably. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	/* Read flushes prior posted MMIO writes. */
	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5451 
/* ndo_change_mtu handler: validate the new MTU against the chip's jumbo
 * limit and toggle the hardware jumbo-frame mode accordingly.
 * Returns 0 on success, -EINVAL if the MTU is out of range. */
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (new_mtu < ETH_ZLEN ||
	    new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
		return -EINVAL;

	if (new_mtu > ETH_DATA_LEN)
		rtl_hw_jumbo_enable(tp);
	else
		rtl_hw_jumbo_disable(tp);

	dev->mtu = new_mtu;
	/* NOTE(review): one statement after the MTU assignment is elided
	 * in this extract. */

	return 0;
}
5470 
5471 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5472 {
5473  desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5474  desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
5475 }
5476 
/* Release one RX buffer: unmap its DMA mapping first, then free the
 * backing memory, clear the caller's pointer and retire the descriptor.
 * The unmap must precede the free (the chip may still reference the
 * mapping until then). */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
5487 
5488 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5489 {
5490  u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5491 
5492  desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
5493 }
5494 
/* Publish a freshly mapped RX buffer to the NIC.  The wmb() guarantees
 * the DMA address is visible before DescOwn is set — do not reorder. */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
5502 
/* Round a buffer pointer up to the next 16-byte boundary (the RX
 * hardware requires 16-byte-aligned buffers). */
static inline void *rtl8169_align(void *data)
{
	long aligned = ALIGN((long)data, 16);

	return (void *)aligned;
}
5507 
/* Allocate and DMA-map one RX data buffer on the device's NUMA node,
 * retrying with 15 bytes of slack if the first allocation is not
 * 16-byte aligned.  On success the descriptor is published to the NIC
 * and the raw buffer pointer is returned; NULL on failure.
 * NOTE(review): the declaration of 'mapping' (dma_addr_t) is elided in
 * this extract. */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	if (rtl8169_align(data) != data) {
		kfree(data);
		/* +15 guarantees an aligned sub-buffer exists. */
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5543 
5544 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5545 {
5546  unsigned int i;
5547 
5548  for (i = 0; i < NUM_RX_DESC; i++) {
5549  if (tp->Rx_databuff[i]) {
5550  rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5551  tp->RxDescArray + i);
5552  }
5553  }
5554 }
5555 
/* Set RingEnd on a descriptor so the NIC wraps back to the ring head. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5560 
5561 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5562 {
5563  unsigned int i;
5564 
5565  for (i = 0; i < NUM_RX_DESC; i++) {
5566  void *data;
5567 
5568  if (tp->Rx_databuff[i])
5569  continue;
5570 
5571  data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5572  if (!data) {
5573  rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5574  goto err_out;
5575  }
5576  tp->Rx_databuff[i] = data;
5577  }
5578 
5579  rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5580  return 0;
5581 
5582 err_out:
5583  rtl8169_rx_clear(tp);
5584  return -ENOMEM;
5585 }
5586 
5587 static int rtl8169_init_ring(struct net_device *dev)
5588 {
5589  struct rtl8169_private *tp = netdev_priv(dev);
5590 
5591  rtl8169_init_ring_indexes(tp);
5592 
5593  memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5594  memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5595 
5596  return rtl8169_rx_fill(tp);
5597 }
5598 
/* Tear down one TX ring entry: unmap its DMA buffer and scrub the
 * descriptor so the NIC cannot reuse stale state.
 * NOTE(review): the dma_unmap_single() call using 'len' is elided in
 * this extract. */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5611 
5612 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5613  unsigned int n)
5614 {
5615  unsigned int i;
5616 
5617  for (i = 0; i < n; i++) {
5618  unsigned int entry = (start + i) % NUM_TX_DESC;
5619  struct ring_info *tx_skb = tp->tx_skb + entry;
5620  unsigned int len = tx_skb->len;
5621 
5622  if (len) {
5623  struct sk_buff *skb = tx_skb->skb;
5624 
5625  rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5626  tp->TxDescArray + entry);
5627  if (skb) {
5628  tp->dev->stats.tx_dropped++;
5629  dev_kfree_skb(skb);
5630  tx_skb->skb = NULL;
5631  }
5632  }
5633  }
5634 }
5635 
/* Drop the whole TX ring and reset the producer/consumer indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
5641 
/* Full datapath reset run from the driver workqueue: quiesce NAPI and
 * the TX queue, reset the chip, re-arm all RX descriptors, drop pending
 * TX, then restart the hardware.
 * NOTE(review): one quiescing statement is elided in this extract. */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);

	/* Give every RX descriptor back to the chip. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5664 
/* ndo_tx_timeout handler: defer a full chip reset to the work queue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5671 
/* Map and queue every paged fragment of @skb into consecutive TX
 * descriptors after tp->cur_tx.  Returns the number of fragments queued
 * (0 for a linear skb) or -EIO on a DMA mapping failure, in which case
 * any fragments already queued are unwound.
 * NOTE(review): the declaration of 'mapping' (dma_addr_t) is elided in
 * this extract. */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		status = opts[0] | len |
			 (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* skb ownership lives with the final fragment. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5722 
5723 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5724  struct sk_buff *skb, u32 *opts)
5725 {
5726  const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5727  u32 mss = skb_shinfo(skb)->gso_size;
5728  int offset = info->opts_offset;
5729 
5730  if (mss) {
5731  opts[0] |= TD_LSO;
5732  opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5733  } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5734  const struct iphdr *ip = ip_hdr(skb);
5735 
5736  if (ip->protocol == IPPROTO_TCP)
5737  opts[offset] |= info->checksum.tcp;
5738  else if (ip->protocol == IPPROTO_UDP)
5739  opts[offset] |= info->checksum.udp;
5740  else
5741  WARN_ON_ONCE(1);
5742  }
5743 }
5744 
/* ndo_start_xmit: map the linear head, queue any fragments, publish the
 * descriptor chain to the NIC and kick TxPoll.  Stops the queue when
 * fewer than MAX_SKB_FRAGS slots remain.
 * NOTE(review): the 'mapping' declaration and the re-check condition
 * before netif_wake_queue() are elided in this extract — as shown, the
 * wake-up appears unconditional; confirm against the full source. */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	/* Descriptor still owned by the chip: ring is effectively full. */
	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	skb_tx_timestamp(skb);

	/* Descriptor contents must be visible before DescOwn is set. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
5842 
/* Handle a PCI system error reported by the chip: log command/status,
 * rewrite PCI_COMMAND parity/SERR bits, clear latched error status and
 * schedule a full reset.
 * NOTE(review): part of the PCI_STATUS clear mask is elided in this
 * extract. */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-one-to-clear the latched error bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5889 
/* Reclaim completed TX descriptors: walk from dirty_tx to cur_tx,
 * unmapping each entry the chip has released (DescOwn clear), updating
 * stats on the final fragment, then wake the queue if it was stopped.
 * NOTE(review): the second half of the wake-up condition is elided in
 * this extract. */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			/* Whole packet done — account it and free the skb. */
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_kfree_skb(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5949 
5950 static inline int rtl8169_fragmented_frame(u32 status)
5951 {
5952  return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
5953 }
5954 
/* Propagate the chip's RX checksum verdict to the skb: trust TCP/UDP
 * frames whose hardware checksum check passed, otherwise leave the
 * checksum unverified for the stack.
 * NOTE(review): the statement in the success branch (presumably setting
 * skb->ip_summed) is elided in this extract. */
static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
	u32 status = opts1 & RxProtoMask;

	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
	else
		skb_checksum_none_assert(skb);
}
5965 
/* Copy a received packet out of the (still mapped) RX buffer into a new
 * skb so the DMA buffer can be immediately recycled.  The sync-for-cpu /
 * sync-for-device pair around the copy is required for correctness on
 * non-coherent platforms — do not reorder.  Returns NULL if skb
 * allocation fails. */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
5984 
/*
 * NAPI receive handler: drain up to @budget completed descriptors from
 * the Rx ring, copy each frame into a new skb and feed it to GRO.
 * Returns the number of ring entries consumed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;
	/* Entries the hardware may have filled since the last refill. */
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = min(rx_left, budget);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		/* Read opts1 only after observing the ownership hand-over. */
		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		if (status & DescOwn)
			break;	/* still owned by the NIC */
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				/* FIFO overflow: schedule a full chip reset. */
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* With RXALL, runt/CRC-damaged frames are still
			 * delivered unless they also overflowed/timed out.
			 */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;

			rtl8169_mark_to_asic(desc, rx_buf_sz);
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the trailing FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, rx_buf_sz);
				continue;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			/* The original buffer is recycled to the NIC either way. */
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			if (!skb) {
				dev->stats.rx_dropped++;
				continue;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}

		/* Work around for AMD platform. */
		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
			desc->opts2 = 0;
			cur_rx++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Refill accounting: every consumed entry was re-armed above. */
	tp->dirty_rx += count;

	return count;
}
6083 
6084 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6085 {
6086  struct net_device *dev = dev_instance;
6087  struct rtl8169_private *tp = netdev_priv(dev);
6088  int handled = 0;
6089  u16 status;
6090 
6091  status = rtl_get_events(tp);
6092  if (status && status != 0xffff) {
6093  status &= RTL_EVENT_NAPI | tp->event_slow;
6094  if (status) {
6095  handled = 1;
6096 
6097  rtl_irq_disable(tp);
6098  napi_schedule(&tp->napi);
6099  }
6100  }
6101  return IRQ_RETVAL(handled);
6102 }
6103 
6104 /*
6105  * Workqueue context.
6106  */
6107 static void rtl_slow_event_work(struct rtl8169_private *tp)
6108 {
6109  struct net_device *dev = tp->dev;
6110  u16 status;
6111 
6112  status = rtl_get_events(tp) & tp->event_slow;
6113  rtl_ack_events(tp, status);
6114 
6115  if (unlikely(status & RxFIFOOver)) {
6116  switch (tp->mac_version) {
6117  /* Work around for rx fifo overflow */
6118  case RTL_GIGA_MAC_VER_11:
6119  netif_stop_queue(dev);
6120  /* XXX - Hack alert. See rtl_task(). */
6122  default:
6123  break;
6124  }
6125  }
6126 
6127  if (unlikely(status & SYSErr))
6128  rtl8169_pcierr_interrupt(dev);
6129 
6130  if (status & LinkChg)
6131  __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6132 
6133  rtl_irq_enable_all(tp);
6134 }
6135 
6136 static void rtl_task(struct work_struct *work)
6137 {
6138  static const struct {
6139  int bitnr;
6140  void (*action)(struct rtl8169_private *);
6141  } rtl_work[] = {
6142  /* XXX - keep rtl_slow_event_work() as first element. */
6143  { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6144  { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6145  { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6146  };
6147  struct rtl8169_private *tp =
6148  container_of(work, struct rtl8169_private, wk.work);
6149  struct net_device *dev = tp->dev;
6150  int i;
6151 
6152  rtl_lock_work(tp);
6153 
6154  if (!netif_running(dev) ||
6155  !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6156  goto out_unlock;
6157 
6158  for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
6159  bool pending;
6160 
6161  pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6162  if (pending)
6163  rtl_work[i].action(tp);
6164  }
6165 
6166 out_unlock:
6167  rtl_unlock_work(tp);
6168 }
6169 
6170 static int rtl8169_poll(struct napi_struct *napi, int budget)
6171 {
6172  struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6173  struct net_device *dev = tp->dev;
6174  u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6175  int work_done= 0;
6176  u16 status;
6177 
6178  status = rtl_get_events(tp);
6179  rtl_ack_events(tp, status & ~tp->event_slow);
6180 
6181  if (status & RTL_EVENT_NAPI_RX)
6182  work_done = rtl_rx(dev, tp, (u32) budget);
6183 
6184  if (status & RTL_EVENT_NAPI_TX)
6185  rtl_tx(dev, tp);
6186 
6187  if (status & tp->event_slow) {
6188  enable_mask &= ~tp->event_slow;
6189 
6190  rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6191  }
6192 
6193  if (work_done < budget) {
6194  napi_complete(napi);
6195 
6196  rtl_irq_enable(tp, enable_mask);
6197  mmiowb();
6198  }
6199 
6200  return work_done;
6201 }
6202 
6203 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6204 {
6205  struct rtl8169_private *tp = netdev_priv(dev);
6206 
6207  if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6208  return;
6209 
6210  dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6211  RTL_W32(RxMissed, 0);
6212 }
6213 
6214 static void rtl8169_down(struct net_device *dev)
6215 {
6216  struct rtl8169_private *tp = netdev_priv(dev);
6217  void __iomem *ioaddr = tp->mmio_addr;
6218 
6219  del_timer_sync(&tp->timer);
6220 
6221  napi_disable(&tp->napi);
6222  netif_stop_queue(dev);
6223 
6224  rtl8169_hw_reset(tp);
6225  /*
6226  * At this point device interrupts can not be enabled in any function,
6227  * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6228  * and napi is disabled (rtl8169_poll).
6229  */
6230  rtl8169_rx_missed(dev, ioaddr);
6231 
6232  /* Give a racing hard_start_xmit a few cycles to complete. */
6234 
6235  rtl8169_tx_clear(tp);
6236 
6237  rtl8169_rx_clear(tp);
6238 
6239  rtl_pll_power_down(tp);
6240 }
6241 
6242 static int rtl8169_close(struct net_device *dev)
6243 {
6244  struct rtl8169_private *tp = netdev_priv(dev);
6245  struct pci_dev *pdev = tp->pci_dev;
6246 
6247  pm_runtime_get_sync(&pdev->dev);
6248 
6249  /* Update counters before going down */
6250  rtl8169_update_counters(dev);
6251 
6252  rtl_lock_work(tp);
6253  clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6254 
6255  rtl8169_down(dev);
6256  rtl_unlock_work(tp);
6257 
6258  free_irq(pdev->irq, dev);
6259 
6261  tp->RxPhyAddr);
6263  tp->TxPhyAddr);
6264  tp->TxDescArray = NULL;
6265  tp->RxDescArray = NULL;
6266 
6267  pm_runtime_put_sync(&pdev->dev);
6268 
6269  return 0;
6270 }
6271 
6272 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' for netconsole and friends: invoke the normal
 * ISR directly when hardware interrupts are unavailable.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
6279 #endif
6280 
6281 static int rtl_open(struct net_device *dev)
6282 {
6283  struct rtl8169_private *tp = netdev_priv(dev);
6284  void __iomem *ioaddr = tp->mmio_addr;
6285  struct pci_dev *pdev = tp->pci_dev;
6286  int retval = -ENOMEM;
6287 
6288  pm_runtime_get_sync(&pdev->dev);
6289 
6290  /*
6291  * Rx and Tx descriptors needs 256 bytes alignment.
6292  * dma_alloc_coherent provides more.
6293  */
6295  &tp->TxPhyAddr, GFP_KERNEL);
6296  if (!tp->TxDescArray)
6297  goto err_pm_runtime_put;
6298 
6300  &tp->RxPhyAddr, GFP_KERNEL);
6301  if (!tp->RxDescArray)
6302  goto err_free_tx_0;
6303 
6304  retval = rtl8169_init_ring(dev);
6305  if (retval < 0)
6306  goto err_free_rx_1;
6307 
6308  INIT_WORK(&tp->wk.work, rtl_task);
6309 
6310  smp_mb();
6311 
6312  rtl_request_firmware(tp);
6313 
6314  retval = request_irq(pdev->irq, rtl8169_interrupt,
6315  (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
6316  dev->name, dev);
6317  if (retval < 0)
6318  goto err_release_fw_2;
6319 
6320  rtl_lock_work(tp);
6321 
6322  set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6323 
6324  napi_enable(&tp->napi);
6325 
6326  rtl8169_init_phy(dev, tp);
6327 
6328  __rtl8169_set_features(dev, dev->features);
6329 
6330  rtl_pll_power_up(tp);
6331 
6332  rtl_hw_start(dev);
6333 
6334  netif_start_queue(dev);
6335 
6336  rtl_unlock_work(tp);
6337 
6338  tp->saved_wolopts = 0;
6339  pm_runtime_put_noidle(&pdev->dev);
6340 
6341  rtl8169_check_link_status(dev, tp, ioaddr);
6342 out:
6343  return retval;
6344 
6345 err_release_fw_2:
6346  rtl_release_firmware(tp);
6347  rtl8169_rx_clear(tp);
6348 err_free_rx_1:
6350  tp->RxPhyAddr);
6351  tp->RxDescArray = NULL;
6352 err_free_tx_0:
6354  tp->TxPhyAddr);
6355  tp->TxDescArray = NULL;
6356 err_pm_runtime_put:
6357  pm_runtime_put_noidle(&pdev->dev);
6358  goto out;
6359 }
6360 
6361 static struct rtnl_link_stats64 *
6362 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6363 {
6364  struct rtl8169_private *tp = netdev_priv(dev);
6365  void __iomem *ioaddr = tp->mmio_addr;
6366  unsigned int start;
6367 
6368  if (netif_running(dev))
6369  rtl8169_rx_missed(dev, ioaddr);
6370 
6371  do {
6372  start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6373  stats->rx_packets = tp->rx_stats.packets;
6374  stats->rx_bytes = tp->rx_stats.bytes;
6375  } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6376 
6377 
6378  do {
6379  start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6380  stats->tx_packets = tp->tx_stats.packets;
6381  stats->tx_bytes = tp->tx_stats.bytes;
6382  } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6383 
6384  stats->rx_dropped = dev->stats.rx_dropped;
6385  stats->tx_dropped = dev->stats.tx_dropped;
6386  stats->rx_length_errors = dev->stats.rx_length_errors;
6387  stats->rx_errors = dev->stats.rx_errors;
6388  stats->rx_crc_errors = dev->stats.rx_crc_errors;
6389  stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6390  stats->rx_missed_errors = dev->stats.rx_missed_errors;
6391 
6392  return stats;
6393 }
6394 
6395 static void rtl8169_net_suspend(struct net_device *dev)
6396 {
6397  struct rtl8169_private *tp = netdev_priv(dev);
6398 
6399  if (!netif_running(dev))
6400  return;
6401 
6402  netif_device_detach(dev);
6403  netif_stop_queue(dev);
6404 
6405  rtl_lock_work(tp);
6406  napi_disable(&tp->napi);
6407  clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6408  rtl_unlock_work(tp);
6409 
6410  rtl_pll_power_down(tp);
6411 }
6412 
6413 #ifdef CONFIG_PM
6414 
/* dev_pm_ops hook shared by .suspend/.freeze/.poweroff. */
static int rtl8169_suspend(struct device *device)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(device));

	rtl8169_net_suspend(ndev);

	return 0;
}
6424 
/*
 * Common resume tail (system and runtime paths): re-attach the device,
 * power the PLL back up, re-enable NAPI/work and schedule a full reset
 * to bring the hardware back into a known state.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	/* The reset work re-initializes rings and restarts the queue. */
	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6440 
/* dev_pm_ops hook shared by .resume/.thaw/.restore. */
static int rtl8169_resume(struct device *device)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(device));
	struct rtl8169_private *priv = netdev_priv(ndev);

	rtl8169_init_phy(ndev, priv);

	if (netif_running(ndev))
		__rtl8169_resume(ndev);

	return 0;
}
6454 
6455 static int rtl8169_runtime_suspend(struct device *device)
6456 {
6457  struct pci_dev *pdev = to_pci_dev(device);
6458  struct net_device *dev = pci_get_drvdata(pdev);
6459  struct rtl8169_private *tp = netdev_priv(dev);
6460 
6461  if (!tp->TxDescArray)
6462  return 0;
6463 
6464  rtl_lock_work(tp);
6465  tp->saved_wolopts = __rtl8169_get_wol(tp);
6466  __rtl8169_set_wol(tp, WAKE_ANY);
6467  rtl_unlock_work(tp);
6468 
6469  rtl8169_net_suspend(dev);
6470 
6471  return 0;
6472 }
6473 
6474 static int rtl8169_runtime_resume(struct device *device)
6475 {
6476  struct pci_dev *pdev = to_pci_dev(device);
6477  struct net_device *dev = pci_get_drvdata(pdev);
6478  struct rtl8169_private *tp = netdev_priv(dev);
6479 
6480  if (!tp->TxDescArray)
6481  return 0;
6482 
6483  rtl_lock_work(tp);
6484  __rtl8169_set_wol(tp, tp->saved_wolopts);
6485  tp->saved_wolopts = 0;
6486  rtl_unlock_work(tp);
6487 
6488  rtl8169_init_phy(dev, tp);
6489 
6490  __rtl8169_resume(dev);
6491 
6492  return 0;
6493 }
6494 
6495 static int rtl8169_runtime_idle(struct device *device)
6496 {
6497  struct pci_dev *pdev = to_pci_dev(device);
6498  struct net_device *dev = pci_get_drvdata(pdev);
6499  struct rtl8169_private *tp = netdev_priv(dev);
6500 
6501  return tp->TxDescArray ? -EBUSY : 0;
6502 }
6503 
/*
 * System sleep reuses the same suspend/resume pair for every
 * transition; runtime PM is gated on the interface being open
 * (TxDescArray non-NULL, see rtl8169_runtime_idle).
 */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend = rtl8169_suspend,
	.resume = rtl8169_resume,
	.freeze = rtl8169_suspend,
	.thaw = rtl8169_resume,
	.poweroff = rtl8169_suspend,
	.restore = rtl8169_resume,
	.runtime_suspend = rtl8169_runtime_suspend,
	.runtime_resume = rtl8169_runtime_resume,
	.runtime_idle = rtl8169_runtime_idle,
};
6515 
6516 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6517 
6518 #else /* !CONFIG_PM */
6519 
6520 #define RTL8169_PM_OPS NULL
6521 
6522 #endif /* !CONFIG_PM */
6523 
6524 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6525 {
6526  void __iomem *ioaddr = tp->mmio_addr;
6527 
6528  /* WoL fails with 8168b when the receiver is disabled. */
6529  switch (tp->mac_version) {
6530  case RTL_GIGA_MAC_VER_11:
6531  case RTL_GIGA_MAC_VER_12:
6532  case RTL_GIGA_MAC_VER_17:
6534 
6535  RTL_W8(ChipCmd, CmdRxEnb);
6536  /* PCI commit */
6537  RTL_R8(ChipCmd);
6538  break;
6539  default:
6540  break;
6541  }
6542 }
6543 
6544 static void rtl_shutdown(struct pci_dev *pdev)
6545 {
6546  struct net_device *dev = pci_get_drvdata(pdev);
6547  struct rtl8169_private *tp = netdev_priv(dev);
6548  struct device *d = &pdev->dev;
6549 
6550  pm_runtime_get_sync(d);
6551 
6552  rtl8169_net_suspend(dev);
6553 
6554  /* Restore original MAC address */
6555  rtl_rar_set(tp, dev->perm_addr);
6556 
6557  rtl8169_hw_reset(tp);
6558 
6559  if (system_state == SYSTEM_POWER_OFF) {
6560  if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6561  rtl_wol_suspend_quirk(tp);
6562  rtl_wol_shutdown_quirk(tp);
6563  }
6564 
6565  pci_wake_from_d3(pdev, true);
6567  }
6568 
6569  pm_runtime_put_noidle(d);
6570 }
6571 
6572 static void __devexit rtl_remove_one(struct pci_dev *pdev)
6573 {
6574  struct net_device *dev = pci_get_drvdata(pdev);
6575  struct rtl8169_private *tp = netdev_priv(dev);
6576 
6577  if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6580  rtl8168_driver_stop(tp);
6581  }
6582 
6583  cancel_work_sync(&tp->wk.work);
6584 
6585  netif_napi_del(&tp->napi);
6586 
6587  unregister_netdev(dev);
6588 
6589  rtl_release_firmware(tp);
6590 
6591  if (pci_dev_run_wake(pdev))
6592  pm_runtime_get_noresume(&pdev->dev);
6593 
6594  /* restore original MAC address */
6595  rtl_rar_set(tp, dev->perm_addr);
6596 
6597  rtl_disable_msi(pdev, tp);
6598  rtl8169_release_board(pdev, dev, tp->mmio_addr);
6599  pci_set_drvdata(pdev, NULL);
6600 }
6601 
/* net_device callbacks shared by all supported chip families. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open = rtl_open,
	.ndo_stop = rtl8169_close,
	.ndo_get_stats64 = rtl8169_get_stats64,
	.ndo_start_xmit = rtl8169_start_xmit,
	.ndo_tx_timeout = rtl8169_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = rtl8169_change_mtu,
	.ndo_fix_features = rtl8169_fix_features,
	.ndo_set_features = rtl8169_set_features,
	.ndo_set_mac_address = rtl_set_mac_address,
	.ndo_do_ioctl = rtl8169_ioctl,
	.ndo_set_rx_mode = rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = rtl8169_netpoll,
#endif

};
6620 
/*
 * Per-family probe configuration, indexed by the driver_data of the
 * matched PCI id: CFG_0 = 8169, CFG_1 = 8168, CFG_2 = 810x.
 * event_slow lists the interrupt sources deferred to the workqueue.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;	/* PCI BAR holding the registers */
	unsigned int align;
	u16 event_slow;
	unsigned features;
	u8 default_ver;		/* fallback when XID is unrecognized */
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start = rtl_hw_start_8169,
		.region = 1,
		.align = 0,
		.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features = RTL_FEATURE_GMII,
		.default_ver = RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start = rtl_hw_start_8168,
		.region = 2,
		.align = 8,
		.event_slow = SYSErr | LinkChg | RxOverflow,
		.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver = RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start = rtl_hw_start_8101,
		.region = 2,
		.align = 8,
		.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
			      PCSTimeout,
		.features = RTL_FEATURE_MSI,
		.default_ver = RTL_GIGA_MAC_VER_13,
	}
};
6655 
6656 /* Cfg9346_Unlock assumed. */
6657 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6658  const struct rtl_cfg_info *cfg)
6659 {
6660  void __iomem *ioaddr = tp->mmio_addr;
6661  unsigned msi = 0;
6662  u8 cfg2;
6663 
6664  cfg2 = RTL_R8(Config2) & ~MSIEnable;
6665  if (cfg->features & RTL_FEATURE_MSI) {
6666  if (pci_enable_msi(tp->pci_dev)) {
6667  netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6668  } else {
6669  cfg2 |= MSIEnable;
6670  msi = RTL_FEATURE_MSI;
6671  }
6672  }
6673  if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6674  RTL_W8(Config2, cfg2);
6675  return msi;
6676 }
6677 
/* Poll condition: chip reports its link list ready (MCU.LINK_LIST_RDY). */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
6684 
/* Poll condition: both Rx and Tx paths report empty (all RXTX_EMPTY bits). */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
6691 
6692 static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
6693 {
6694  void __iomem *ioaddr = tp->mmio_addr;
6695  u32 data;
6696 
6697  tp->ocp_base = OCP_STD_PHY_BASE;
6698 
6700 
6701  if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6702  return;
6703 
6704  if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6705  return;
6706 
6707  RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
6708  msleep(1);
6709  RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6710 
6711  data = r8168_mac_ocp_read(tp, 0xe8de);
6712  data &= ~(1 << 14);
6713  r8168_mac_ocp_write(tp, 0xe8de, data);
6714 
6715  if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6716  return;
6717 
6718  data = r8168_mac_ocp_read(tp, 0xe8de);
6719  data |= (1 << 15);
6720  r8168_mac_ocp_write(tp, 0xe8de, data);
6721 
6722  if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6723  return;
6724 }
6725 
6726 static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
6727 {
6728  switch (tp->mac_version) {
6729  case RTL_GIGA_MAC_VER_40:
6730  case RTL_GIGA_MAC_VER_41:
6731  rtl_hw_init_8168g(tp);
6732  break;
6733 
6734  default:
6735  break;
6736  }
6737 }
6738 
6739 static int __devinit
6740 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6741 {
6742  const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6743  const unsigned int region = cfg->region;
6744  struct rtl8169_private *tp;
6745  struct mii_if_info *mii;
6746  struct net_device *dev;
6747  void __iomem *ioaddr;
6748  int chipset, i;
6749  int rc;
6750 
6751  if (netif_msg_drv(&debug)) {
6752  printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6754  }
6755 
6756  dev = alloc_etherdev(sizeof (*tp));
6757  if (!dev) {
6758  rc = -ENOMEM;
6759  goto out;
6760  }
6761 
6762  SET_NETDEV_DEV(dev, &pdev->dev);
6763  dev->netdev_ops = &rtl_netdev_ops;
6764  tp = netdev_priv(dev);
6765  tp->dev = dev;
6766  tp->pci_dev = pdev;
6767  tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
6768 
6769  mii = &tp->mii;
6770  mii->dev = dev;
6771  mii->mdio_read = rtl_mdio_read;
6772  mii->mdio_write = rtl_mdio_write;
6773  mii->phy_id_mask = 0x1f;
6774  mii->reg_num_mask = 0x1f;
6775  mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6776 
6777  /* disable ASPM completely as that cause random device stop working
6778  * problems as well as full system hangs for some PCIe devices users */
6781 
6782  /* enable device (incl. PCI PM wakeup and hotplug setup) */
6783  rc = pci_enable_device(pdev);
6784  if (rc < 0) {
6785  netif_err(tp, probe, dev, "enable failure\n");
6786  goto err_out_free_dev_1;
6787  }
6788 
6789  if (pci_set_mwi(pdev) < 0)
6790  netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6791 
6792  /* make sure PCI base addr 1 is MMIO */
6793  if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6794  netif_err(tp, probe, dev,
6795  "region #%d not an MMIO resource, aborting\n",
6796  region);
6797  rc = -ENODEV;
6798  goto err_out_mwi_2;
6799  }
6800 
6801  /* check for weird/broken PCI region reporting */
6802  if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6803  netif_err(tp, probe, dev,
6804  "Invalid PCI region size(s), aborting\n");
6805  rc = -ENODEV;
6806  goto err_out_mwi_2;
6807  }
6808 
6809  rc = pci_request_regions(pdev, MODULENAME);
6810  if (rc < 0) {
6811  netif_err(tp, probe, dev, "could not request regions\n");
6812  goto err_out_mwi_2;
6813  }
6814 
6815  tp->cp_cmd = RxChkSum;
6816 
6817  if ((sizeof(dma_addr_t) > 4) &&
6818  !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6819  tp->cp_cmd |= PCIDAC;
6820  dev->features |= NETIF_F_HIGHDMA;
6821  } else {
6822  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6823  if (rc < 0) {
6824  netif_err(tp, probe, dev, "DMA configuration failed\n");
6825  goto err_out_free_res_3;
6826  }
6827  }
6828 
6829  /* ioremap MMIO region */
6830  ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6831  if (!ioaddr) {
6832  netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6833  rc = -EIO;
6834  goto err_out_free_res_3;
6835  }
6836  tp->mmio_addr = ioaddr;
6837 
6838  if (!pci_is_pcie(pdev))
6839  netif_info(tp, probe, dev, "not PCI Express\n");
6840 
6841  /* Identify chip attached to board */
6842  rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6843 
6844  rtl_init_rxcfg(tp);
6845 
6846  rtl_irq_disable(tp);
6847 
6848  rtl_hw_initialize(tp);
6849 
6850  rtl_hw_reset(tp);
6851 
6852  rtl_ack_events(tp, 0xffff);
6853 
6854  pci_set_master(pdev);
6855 
6856  /*
6857  * Pretend we are using VLANs; This bypasses a nasty bug where
6858  * Interrupts stop flowing on high load on 8110SCd controllers.
6859  */
6860  if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6861  tp->cp_cmd |= RxVlan;
6862 
6863  rtl_init_mdio_ops(tp);
6864  rtl_init_pll_power_ops(tp);
6865  rtl_init_jumbo_ops(tp);
6866  rtl_init_csi_ops(tp);
6867 
6868  rtl8169_print_mac_version(tp);
6869 
6870  chipset = tp->mac_version;
6871  tp->txd_version = rtl_chip_infos[chipset].txd_version;
6872 
6876  if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6877  tp->features |= RTL_FEATURE_WOL;
6878  if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6879  tp->features |= RTL_FEATURE_WOL;
6880  tp->features |= rtl_try_msi(tp, cfg);
6882 
6883  if (rtl_tbi_enabled(tp)) {
6884  tp->set_speed = rtl8169_set_speed_tbi;
6885  tp->get_settings = rtl8169_gset_tbi;
6886  tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6887  tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6888  tp->link_ok = rtl8169_tbi_link_ok;
6889  tp->do_ioctl = rtl_tbi_ioctl;
6890  } else {
6891  tp->set_speed = rtl8169_set_speed_xmii;
6892  tp->get_settings = rtl8169_gset_xmii;
6893  tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6894  tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6895  tp->link_ok = rtl8169_xmii_link_ok;
6896  tp->do_ioctl = rtl_xmii_ioctl;
6897  }
6898 
6899  mutex_init(&tp->wk.mutex);
6900 
6901  /* Get MAC address */
6902  for (i = 0; i < ETH_ALEN; i++)
6903  dev->dev_addr[i] = RTL_R8(MAC0 + i);
6904  memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6905 
6906  SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6908 
6909  netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6910 
6911  /* don't enable SG, IP_CSUM and TSO by default - it might not work
6912  * properly for all devices */
6913  dev->features |= NETIF_F_RXCSUM |
6915 
6917  NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6920 
6921  if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6922  /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6924 
6925  dev->hw_features |= NETIF_F_RXALL;
6926  dev->hw_features |= NETIF_F_RXFCS;
6927 
6928  tp->hw_start = cfg->hw_start;
6929  tp->event_slow = cfg->event_slow;
6930 
6931  tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6932  ~(RxBOVF | RxFOVF) : ~0;
6933 
6934  init_timer(&tp->timer);
6935  tp->timer.data = (unsigned long) dev;
6936  tp->timer.function = rtl8169_phy_timer;
6937 
6939 
6940  rc = register_netdev(dev);
6941  if (rc < 0)
6942  goto err_out_msi_4;
6943 
6944  pci_set_drvdata(pdev, dev);
6945 
6946  netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6947  rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6948  (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6949  if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6950  netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6951  "tx checksumming: %s]\n",
6952  rtl_chip_infos[chipset].jumbo_max,
6953  rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6954  }
6955 
6956  if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6959  rtl8168_driver_start(tp);
6960  }
6961 
6963 
6964  if (pci_dev_run_wake(pdev))
6965  pm_runtime_put_noidle(&pdev->dev);
6966 
6967  netif_carrier_off(dev);
6968 
6969 out:
6970  return rc;
6971 
6972 err_out_msi_4:
6973  netif_napi_del(&tp->napi);
6974  rtl_disable_msi(pdev, tp);
6975  iounmap(ioaddr);
6976 err_out_free_res_3:
6977  pci_release_regions(pdev);
6978 err_out_mwi_2:
6979  pci_clear_mwi(pdev);
6980  pci_disable_device(pdev);
6981 err_out_free_dev_1:
6982  free_netdev(dev);
6983  goto out;
6984 }
6985 
/* PCI driver glue; rtl8169_pci_tbl is defined earlier in the file. */
static struct pci_driver rtl8169_pci_driver = {
	.name = MODULENAME,
	.id_table = rtl8169_pci_tbl,
	.probe = rtl_init_one,
	.remove = __devexit_p(rtl_remove_one),
	.shutdown = rtl_shutdown,
	.driver.pm = RTL8169_PM_OPS,
};
6994 
/* Module entry point: register the PCI driver with the core. */
static int __init rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
6999 
/* Module exit point: unregister the PCI driver. */
static void __exit rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}
7004 
7005 module_init(rtl8169_init_module);
7006 module_exit(rtl8169_cleanup_module);