#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64
#define DEV_NEED_TIMERIRQ 0x0000001
#define DEV_NEED_LINKTIMER 0x0000002
#define DEV_HAS_LARGEDESC 0x0000004
#define DEV_HAS_HIGH_DMA 0x0000008
#define DEV_HAS_CHECKSUM 0x0000010
#define DEV_HAS_VLAN 0x0000020
#define DEV_HAS_MSI 0x0000040
#define DEV_HAS_MSI_X 0x0000080
#define DEV_HAS_POWER_CNTRL 0x0000100
#define DEV_HAS_STATISTICS_V1 0x0000200
#define DEV_HAS_STATISTICS_V2 0x0000400
#define DEV_HAS_STATISTICS_V3 0x0000800
#define DEV_HAS_STATISTICS_V12 0x0000600
#define DEV_HAS_STATISTICS_V123 0x0000e00
#define DEV_HAS_TEST_EXTENDED 0x0001000
#define DEV_HAS_MGMT_UNIT 0x0002000
#define DEV_HAS_CORRECT_MACADDR 0x0004000
#define DEV_HAS_COLLISION_FIX 0x0008000
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000
#define DEV_NEED_TX_LIMIT 0x0080000
#define DEV_NEED_TX_LIMIT2 0x0180000
#define DEV_HAS_GEAR_MODE 0x0200000
#define DEV_NEED_PHY_INIT_FIX 0x0400000
#define DEV_NEED_LOW_POWER_FIX 0x0800000
#define DEV_NEED_MSI_FIX 0x1000000
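/*
 * Some of the above are pre-ORed convenience masks rather than single
 * feature bits: DEV_HAS_STATISTICS_V12 is V1|V2 (0x200|0x400 == 0x600),
 * DEV_HAS_STATISTICS_V123 is V1|V2|V3 (0xe00), and DEV_NEED_TX_LIMIT2
 * includes DEV_NEED_TX_LIMIT (0x080000|0x100000 == 0x180000). A board's
 * PCI table entry ORs its flags into driver_data, so feature checks
 * look like:
 *
 *	if (id->driver_data & DEV_HAS_STATISTICS_V2)
 *		...
 */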
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
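/*
 * Two moderation strategies: NVREG_IRQMASK_THROUGHPUT enables the
 * per-packet rx/tx completion bits, while NVREG_IRQMASK_CPU (0x0060,
 * i.e. timer + link only) batches completions behind the poll timer so
 * heavy traffic costs fewer interrupts.
 */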
#define NVREG_UNKSETUP6_VAL 3

#define NVREG_POLL_DEFAULT_THROUGHPUT 65535
#define NVREG_POLL_DEFAULT_CPU 13

#define NVREG_MSI_VECTOR_0_ENABLED 0x01

#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c

#define NVREG_MAC_RESET_ASSERT 0x0F3

#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000

#define NVREG_XMITSTAT_BUSY 0x01

#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10

#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE

#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000

#define NVREG_RCVSTAT_BUSY 0x01

#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff

#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000

#define NVREG_RX_DEFERRAL_DEFAULT 0x16

#define NVREG_MCASTADDRA_FORCE 0x01
#define NVREG_MCASTMASKA_NONE 0xffffffff
#define NVREG_MCASTMASKB_NONE 0xffff

#define PHY_RGMII 0x10000000

#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12

#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16

#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000

#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
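/*
 * The LINKSPEED values are not literal Mbit rates; the register appears
 * to take a clock divider, so 10 Mbit is programmed as 1000 (0x3e8),
 * 100 Mbit as 100, and 1000 Mbit as 50. NVREG_LINKSPEED_FORCE selects
 * forced (non-autonegotiated) speed.
 */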
#define NVREG_UNKSETUP5_BIT31 (1<<31)

#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000

#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
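/*
 * NVREG_TXRXCTL_DESC_1/2/3 select which descriptor format the NIC uses;
 * they track the per-board descriptor version and decide between the
 * legacy (.orig) ring layout and the extended (.ex) layout driven by
 * the "optimized" paths below.
 */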
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000

#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
#define NVREG_MII_LINKCHANGE 0x0008

#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24

#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5

#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5

#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111

#define NVREG_MGMTUNITGETVERSION 0x01
#define NVREG_MGMTUNITVERSION 0x08
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)

#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003

#define NVREG_MGMTUNITCONTROL_INUSE 0x20000

#define NVREG_VLANCONTROL_ENABLE 0x2000

#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
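/*
 * Each descriptor packs buffer length and status flags into one 32-bit
 * little-endian word: v1 descriptors reserve the top 16 bits for flags,
 * v2/v3 the top 18, so the rx/tx paths split it as
 *
 *	len   = le32_to_cpu(flaglen) & LEN_MASK_V2;
 *	flags = le32_to_cpu(flaglen) & FLAG_MASK_V2;
 */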
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)
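/*
 * For TSO the MSS is written into the descriptor above
 * NV_TX2_TSO_SHIFT, and NV_TX2_TSO_MAX_SIZE (1 << 14 == 16384 bytes)
 * caps how much data a single descriptor may carry, so larger buffers
 * are split into TSO_MAX_SIZE chunks in the xmit paths.
 */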
#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
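/*
 * Hardware checksum status is a 3-bit field, not independent flags: the
 * rx path masks with NV_RX2_CHECKSUMMASK and compares the result against
 * CHECKSUM_IP (header only), CHECKSUM_IP_TCP or CHECKSUM_IP_UDP before
 * setting skb->ip_summed to CHECKSUM_UNNECESSARY.
 */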
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

#define NV_WATCHDOG_TIMEO (5*HZ)
#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384
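/*
 * Ring sizes are runtime-tunable through ethtool between the *_MIN
 * values and the per-descriptor-version ceiling: v1 hardware addresses
 * at most 1024 descriptors per ring, v2/v3 up to 16384.
 */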
#define NV_RX_HEADERS (64)
#define NV_RX_ALLOC_PAD (64)

#define NV_PKTLIMIT_1 ETH_DATA_LEN
#define NV_PKTLIMIT_2 9100

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)
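/*
 * The intervals above are in jiffies: OOM_REFILL retries rx buffer
 * allocation roughly every 50 ms after an allocation failure,
 * LINK_TIMEOUT re-checks the link every 3 s on boards that need the
 * link timer, and STATS_INTERVAL harvests hardware counters every 10 s.
 */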
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003
#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_HALF 0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080

#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048
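/*
 * Dynamic interrupt moderation: an interrupt that retires more than
 * NV_DYNAMIC_THRESHOLD units of work switches the NIC to timer-driven
 * (CPU) interrupts; after NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet
 * interrupts it switches back to per-packet (throughput) interrupts.
 */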
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_late_collision" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },
#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
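/*
 * The counts fall out of the struct layout: V3 covers every u64 in
 * struct nv_ethtool_stats, V2 hardware implements 3 fewer counters and
 * V1 a further 6 fewer, so newer counters must stay at the end of the
 * struct for the subtraction to stay valid.
 */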
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
static int max_interrupt_work = 4;
static int poll_interval = -1;
static bool debug_tx_timeout = false;
static int phy_power_down;
static struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}
static bool nv_optimized(struct fe_priv *np)

		     int delay, int delaymax)

	} while ((readl(base + offset) & mask) != target);
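/*
 * reg_delay() polls a register: it udelay()s in steps of `delay` until
 * (readl(base + offset) & mask) == target, giving up once delaymax
 * microseconds have elapsed; the NV_*_DELAY/NV_*_DELAYMAX constants
 * defined earlier are its arguments.
 */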
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
	if (rxtx_flags & NV_SETUP_RX_RING) {
	if (rxtx_flags & NV_SETUP_TX_RING) {

static void free_rings(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {

static int using_multi_irqs(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);

static void nv_txrx_gate(struct net_device *dev, bool gate)
	struct fe_priv *np = get_nvpriv(dev);

static void nv_enable_irq(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {

static void nv_disable_irq(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {

static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
	struct fe_priv *np = get_nvpriv(dev);

static void nv_napi_enable(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);

static void nv_napi_disable(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
#define MII_READ (-1)
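/* Passing MII_READ as the value turns mii_rw() into a register read. */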
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
	struct fe_priv *np = netdev_priv(dev);
	unsigned int tries = 0;

	static const struct {

	if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))

	phy_reserved = mii_rw(dev, np->phyaddr,
	phy_reserved = mii_rw(dev, np->phyaddr,
	phy_reserved = mii_rw(dev, np->phyaddr,
	phy_reserved = mii_rw(dev, np->phyaddr,
	phy_reserved = mii_rw(dev, np->phyaddr,
	phy_reserved = mii_rw(dev, np->phyaddr,
	phy_reserved = mii_rw(dev, np->phyaddr,
	phy_reserved = mii_rw(dev, np->phyaddr,
	struct fe_priv *np = get_nvpriv(dev);
	u32 mii_status, mii_control, mii_control_1000, reg;

		netdev_info(dev, "%s: phy write to errata reg failed\n",

	if (init_realtek_8211b(dev, np)) {
		netdev_info(dev, "%s: phy init failed\n",
	if (init_realtek_8211c(dev, np)) {
		netdev_info(dev, "%s: phy init failed\n",
	if (init_realtek_8201(dev, np)) {
		netdev_info(dev, "%s: phy init failed\n",

		netdev_info(dev, "%s: phy write to advertise failed\n",

	mii_control_1000 = mii_rw(dev, np->phyaddr,
	if (phyinterface & PHY_RGMII)
		netdev_info(dev, "%s: phy init failed\n",
		netdev_info(dev, "%s: phy init failed\n",

	if (phy_reset(dev, mii_control)) {
		netdev_info(dev, "%s: phy reset failed\n",

	if (init_cicada(dev, np, phyinterface)) {
		netdev_info(dev, "%s: phy init failed\n",
	if (init_vitesse(dev, np)) {
		netdev_info(dev, "%s: phy init failed\n",
	if (init_realtek_8211b(dev, np)) {
		netdev_info(dev, "%s: phy init failed\n",
	if (init_realtek_8201(dev, np) ||
	    init_realtek_8201_cross(dev, np)) {
		netdev_info(dev, "%s: phy init failed\n",
static void nv_start_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	rx_ctrl &= ~NVREG_RCVCTL_START;

static void nv_stop_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	rx_ctrl &= ~NVREG_RCVCTL_START;
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",

static void nv_start_tx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

static void nv_stop_tx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",

static void nv_start_rxtx(struct net_device *dev)

static void nv_stop_rxtx(struct net_device *dev)

static void nv_txrx_reset(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

static void nv_mac_reset(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
static void nv_update_stats(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");

		np->estats.rx_multicast +
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

		np->estats.rx_errors_total += np->estats.rx_drop_frame;
	struct fe_priv *np = netdev_priv(dev);
	unsigned int syncp_start;

		nv_update_stats(dev);

		storage->rx_errors = np->estats.rx_errors_total;
		storage->tx_errors = np->estats.tx_errors_total;
		storage->multicast = np->estats.rx_multicast;
		storage->rx_length_errors = np->estats.rx_length_error;
		storage->rx_over_errors = np->estats.rx_over_errors;
		storage->rx_crc_errors = np->estats.rx_crc_errors;
		storage->rx_frame_errors = np->estats.rx_frame_align_error;
		storage->rx_fifo_errors = np->estats.rx_drop_frame;
		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
		storage->tx_fifo_errors = np->estats.tx_fifo_errors;
static int nv_alloc_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
	while (np->put_rx.orig != less_rx) {
			np->put_rx.orig = np->first_rx.orig;

static int nv_alloc_rx_optimized(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
	while (np->put_rx.ex != less_rx) {
			np->put_rx.ex = np->first_rx.ex;
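/*
 * Ring bookkeeping uses three cursors per direction: first_* points at
 * the base of the ring, put_* at the next descriptor handed to the NIC,
 * and get_* at the next descriptor to reap; the .orig/.ex union members
 * select the legacy or extended descriptor layout over the same memory.
 */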
static void nv_do_rx_refill(unsigned long data)
	struct fe_priv *np = netdev_priv(dev);

	napi_schedule(&np->napi);

static void nv_init_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (!nv_optimized(np))
	if (!nv_optimized(np)) {

static void nv_init_tx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (!nv_optimized(np))
	netdev_reset_queue(np->dev);
	if (!nv_optimized(np)) {

static int nv_init_ring(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	return nv_alloc_rx_optimized(dev);
	nv_unmap_txskb(np, tx_skb);

static void nv_drain_tx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	if (!nv_optimized(np)) {
		if (nv_release_txskb(np, &np->tx_skb[i])) {

static void nv_drain_rx(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	if (!nv_optimized(np)) {
				 (skb_end_pointer(np->rx_skb[i].skb) -
				  np->rx_skb[i].skb->data),
			dev_kfree_skb(np->rx_skb[i].skb);

static void nv_drain_rxtx(struct net_device *dev)

static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)

static void nv_legacybackoff_reseed(struct net_device *dev)
#define BACKOFF_SEEDSET_ROWS 8
#define BACKOFF_SEEDSET_LFSRS 15

	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };

	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
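/*
 * Known-good seed tables for the hardware "gear" backoff engine:
 * BACKOFF_SEEDSET_ROWS (8) candidate rows of BACKOFF_SEEDSET_LFSRS (15)
 * seeds each. nv_gear_backoff_reseed() below picks one row and programs
 * each of the 15 backoff LFSRs with its main and gear seed from that
 * row.
 */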
static void nv_gear_backoff_reseed(struct net_device *dev)
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;

	miniseed1 &= 0x0fff;
	miniseed2 &= 0x0fff;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);
	miniseed3 &= 0x0fff;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);
		combinedSeed |= 0x08;
		combinedSeed |= 0x8000;

		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
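/*
 * miniseedN_reversed swaps the top and bottom nibbles of a 12-bit
 * miniseed (0xABC becomes 0xCBA); the reversed and plain miniseeds are
 * XOR-combined into a 24-bit combinedSeed, and a bit is apparently
 * forced into each 12-bit half (|= 0x08 / |= 0x8000) so neither LFSR
 * ends up with an all-zero seed.
 */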
	struct fe_priv *np = netdev_priv(dev);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned long flags;

	for (i = 0; i < fragments; i++) {
		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&np->lock, flags);
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.orig;
			put_tx = np->first_tx.orig;

	for (i = 0; i < fragments; i++) {
		u32 frag_size = skb_frag_size(frag);
				put_tx = np->first_tx.orig;
		} while (frag_size);

	if (skb_is_gso(skb))

	netdev_sent_queue(np->dev, skb->len);
	skb_tx_timestamp(skb);
	np->put_tx.orig = put_tx;
	spin_unlock_irqrestore(&np->lock, flags);
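/*
 * Both xmit paths share the same shape: count the descriptors the skb
 * needs (linear area plus fragments, each split into chunks of at most
 * NV_TX2_TSO_MAX_SIZE), stop the queue when empty_slots <= entries,
 * fill descriptors while wrapping put_tx back to first_tx at the ring
 * end, then publish np->put_tx and kick the transmitter.
 */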
	struct fe_priv *np = netdev_priv(dev);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	u32 size = skb_headlen(skb);
	unsigned long flags;

	for (i = 0; i < fragments; i++) {
		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&np->lock, flags);
	spin_unlock_irqrestore(&np->lock, flags);

	start_tx = put_tx = np->put_tx.ex;
			put_tx = np->first_tx.ex;

	for (i = 0; i < fragments; i++) {
		u32 frag_size = skb_frag_size(frag);
				put_tx = np->first_tx.ex;
		} while (frag_size);

	if (skb_is_gso(skb))

	netdev_sent_queue(np->dev, skb->len);
	skb_tx_timestamp(skb);
	np->put_tx.ex = put_tx;
	spin_unlock_irqrestore(&np->lock, flags);
static inline void nv_tx_flip_ownership(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *orig_get_tx = np->get_tx.orig;
	unsigned int bytes_compl = 0;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       (tx_work < limit)) {
				nv_legacybackoff_reseed(dev);
				nv_legacybackoff_reseed(dev);
			np->get_tx.orig = np->first_tx.orig;

	netdev_completed_queue(np->dev, tx_work, bytes_compl);

	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		netif_wake_queue(dev);

static int nv_tx_done_optimized(struct net_device *dev, int limit)
	struct fe_priv *np = netdev_priv(dev);
	unsigned long bytes_cleaned = 0;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       (tx_work < limit)) {
		if (flags & NV_TX2_LASTPACKET) {
			if (flags & NV_TX2_ERROR) {
					nv_gear_backoff_reseed(dev);
					nv_legacybackoff_reseed(dev);
				nv_tx_flip_ownership(dev);
			np->get_tx.ex = np->first_tx.ex;

	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
		netif_wake_queue(dev);
static void nv_tx_timeout(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);

		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
		netdev_info(dev, "Dumping tx registers\n");
				    "%3x: %08x %08x %08x %08x "
				    "%08x %08x %08x %08x\n",
		netdev_info(dev, "Dumping tx ring\n");
			if (!nv_optimized(np)) {
					    "%03x: %08x %08x // %08x %08x "
					    "// %08x %08x // %08x %08x\n",
					    "%03x: %08x %08x %08x "
					    "// %08x %08x %08x "
					    "// %08x %08x %08x "
					    "// %08x %08x %08x\n",

	spin_lock_irq(&np->lock);

	if (!nv_optimized(np))
	put_tx = np->put_tx;
	np->get_tx = np->put_tx = put_tx;

	netif_wake_queue(dev);
	spin_unlock_irq(&np->lock);
	protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);

	if (datalen >= protolen) {
static int nv_rx_process(struct net_device *dev, int limit)
	struct fe_priv *np = netdev_priv(dev);

	while ((np->get_rx.orig != np->put_rx.orig) &&
	       (rx_work < limit)) {
					len = nv_getlen(dev, skb->data, len);
					len = nv_getlen(dev, skb->data, len);
			np->get_rx.orig = np->first_rx.orig;

static int nv_rx_process_optimized(struct net_device *dev, int limit)
	struct fe_priv *np = netdev_priv(dev);

	while ((np->get_rx.ex != np->put_rx.ex) &&
	       (rx_work < limit)) {
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			if (unlikely(flags & NV_RX2_ERROR)) {
					len = nv_getlen(dev, skb->data, len);
				__vlan_hwaccel_put_tag(skb, vid);
			np->get_rx.ex = np->first_rx.ex;
static void set_bufsize(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

static int nv_change_mtu(struct net_device *dev, int new_mtu)
	struct fe_priv *np = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
	if (old_mtu == new_mtu)

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		if (nv_init_ring(dev)) {
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
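/*
 * This quiesce/reinit pattern recurs throughout the driver: take
 * nv_disable_irq -> nv_napi_disable -> netif_tx_lock_bh ->
 * netif_addr_lock -> np->lock, rebuild the rings, then release
 * everything in exactly the reverse order.
 */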
static void nv_copy_mac_to_hw(struct net_device *dev)

static int nv_set_mac_address(struct net_device *dev, void *addr)
	struct fe_priv *np = netdev_priv(dev);

	if (!is_valid_ether_addr(macaddr->sa_data))

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);
		nv_copy_mac_to_hw(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_copy_mac_to_hw(dev);

static void nv_set_multicast(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

		alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
		alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
				unsigned char *hw_addr = ha->addr;

	addr[0] = alwaysOn[0];
	addr[1] = alwaysOn[1];
	mask[0] = alwaysOn[0] | alwaysOff[0];
	mask[1] = alwaysOn[1] | alwaysOff[1];

	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
	struct fe_priv *np = netdev_priv(dev);

static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
	struct fe_priv *np = netdev_priv(dev);

	if (mii_status & PHY_GIGABIT) {
		phyreg &= ~(0x3FF00);
	if (phyreg & PHY_RGMII) {

static int nv_update_linkspeed(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	int adv_lpa, adv_pause, lpa_pause;

	if (netif_running(dev)) {
		if (!netif_carrier_ok(dev))

	if (np->gigabit == PHY_GIGABIT) {

	adv_lpa = lpa & adv;

	if (np->gigabit == PHY_GIGABIT) {
		phyreg &= ~(0x3FF00);
	if (phyreg & PHY_RGMII) {

	if (netif_running(dev) && (np->duplex != 0)) {
			switch (adv_pause) {
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
					if (lpa_pause & LPA_PAUSE_CAP) {
	nv_update_pause(dev, pause_flags);
static void nv_linkchange(struct net_device *dev)
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netdev_info(dev, "link up\n");
			nv_txrx_gate(dev, false);
		if (netif_carrier_ok(dev)) {
			netdev_info(dev, "link down\n");
			nv_txrx_gate(dev, true);

static void nv_link_irq(struct net_device *dev)

static void nv_msi_workaround(struct fe_priv *np)

static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
	struct fe_priv *np = netdev_priv(dev);

	struct fe_priv *np = netdev_priv(dev);

	nv_msi_workaround(np);

	if (napi_schedule_prep(&np->napi)) {
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
	struct fe_priv *np = netdev_priv(dev);

	nv_msi_workaround(np);

	if (napi_schedule_prep(&np->napi)) {

static irqreturn_t nv_nic_irq_tx(int foo, void *data)
	struct fe_priv *np = netdev_priv(dev);
	unsigned long flags;

		netdev_dbg(dev, "tx irq events: %08x\n", events);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_dbg(dev, "%s: too many iterations (%d)\n",
	unsigned long flags;
	int rx_count, tx_work = 0, rx_work = 0;

		if (!nv_optimized(np)) {
			spin_unlock_irqrestore(&np->lock, flags);
			rx_count = nv_rx_process(dev, budget - rx_work);
			retcode = nv_alloc_rx(dev);
			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
			spin_unlock_irqrestore(&np->lock, flags);
			rx_count = nv_rx_process_optimized(dev,
			retcode = nv_alloc_rx_optimized(dev);
	} while (retcode == 0 &&
		 rx_count > 0 && (rx_work += rx_count) < budget);

		spin_unlock_irqrestore(&np->lock, flags);

	nv_change_interrupt_mode(dev, tx_work + rx_work);

		spin_unlock_irqrestore(&np->lock, flags);
		spin_unlock_irqrestore(&np->lock, flags);
		spin_unlock_irqrestore(&np->lock, flags);

	if (rx_work < budget) {
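/*
 * Standard NAPI contract: only when the poll retired less than its
 * budget does the driver complete NAPI and re-enable the device
 * interrupts; consuming the full budget keeps it scheduled for another
 * poll round.
 */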
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
	struct fe_priv *np = netdev_priv(dev);
	unsigned long flags;

		netdev_dbg(dev, "rx irq events: %08x\n", events);

			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_dbg(dev, "%s: too many iterations (%d)\n",

static irqreturn_t nv_nic_irq_other(int foo, void *data)
	struct fe_priv *np = netdev_priv(dev);
	unsigned long flags;

		netdev_dbg(dev, "irq events: %08x\n", events);
			spin_unlock_irqrestore(&np->lock, flags);
			spin_unlock_irqrestore(&np->lock, flags);
			spin_unlock_irqrestore(&np->lock, flags);
			spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(i > max_interrupt_work)) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_dbg(dev, "%s: too many iterations (%d)\n",
static irqreturn_t nv_nic_irq_test(int foo, void *data)
	struct fe_priv *np = netdev_priv(dev);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	spin_unlock(&np->lock);

	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1)
			msixmap |= vector << (i << 2);
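/*
 * The MSI-X map registers hold a 4-bit vector number per interrupt
 * source: the first loop packs sources 0-7 into one 32-bit map word,
 * the second packs sources 8-15 into the next, shifting `vector` into
 * the source's nibble (i << 2).
 */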
	struct fe_priv *np = get_nvpriv(dev);

	if (nv_optimized(np))
		handler = nv_nic_irq_optimized;

			    "request_irq failed for rx %d\n",
			    "request_irq failed for tx %d\n",
			    "request_irq failed for link %d\n",
			    "request_irq failed %d\n",
		netdev_info(dev, "MSI-X enabled\n");
		ret = pci_enable_msi(np->pci_dev);
				netdev_info(dev, "request_irq failed %d\n",
		netdev_info(dev, "MSI enabled\n");

static void nv_free_irq(struct net_device *dev)
	struct fe_priv *np = get_nvpriv(dev);
static void nv_do_nic_poll(unsigned long data)
	struct fe_priv *np = netdev_priv(dev);

	if (!using_multi_irqs(dev)) {

		netdev_info(dev, "MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			if (nv_init_ring(dev)) {
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);

	if (!using_multi_irqs(dev)) {
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
			nv_nic_irq_rx(0, dev);
			nv_nic_irq_tx(0, dev);
			nv_nic_irq_other(0, dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
	nv_do_nic_poll((unsigned long) dev);

static void nv_do_stats_poll(unsigned long data)
	struct fe_priv *np = netdev_priv(dev);

	nv_update_stats(dev);
	struct fe_priv *np = netdev_priv(dev);

	struct fe_priv *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);

	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		spin_unlock_irq(&np->lock);

	struct fe_priv *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	if (!netif_running(dev)) {
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
			if (netif_carrier_ok(dev))

	if (netif_carrier_ok(dev)) {
	ethtool_cmd_speed_set(ecmd, speed);

	if (np->gigabit == PHY_GIGABIT) {
		if (adv & ADVERTISE_1000FULL)

	if (np->gigabit == PHY_GIGABIT)

	spin_unlock_irq(&np->lock);
	struct fe_priv *np = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(ecmd);

	if (np->gigabit == PHY_GIGABIT)

	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);

		if (np->gigabit == PHY_GIGABIT) {
			adv &= ~ADVERTISE_1000FULL;

		if (netif_running(dev))
			netdev_info(dev, "link down\n");

			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");

		if (np->gigabit == PHY_GIGABIT) {
			adv &= ~ADVERTISE_1000FULL;

		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))

			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");

	if (netif_running(dev)) {
	if (netif_running(dev)) {
#define FORCEDETH_REGS_VER 1

static int nv_get_regs_len(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	struct fe_priv *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
static int nv_nway_reset(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			netdev_info(dev, "link down\n");

			if (phy_reset(dev, bmcr)) {
				netdev_info(dev, "phy reset failed\n");

		if (netif_running(dev)) {
	struct fe_priv *np = netdev_priv(dev);

	struct fe_priv *np = netdev_priv(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;

	if (!nv_optimized(np)) {
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		if (!nv_optimized(np)) {
				    rxtx_ring, ring_addr);
				    rxtx_ring, ring_addr);

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		nv_napi_disable(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);

	if (!nv_optimized(np)) {

	if (netif_running(dev)) {
		if (nv_init_ring(dev)) {
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_napi_enable(dev);
	struct fe_priv *np = netdev_priv(dev);

	struct fe_priv *np = netdev_priv(dev);

		netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
		netdev_info(dev, "hardware does not support tx pause frames\n");

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);

		if (netif_running(dev))
			netdev_info(dev, "link down\n");

		if (!netif_running(dev))
			nv_update_linkspeed(dev);

	if (netif_running(dev)) {
	struct fe_priv *np = netdev_priv(dev);
	unsigned long flags;
	int err, retval = 0;

		if (miicontrol & BMCR_LOOPBACK) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_info(dev, "Loopback already enabled\n");
		nv_disable_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		if (netif_running(dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
			    "Internal PHY loopback mode enabled.\n");
		if (!(miicontrol & BMCR_LOOPBACK)) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_info(dev, "Loopback already disabled\n");
		nv_disable_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
		spin_unlock_irqrestore(&np->lock, flags);
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);

	struct fe_priv *np = netdev_priv(dev);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
		retval = nv_set_loopback(dev, features);

		spin_lock_irq(&np->lock);
		if (features & NETIF_F_RXCSUM)
		if (netif_running(dev))
		spin_unlock_irq(&np->lock);

	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
		nv_vlan_mode(dev, features);
static int nv_get_sset_count(struct net_device *dev, int sset)
	struct fe_priv *np = netdev_priv(dev);

static void nv_get_ethtool_stats(struct net_device *dev,
	struct fe_priv *np = netdev_priv(dev);

	nv_update_stats(dev);
static int nv_link_test(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	if (!(mii_status & BMSR_LSTATUS))

static int nv_register_test(struct net_device *dev)
	u32 orig_read, new_read;

		orig_read = readl(base + nv_registers_test[i].reg);
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);
		new_read = readl(base + nv_registers_test[i].reg);
		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);
	} while (nv_registers_test[++i].reg != 0);
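/*
 * The register self-test XORs the writable bits (given by .mask) of
 * each entry in nv_registers_test[], reads back to verify the flip
 * stuck, then XORs again to restore the original value; a zero .reg
 * entry terminates the table.
 */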
static int nv_interrupt_test(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
	if (nv_request_irq(dev, 1))

	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);

	if (netif_running(dev)) {
	if (nv_request_irq(dev, 0))
static int nv_loopback_test(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	struct sk_buff *tx_skb, *rx_skb;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;

	if (netif_running(dev)) {
		nv_disable_irq(dev);

	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);

	tx_skb = netdev_alloc_skb(dev, pkt_len);
		netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
	pkt_data = skb_put(tx_skb, pkt_len);
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {

	pci_push(get_hwbase(dev));

	if (!nv_optimized(np)) {
		if (flags & NV_RX_ERROR)
		if (flags & NV_RX2_ERROR)
		if (len != pkt_len) {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {

	pci_unmap_single(np->pci_dev, test_dma_addr,
			 (skb_end_pointer(tx_skb) - tx_skb->data),

	if (netif_running(dev)) {
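/*
 * The loopback self-test transmits a single pkt_len-byte frame whose
 * payload is the pattern (i & 0xff) while the PHY is in internal
 * loopback, then verifies both the received length and every payload
 * byte against that same pattern.
 */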
	struct fe_priv *np = netdev_priv(dev);

	if (!nv_link_test(dev)) {

		if (netif_running(dev)) {
			netif_stop_queue(dev);
			nv_napi_disable(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			spin_unlock_irq(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);

		if (!nv_register_test(dev)) {

		result = nv_interrupt_test(dev);

		if (!nv_loopback_test(dev)) {

		if (netif_running(dev)) {
			if (nv_init_ring(dev)) {
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			netif_start_queue(dev);
			nv_napi_enable(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
	switch (stringset) {

	.get_drvinfo = nv_get_drvinfo,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
static int nv_mgmt_acquire_sema(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

	for (i = 0; i < 10; i++) {
		for (i = 0; i < 2; i++) {

static void nv_mgmt_release_sema(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);

static int nv_mgmt_get_version(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u32 data_ready2 = 0;
	unsigned long start;
	struct fe_priv *np = netdev_priv(dev);

	nv_txrx_gate(dev, false);

	oom = nv_init_ring(dev);

	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);

		      "%s: SetupReg5, Bit 31 remained off\n", __func__);

		nv_gear_backoff_reseed(dev);

	if (poll_interval == -1) {

	nv_disable_hw_interrupts(dev, np->irqmask);

	if (nv_request_irq(dev, 0))

	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);

	ret = nv_update_linkspeed(dev);
	netif_start_queue(dev);
	nv_napi_enable(dev);

		netdev_info(dev, "no link during initialization\n");

	spin_unlock_irq(&np->lock);

	if (dev->features & NETIF_F_LOOPBACK)
		nv_set_loopback(dev, dev->features);
	struct fe_priv *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_update_pause(dev, 0);

	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);

	spin_unlock_irq(&np->lock);

		nv_txrx_gate(dev, false);
		nv_txrx_gate(dev, true);
	.ndo_open = nv_open,
	.ndo_stop = nv_close,
	.ndo_get_stats64 = nv_get_stats64,
	.ndo_start_xmit = nv_start_xmit,
	.ndo_tx_timeout = nv_tx_timeout,
	.ndo_change_mtu = nv_change_mtu,
	.ndo_fix_features = nv_fix_features,
	.ndo_set_features = nv_set_features,
	.ndo_set_mac_address = nv_set_mac_address,
	.ndo_set_rx_mode = nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = nv_poll_controller,

	.ndo_open = nv_open,
	.ndo_stop = nv_close,
	.ndo_get_stats64 = nv_get_stats64,
	.ndo_start_xmit = nv_start_xmit_optimized,
	.ndo_tx_timeout = nv_tx_timeout,
	.ndo_change_mtu = nv_change_mtu,
	.ndo_fix_features = nv_fix_features,
	.ndo_set_features = nv_set_features,
	.ndo_set_mac_address = nv_set_mac_address,
	.ndo_set_rx_mode = nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = nv_poll_controller,
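/*
 * The two net_device_ops tables differ only in the xmit entry: boards
 * using the extended descriptor format get nv_start_xmit_optimized,
 * everything else gets nv_start_xmit.
 */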
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	static int printed_version;

	if (!printed_version++)
		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",

	dev = alloc_etherdev(sizeof(struct fe_priv));

	np = netdev_priv(dev);

	np->oom_kick.function = nv_do_rx_refill;
	np->nic_poll.function = nv_do_nic_poll;

	if (i == DEVICE_COUNT_RESOURCE) {
		dev_info(&pci_dev->dev, "Couldn't find register window\n");

			 "64-bit DMA failed, using 32-bit addressing\n");
		if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
			 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");

	if (!nv_optimized(np)) {
	if (!nv_optimized(np))

	pci_set_drvdata(pci_dev, dev);

	base = get_hwbase(dev);

		 "%s: set workaround bit for reversed mac addr\n",

	if (!is_valid_ether_addr(dev->perm_addr)) {
			 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
		eth_hw_addr_random(dev);
			 "Using random MAC address: %pM\n", dev->dev_addr);

	nv_copy_mac_to_hw(dev);

		phystate &= ~NVREG_ADAPTCTL_RUNNING;

	    nv_mgmt_acquire_sema(dev) &&
	    nv_mgmt_get_version(dev)) {
				     NVREG_XMITCTL_SYNC_PHY_INIT)) {

	for (i = 1; i <= 32; i++) {
		spin_lock_irq(&np->lock);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
		spin_lock_irq(&np->lock);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
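/*
 * PHY probe: scan all 32 MII addresses (starting at 1 and wrapping
 * through 0), reading PHYID1/PHYID2 under np->lock; a read error or an
 * all-ones (0xffff) ID means no PHY answers at that address.
 */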
		dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");

	if (!phyinitialized) {

	if (mii_status & PHY_GIGABIT)

		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);

	nv_update_pause(dev, 0);

	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
		 dev->features & (NETIF_F_LOOPBACK) ?
		 np->gigabit == PHY_GIGABIT ? "gbit " : "",

	pci_set_drvdata(pci_dev, NULL);
static void nv_restore_phy(struct net_device *dev)
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

static void nv_restore_mac_addr(struct pci_dev *pci_dev)
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);

static void __devexit nv_remove(struct pci_dev *pci_dev)
	struct net_device *dev = pci_get_drvdata(pci_dev);

	nv_restore_mac_addr(pci_dev);

	nv_restore_phy(dev);

	nv_mgmt_release_sema(dev);

	pci_set_drvdata(pci_dev, NULL);
#ifdef CONFIG_PM_SLEEP
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev)) {

static int nv_resume(struct device *device)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev)) {
		nv_set_multicast(dev);

#define NV_PM_OPS (&nv_pm_ops)
#define NV_PM_OPS NULL

	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))

	nv_restore_mac_addr(pdev);

#define nv_shutdown NULL
	.id_table = pci_tbl,

static int __init init_nic(void)
	return pci_register_driver(&driver);

static void __exit exit_nic(void)
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
MODULE_PARM_DESC(debug_tx_timeout, "Dump tx related registers and ring when tx_timeout happens");