#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/prefetch.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

#define DRV_NAME "natsemi"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sept 11, 2006"
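/* Default msg_enable bitmap; presumably the value used when the 'debug'
 * module parameter below is left at -1. */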
#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \

static int debug = -1;
static const int multicast_filter_limit = 100;
static int rx_copybreak;
static int dspcfg_workaround = 1;

#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10
#define RX_RING_SIZE 32

#define TX_TIMEOUT (2*HZ)

#define NATSEMI_HW_TIMEOUT 400
#define NATSEMI_TIMER_FREQ 5*HZ
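/* Register-dump geometry: 64 page-0 operational registers, the 8 receive
 * filter (RFDR) slots and 4 page-1 registers -- apparently what
 * get_regs_len()/netdev_get_regs() below expose through ethtool. */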
#define NATSEMI_PG0_NREGS 64
#define NATSEMI_RFDR_NREGS 8
#define NATSEMI_PG1_NREGS 4
#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
			NATSEMI_PG1_NREGS)
#define NATSEMI_REGS_VER 1
#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))

#define NATSEMI_HEADERS 22
#define NATSEMI_PADDING 16
#define NATSEMI_LONGPKT 1518
#define NATSEMI_RX_LIMIT 2046
134 " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
149 "DP8381x copy breakpoint for copy-only-tiny-frames");
152 "DP8381x: Bits 0-3: media type, bit 17: full duplex");
228 #define PHYID_AM79C874 0x0022561b
247 {
"NatSemi DP8381[56]", 0, 24 },
#define PMDCSR_VAL 0x189c
#define TSTDAT_VAL 0x0
#define DSPCFG_VAL 0x5040
#define SDCFG_VAL 0x008c
#define DSPCFG_LOCK 0x20
#define DSPCFG_COEF 0x1000
#define TSTDAT_FIXED 0xe8
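/* The values above look like the magic settings for the DP83815
 * DSP/cable-length ("dspcfg") workaround; they appear to be written to the
 * page-1 PHY test registers by init_phy_fixup() and do_cable_magic() below. */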
#define DEFAULT_INTR 0x00f1cd65

#define TX_FLTH_VAL ((512/32) << 8)
#define TX_DRTH_VAL_START (64/32)
#define TX_DRTH_VAL_INC 2
#define TX_DRTH_VAL_LIMIT (1472/32)

#define RX_DRTH_VAL (128/8)
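/* The FIFO thresholds above are in hardware units: 32 bytes per step on the
 * transmit side, 8 bytes per step on the receive side (hence the /32 and /8). */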
#define PHY_ADDR_NONE 32
#define PHY_ADDR_INTERNAL 1

#define SRR_DP83815_C 0x0302
#define SRR_DP83815_D 0x0403
#define SRR_DP83816_A4 0x0504
#define SRR_DP83816_A5 0x0505
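/* PHY_ADDR_NONE marks "no external PHY found"; the internal transceiver
 * answers at address 1. The SRR_* constants are silicon revision register
 * values, seemingly used to distinguish DP83815 C/D steppings from
 * DP83816 A4/A5. */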
static void netdev_timer(unsigned long data);
static void netdev_error(struct net_device *dev, int intr_status);
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enable_wol_mode(struct net_device *dev, int enable_intr);
#define NATSEMI_ATTR(_name) \
static ssize_t natsemi_show_##_name(struct device *dev, \
			struct device_attribute *attr, char *buf); \
static ssize_t natsemi_set_##_name(struct device *dev, \
			struct device_attribute *attr, \
			const char *buf, size_t count); \
static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)

#define NATSEMI_CREATE_FILE(_dev, _name) \
	device_create_file(&_dev->dev, &dev_attr_##_name)
#define NATSEMI_REMOVE_FILE(_dev, _name) \
	device_remove_file(&_dev->dev, &dev_attr_##_name)
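/* Usage sketch (inferred from the handlers below): the only attribute
 * declared this way appears to be dspcfg_workaround, i.e.
 *	NATSEMI_ATTR(dspcfg_workaround);
 * which generates natsemi_show_dspcfg_workaround(),
 * natsemi_set_dspcfg_workaround() and dev_attr_dspcfg_workaround for use
 * with NATSEMI_CREATE_FILE()/NATSEMI_REMOVE_FILE(). */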
static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)

	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
	else if (!strncmp("off", buf, count - 1) || !strncmp("0", buf, count - 1))

	spin_unlock_irqrestore(&np->lock, flags);
static inline void natsemi_irq_enable(struct net_device *dev)

static inline void natsemi_irq_disable(struct net_device *dev)

		"enabled, advertise" : "disabled, force",
		"natsemi %s: Transceiver status %#04x advertising %#04x.\n",

	.ndo_open = netdev_open,
	.ndo_stop = netdev_close,
	.ndo_start_xmit = start_tx,
	.ndo_set_rx_mode = set_rx_mode,
	.ndo_change_mtu = natsemi_change_mtu,
	.ndo_do_ioctl = netdev_ioctl,
	.ndo_tx_timeout = ns_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = natsemi_poll_controller,
	static int find_cnt = -1;
	const int pcibar = 1;
	static int printed_version;

	if (!printed_version++)

	pci_read_config_dword(pdev, PCIPM, &tmp);
		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
		pci_write_config_dword(pdev, PCIPM, newtmp);

		goto err_pci_request_regions;
	ioaddr = ioremap(iostart, iosize);

	prev_eedata = eeprom_read(ioaddr, 6);
	for (i = 0; i < 3; i++) {
		int eedata = eeprom_read(ioaddr, i + 7);
		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
		prev_eedata = eedata;

	np = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;

	natsemi_reload_eeprom(dev);

			"natsemi %s: ignoring user supplied media type %d",
			pci_name(np->pci_dev), option & 15);

	natsemi_init_media(dev);

		goto err_register_netdev;
		goto err_create_file;

		dev->name, natsemi_pci_info[chip_idx].name,
		(unsigned long long)iostart, pci_name(np->pci_dev),

		printk(", port MII, ignoring PHY\n");

	pci_set_drvdata(pdev, NULL);

 err_pci_request_regions:
#define eeprom_delay(ee_addr) readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)
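/* The serial EEPROM is read by bit-banging what appears to be the EECtrl
 * register; eeprom_delay() is just a dummy readl() used to pace the clock. */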
	for (i = 10; i >= 0; i--) {
		writel(dataval, ee_addr);

	for (i = 0; i < 16; i++) {

#define mii_delay(ioaddr) readl(ioaddr + EECtrl)
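/* The helpers below bit-bang MII management frames for an external PHY,
 * apparently through the same EECtrl register: miiport_read() sends a 14-bit
 * command and clocks in 16 data bits, miiport_write() sends a full 32-bit
 * write frame. */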
static int mii_getbit (struct net_device *dev)
	void __iomem *ioaddr = ns_ioaddr(dev);

	void __iomem *ioaddr = ns_ioaddr(dev);
	for (i = (1 << (len-1)); i; i >>= 1)

	mii_send_bits (dev, 0xffffffff, 32);
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);
	if (mii_getbit (dev))
	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);

static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
	mii_send_bits (dev, 0xffffffff, 32);
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);

static int mdio_read(struct net_device *dev, int reg)
	void __iomem *ioaddr = ns_ioaddr(dev);

static void mdio_write(struct net_device *dev, int reg, u16 data)
	void __iomem *ioaddr = ns_ioaddr(dev);

static void init_phy_fixup(struct net_device *dev)
	void __iomem *ioaddr = ns_ioaddr(dev);
		if (np->dspcfg == dspcfg)
	if (i==NATSEMI_HW_TIMEOUT) {
			"%s: DSPCFG mismatch after retrying for %d usec.\n",
			"%s: DSPCFG accepted after %d usec.\n",

static int switch_port_external(struct net_device *dev)
	void __iomem *ioaddr = ns_ioaddr(dev);
	if (cfg & CfgExtPhy)
	init_phy_fixup(dev);

static int switch_port_internal(struct net_device *dev)
	void __iomem *ioaddr = ns_ioaddr(dev);
	if (!(cfg & CfgExtPhy))
			"%s: phy reset did not complete in %d usec.\n",
	init_phy_fixup(dev);

	did_switch = switch_port_external(dev);
	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		if (tmp != 0xffff && tmp != 0x0000) {
			printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
	switch_port_internal(dev);

#define CFG_RESET_SAVE 0xfde000
#define WCSR_RESET_SAVE 0x61f
#define RFCR_RESET_SAVE 0xf8500000
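/* CFG, WCSR and RFCR bits that natsemi_reset() below saves and restores
 * around a soft reset, since the reset clears most of the chip's
 * configuration. */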
static void natsemi_reset(struct net_device *dev)
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = 0; i < 3; i++) {
	for (i = 0; i < 3; i++) {
	if (i==NATSEMI_HW_TIMEOUT) {
	for (i = 0; i < 3; i++) {
	for (i = 0; i < 3; i++) {

	void __iomem *ioaddr = ns_ioaddr(dev);
	if (i==NATSEMI_HW_TIMEOUT) {

static void natsemi_reload_eeprom(struct net_device *dev)
	void __iomem *ioaddr = ns_ioaddr(dev);
	if (i==NATSEMI_HW_TIMEOUT) {

static void natsemi_stop_rxtx(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
	if (i==NATSEMI_HW_TIMEOUT) {

static int netdev_open(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	i = alloc_ring(dev);
	napi_enable(&np->napi);
	spin_lock_irq(&np->lock);
	for (i = 0; i < 3; i++) {
	spin_unlock_irq(&np->lock);
	netif_start_queue(dev);
	np->timer.function = netdev_timer;

static void do_cable_magic(struct net_device *dev)
	void __iomem *ioaddr = ns_ioaddr(dev);
	if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
		np = netdev_priv(dev);

static void undo_cable_magic(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);

static void check_link(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
		goto propagate_state;
	if (netif_carrier_ok(dev)) {
		undo_cable_magic(dev);
	if (!netif_carrier_ok(dev)) {
		do_cable_magic(dev);
		int tmp = mii_nway_result(
			"%s: Setting %s-duplex based on negotiated "
			"link capability.\n", dev->name,
			duplex ? "full" : "half");

	void __iomem * ioaddr = ns_ioaddr(dev);
	init_phy_fixup(dev);
	natsemi_irq_enable(dev);

static void netdev_timer(unsigned long data)
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	spin_lock_irq(&np->lock);
	if (!netif_queue_stopped(dev)) {
		spin_unlock_irq(&np->lock);
			"re-initializing\n", dev->name);
		spin_lock_irq(&np->lock);
		natsemi_stop_rxtx(dev);
		spin_unlock_irq(&np->lock);
	spin_unlock_irq(&np->lock);
	spin_unlock_irq(&np->lock);
	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);

static void dump_ring(struct net_device *dev)

static void ns_tx_timeout(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	spin_lock_irq(&np->lock);
		"%s: Transmit timed out, status %#08x,"
		"%s: tx_timeout while in hands_off state?\n",
	spin_unlock_irq(&np->lock);
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

static int alloc_ring(struct net_device *dev)

static void refill_rx(struct net_device *dev)
		skb = netdev_alloc_skb(dev, buflen);

static void set_bufsize(struct net_device *dev)

static void init_ring(struct net_device *dev)
			*((i+1)%RX_RING_SIZE));
		dev->stats.tx_dropped++;

static void drain_ring(struct net_device *dev)

static void free_ring(struct net_device *dev)
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),

static void reinit_rx(struct net_device *dev)

static void reinit_ring(struct net_device *dev)
		np->tx_ring[i].cmd_status = 0;

	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned long flags;

	netdev_tx_done(dev);
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	spin_unlock_irqrestore(&np->lock, flags);
static void netdev_tx_done(struct net_device *dev)
			"%s: tx frame #%d finished, status %#08x.\n",
			dev->stats.tx_packets++;
				dev->stats.tx_aborted_errors++;
				dev->stats.tx_fifo_errors++;
				dev->stats.tx_carrier_errors++;
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
	if (netif_queue_stopped(dev) &&
		netif_wake_queue(dev);

	void __iomem * ioaddr = ns_ioaddr(dev);
		"%s: Interrupt, status %#08x, mask %#08x.\n",
	if (napi_schedule_prep(&np->napi)) {
		natsemi_irq_disable(dev);
		"%s: Ignoring interrupt, status %#08x, mask %#08x.\n",

	void __iomem * ioaddr = ns_ioaddr(dev);
		"%s: Poll, status %#08x, mask %#08x.\n",
	netdev_rx(dev, &work_done, budget);
		spin_lock(&np->lock);
		netdev_tx_done(dev);
		spin_unlock(&np->lock);
	if (work_done >= budget)
	spin_lock(&np->lock);
	natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
	void __iomem * ioaddr = ns_ioaddr(dev);
	while (desc_status < 0) {
			" netdev_rx() entry %d status was %#08x.\n",
			entry, desc_status);
		if (*work_done >= work_to_do)
			unsigned long flags;
				"%s: Oversized(?) Ethernet "
				"frame spanned multiple "
				"buffers, entry %#08x "
				"status %#08x.\n", dev->name,
				np->cur_rx, desc_status);
			dev->stats.rx_length_errors++;
			spin_unlock_irqrestore(&np->lock, flags);
			dev->stats.rx_errors++;
				dev->stats.rx_over_errors++;
				dev->stats.rx_length_errors++;
				dev->stats.rx_frame_errors++;
				dev->stats.rx_crc_errors++;
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
				pci_dma_sync_single_for_cpu(np->pci_dev,
				skb_copy_to_linear_data(skb,
				pci_dma_sync_single_for_device(np->pci_dev,
			dev->stats.rx_packets++;
		entry = (++np->cur_rx) % RX_RING_SIZE;
static void netdev_error(struct net_device *dev, int intr_status)
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
			"%s: Autonegotiation advertising"
			" %#04x partner %#04x.\n", dev->name,
			"%s: increased tx threshold, txcfg %#08x.\n",
			"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
			dev->name, wol_status);
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
			intr_status & IntrPCIErr);
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	spin_unlock(&np->lock);

static void __get_stats(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock_irq(&np->lock);
	if (netif_running(dev) && !np->hands_off)
	spin_unlock_irq(&np->lock);

#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev)
	const int irq = np->pci_dev->irq;

#define HASH_TABLE 0x200
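/* The multicast hash table starts at offset 0x200 in the receive filter
 * address space; __set_rx_mode() below writes the 512-bit table sixteen
 * bits at a time (two filter bytes per RFDR write). */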
static void __set_rx_mode(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);

		memset(mc_filter, 0, sizeof(mc_filter));
			mc_filter[b/8] |= (1 << (b & 0x07));
		for (i = 0; i < 64; i += 2) {
			writel((mc_filter[i + 1] << 8) + mc_filter[i],

static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
	if (netif_running(dev)) {
		void __iomem * ioaddr = ns_ioaddr(dev);
		const int irq = np->pci_dev->irq;

		spin_lock(&np->lock);
		natsemi_stop_rxtx(dev);
		spin_unlock(&np->lock);

static void set_rx_mode(struct net_device *dev)
	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);

static int get_regs_len(struct net_device *dev)

static int get_eeprom_len(struct net_device *dev)

	spin_lock_irq(&np->lock);
	netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);

	spin_lock_irq(&np->lock);
	res = netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);

	spin_lock_irq(&np->lock);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);

	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);

	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
static int nway_reset(struct net_device *dev)
	if (tmp & BMCR_ANENABLE) {

	return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;

	spin_lock_irq(&np->lock);
	res = netdev_get_eeprom(dev, eebuf);
	spin_unlock_irq(&np->lock);

static int netdev_set_wol(struct net_device *dev, u32 newval)
	void __iomem * ioaddr = ns_ioaddr(dev);

	void __iomem * ioaddr = ns_ioaddr(dev);
	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST

static int netdev_set_sopass(struct net_device *dev, u8 *newval)
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;

static int netdev_get_sopass(struct net_device *dev, u8 *data)
	void __iomem * ioaddr = ns_ioaddr(dev);
	sval[0] = sval[1] = sval[2] = 0;

	ethtool_cmd_speed_set(ecmd, np->speed);
	switch (ecmd->port) {
		tmp = mii_nway_result(
			ethtool_cmd_speed_set(ecmd, SPEED_10);

	u32 speed = ethtool_cmd_speed(ecmd);
	np->speed = ethtool_cmd_speed(ecmd);
		switch_port_internal(dev);
		switch_port_external(dev);
	init_phy_fixup(dev);

static int netdev_get_regs(struct net_device *dev, u8 *buf)
	void __iomem * ioaddr = ns_ioaddr(dev);
		rbuf[i] = readl(ioaddr + i*4);
		rbuf[i] = mdio_read(dev, i & 0x1f);
	if (rbuf[4] & rbuf[5]) {
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);

#define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
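/* SWAP_BITS() mirrors the bit order of a 16-bit word; netdev_get_eeprom()
 * below apparently needs this to turn raw eeprom_read() words into the byte
 * stream reported through ethtool. */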
static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
	void __iomem * ioaddr = ns_ioaddr(dev);
		ebuf[i] = eeprom_read(ioaddr, i);

		data->val_out = mdio_read(dev,
		move_int_phy(dev, data->phy_id & 0x1f);
		mdio_write(dev, data->reg_num & 0x1f,
		move_int_phy(dev, data->phy_id & 0x1f);
		miiport_write(dev, data->phy_id & 0x1f,

static void enable_wol_mode(struct net_device *dev, int enable_intr)
	void __iomem * ioaddr = ns_ioaddr(dev);
		natsemi_irq_enable(dev);

static int netdev_close(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

		"%s: Shutting down ethercard, status was %#04x.\n",
		"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
	napi_disable(&np->napi);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	spin_unlock_irq(&np->lock);
	spin_lock_irq(&np->lock);
	natsemi_stop_rxtx(dev);
	spin_unlock_irq(&np->lock);
	netif_stop_queue(dev);
	enable_wol_mode(dev, 0);

	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	pci_set_drvdata(pdev, NULL);
	struct net_device *dev = pci_get_drvdata (pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	if (netif_running (dev)) {
		const int irq = np->pci_dev->irq;

		spin_lock_irq(&np->lock);
		natsemi_irq_disable(dev);
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);
		spin_unlock_irq(&np->lock);

		napi_disable(&np->napi);
		enable_wol_mode(dev, 0);

static int natsemi_resume (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata (pdev);
	if (netif_device_present(dev))
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;
			"pci_enable_device() failed: %d\n", ret);
		napi_enable(&np->napi);
		spin_lock_irq(&np->lock);
		spin_unlock_irq(&np->lock);

	.id_table = natsemi_pci_tbl,
	.probe = natsemi_probe1,
	.suspend = natsemi_suspend,
	.resume = natsemi_resume,

static int __init natsemi_init_mod (void)
	return pci_register_driver(&natsemi_driver);

static void __exit natsemi_exit_mod (void)