13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/mii.h>
22 #include <linux/if_ether.h>
23 #include <linux/if_vlan.h>
25 #include <linux/pci.h>
31 #include <linux/slab.h>
33 #include <asm/uaccess.h>
/* Identification strings reported via module metadata and ethtool drvinfo. */
#define DRV_MODULE_NAME		"b44"
#define DRV_MODULE_VERSION	"2.0"
#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"
44 #define B44_DEF_MSG_ENABLE \
/* How long (in jiffies) before the stack declares a TX timeout. */
#define B44_TX_TIMEOUT			(5 * HZ)

/* MTU bounds enforced by b44_change_mtu(). */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

/* RX descriptor ring: total slots and default number kept posted. */
#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
65 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
/*
 * TX descriptor ring: NEXT_TX() masks with (size - 1), so the ring size
 * must remain a power of two.  By default all but one slot is usable.
 */
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
69 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
/* Descriptors deliberately left unused at the tail of the TX ring. */
#define TX_RING_GAP(BP)							\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
/*
 * Free TX slots between producer and consumer, handling both the
 * non-wrapped (cons <= prod) and wrapped cases.  NOTE(review): the BP
 * argument is evaluated several times — callers must pass a side-effect
 * free expression.
 */
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
/* Advance a TX index with power-of-two wrap-around. */
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
/*
 * RX buffer layout: packet data begins RX_HEADER_LEN + 2 bytes into the
 * buffer (the extra 2 bytes presumably keep the IP header 4-byte
 * aligned — TODO confirm against the RX path).
 */
#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* Threshold (in free descriptors) used when re-waking the TX queue. */
#define B44_TX_WAKEUP_THRESH	(B44_TX_RING_SIZE / 4)
/* Wake-on-LAN pattern-match constants (used by b44_setup_pseudo_magicp). */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16

/* Ethernet + IPv6 + UDP (14 + 40 + 8) and Ethernet + IPv4 + UDP
 * (14 + 20 + 8) header lengths for building WOL match patterns. */
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42
95 MODULE_AUTHOR(
"Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
100 static int b44_debug = -1;
102 MODULE_PARM_DESC(b44_debug,
"B44 bitmapped debugging message enable value");
105 #ifdef CONFIG_B44_PCI
116 .id_table = b44_pci_tbl,
126 static void b44_halt(
struct b44 *);
127 static void b44_init_rings(
struct b44 *);
/*
 * Reset kinds handed to b44_init_hw() (int reset_kind parameter) and
 * b44_chip_reset().
 */
#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5
135 static void b44_init_hw(
struct b44 *,
int);
137 static int dma_desc_sync_size;
141 #define _B44(x...) # x,
146 static inline void b44_sync_dma_desc_for_device(
struct ssb_device *sdev,
152 dma_desc_sync_size, dir);
155 static inline void b44_sync_dma_desc_for_cpu(
struct ssb_device *sdev,
161 dma_desc_sync_size, dir);
164 static inline unsigned long br32(
const struct b44 *bp,
unsigned long reg)
166 return ssb_read32(bp->
sdev, reg);
169 static inline void bw32(
const struct b44 *bp,
170 unsigned long reg,
unsigned long val)
172 ssb_write32(bp->
sdev, reg, val);
175 static int b44_wait_bit(
struct b44 *bp,
unsigned long reg,
180 for (i = 0; i < timeout; i++) {
183 if (clear && !(val & bit))
185 if (!clear && (val & bit))
191 netdev_err(bp->
dev,
"BUG! Timeout waiting for bit %08x of register %lx to %s\n",
192 bit, reg, clear ?
"clear" :
"set");
199 static inline void __b44_cam_read(
struct b44 *bp,
unsigned char *
data,
int index)
210 data[2] = (val >> 24) & 0xFF;
211 data[3] = (val >> 16) & 0xFF;
212 data[4] = (val >> 8) & 0xFF;
213 data[5] = (val >> 0) & 0xFF;
217 data[0] = (val >> 8) & 0xFF;
218 data[1] = (val >> 0) & 0xFF;
221 static inline void __b44_cam_write(
struct b44 *bp,
unsigned char *data,
int index)
225 val = ((
u32) data[2]) << 24;
226 val |= ((
u32) data[3]) << 16;
227 val |= ((
u32) data[4]) << 8;
228 val |= ((
u32) data[5]) << 0;
231 (((
u32) data[0]) << 8) |
232 (((
u32) data[1]) << 0));
239 static inline void __b44_disable_ints(
struct b44 *bp)
244 static void b44_disable_ints(
struct b44 *bp)
246 __b44_disable_ints(bp);
252 static void b44_enable_ints(
struct b44 *bp)
257 static int __b44_readphy(
struct b44 *bp,
int phy_addr,
int reg,
u32 *val)
273 static int __b44_writephy(
struct b44 *bp,
int phy_addr,
int reg,
u32 val)
285 static inline int b44_readphy(
struct b44 *bp,
int reg,
u32 *val)
290 return __b44_readphy(bp, bp->
phy_addr, reg, val);
293 static inline int b44_writephy(
struct b44 *bp,
int reg,
u32 val)
298 return __b44_writephy(bp, bp->
phy_addr, reg, val);
305 struct b44 *bp = netdev_priv(dev);
306 int rc = __b44_readphy(bp, phy_id, location, &val);
312 static void b44_mii_write(
struct net_device *dev,
int phy_id,
int location,
315 struct b44 *bp = netdev_priv(dev);
316 __b44_writephy(bp, phy_id, location, val);
319 static int b44_phy_reset(
struct b44 *bp)
330 err = b44_readphy(bp,
MII_BMCR, &val);
333 netdev_err(bp->
dev,
"PHY Reset would not complete\n");
341 static void __b44_set_flow_ctrl(
struct b44 *bp,
u32 pause_flags)
346 bp->
flags |= pause_flags;
364 static void b44_set_flow_ctrl(
struct b44 *bp,
u32 local,
u32 remote)
380 __b44_set_flow_ctrl(bp, pause_enab);
383 #ifdef CONFIG_BCM47XX
385 static void b44_wap54g10_workaround(
struct b44 *bp)
399 err = __b44_readphy(bp, 0,
MII_BMCR, &val);
404 val &= ~BMCR_ISOLATE;
405 err = __b44_writephy(bp, 0,
MII_BMCR, val);
411 pr_warning(
"PHY: cannot reset MII transceiver isolate bit\n");
414 static inline void b44_wap54g10_workaround(
struct b44 *bp)
419 static int b44_setup_phy(
struct b44 *bp)
424 b44_wap54g10_workaround(bp);
462 if ((err = b44_readphy(bp,
MII_BMCR, &bmcr)) != 0)
469 if ((err = b44_writephy(bp,
MII_BMCR, bmcr)) != 0)
476 b44_set_flow_ctrl(bp, 0, 0);
483 static void b44_stats_update(
struct b44 *bp)
489 u64_stats_update_begin(&bp->
hw_stats.syncp);
492 *val++ += br32(bp, reg);
499 *val++ += br32(bp, reg);
502 u64_stats_update_end(&bp->
hw_stats.syncp);
505 static void b44_link_report(
struct b44 *bp)
507 if (!netif_carrier_ok(bp->
dev)) {
508 netdev_info(bp->
dev,
"Link is down\n");
510 netdev_info(bp->
dev,
"Link is up at %d Mbps, %s duplex\n",
514 netdev_info(bp->
dev,
"Flow control is %s for TX and %s for RX\n",
516 (bp->
flags & B44_FLAG_RX_PAUSE) ?
"on" :
"off");
520 static void b44_check_phy(
struct b44 *bp)
527 if (!netif_carrier_ok(bp->
dev)) {
537 if (!b44_readphy(bp,
MII_BMSR, &bmsr) &&
549 if (!netif_carrier_ok(bp->
dev) &&
552 u32 local_adv, remote_adv;
562 !b44_readphy(bp,
MII_LPA, &remote_adv))
563 b44_set_flow_ctrl(bp, local_adv, remote_adv);
568 }
else if (netif_carrier_ok(bp->
dev) && !(bmsr & BMSR_LSTATUS)) {
575 netdev_warn(bp->
dev,
"Remote fault detected in PHY\n");
577 netdev_warn(bp->
dev,
"Jabber detected in PHY\n");
581 static void b44_timer(
unsigned long __opaque)
583 struct b44 *bp = (
struct b44 *) __opaque;
585 spin_lock_irq(&bp->
lock);
589 b44_stats_update(bp);
591 spin_unlock_irq(&bp->
lock);
596 static void b44_tx(
struct b44 *bp)
619 if (netif_queue_stopped(bp->
dev) &&
621 netif_wake_queue(bp->
dev);
631 static int b44_alloc_rx_skb(
struct b44 *bp,
int src_idx,
u32 dest_idx_unmasked)
685 map->mapping = mapping;
700 dest_idx *
sizeof(*dp),
706 static void b44_recycle_rx(
struct b44 *bp,
int src_idx,
u32 dest_idx_unmasked)
715 dest_desc = &bp->
rx_ring[dest_idx];
717 src_desc = &bp->
rx_ring[src_idx];
720 dest_map->
skb = src_map->
skb;
728 src_idx *
sizeof(*src_desc),
731 ctrl = src_desc->
ctrl;
737 dest_desc->ctrl =
ctrl;
738 dest_desc->addr = src_desc->
addr;
744 dest_idx *
sizeof(*dest_desc),
752 static int b44_rx(
struct b44 *bp,
int budget)
762 while (cons != prod && budget > 0) {
777 b44_recycle_rx(bp, cons, bp->
rx_prod);
779 bp->
dev->stats.rx_dropped++;
790 }
while (len == 0 && i++ < 5);
800 skb_size = b44_alloc_rx_skb(bp, cons, bp->
rx_prod);
811 b44_recycle_rx(bp, cons, bp->
rx_prod);
812 copy_skb = netdev_alloc_skb(bp->
dev, len + 2);
813 if (copy_skb ==
NULL)
814 goto drop_it_no_recycle;
816 skb_reserve(copy_skb, 2);
820 copy_skb->
data, len);
823 skb_checksum_none_assert(skb);
855 b44_disable_ints(bp);
859 netif_wake_queue(bp->
dev);
862 spin_unlock_irqrestore(&bp->
lock, flags);
866 work_done += b44_rx(bp, budget);
873 netif_wake_queue(bp->
dev);
874 spin_unlock_irqrestore(&bp->
lock, flags);
878 if (work_done < budget) {
889 struct b44 *bp = netdev_priv(dev);
893 spin_lock(&bp->
lock);
906 if (
unlikely(!netif_running(dev))) {
907 netdev_info(dev,
"late interrupt\n");
911 if (napi_schedule_prep(&bp->
napi)) {
916 __b44_disable_ints(bp);
924 spin_unlock(&bp->
lock);
928 static void b44_tx_timeout(
struct net_device *dev)
930 struct b44 *bp = netdev_priv(dev);
932 netdev_err(dev,
"transmit timed out, resetting\n");
934 spin_lock_irq(&bp->
lock);
940 spin_unlock_irq(&bp->
lock);
944 netif_wake_queue(dev);
949 struct b44 *bp = netdev_priv(dev);
960 netif_stop_queue(dev);
961 netdev_err(dev,
"BUG! Tx Ring full when queue awake!\n");
988 skb_copy_from_linear_data(skb,
skb_put(bounce_skb, len), len);
1007 entry *
sizeof(bp->
tx_ring[0]),
1023 netif_stop_queue(dev);
1026 spin_unlock_irqrestore(&bp->
lock, flags);
1035 static int b44_change_mtu(
struct net_device *dev,
int new_mtu)
1037 struct b44 *bp = netdev_priv(dev);
1039 if (new_mtu < B44_MIN_MTU || new_mtu >
B44_MAX_MTU)
1042 if (!netif_running(dev)) {
1050 spin_lock_irq(&bp->
lock);
1055 spin_unlock_irq(&bp->
lock);
1057 b44_enable_ints(bp);
1069 static void b44_free_rings(
struct b44 *bp)
1104 static void b44_init_rings(
struct b44 *bp)
1122 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1131 static void b44_free_consistent(
struct b44 *bp)
1165 static int b44_alloc_consistent(
struct b44 *bp,
gfp_t gfp)
1169 size = B44_RX_RING_SIZE *
sizeof(
struct ring_info);
1174 size = B44_TX_RING_SIZE *
sizeof(
struct ring_info);
1189 rx_ring = kzalloc(size, gfp);
1217 tx_ring = kzalloc(size, gfp);
1239 b44_free_consistent(bp);
1244 static void b44_clear_stats(
struct b44 *bp)
1256 static void b44_chip_reset(
struct b44 *bp,
int reset_kind)
1280 b44_clear_stats(bp);
1289 switch (sdev->
bus->bustype) {
1325 static void b44_halt(
struct b44 *bp)
1327 b44_disable_ints(bp);
1331 netdev_info(bp->
dev,
"powering down PHY\n");
1339 static void __b44_set_mac_addr(
struct b44 *bp)
1345 __b44_cam_write(bp, bp->
dev->dev_addr, 0);
1351 static int b44_set_mac_addr(
struct net_device *dev,
void *
p)
1353 struct b44 *bp = netdev_priv(dev);
1357 if (netif_running(dev))
1360 if (!is_valid_ether_addr(addr->
sa_data))
1365 spin_lock_irq(&bp->
lock);
1369 __b44_set_mac_addr(bp);
1371 spin_unlock_irq(&bp->
lock);
1379 static void __b44_set_rx_mode(
struct net_device *);
1380 static void b44_init_hw(
struct b44 *bp,
int reset_kind)
1395 __b44_set_rx_mode(bp->
dev);
1424 struct b44 *bp = netdev_priv(dev);
1431 napi_enable(&bp->
napi);
1440 napi_disable(&bp->
napi);
1443 b44_free_consistent(bp);
1450 bp->
timer.function = b44_timer;
1453 b44_enable_ints(bp);
1454 netif_start_queue(dev);
1459 #ifdef CONFIG_NET_POLL_CONTROLLER
1464 static void b44_poll_controller(
struct net_device *dev)
1467 b44_interrupt(dev->
irq, dev);
1477 for (i = 0; i <
bytes; i +=
sizeof(
u32)) {
1489 memset(ppattern + offset, 0xff, magicsync);
1490 for (j = 0; j < magicsync; j++)
1491 set_bit(len++, (
unsigned long *) pmask);
1498 if (ethaddr_bytes <=0)
1500 for (k = 0; k< ethaddr_bytes; k++) {
1501 ppattern[offset + magicsync +
1503 set_bit(len++, (
unsigned long *) pmask);
1512 static void b44_setup_pseudo_magicp(
struct b44 *bp)
1516 int plen0, plen1, plen2;
1521 if (!pwol_pattern) {
1522 pr_err(
"Memory not available for WOL\n");
1528 plen0 = b44_magic_pattern(bp->
dev->dev_addr, pwol_pattern, pwol_mask,
1537 plen1 = b44_magic_pattern(bp->
dev->dev_addr, pwol_pattern, pwol_mask,
1548 plen2 = b44_magic_pattern(bp->
dev->dev_addr, pwol_pattern, pwol_mask,
1556 kfree(pwol_pattern);
1568 #ifdef CONFIG_B44_PCI
1569 static void b44_setup_wol_pci(
struct b44 *bp)
1575 pci_read_config_word(bp->
sdev->bus->host_pci,
SSB_PMCSR, &val);
1580 static inline void b44_setup_wol_pci(
struct b44 *bp) { }
1583 static void b44_setup_wol(
struct b44 *bp)
1593 val = bp->
dev->dev_addr[2] << 24 |
1594 bp->
dev->dev_addr[3] << 16 |
1595 bp->
dev->dev_addr[4] << 8 |
1596 bp->
dev->dev_addr[5];
1599 val = bp->
dev->dev_addr[0] << 8 |
1600 bp->
dev->dev_addr[1];
1607 b44_setup_pseudo_magicp(bp);
1609 b44_setup_wol_pci(bp);
1614 struct b44 *bp = netdev_priv(dev);
1616 netif_stop_queue(dev);
1618 napi_disable(&bp->
napi);
1622 spin_lock_irq(&bp->
lock);
1628 spin_unlock_irq(&bp->
lock);
1637 b44_free_consistent(bp);
1645 struct b44 *bp = netdev_priv(dev);
1650 start = u64_stats_fetch_begin_bh(&hwstat->
syncp);
1655 nstat->
rx_bytes = hwstat->rx_octets;
1656 nstat->
tx_bytes = hwstat->tx_octets;
1657 nstat->
tx_errors = (hwstat->tx_jabber_pkts +
1658 hwstat->tx_oversize_pkts +
1659 hwstat->tx_underruns +
1660 hwstat->tx_excessive_cols +
1661 hwstat->tx_late_cols);
1662 nstat->
multicast = hwstat->tx_multicast_pkts;
1666 hwstat->rx_undersize);
1670 nstat->
rx_errors = (hwstat->rx_jabber_pkts +
1671 hwstat->rx_oversize_pkts +
1672 hwstat->rx_missed_pkts +
1673 hwstat->rx_crc_align_errs +
1674 hwstat->rx_undersize +
1675 hwstat->rx_crc_errs +
1676 hwstat->rx_align_errs +
1677 hwstat->rx_symbol_errs);
1684 }
while (u64_stats_fetch_retry_bh(&hwstat->
syncp, start));
1689 static int __b44_load_mcast(
struct b44 *bp,
struct net_device *dev)
1699 __b44_cam_write(bp, ha->
addr, i++ + 1);
1704 static void __b44_set_rx_mode(
struct net_device *dev)
1706 struct b44 *bp = netdev_priv(dev);
1715 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1718 __b44_set_mac_addr(bp);
1724 i = __b44_load_mcast(bp, dev);
1727 __b44_cam_write(bp, zero, i);
1735 static void b44_set_rx_mode(
struct net_device *dev)
1737 struct b44 *bp = netdev_priv(dev);
1739 spin_lock_irq(&bp->
lock);
1740 __b44_set_rx_mode(dev);
1741 spin_unlock_irq(&bp->
lock);
1746 struct b44 *bp = netdev_priv(dev);
1752 struct b44 *bp = netdev_priv(dev);
1758 struct b44 *bp = netdev_priv(dev);
1777 static int b44_nway_reset(
struct net_device *dev)
1779 struct b44 *bp = netdev_priv(dev);
1783 spin_lock_irq(&bp->
lock);
1792 spin_unlock_irq(&bp->
lock);
1799 struct b44 *bp = netdev_priv(dev);
1830 if (!netif_running(dev)){
1831 ethtool_cmd_speed_set(cmd, 0);
1841 struct b44 *bp = netdev_priv(dev);
1842 u32 speed = ethtool_cmd_speed(cmd);
1857 spin_lock_irq(&bp->
lock);
1891 if (netif_running(dev))
1894 spin_unlock_irq(&bp->
lock);
1899 static void b44_get_ringparam(
struct net_device *dev,
1902 struct b44 *bp = netdev_priv(dev);
1910 static int b44_set_ringparam(
struct net_device *dev,
1913 struct b44 *bp = netdev_priv(dev);
1915 if ((ering->
rx_pending > B44_RX_RING_SIZE - 1) ||
1921 spin_lock_irq(&bp->
lock);
1929 netif_wake_queue(bp->
dev);
1930 spin_unlock_irq(&bp->
lock);
1932 b44_enable_ints(bp);
1937 static void b44_get_pauseparam(
struct net_device *dev,
1940 struct b44 *bp = netdev_priv(dev);
1950 static int b44_set_pauseparam(
struct net_device *dev,
1953 struct b44 *bp = netdev_priv(dev);
1955 spin_lock_irq(&bp->
lock);
1963 bp->
flags &= ~B44_FLAG_RX_PAUSE;
1973 __b44_set_flow_ctrl(bp, bp->
flags);
1975 spin_unlock_irq(&bp->
lock);
1977 b44_enable_ints(bp);
1982 static void b44_get_strings(
struct net_device *dev,
u32 stringset,
u8 *data)
1986 memcpy(data, *b44_gstrings,
sizeof(b44_gstrings));
1991 static int b44_get_sset_count(
struct net_device *dev,
int sset)
2001 static void b44_get_ethtool_stats(
struct net_device *dev,
2004 struct b44 *bp = netdev_priv(dev);
2006 u64 *data_src, *data_dst;
2010 spin_lock_irq(&bp->
lock);
2011 b44_stats_update(bp);
2012 spin_unlock_irq(&bp->
lock);
2015 data_src = &hwstat->tx_good_octets;
2017 start = u64_stats_fetch_begin_bh(&hwstat->
syncp);
2019 for (i = 0; i <
ARRAY_SIZE(b44_gstrings); i++)
2020 *data_dst++ = *data_src++;
2022 }
while (u64_stats_fetch_retry_bh(&hwstat->
syncp, start));
2027 struct b44 *bp = netdev_priv(dev);
2039 struct b44 *bp = netdev_priv(dev);
2041 spin_lock_irq(&bp->
lock);
2046 spin_unlock_irq(&bp->
lock);
2051 static const struct ethtool_ops b44_ethtool_ops = {
2052 .get_drvinfo = b44_get_drvinfo,
2053 .get_settings = b44_get_settings,
2054 .set_settings = b44_set_settings,
2055 .nway_reset = b44_nway_reset,
2057 .get_wol = b44_get_wol,
2058 .set_wol = b44_set_wol,
2059 .get_ringparam = b44_get_ringparam,
2060 .set_ringparam = b44_set_ringparam,
2061 .get_pauseparam = b44_get_pauseparam,
2062 .set_pauseparam = b44_set_pauseparam,
2063 .get_msglevel = b44_get_msglevel,
2064 .set_msglevel = b44_set_msglevel,
2065 .get_strings = b44_get_strings,
2066 .get_sset_count = b44_get_sset_count,
2067 .get_ethtool_stats = b44_get_ethtool_stats,
2070 static int b44_ioctl(
struct net_device *dev,
struct ifreq *ifr,
int cmd)
2073 struct b44 *bp = netdev_priv(dev);
2076 if (!netif_running(dev))
2079 spin_lock_irq(&bp->
lock);
2081 spin_unlock_irq(&bp->
lock);
2086 static int __devinit b44_get_invariants(
struct b44 *bp)
2096 addr = sdev->
bus->sprom.et1mac;
2099 addr = sdev->
bus->sprom.et0mac;
2109 if (!is_valid_ether_addr(&bp->
dev->dev_addr[0])){
2110 pr_err(
"Invalid MAC address found in EEPROM\n");
2122 if (bp->
sdev->id.revision >= 7)
2129 .ndo_open = b44_open,
2130 .ndo_stop = b44_close,
2131 .ndo_start_xmit = b44_start_xmit,
2132 .ndo_get_stats64 = b44_get_stats64,
2133 .ndo_set_rx_mode = b44_set_rx_mode,
2134 .ndo_set_mac_address = b44_set_mac_addr,
2136 .ndo_do_ioctl = b44_ioctl,
2137 .ndo_tx_timeout = b44_tx_timeout,
2138 .ndo_change_mtu = b44_change_mtu,
2139 #ifdef CONFIG_NET_POLL_CONTROLLER
2140 .ndo_poll_controller = b44_poll_controller,
2155 dev = alloc_etherdev(
sizeof(*bp));
2166 bp = netdev_priv(dev);
2187 "Failed to powerup the bus\n");
2188 goto err_out_free_dev;
2194 "Required 30BIT DMA mask unsupported by the system\n");
2195 goto err_out_powerdown;
2198 err = b44_get_invariants(bp);
2201 "Problem fetching invariants of chip, aborting\n");
2202 goto err_out_powerdown;
2206 bp->
mii_if.mdio_read = b44_mii_read;
2207 bp->
mii_if.mdio_write = b44_mii_write;
2209 bp->
mii_if.phy_id_mask = 0x1f;
2210 bp->
mii_if.reg_num_mask = 0x1f;
2221 dev_err(sdev->
dev,
"Cannot register net device, aborting\n");
2222 goto err_out_powerdown;
2227 ssb_set_drvdata(sdev, dev);
2235 if (b44_phy_reset(bp) < 0)
2254 struct net_device *dev = ssb_get_drvdata(sdev);
2260 ssb_pcihost_set_power_state(sdev,
PCI_D3hot);
2261 ssb_set_drvdata(sdev,
NULL);
2266 struct net_device *dev = ssb_get_drvdata(sdev);
2267 struct b44 *bp = netdev_priv(dev);
2269 if (!netif_running(dev))
2274 spin_lock_irq(&bp->
lock);
2281 spin_unlock_irq(&bp->
lock);
2289 ssb_pcihost_set_power_state(sdev,
PCI_D3hot);
2293 static int b44_resume(
struct ssb_device *sdev)
2295 struct net_device *dev = ssb_get_drvdata(sdev);
2296 struct b44 *bp = netdev_priv(dev);
2302 "Failed to powerup the bus\n");
2306 if (!netif_running(dev))
2309 spin_lock_irq(&bp->
lock);
2312 spin_unlock_irq(&bp->
lock);
2321 netdev_err(dev,
"request_irq failed\n");
2322 spin_lock_irq(&bp->
lock);
2325 spin_unlock_irq(&bp->
lock);
2331 b44_enable_ints(bp);
2332 netif_wake_queue(dev);
2341 .id_table = b44_ssb_tbl,
2342 .probe = b44_init_one,
2344 .suspend = b44_suspend,
2345 .resume = b44_resume,
2348 static inline int __init b44_pci_init(
void)
2351 #ifdef CONFIG_B44_PCI
2357 static inline void b44_pci_exit(
void)
2359 #ifdef CONFIG_B44_PCI
2360 ssb_pcihost_unregister(&b44_pci_driver);
2364 static int __init b44_init(
void)
2370 dma_desc_sync_size =
max_t(
unsigned int, dma_desc_align_size,
sizeof(
struct dma_desc));
2372 err = b44_pci_init();
2381 static void __exit b44_cleanup(
void)