#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
static int velocity_nics;

	writeb(0, &regs->CAMADDR);
	for (i = 0; i < 8; i++)
		*mask++ = readb(&(regs->MARCAM[i]));
	writeb(0, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));
	writeb(0, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));
	writeb(0, &regs->CAMADDR);

	for (i = 0; i < 6; i++)
		writeb(*addr++, &(regs->MARCAM[i]));
	writeb(0, &regs->CAMADDR);

	writew(*((u16 *) addr), &regs->MARCAM[0]);
	writeb(0, &regs->CAMADDR);

	writew(0xFFFF, &regs->WOLCRClr);
	writew(0xFFFF, &regs->WOLSRClr);
static const struct ethtool_ops velocity_ethtool_ops;

#define VELOCITY_PARAM(N, D) \
	static int N[MAX_UNITS] = OPTION_DEFAULT;\
	module_param_array(N, int, NULL, 0); \
	MODULE_PARM_DESC(N, D);
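/*
 * Usage sketch (editorial, not from this file): each VELOCITY_PARAM()
 * invocation below defines a per-adapter integer array together with its
 * module_param_array()/MODULE_PARM_DESC(), so a value can be supplied for
 * each NIC at load time, e.g.
 *
 *	modprobe via-velocity speed_duplex=0,4 ValPktLen=1
 *
 * (speed_duplex and ValPktLen are parameter names declared further down;
 * the module name via-velocity is assumed.)
 */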
#define RX_DESC_MIN 64
#define RX_DESC_MAX 255
#define RX_DESC_DEF 64

#define TX_DESC_MIN 16
#define TX_DESC_MAX 256
#define TX_DESC_DEF 64

#define RX_THRESH_MIN 0
#define RX_THRESH_MAX 3
#define RX_THRESH_DEF 0

#define DMA_LENGTH_MIN 0
#define DMA_LENGTH_MAX 7
#define DMA_LENGTH_DEF 6

#define IP_ALIG_DEF 0

#define FLOW_CNTL_DEF 1
#define FLOW_CNTL_MIN 1
#define FLOW_CNTL_MAX 5

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF 0
VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");

#define WOL_OPT_DEF 0
#define WOL_OPT_MIN 0
#define WOL_OPT_MAX 7
static int rx_copybreak = 200;
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
	{CHIP_TYPE_VT6110,
	 "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
	for (i = 0; chip_info_table[i].name != NULL; i++)
		if (chip_info_table[i].chip_id == chip_id)
			break;
	return chip_info_table[i].name;
	pci_set_drvdata(pdev, NULL);

static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max,
					   int def, char *name, const char *devname)

	else if (val < min || val > max) {
			devname, name, min, max);
static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
					    char *name, const char *devname)

		*opt |= (def ? flag : 0);
	else if (val < 0 || val > 1) {
		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
		*opt |= (def ? flag : 0);
			devname, name, val ? "TRUE" : "FALSE");
		*opt |= (val ? flag : 0);
static void velocity_init_cam_filter(struct velocity_info *vptr)

	unsigned int vid, i = 0;

	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
	mac_set_cam_mask(regs, vptr->mCAMmask);
		mac_set_vlan_cam(regs, i, (u8 *) &vid);
		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)

	spin_lock_irq(&vptr->lock);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);

static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)

	spin_lock_irq(&vptr->lock);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)

	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;

	velocity_init_rx_ring_indexes(vptr);
	for (i = 0; i < vptr->options.numrx; ++i)

	switch (vptr->options.spd_dpx) {
	writeb(0, &(regs->MIICR));

	safe_disable_mii_autopoll(regs);
	writeb(index, &regs->MIIADR);
	*data = readw(&regs->MIIDATA);
	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)

	safe_disable_mii_autopoll(regs);
	writeb(mii_addr, &regs->MIIADR);
	writew(data, &regs->MIIDATA);
	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)

	switch (vptr->options.flow_cntl) {

	PHYSR0 = readb(&regs->PHYSR0);

static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)

	set_mii_flow_control(vptr);
	mii_set_auto_on(vptr);
	if ((mii_status & VELOCITY_SPEED_1000) &&
	if (mii_status & VELOCITY_DUPLEX_FULL)
	} else if (mii_status & VELOCITY_SPEED_10) {
		if (mii_status & VELOCITY_DUPLEX_FULL)
	mii_set_auto_on(vptr);
static void velocity_print_link_status(struct velocity_info *vptr)

	else if (vptr->mii_status & VELOCITY_SPEED_100)

	switch (vptr->options.spd_dpx) {

static void enable_flow_control_ability(struct velocity_info *vptr)

	switch (vptr->options.flow_cntl) {

	if (i == W_MAX_TIMEOUT) {

	mac_get_cam_mask(regs, vptr->mCAMmask);
		mac_set_cam(regs, i + offset, ha->addr);
		vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
	mac_set_cam_mask(regs, vptr->mCAMmask);
	if (dev->mtu > 1500)

	BMCR &= ~BMCR_ISOLATE;
	u8 txqueue_timer = 0;
	u8 rxqueue_timer = 0;

	if (vptr->mii_status & (VELOCITY_SPEED_1000 |
				VELOCITY_SPEED_100)) {
		txqueue_timer = vptr->options.txqueue_timer;
		rxqueue_timer = vptr->options.rxqueue_timer;

static void setup_adaptive_interrupts(struct velocity_info *vptr)

	if (tx_intsup != 0) {
	if (rx_intsup != 0) {

static void velocity_init_registers(struct velocity_info *vptr,

	mac_wol_reset(regs);
	netif_stop_queue(vptr->dev);
	velocity_rx_reset(vptr);
	mii_status = velocity_get_opt_media_mode(vptr);
	velocity_print_link_status(vptr);
	netif_wake_queue(vptr->dev);
	enable_flow_control_ability(vptr);

	velocity_soft_reset(vptr);
	mac_eeprom_reload(regs);
	for (i = 0; i < 6; i++)
	velocity_init_cam_filter(vptr);
	velocity_set_multi(vptr->dev);
	enable_mii_autopoll(regs);
	setup_adaptive_interrupts(vptr);
	for (i = 0; i < vptr->tx.numq; i++) {
	init_flow_control_register(vptr);
	mii_status = velocity_get_opt_media_mode(vptr);
	netif_stop_queue(vptr->dev);
	mii_init(vptr, mii_status);
	velocity_print_link_status(vptr);
	netif_wake_queue(vptr->dev);
	enable_flow_control_ability(vptr);
static void velocity_give_many_rx_descs(struct velocity_info *vptr)

	if (vptr->rx.filled < 4)

	unusable = vptr->rx.filled & 0x0003;
	dirty = vptr->rx.dirty - unusable;
	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;

	vptr->rx.filled = unusable;
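	/*
	 * Editorial note (sketch of the intent): descriptors are handed back
	 * to the NIC only in blocks of four, so the low two bits of rx.filled
	 * ("unusable") are carried over until enough completed buffers have
	 * accumulated.
	 */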
static int velocity_init_dma_rings(struct velocity_info *vptr)

	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);

				    rx_ring_size, &pool_dma);
		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",

	vptr->rx.ring = pool;
	vptr->rx.pool_dma = pool_dma;
	pool += rx_ring_size;
	pool_dma += rx_ring_size;

	for (i = 0; i < vptr->tx.numq; i++) {
		vptr->tx.rings[i] = pool;
		vptr->tx.pool_dma[i] = pool_dma;
		pool += tx_ring_size;
		pool_dma += tx_ring_size;

static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)

	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64);

	skb_reserve(rd_info->skb,
		    64 - ((unsigned long) rd_info->skb->data & 63));
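	/*
	 * Editorial note: the 64 extra bytes allocated above give room for
	 * this skb_reserve(), which rounds skb->data up to the next 64-byte
	 * boundary before the buffer is DMA-mapped below.
	 */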
	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,

	int dirty = vptr->rx.dirty, done = 0;

		if (!vptr->rx.info[dirty].skb) {
			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
	} while (dirty != vptr->rx.curr);
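	/*
	 * Editorial note: the refill loop above walks forward from rx.dirty,
	 * allocating a buffer for each slot whose skb is missing, wraps at
	 * options.numrx, and stops once it catches up with rx.curr.
	 */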
static void velocity_free_rd_ring(struct velocity_info *vptr)

	if (vptr->rx.info == NULL)

	for (i = 0; i < vptr->options.numrx; i++) {
		memset(rd, 0, sizeof(*rd));
		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
		dev_kfree_skb(rd_info->skb);

static int velocity_init_rd_ring(struct velocity_info *vptr)

	vptr->rx.info = kcalloc(vptr->options.numrx,

	velocity_init_rx_ring_indexes(vptr);

	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
		velocity_free_rd_ring(vptr);

static int velocity_init_td_ring(struct velocity_info *vptr)

	for (j = 0; j < vptr->tx.numq; j++) {
		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
		if (!vptr->tx.infos[j]) {

		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
static void velocity_free_dma_rings(struct velocity_info *vptr)

static int velocity_init_rings(struct velocity_info *vptr, int mtu)

	velocity_set_rxbufsize(vptr, mtu);

	ret = velocity_init_dma_rings(vptr);

	ret = velocity_init_rd_ring(vptr);
		goto err_free_dma_rings_0;

	ret = velocity_init_td_ring(vptr);
		goto err_free_rd_ring_1;

	velocity_free_rd_ring(vptr);
err_free_dma_rings_0:
	velocity_free_dma_rings(vptr);

static void velocity_free_tx_buf(struct velocity_info *vptr,

	for (i = 0; i < tdinfo->nskb_dma; i++) {
		if (skb_shinfo(skb)->nr_frags > 0)
			pktlen = max_t(size_t, pktlen,

static void velocity_free_td_ring_entry(struct velocity_info *vptr,

	if (td_info == NULL)

	for (i = 0; i < td_info->nskb_dma; i++) {

	dev_kfree_skb(td_info->skb);

static void velocity_free_td_ring(struct velocity_info *vptr)

	for (j = 0; j < vptr->tx.numq; j++) {
		if (vptr->tx.infos[j] == NULL)
		for (i = 0; i < vptr->options.numtx; i++)
			velocity_free_td_ring_entry(vptr, j, i);

	velocity_free_td_ring(vptr);
	velocity_free_rd_ring(vptr);
	velocity_free_dma_rings(vptr);
static void velocity_error(struct velocity_info *vptr, int status)

	netif_stop_queue(vptr->dev);

	vptr->mii_status = check_connection_type(regs);
	setup_queue_timers(vptr);
	velocity_print_link_status(vptr);
	enable_flow_control_ability(vptr);
	enable_mii_autopoll(regs);

	netif_stop_queue(vptr->dev);
	netif_wake_queue(vptr->dev);

	velocity_update_hw_mibs(vptr);

	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
		     idx = (idx + 1) % vptr->options.numtx) {
			td = &(vptr->tx.rings[qnum][idx]);
			tdinfo = &(vptr->tx.infos[qnum][idx]);
			velocity_free_tx_buf(vptr, tdinfo, td);
			vptr->tx.used[qnum]--;
		vptr->tx.tail[qnum] = idx;

	if (netif_queue_stopped(vptr->dev) && (full == 0) &&
		netif_wake_queue(vptr->dev);
static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)

	skb_checksum_none_assert(skb);

static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,

	if (pkt_size < rx_copybreak) {
		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);

static inline void velocity_iph_realign(struct velocity_info *vptr,
					struct sk_buff *skb, int pkt_size)

		skb_reserve(skb, 2);

static int velocity_receive_frame(struct velocity_info *vptr, int idx)

	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,

	pci_action = pci_dma_sync_single_for_device;

	velocity_rx_csum(rd, skb);

	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
		velocity_iph_realign(vptr, skb, pkt_len);
		pci_action = pci_unmap_single;

		__vlan_hwaccel_put_tag(skb, vid);
static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)

	int rd_curr = vptr->rx.curr;

	while (works < budget_left) {
		struct rx_desc *rd = vptr->rx.ring + rd_curr;

		if (!vptr->rx.info[rd_curr].skb)

			if (velocity_receive_frame(vptr, rd_curr) < 0)

		if (rd_curr >= vptr->options.numrx)

	vptr->rx.curr = rd_curr;

	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
		velocity_give_many_rx_descs(vptr);
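	/*
	 * Editorial note: once up to budget_left descriptors have been
	 * serviced, any completed work triggers a refill, and freshly
	 * allocated buffers are handed back to the NIC in one batch via
	 * velocity_give_many_rx_descs().
	 */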
	unsigned long flags;

	rx_done = velocity_rx_srv(vptr, budget / 2);
	velocity_tx_srv(vptr);
	rx_done += velocity_rx_srv(vptr, budget - rx_done);
	velocity_tx_srv(vptr);

	if (rx_done < budget) {

	spin_unlock_irqrestore(&vptr->lock, flags);

static irqreturn_t velocity_intr(int irq, void *dev_instance)

	spin_lock(&vptr->lock);

	if (isr_status == 0) {
		spin_unlock(&vptr->lock);

	if (likely(napi_schedule_prep(&vptr->napi))) {

	velocity_error(vptr, isr_status);

	spin_unlock(&vptr->lock);

static int velocity_open(struct net_device *dev)

	ret = velocity_init_rings(vptr, dev->mtu);

	velocity_free_rings(vptr);

	velocity_give_many_rx_descs(vptr);

	netif_start_queue(dev);
	napi_enable(&vptr->napi);

	safe_disable_mii_autopoll(regs);
static int velocity_change_mtu(struct net_device *dev, int new_mtu)

	if (!netif_running(dev)) {

	if (dev->mtu != new_mtu) {
		unsigned long flags;

		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);

		tmp_vptr->dev = dev;
		tmp_vptr->pdev = vptr->pdev;
		tmp_vptr->options = vptr->options;
		tmp_vptr->tx.numq = vptr->tx.numq;

		ret = velocity_init_rings(tmp_vptr, new_mtu);
			goto out_free_tmp_vptr_1;

		netif_stop_queue(dev);
		velocity_shutdown(vptr);

		vptr->rx = tmp_vptr->rx;
		vptr->tx = tmp_vptr->tx;

		velocity_give_many_rx_descs(vptr);

		netif_start_queue(dev);

		spin_unlock_irqrestore(&vptr->lock, flags);

		velocity_free_rings(tmp_vptr);

out_free_tmp_vptr_1:
	unsigned long flags;

	spin_unlock_irqrestore(&vptr->lock, flags);
	check_connection_type(vptr->mac_regs);

	if (!netif_running(dev))

	ret = velocity_mii_ioctl(dev, rq, cmd);

	if (!netif_running(dev))

	if (!netif_running(dev))

	spin_lock_irq(&vptr->lock);
	velocity_update_hw_mibs(vptr);
	spin_unlock_irq(&vptr->lock);

static int velocity_close(struct net_device *dev)

	napi_disable(&vptr->napi);
	netif_stop_queue(dev);
	velocity_shutdown(vptr);

	velocity_get_ip(vptr);

	velocity_free_rings(vptr);
	unsigned long flags;

	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {

	pktlen = skb_shinfo(skb)->nr_frags == 0 ?

	index = vptr->tx.curr[qnum];
	td_ptr = &(vptr->tx.rings[qnum][index]);
	tdinfo = &(vptr->tx.infos[qnum][index]);

	td_ptr->td_buf[0].pa_high = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
							  skb_frag_size(frag),

		td_ptr->td_buf[i + 1].pa_high = 0;

		const struct iphdr *ip = ip_hdr(skb);

		prev = vptr->options.numtx - 1;

	vptr->tx.used[qnum]++;
	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;

		netif_stop_queue(dev);

	td_ptr = &(vptr->tx.rings[qnum][prev]);

	spin_unlock_irqrestore(&vptr->lock, flags);
	.ndo_open = velocity_open,
	.ndo_stop = velocity_close,
	.ndo_start_xmit = velocity_xmit,
	.ndo_get_stats = velocity_get_stats,
	.ndo_set_rx_mode = velocity_set_multi,
	.ndo_change_mtu = velocity_change_mtu,
	.ndo_do_ioctl = velocity_ioctl,
	.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
2662 "region #0 is not an I/O resource, aborting.\n");
2668 "region #1 is an I/O resource, aborting.\n");
2673 dev_err(&pdev->
dev,
"region #1 is too small.\n");
2714 static int first = 1;
2717 const char *drv_string;
2727 dev_notice(&pdev->
dev,
"already found %d NICs.\n",
2739 vptr = netdev_priv(dev);
2745 printk(
KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2750 velocity_init_info(pdev, vptr, info);
2758 ret = velocity_get_pci_info(vptr, pdev);
2773 goto err_release_res;
2778 mac_wol_reset(regs);
2780 for (i = 0; i < 6; i++)
2786 velocity_get_options(&vptr->
options, velocity_nics, drv_string);
2817 if (!velocity_get_link(dev)) {
2822 velocity_print_info(vptr);
2823 pci_set_drvdata(pdev, dev);
	for (i = 0; i < size; i++) {
		mask = mask_pattern[i];

		for (j = 0; j < 8; j++) {
			if ((mask & 0x01) == 0) {
			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);

	static u32 mask_pattern[2][4] = {
		{0x00203000, 0x000003C0, 0x00000000, 0x0000000},
		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}

	for (i = 0; i < 4; i++)

	crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
			   (u8 *)&mask_pattern[0][0]);

		goto advertise_done;

	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
	struct net_device *dev = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!netif_running(vptr->dev))

	velocity_get_ip(vptr);
	velocity_save_context(vptr, &vptr->context);
	velocity_shutdown(vptr);
	velocity_set_wol(vptr);

	velocity_save_context(vptr, &vptr->context);
	velocity_shutdown(vptr);

	spin_unlock_irqrestore(&vptr->lock, flags);

static int velocity_resume(struct pci_dev *pdev)

	struct net_device *dev = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!netif_running(vptr->dev))

	pci_enable_wake(pdev, 0, 0);

	velocity_restore_context(vptr, &vptr->context);

	velocity_tx_srv(vptr);

	for (i = 0; i < vptr->tx.numq; i++) {
		if (vptr->tx.used[i])

	spin_unlock_irqrestore(&vptr->lock, flags);

	.id_table = velocity_id_table,
	.probe = velocity_found1,
	.suspend = velocity_suspend,
	.resume = velocity_resume,
static int velocity_ethtool_up(struct net_device *dev)

	if (!netif_running(dev))

static void velocity_ethtool_down(struct net_device *dev)

	if (!netif_running(dev))

static int velocity_get_settings(struct net_device *dev,

	status = check_connection_type(vptr->mac_regs);

	switch (vptr->options.spd_dpx) {

	if (status & VELOCITY_SPEED_1000)
	else if (status & VELOCITY_SPEED_100)
		ethtool_cmd_speed_set(cmd, SPEED_10);

	if (status & VELOCITY_DUPLEX_FULL)
static int velocity_set_settings(struct net_device *dev,

	u32 speed = ethtool_cmd_speed(cmd);

	curr_status = check_connection_type(vptr->mac_regs);

	new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
	new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
	new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
	new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);

	if ((new_status & VELOCITY_AUTONEG_ENABLE) &&

	if (new_status & VELOCITY_AUTONEG_ENABLE)
	else if ((new_status & VELOCITY_SPEED_1000) &&
	} else if (new_status & VELOCITY_SPEED_100)
	else if (new_status & VELOCITY_SPEED_10)

	vptr->options.spd_dpx = spd_dpx;

	velocity_set_media_mode(vptr, new_status);

static int get_pending_timer_val(int val)

	int mult_bits = val >> 6;

	return (val & 0x3f) * mult;
static void set_pending_timer_val(int *val, u32 us)

	if (us >= 0x3f * 4) {
	if (us >= 0x3f * 16) {

	*val = (mult << 6) | ((us >> shift) & 0x3f);
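/*
 * Editorial worked example (assuming, from get_pending_timer_val() above and
 * the 0x3f * 64 limit below, that the top two bits of the register value
 * select a multiplier of 1/4/16/64 and the low six bits hold the count):
 * encoding us = 500 gives mult = 2 and shift = 4, so
 * *val = (2 << 6) | (500 >> 4) = 0x9f; decoding 0x9f back yields
 * (0x9f & 0x3f) * 16 = 496 us.
 */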
static int velocity_get_coalesce(struct net_device *dev,

static int velocity_set_coalesce(struct net_device *dev,

	int max_us = 0x3f * 64;
	unsigned long flags;

	set_pending_timer_val(&vptr->options.rxqueue_timer,
	set_pending_timer_val(&vptr->options.txqueue_timer,

	setup_adaptive_interrupts(vptr);
	setup_queue_timers(vptr);

	spin_unlock_irqrestore(&vptr->lock, flags);

	"tx_ether_collisions",
	"rx_mac_control_frames",
	"tx_mac_control_frames",
	"rx_frame_alignment_errors",
	"in_range_length_errors",
static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)

	memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));

static int velocity_get_sset_count(struct net_device *dev, int sset)

static void velocity_get_ethtool_stats(struct net_device *dev,

	if (netif_running(dev)) {

		spin_lock_irq(&vptr->lock);
		velocity_update_hw_mibs(vptr);
		spin_unlock_irq(&vptr->lock);

		for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
static const struct ethtool_ops velocity_ethtool_ops = {
	.get_settings = velocity_get_settings,
	.set_settings = velocity_set_settings,
	.get_drvinfo = velocity_get_drvinfo,
	.get_wol = velocity_ethtool_get_wol,
	.set_wol = velocity_ethtool_set_wol,
	.get_msglevel = velocity_get_msglevel,
	.set_msglevel = velocity_set_msglevel,
	.get_link = velocity_get_link,
	.get_strings = velocity_get_strings,
	.get_sset_count = velocity_get_sset_count,
	.get_ethtool_stats = velocity_get_ethtool_stats,
	.get_coalesce = velocity_get_coalesce,
	.set_coalesce = velocity_set_coalesce,
	.begin = velocity_ethtool_up,
	.complete = velocity_ethtool_down

#if defined(CONFIG_PM) && defined(CONFIG_INET)

	struct in_ifaddr *ifa = ptr;

	velocity_get_ip(netdev_priv(dev));

#define velocity_register_notifier() do {} while (0)
#define velocity_unregister_notifier() do {} while (0)

static int __init velocity_init_module(void)

	ret = pci_register_driver(&velocity_driver);

static void __exit velocity_cleanup_module(void)