#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>

#include "sunqe.h"
#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.1"
#define DRV_RELDATE	"August 27, 2008"
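
/* Module metadata, as carried by the upstream driver. */
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);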

static struct sunqec *root_qec_dev;
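
/* qe_init() uses this before it is defined further below. */
static void qe_set_multicast(struct net_device *dev);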

#define QEC_RESET_TRIES 200

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200
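
/* Stop a channel: reset the MACE and the QE DMA engine, polling until
 * the reset completes or the retry counts above run out.
 */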
static inline int qe_stop(struct sunqe *qep);
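
/* Reset the ring indexes and hand every receive descriptor back to
 * the chip, marked owned and full-sized.
 */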
static void qe_init_rings(struct sunqe *qep);
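
/* Bring a channel up (shown here condensed): stop it, program the MACE
 * with the station address from e[], clear the address filter, then let
 * qe_set_multicast() re-enable the receiver and transmitter.
 */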
static int qe_init(struct sunqe *qep, int from_irq)
{
        void __iomem *mregs = qep->mregs;
        unsigned char *e = &qep->dev->dev_addr[0];
        int i;
        /* Program the station address from the IDPROM. */
        sbus_writeb(e[0], mregs + MREGS_ETHADDR);
        sbus_writeb(e[1], mregs + MREGS_ETHADDR);
        sbus_writeb(e[2], mregs + MREGS_ETHADDR);
        sbus_writeb(e[3], mregs + MREGS_ETHADDR);
        sbus_writeb(e[4], mregs + MREGS_ETHADDR);
        sbus_writeb(e[5], mregs + MREGS_ETHADDR);

        /* Clear out the 64-bit logical address filter. */
        for (i = 0; i < 8; i++)
                sbus_writeb(0, mregs + MREGS_FILTER);

        /* Reload multicast information; this also re-enables the
         * receiver and the transmitter.
         */
        qe_set_multicast(qep->dev);

        /* QEC should now start to show interrupts. */
        return 0;
}
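
/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */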
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
        struct net_device *dev = qep->dev;
        int mace_hwbug_workaround = 0;

        if (qe_status & CREG_STAT_EDEFER) {
                /* Excessive transmit defers. */
                dev->stats.tx_errors++;
        }

        if (qe_status & CREG_STAT_CLOSS) {
                /* Carrier lost; is the link down? */
                dev->stats.tx_errors++;
                dev->stats.tx_carrier_errors++;
        }

        if (qe_status & CREG_STAT_ERETRIES) {
                /* Excessive transmit retries. */
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_LCOLL) {
                /* Late transmit collision. */
                dev->stats.tx_errors++;
                dev->stats.collisions++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_FUFLOW) {
                /* Transmit fifo underflow. */
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_CCOFLOW) {
                /* The collision counter wrapped; it advances in
                 * units of 256.
                 */
                dev->stats.tx_errors += 256;
                dev->stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_TXDERROR) {
                /* Bogus transmit descriptor. */
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXLERR) {
                /* Transmit late error. */
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXPERR) {
                /* Transmit DMA parity error. */
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXSERR) {
                /* Transmit DMA sbus error ack. */
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RCCOFLOW) {
                /* Receive collision counter wrapped. */
                dev->stats.rx_errors += 256;
                dev->stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_RUOFLOW) {
                /* Receive overflow counter wrapped. */
                dev->stats.rx_errors += 256;
                dev->stats.rx_over_errors += 256;
        }

        if (qe_status & CREG_STAT_MCOFLOW) {
                /* Missed-packet counter wrapped. */
                dev->stats.rx_errors += 256;
                dev->stats.rx_missed_errors += 256;
        }

        if (qe_status & CREG_STAT_RXFOFLOW) {
                /* Receive fifo overflow. */
                dev->stats.rx_errors++;
                dev->stats.rx_over_errors++;
        }

        if (qe_status & CREG_STAT_RLCOLL) {
                /* Late receive collision. */
                dev->stats.rx_errors++;
                dev->stats.collisions++;
        }

        if (qe_status & CREG_STAT_FCOFLOW) {
                /* Framing-error counter wrapped. */
                dev->stats.rx_errors += 256;
                dev->stats.rx_frame_errors += 256;
        }

        if (qe_status & CREG_STAT_CECOFLOW) {
                /* CRC-error counter wrapped. */
                dev->stats.rx_errors += 256;
                dev->stats.rx_crc_errors += 256;
        }

        if (qe_status & CREG_STAT_RXDROP) {
                /* Receive packet dropped. */
                dev->stats.rx_errors++;
                dev->stats.rx_dropped++;
                dev->stats.rx_missed_errors++;
        }

        if (qe_status & CREG_STAT_RXSMALL) {
                /* Receive buffer too small. */
                dev->stats.rx_errors++;
                dev->stats.rx_length_errors++;
        }

        if (qe_status & CREG_STAT_RXLERR) {
                /* Receive late error. */
                dev->stats.rx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXPERR) {
                /* Receive DMA parity error. */
                dev->stats.rx_errors++;
                dev->stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXSERR) {
                /* Receive DMA sbus error ack. */
                dev->stats.rx_errors++;
                dev->stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (mace_hwbug_workaround)
                qe_init(qep, 1);
        return mace_hwbug_workaround;
}
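
/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */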
static void qe_rx(struct sunqe *qep)
{
        struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
        struct net_device *dev = qep->dev;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = qep->buffers_dvma;
        int elem = qep->rx_new;
        struct qe_rxd *this;
        u32 flags;

        this = &rxbase[elem];
        while (!((flags = this->rx_flags) & RXD_OWN)) {
                struct sk_buff *skb;
                unsigned char *this_qbuf =
                        &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
                __u32 this_qbuf_dvma = qbufs_dvma +
                        qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
                struct qe_rxd *end_rxd =
                        &rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
                int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

                /* Check for errors. */
                if (len < ETH_ZLEN) {
                        dev->stats.rx_errors++;
                        dev->stats.rx_length_errors++;
                        dev->stats.rx_dropped++;
                } else {
                        skb = netdev_alloc_skb(dev, len + 2);
                        if (skb == NULL) {
                                dev->stats.rx_dropped++;
                        } else {
                                skb_reserve(skb, 2);
                                skb_put(skb, len);
                                skb_copy_to_linear_data(skb, this_qbuf,
                                                        len);
                                skb->protocol = eth_type_trans(skb, qep->dev);
                                netif_rx(skb);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += len;
                        }
                }

                /* Return the descriptor to the chip and advance. */
                end_rxd->rx_addr = this_qbuf_dvma;
                end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

                elem = NEXT_RX(elem);
                this = &rxbase[elem];
        }
        qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);
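
/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */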
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
        struct sunqec *qecp = dev_id;
        u32 qec_status;
        int channel = 0;

        /* Latch the status now. */
        qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
        while (channel < 4) {
                if (qec_status & 0xf) {
                        struct sunqe *qep = qecp->qes[channel];
                        u32 qe_status = sbus_readl(qep->qcregs + CREG_STAT);

                        if ((qe_status & CREG_STAT_ERRORS) &&
                            qe_is_bolixed(qep, qe_status))
                                goto next;
                        if (qe_status & CREG_STAT_RXIRQ)
                                qe_rx(qep);
                        if (netif_queue_stopped(qep->dev) &&
                            (qe_status & CREG_STAT_TXIRQ)) {
                                spin_lock(&qep->lock);
                                qe_tx_reclaim(qep);
                                if (TX_BUFFS_AVAIL(qep) > 0) {
                                        /* Wake the queue and return to
                                         * lazy tx reclaim.
                                         */
                                        netif_wake_queue(qep->dev);
                                        sbus_writel(1, qep->qcregs + CREG_TIMASK);
                                }
                                spin_unlock(&qep->lock);
                        }
                }
next:
                qec_status >>= 4;
                channel++;
        }

        return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);

        qep->mconfig = (MREGS_MCONFIG_TXENAB |
                        MREGS_MCONFIG_RXENAB |
                        MREGS_MCONFIG_MBAENAB);
        return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);

        qe_stop(qep);
        return 0;
}
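
/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */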
static void qe_tx_reclaim(struct sunqe *qep)
{
        struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
        int elem = qep->tx_old;

        while (elem != qep->tx_new) {
                if (txbase[elem].tx_flags & TXD_OWN)
                        break;
                elem = NEXT_TX(elem);
        }
        qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        int tx_full;

        spin_lock_irq(&qep->lock);

        /* Try to reclaim; if that frees up some tx entries we are fine. */
        qe_tx_reclaim(qep);
        tx_full = (TX_BUFFS_AVAIL(qep) <= 0);

        spin_unlock_irq(&qep->lock);

        if (tx_full) {
                printk(KERN_ERR "%s: transmit timed out, resetting\n",
                       dev->name);
                qe_init(qep, 1);
        }
        netif_wake_queue(dev);
}
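
/* Get a packet queued to go onto the wire. */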
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
        unsigned char *txbuf;
        int len, entry;

        spin_lock_irq(&qep->lock);

        qe_tx_reclaim(qep);

        len = skb->len;
        entry = qep->tx_new;

        txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
        txbuf_dvma = qbufs_dvma +
                qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

        skb_copy_from_linear_data(skb, txbuf, len);

        qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
        qep->qe_block->qe_txd[entry].tx_flags =
                (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
        qep->tx_new = NEXT_TX(entry);

        /* Get it going. */
        sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += len;

        if (TX_BUFFS_AVAIL(qep) <= 0) {
                /* Halt the net queue and enable tx interrupts; the tx irq
                 * handler wakes the queue once entries free up again.
                 */
                netif_stop_queue(dev);
                sbus_writel(0, qep->qcregs + CREG_TIMASK);
        }
        spin_unlock_irq(&qep->lock);

        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
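
/* Rebuild the MACE 64-bit logical address filter from the current
 * multicast list.
 */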
static void qe_set_multicast(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        u8 new_mconfig = qep->mconfig;
        u32 crc;
        int i;

        /* Lock out others. */
        netif_stop_queue(dev);

        if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
                /* Too many groups: just accept all multicast frames. */
                for (i = 0; i < 8; i++)
                        sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
        } else if (dev->flags & IFF_PROMISC) {
                new_mconfig |= MREGS_MCONFIG_PROMISC;
        } else {
                u16 hash_table[4];
                u8 *hbytes = (unsigned char *) &hash_table[0];

                memset(hash_table, 0, sizeof(hash_table));
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(6, ha->addr);
                        crc >>= 26;
                        /* Bits 5:4 of the 6-bit hash pick one of the four
                         * 16-bit words, bits 3:0 the bit within that word.
                         */
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
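
                /* Program the qe with the new filter value. */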
                for (i = 0; i < 8; i++) {
                        u8 tmp = *hbytes++;
                        sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
                }
        }

        /* Any change to the address filter or promiscuous mode disables
         * the MACE receiver, so it must be re-enabled here.
         */
        qep->mconfig = new_mconfig;
        sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

        /* Let us get going again. */
        netif_wake_queue(dev);
}
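
/* Ethtool support... */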
static void qe_get_drvinfo(struct net_device *dev,
                           struct ethtool_drvinfo *info)
{
        const struct linux_prom_registers *regs;
        struct sunqe *qep = netdev_priv(dev);
        struct platform_device *op = qep->op;

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));

        regs = of_get_property(op->dev.of_node, "reg", NULL);
        if (regs)
                snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
                         regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        u8 phyconfig;

        spin_lock_irq(&qep->lock);
        phyconfig = sbus_readb(qep->mregs + MREGS_PHYCONFIG);
        spin_unlock_irq(&qep->lock);

        return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo		= qe_get_drvinfo,
	.get_link		= qe_get_link,
};
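
/* This is only called once at boot time for each card probed. */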
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
        u8 bsizes = qecp->qec_bursts;

        /* Speed up the QEC dma, handle 64-byte bursts if possible. */
        if (sbus_can_burst64() && (bsizes & DMA_BURST64))
                sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
        else if (bsizes & DMA_BURST32)
                sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
        else
                sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
}

static u8 qec_get_burst(struct device_node *dp)
{
        u8 bsizes, bsizes_more;

        /* Find and set the burst sizes for the QEC, since it
         * does the actual dma for all 4 channels.
         */
        bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
        bsizes &= 0xff;
        bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

        if (bsizes_more != 0xff)
                bsizes &= bsizes_more;
        if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
            (bsizes & DMA_BURST32) == 0)
                bsizes = (DMA_BURST32 - 1);

        return bsizes;
}
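
/* Per-card probe setup: map the QEC global registers, reset the
 * controller, then do the one-time init.
 */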
781 "QEC Global Registers");

        if (qec_global_reset(qecp->gregs))
                goto fail;

        qecp->qec_bursts = qec_get_burst(op->dev.of_node);
        qec_init_once(qecp, op);

static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_rx_mode	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
};
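
/* Allocate and register one net_device per QE channel. */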
static int qec_ether_init(struct platform_device *op)
{
        static unsigned version_printed;
        struct net_device *dev;
        struct sunqe *qe;

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        dev = alloc_etherdev(sizeof(struct sunqe));
        if (!dev)
                return -ENOMEM;

        memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

        qe = netdev_priv(dev);

static int qec_sbus_probe(struct platform_device *op)
{
        return qec_ether_init(op);
}

static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name		= "qec",
		.of_match_table	= qec_sbus_match,
	},
	.probe = qec_sbus_probe,
};

static int __init qec_init(void)
{
        return platform_driver_register(&qec_sbus_driver);
}

static void __exit qec_exit(void)
{
        platform_driver_unregister(&qec_sbus_driver);

        /* Release every QEC master controller still on the list. */
        while (root_qec_dev) {
                struct sunqec *next = root_qec_dev->next_module;
                struct platform_device *op = root_qec_dev->op;

                free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
                of_iounmap(&op->resource[0], root_qec_dev->gregs,
                           GLOB_REG_SIZE);
                kfree(root_qec_dev);

                root_qec_dev = next;
        }
}

module_init(qec_init);
module_exit(qec_exit);