8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/netdevice.h>
13 #include <linux/string.h>
20 #include <linux/slab.h>
24 #include <asm/pgtable.h>
29 static int port_aaui = -1;
33 #define MAX_TX_ACTIVE 1
35 #define RX_BUFLEN (ETH_FRAME_LEN + 8)
39 #define BROKEN_ADDRCHG_REV 0x0941
42 #define TX_DMA_ERR 0x80
76 #define PRIV_BYTES (sizeof(struct mace_data) \
77 + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
89 static void mace_tx_timeout(
unsigned long data);
90 static inline void dbdma_reset(
volatile struct dbdma_regs
__iomem *
dma);
91 static inline void mace_clean_rings(
struct mace_data *
mp);
97 static unsigned char *dummy_buf;
/*
 * net_device_ops method table entries (the enclosing
 * "static const struct net_device_ops mace_netdev_ops = { ... };"
 * declaration is not visible in this fragment).
 */
100 .ndo_open = mace_open,
101 .ndo_stop = mace_close,
102 .ndo_start_xmit = mace_xmit_start,
103 .ndo_set_rx_mode = mace_set_multicast,
104 .ndo_set_mac_address = mace_set_address,
/*
 * mace_probe fragment (function header and most of the body are
 * missing from this extraction): binds the driver to a macio device.
 * Visible steps: validate resource/interrupt counts, lazily allocate
 * the shared dummy RX buffer, record MMIO base and IRQ, read the MAC
 * address (bit-reversed on some units), and lay out the DBDMA command
 * descriptors after the private struct, unwinding via goto labels on
 * error.
 */
114 const unsigned char *
addr;
/* The device must expose exactly 3 register ranges and 3 interrupts
 * (chip, TX DMA, RX DMA -- presumably; confirm). */
117 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
137 if (dummy_buf ==
NULL) {
139 if (dummy_buf ==
NULL)
155 mp = netdev_priv(dev);
157 macio_set_drvdata(mdev, dev);
159 dev->
base_addr = macio_resource_start(mdev, 0);
166 dev->
irq = macio_irq(mdev, 0);
/* Units whose address PROM starts 00:A0 apparently store the MAC with
 * reversed bit order -- fix up each byte with bitrev8(). */
168 rev = addr[0] == 0 && addr[1] == 0xA0;
169 for (j = 0; j < 6; ++
j) {
170 dev->
dev_addr[
j] = rev ? bitrev8(addr[j]): addr[j];
176 mp = netdev_priv(dev);
191 goto err_unmap_tx_dma;
/* DBDMA command descriptors live directly after the private struct,
 * rounded up to the alignment the DMA engine requires. */
195 mp->
tx_cmds = (
volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
211 #ifdef CONFIG_MACE_AAUI_PORT
229 goto err_unmap_rx_dma;
239 goto err_free_tx_irq;
245 goto err_free_rx_irq;
/*
 * mace_remove fragment: driver detach. Visible steps fetch the
 * net_device back from the macio drvdata, clear the drvdata pointer,
 * and recover the private state for teardown (the actual
 * unregister/unmap/free calls are missing from this extraction).
 */
274 static int __devexit mace_remove(
struct macio_dev *mdev)
276 struct net_device *dev = macio_get_drvdata(mdev);
281 macio_set_drvdata(mdev,
NULL);
283 mp = netdev_priv(dev);
/*
 * dbdma_reset fragment: resets a DBDMA channel, then polls the channel
 * control register for up to 200 iterations while the RUN bit is still
 * set (the loop body and the reset write itself are missing here).
 * NOTE(review): the forward declaration earlier in the file says
 * "static inline" while this definition is plain "static" -- confirm
 * this matches the pristine source.
 */
302 static void dbdma_reset(
volatile struct dbdma_regs
__iomem *
dma)
312 for (i = 200; i > 0; --
i)
313 if (ld_le32(&dma->control) &
RUN)
/*
 * mace_reset fragment: puts the MACE chip into a known state.
 * Visible steps: write 0xff to the interrupt mask register
 * (presumably masking all chip interrupts), clear the MAC
 * configuration register (maccc) and the receive frame control
 * register (rcvfc), reprogram the station address, and zero all 8
 * bytes of the logical (multicast) address filter via ladrf.
 */
317 static void mace_reset(
struct net_device *dev)
338 out_8(&mb->imr, 0xff);
340 out_8(&mb->maccc, 0);
346 out_8(&mb->rcvfc, 0);
349 __mace_set_address(dev, dev->
dev_addr);
359 for (i = 0; i < 8; ++
i)
360 out_8(&mb->ladrf, 0);
/*
 * __mace_set_address fragment: programs the 6-byte station address
 * into the chip, one byte per loop iteration (the register writes in
 * the loop body are missing from this extraction). Presumably the
 * caller must hold the driver lock / have the chip quiesced -- TODO
 * confirm.
 */
372 static void __mace_set_address(
struct net_device *dev,
void *addr)
376 unsigned char *
p =
addr;
387 for (i = 0; i < 6; ++
i)
/*
 * mace_set_address fragment: ndo_set_mac_address hook. Takes the
 * driver spinlock (the acquire is not visible in this extraction),
 * delegates to __mace_set_address() to program the hardware, then
 * releases the lock.
 */
393 static int mace_set_address(
struct net_device *dev,
void *addr)
401 __mace_set_address(dev, addr);
406 spin_unlock_irqrestore(&mp->
lock, flags);
/*
 * mace_clean_rings fragment: presumably frees the sk_buffs still
 * attached to the RX/TX DMA rings; the only surviving body line wraps
 * a TX ring index at N_TX_RING.
 */
410 static inline void mace_clean_rings(
struct mace_data *mp)
423 if (++i >= N_TX_RING)
/*
 * mace_open fragment (function header missing from this extraction):
 * initializes the receive/transmit DBDMA rings. Visible steps: clear
 * any stale ring state, zero the RX command descriptors, allocate one
 * skb per RX slot (RX_BUFLEN + 2 -- the +2 presumably aligns the IP
 * header; confirm), terminate a ring with a STOP command, and wrap
 * the rings with branch-always NOP descriptors.
 */
434 volatile struct dbdma_cmd *
cp;
443 mace_clean_rings(mp);
444 memset((
char *)mp->
rx_cmds, 0, N_RX_RING *
sizeof(
struct dbdma_cmd));
446 for (i = 0; i < N_RX_RING - 1; ++
i) {
447 skb = netdev_alloc_skb(dev,
RX_BUFLEN + 2);
462 st_le16(&cp->command, DBDMA_STOP);
468 st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
478 st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
/*
 * mace_close fragment (header missing from this extraction): quiesce
 * the chip -- clear maccc, mask all chip interrupts via imr -- then
 * free the ring buffers.
 */
506 out_8(&mb->maccc, 0);
507 out_8(&mb->imr, 0xff);
513 mace_clean_rings(mp);
/*
 * mace_set_timeout fragment: only the signature survives here;
 * presumably (re)arms the TX watchdog timer whose callback is
 * mace_tx_timeout -- it is invoked right after queueing/restarting a
 * transmit elsewhere in this file.
 */
518 static inline void mace_set_timeout(
struct net_device *dev)
/*
 * mace_xmit_start fragment (ndo_start_xmit; header missing from this
 * extraction). Visible logic: compute the next TX ring slot with
 * wraparound, stop the queue when the ring is full, fill in the DMA
 * request length, kick the descriptor with OUTPUT_LAST, arm the TX
 * watchdog, advance the ring index (stopping the queue again if it
 * fills), all under the driver spinlock.
 */
535 volatile struct dbdma_cmd *
cp, *np;
543 if (next >= N_TX_RING)
546 netif_stop_queue(dev);
548 spin_unlock_irqrestore(&mp->
lock, flags);
551 spin_unlock_irqrestore(&mp->
lock, flags);
561 st_le16(&cp->req_count, len);
572 out_le16(&cp->command, OUTPUT_LAST);
575 mace_set_timeout(dev);
577 if (++next >= N_TX_RING)
580 netif_stop_queue(dev);
581 spin_unlock_irqrestore(&mp->
lock, flags);
/*
 * mace_set_multicast fragment: ndo_set_rx_mode hook. Builds the
 * 64-bit logical address filter: all-ones to accept everything
 * (promiscuous/all-multi -- the governing conditions are missing from
 * this extraction), otherwise starts from all-zero and sets one bit
 * per multicast address (i is presumably derived from the address CRC
 * -- confirm). Optionally dumps the filter via printk, then writes
 * the 8 filter bytes to the chip's ladrf register under the driver
 * lock.
 */
586 static void mace_set_multicast(
struct net_device *dev)
599 unsigned char multicast_filter[8];
603 for (i = 0; i < 8; i++)
604 multicast_filter[i] = 0xff;
606 for (i = 0; i < 8; i++)
607 multicast_filter[i] = 0;
611 multicast_filter[i >> 3] |= 1 << (i & 7);
615 printk(
"Multicast filter :");
616 for (i = 0; i < 8; i++)
617 printk(
"%02x ", multicast_filter[i]);
628 for (i = 0; i < 8; ++
i)
629 out_8(&mb->ladrf, multicast_filter[i]);
/*
 * mace_handle_misc_intrs fragment (header missing from this
 * extraction): folds the chip's 8-bit error counters into the
 * net_device stats. On a counter-overflow event the visible code adds
 * 256 and then reads the residual count from the register (mpc =
 * missed packets, rntpc = runt packets -- presumably; confirm against
 * the datasheet). Heartbeat errors bump tx_heartbeat_errors;
 * babble/jabber reports are rate-limited to the first 4 occurrences
 * via these function-local statics.
 */
641 static int mace_babbles, mace_jabbers;
644 dev->
stats.rx_missed_errors += 256;
645 dev->
stats.rx_missed_errors +=
in_8(&mb->mpc);
647 dev->
stats.rx_length_errors += 256;
648 dev->
stats.rx_length_errors +=
in_8(&mb->rntpc);
650 ++dev->
stats.tx_heartbeat_errors;
652 if (mace_babbles++ < 4)
655 if (mace_jabbers++ < 4)
/*
 * mace_interrupt fragment (handler header missing from this
 * extraction). Visible logic: read the chip interrupt register and
 * dispatch misc errors via mace_handle_misc_intrs, then reap
 * completed transmits -- read the TX frame status (xmtfs), detect a
 * dead/stalled TX DMA channel (xcount == 0 or DEAD set in the channel
 * status), account tx_errors / tx_carrier_errors / tx_aborted_errors
 * / tx_packets per frame, advance the ring with wraparound, restart
 * the next pending descriptor with OUTPUT_LAST, re-arm the TX
 * watchdog, and wake the queue. Runs under the driver spinlock.
 */
665 volatile struct dbdma_cmd *
cp;
672 intr =
in_8(&mb->ir);
674 mace_handle_misc_intrs(mp, intr, dev);
685 intr =
in_8(&mb->ir);
687 mace_handle_misc_intrs(mp, intr, dev);
689 fs =
in_8(&mb->xmtfs);
694 dstat = ld_le32(&td->status);
702 if (xcount == 0 || (dstat &
DEAD)) {
716 fs =
in_8(&mb->xmtfs);
/* XMTSV clear means the transmit status is not (yet) valid. */
717 if ((fs &
XMTSV) == 0) {
727 stat = ld_le16(&cp->xfer_status);
738 mace_set_timeout(dev);
761 ++dev->
stats.tx_errors;
763 ++dev->
stats.tx_carrier_errors;
765 ++dev->
stats.tx_aborted_errors;
768 ++dev->
stats.tx_packets;
772 if (++i >= N_TX_RING)
776 mace_last_xcount = xcount;
782 netif_wake_queue(dev);
793 out_le16(&cp->command, OUTPUT_LAST);
795 if (++i >= N_TX_RING)
799 mace_set_timeout(dev);
801 spin_unlock_irqrestore(&mp->
lock, flags);
/*
 * mace_tx_timeout fragment: TX watchdog callback (old-style timer
 * signature taking the device as an unsigned long). Visible recovery
 * steps: flush pending misc interrupts, quiesce the chip (maccc = 0),
 * count the stuck frame as a tx_error, skip past it with ring
 * wraparound, wake the queue, and restart the next descriptor with
 * OUTPUT_LAST before re-arming the watchdog; ends by dropping the
 * driver spinlock.
 */
805 static void mace_tx_timeout(
unsigned long data)
812 volatile struct dbdma_cmd *
cp;
822 mace_handle_misc_intrs(mp,
in_8(&mb->ir), dev);
827 out_8(&mb->maccc, 0);
842 ++dev->
stats.tx_errors;
847 if (++i >= N_TX_RING)
852 netif_wake_queue(dev);
856 out_le16(&cp->command, OUTPUT_LAST);
860 mace_set_timeout(dev);
868 spin_unlock_irqrestore(&mp->
lock, flags);
/*
 * mace_txdma_intr fragment: TX DMA channel interrupt handler; only
 * the signature survives in this extraction.
 */
871 static irqreturn_t mace_txdma_intr(
int irq,
void *dev_id)
/*
 * mace_rxdma_intr fragment: RX DMA channel interrupt handler.
 * Visible logic: walk the RX ring while descriptors have completed
 * (ACTIVE clear in xfer_status, with a lost-status workaround that
 * peeks at the next descriptor), compute the received byte count as
 * req_count - res_count, pull the MACE frame status from the bytes
 * the DMA appends after the frame data, and account rx_dropped /
 * rx_errors / rx_over_errors / rx_frame_errors / rx_crc_errors or
 * rx_packets accordingly. Frames whose length/type field at offset
 * 12 is < 1536 are treated as 802.3 length-encoded (presumably to
 * trim padding -- confirm). The ring is then refilled with a fresh
 * skb per slot, falling back to the shared dummy_buf when allocation
 * fails, and the DMA channel is poked back to life if it went
 * inactive. Runs under the driver spinlock.
 */
876 static irqreturn_t mace_rxdma_intr(
int irq,
void *dev_id)
881 volatile struct dbdma_cmd *
cp, *np;
885 static int mace_lost_status;
892 stat = ld_le16(&cp->xfer_status);
893 if ((stat &
ACTIVE) == 0) {
895 if (next >= N_RX_RING)
899 (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
905 nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
910 ++dev->
stats.rx_dropped;
/* Frame status lives in the trailing bytes the DMA appended. */
913 frame_status = (data[nb-3] << 8) + data[nb-4];
915 ++dev->
stats.rx_errors;
917 ++dev->
stats.rx_over_errors;
919 ++dev->
stats.rx_frame_errors;
921 ++dev->
stats.rx_crc_errors;
927 if (*(
unsigned short *)(data+12) < 1536)
936 ++dev->
stats.rx_packets;
939 ++dev->
stats.rx_errors;
940 ++dev->
stats.rx_length_errors;
944 if (++i >= N_RX_RING)
952 if (next >= N_RX_RING)
959 skb = netdev_alloc_skb(dev,
RX_BUFLEN + 2);
/* Fall back to the shared dummy buffer if skb allocation failed. */
966 data = skb? skb->
data: dummy_buf;
971 if ((ld_le32(&rd->status) & ACTIVE) != 0) {
973 while ((
in_le32(&rd->status) & ACTIVE) != 0)
983 spin_unlock_irqrestore(&mp->
lock, flags);
/*
 * macio bus driver glue fragment: matches devices via the OF match
 * table and wires up the probe/remove callbacks (the driver
 * name/owner fields are missing from this extraction).
 */
996 static struct macio_driver mace_driver =
1001 .of_match_table = mace_match,
1003 .probe = mace_probe,
1004 .remove = mace_remove,
/*
 * Module entry point fragment: only the signature survives here;
 * presumably registers mace_driver with the macio bus -- confirm.
 */
1008 static int __init mace_init(
void)
/*
 * Module exit fragment: only the signature survives here; presumably
 * unregisters mace_driver and releases dummy_buf -- confirm.
 */
1013 static void __exit mace_cleanup(
void)