#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/bitops.h>
#define RCV_PARANOIA_CHECK

#define MID_PERFORMANCE
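/*
 * Bus-interface timing and FIFO settings for the three performance
 * levels: isa0/isa1 are ISA bus timing values, csr80 the FIFO
 * watermarks; ni65_set_performance() writes them to the chip.
 */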
#if   defined( LOW_PERFORMANCE )
 static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
 static int isa0=5,isa1=5,csr80=0x2810;
#else	/* high performance */
 static int isa0=4,isa1=4,csr80=0x0017;
#endif
#define NI65_ID0    0x00
#define NI65_ID1    0x55
#define NI65_EB_ID0 0x52
#define NI65_EB_ID1 0x44
#define NE2100_ID0  0x57
#define NE2100_ID1  0x57
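/* ID signature bytes; the probe compares them against the two bytes at each card's id_offset */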
#define PORT p->cmdr_addr
#if 1
#define RMDNUMMASK 0x80000000 /* log2(16)<<29: 16 recv descriptors */
#else
#define RMDNUMMASK 0x60000000 /* log2(8)<<29: 8 recv descriptors */
#endif
#if 0
#define TMDNUMMASK 0x00000000 /* log2(1)<<29: 1 xmit descriptor */
#else
#define TMDNUMMASK 0x40000000 /* log2(4)<<29: 4 xmit descriptors */
#endif
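/*
 * the ring lengths are encoded as log2(n) in the top three bits of the
 * init block's ring pointers, so these masks get OR'ed into the bus
 * addresses of the descriptor rings in ni65_init_lance()
 */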
#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544
#define L_DATAREG 0x00
#define L_ADDRREG 0x02
#define L_CONFIG  0x05
#define INIT_RING_BEFORE_START	0x1
#define FULL_RESET_ON_ERROR	0x2
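/*
 * The LANCE CSRs are accessed indirectly: write the register number to
 * the address port (L_ADDRREG), then read/write the value through the
 * data port (L_DATAREG).  The first variant below flushes every ISA
 * write with a dummy inw(); the active second variant omits that.
 */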
#if 0
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                      inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) { writereg(val,CSR0); }
#endif
#else
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif
static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
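/* first three bytes of a ni6510 station address, matched against the card's address PROM during the probe */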
		.vendor_id   = ni_vendor,
		.cardname    = "ni6510",

		.vendor_id   = ni_vendor,
		.cardname    = "ni6510 EtherBlaster",

		.cardname    = "generic NE2100",
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static void ni65_free_buffer(struct priv *p);
static int irqtab[] __initdata = { 9,12,15,5 };
static int dmatab[] __initdata = { 0,3,5,6,7 };
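/* translation tables: the card's configuration code indexes an ISA IRQ line resp. DMA channel */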
static void ni65_set_performance(struct priv *p)

	if( !(cards[p->cardno].config & 0x02) )
			cards[p->cardno].cardname,dev);
	if(ni65_lance_reinit(dev))
		netif_start_queue(dev);

	netif_stop_queue(dev);
				dev_kfree_skb(p->tmd_skb[i]);
				p->tmd_skb[i] = NULL;
static void cleanup_card(struct net_device *dev)
	static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };

	for (port = ports; *port && ni65_probe1(dev, *port); port++)
		;
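	/* ni65_probe1() returns 0 on success, so the loop stops at the
	   first I/O port with a recognized card (or at the 0 sentinel) */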
	.ndo_open		= ni65_open,
	.ndo_stop		= ni65_close,
	.ndo_start_xmit		= ni65_send_packet,
	.ndo_tx_timeout		= ni65_timeout,
	.ndo_set_rx_mode	= set_multicast_list,
		if(cards[i].id_offset >= 0) {
			if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
			   inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {

				if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) {
		dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);

	if( (j=ni65_alloc_buffer(dev)) < 0) {
	printk("Version %#08lx, ",v);

	printk("ancient LANCE, ");

	printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
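	/*
	 * DMA autodetection (sketch): the LANCE needs bus-master DMA just
	 * to fetch its init block, so the probe tries each channel from
	 * dmatab[] in cascade mode, triggers an init and checks whether
	 * it completed; channels whose status bits are already busy in
	 * the snapshot below are skipped.
	 */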
		unsigned long dma_channels =
			((inb(DMA1_STAT_REG) >> 4) & 0x0f)
			| (inb(DMA2_STAT_REG) & 0xf0);
			ni65_init_lance(p,dev->dev_addr,0,0);
			dev->dma = dmatab[i];
			printk("DMA %d (autodetected), ",dev->dma);
		unsigned long irq_mask;
		ni65_init_lance(p,dev->dev_addr,0,0);
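		/*
		 * classic ISA IRQ autoprobe (elided here): probe_irq_on(),
		 * start a LANCE init with interrupts enabled so the chip
		 * raises its line, then probe_irq_off() reports which IRQ
		 * fired
		 */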
			printk("Failed to detect IRQ line!\n");

		printk("IRQ %d (autodetected).\n",dev->irq);
		p->ib.eaddr[i] = daddr[i];
	pib = (u32) isa_virt_to_bus(&p->ib);
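	/* per the Am7990 conventions, the init block's bus address is
	   split across CSR1 (low 16 bits) and CSR2 (high bits) before
	   the INIT command is issued */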
		skb_reserve(skb,2+16);
static int ni65_alloc_buffer(struct net_device *dev)

	ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
	p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
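	/* the +8 over-allocation above leaves room to round the pointer
	   up to the next 8-byte boundary, which the LANCE descriptor
	   rings inside struct priv require */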
		p->tmd_skb[i] = NULL;
		p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
		if(!p->recv_skb[i]) {
static void ni65_free_buffer(struct priv *p)

		dev_kfree_skb(p->tmd_skb[i]);

		dev_kfree_skb(p->recv_skb[i]);
static void ni65_stop_start(struct net_device *dev,struct priv *p)
			skb_save[i] = p->tmd_skb[i];

			blen[i] = tmdp->blen;
			tmdp->u.s.status = 0x0;
			p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]);
			p->tmd_skb[i] = skb_save[num];
			netif_wake_queue(dev);
static int ni65_lance_reinit(struct net_device *dev)

			cards[p->cardno].cardname,(int) i);
			dev_kfree_skb(p->tmd_skb[i]);
			p->tmd_skb[i] = NULL;
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
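		/* the chip sees ISA bus addresses, not kernel virtual ones;
		   every buffer pointer stored in a descriptor goes through
		   isa_virt_to_bus() and must lie below the 16MB ISA limit */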
		ni65_init_lance(p,dev->dev_addr,0xff,0x0);

		ni65_init_lance(p,dev->dev_addr,0x00,0x00);
		ni65_set_performance(p);

			ni65_recv_intr(dev,csr0);

			ni65_xmit_intr(dev,csr0);
				dev->stats.tx_errors++;

				dev->stats.rx_errors++;

				ni65_stop_start(dev,p);
#ifdef RCV_PARANOIA_CHECK
	for(i=RMDNUM-1;i>0;i--) {
		num2 = (p->rmdnum + i) & (RMDNUM-1);

			num1 = (p->rmdnum + k) & (RMDNUM-1);
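	/*
	 * receive-ring resync (sketch): scan the ring for ownership bits
	 * that are inconsistent with the current index; if the chip and
	 * driver have drifted apart, rmdnum is recomputed and
	 * ni65_recv_intr() is re-run to get back in step
	 */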
		ni65_recv_intr(dev,csr0);

		ni65_stop_start(dev,p);
static void ni65_xmit_intr(struct net_device *dev,int csr0)

		int tmdstat = tmdp->u.s.status;
		if(tmdstat & XMIT_OWN)
				dev->stats.tx_aborted_errors++;

				dev->stats.tx_carrier_errors++;

				dev->stats.tx_fifo_errors++;

					ni65_stop_start(dev,p);

				ni65_stop_start(dev,p);

			if(!(csr0 & CSR0_BABL))
				dev->stats.tx_errors++;

			dev->stats.tx_packets++;
	netif_wake_queue(dev);
static void ni65_recv_intr(struct net_device *dev,int csr0)

	while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
				dev->stats.rx_length_errors++;

				printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",

				dev->stats.rx_frame_errors++;

				dev->stats.rx_over_errors++;

				dev->stats.rx_crc_errors++;

				dev->stats.rx_fifo_errors++;

			if(!(csr0 & CSR0_MISS))
				dev->stats.rx_errors++;

		else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
				skb_reserve(skb,16);

			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
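			/*
			 * two receive strategies: with RCV_VIA_SKB the ring
			 * buffers are themselves sk_buffs that can be handed
			 * up or swapped; otherwise each frame is copied out
			 * of a DMA-safe bounce buffer into a freshly
			 * allocated skb
			 */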
					skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
				skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
				dev->stats.rx_packets++;

				dev->stats.rx_dropped++;

			dev->stats.rx_errors++;
static void ni65_timeout(struct net_device *dev)

	ni65_lance_reinit(dev);

	netif_wake_queue(dev);
	netif_stop_queue(dev);

		unsigned long flags;
		if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
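		/* ISA DMA reaches only the first 16MB (0x1000000); packet
		   data above that boundary is copied into a pre-allocated
		   bounce buffer before the descriptor is filled in */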
			dev_kfree_skb (skb);

		netif_wake_queue(dev);

		spin_unlock_irqrestore(&p->ring_lock, flags);
static void set_multicast_list(struct net_device *dev)
{
	if(!ni65_lance_reinit(dev))
		printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
	netif_wake_queue(dev);
}
	return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;

	cleanup_card(dev_ni65);