91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
96 #include <linux/string.h>
97 #include <linux/slab.h>
98 #include <linux/if_ether.h>
100 #include <linux/errno.h>
103 #include <linux/netdevice.h>
111 #include <linux/parport.h>
112 #include <linux/bitops.h>
117 #include <asm/byteorder.h>
/*
 * Guarded IRQ enable/disable: a PLIP device may run without an IRQ line
 * (polled mode), signalled by irq == -1, in which case these are no-ops.
 * Wrapped in do { } while (0) so each behaves as exactly one statement
 * inside unbraced if/else bodies, and the argument is parenthesized in
 * the comparison to survive expression arguments.  Note the argument is
 * still evaluated twice (test + call); callers pass a plain lvalue.
 */
#define ENABLE(irq)	do { if ((irq) != -1) enable_irq(irq); } while (0)
#define DISABLE(irq)	do { if ((irq) != -1) disable_irq(irq); } while (0)
/*
 * Busy-wait tuning constants for the nibble-mode handshake.
 * PLIP_DELAY_UNIT is the base delay quantum (presumably in microseconds,
 * consumed by the driver's delay loops — confirm against the callers,
 * which are outside this excerpt).
 * PLIP_TRIGGER_WAIT bounds how many delay units we wait for the remote
 * end to acknowledge the start of a transfer; PLIP_NIBBLE_WAIT bounds
 * the wait for each individual nibble handshake.
 */
#define PLIP_DELAY_UNIT		1
#define PLIP_TRIGGER_WAIT	500
#define PLIP_NIBBLE_WAIT	3000
/*
 * Interrupt callback registered with the parport layer.  dev_id is the
 * opaque context supplied at registration; the handler treats it as the
 * associated struct net_device (it calls netdev_priv() on it).
 */
static void plip_interrupt(void *dev_id);
152 const void *
saddr,
unsigned len);
153 static int plip_hard_header_cache(
const struct neighbour *neigh,
/*
 * Parport preemption callback: the parport layer asks whether this driver
 * can release the port.  handle is the opaque context supplied at
 * registration (treated as the net_device by the implementation).
 * Return value follows the parport preempt convention — nonzero refuses
 * to release; NOTE(review): confirm against the parport API docs.
 */
static int plip_preempt(void *handle);
/*
 * Parport wakeup callback: invoked by the parport layer when the port
 * becomes available to this driver again.  handle is the opaque context
 * supplied at registration (treated as the net_device by the
 * implementation).
 */
static void plip_wakeup(void *handle);
189 #if defined(__LITTLE_ENDIAN)
192 #elif defined(__BIG_ENDIAN)
196 #error "Please fix the endianness defines in <asm/byteorder.h>"
227 static inline void enable_parport_interrupts (
struct net_device *
dev)
233 port->
ops->enable_irq (port);
237 static inline void disable_parport_interrupts (
struct net_device *dev)
243 port->
ops->disable_irq (port);
247 static inline void write_data (
struct net_device *dev,
unsigned char data)
252 port->
ops->write_data (port, data);
255 static inline unsigned char read_status (
struct net_device *dev)
260 return port->
ops->read_status (port);
263 static const struct header_ops plip_header_ops = {
264 .create = plip_hard_header,
265 .cache = plip_hard_header_cache,
269 .ndo_open = plip_open,
270 .ndo_stop = plip_close,
271 .ndo_start_xmit = plip_tx_packet,
272 .ndo_do_ioctl = plip_ioctl,
354 static const plip_func connection_state_table[] =
359 plip_connection_close,
375 if ((r = (*f)(nl->
dev, nl, snd, rcv)) !=
OK &&
376 (r = plip_bh_timeout_error(nl->
dev, nl, snd, rcv, r)) !=
OK) {
389 plip_interrupt (nl->
dev);
414 spin_lock_irq(&nl->
lock);
417 if (error !=
ERROR) {
421 spin_unlock_irq(&nl->
lock);
425 c0 = read_status(dev);
430 dev->
stats.tx_errors++;
431 dev->
stats.tx_aborted_errors++;
435 spin_unlock_irq(&nl->
lock);
438 if (error !=
ERROR) {
440 spin_unlock_irq(&nl->
lock);
444 c0 = read_status(dev);
448 dev->
stats.rx_dropped++;
457 dev_kfree_skb(snd->
skb);
460 spin_unlock_irq(&nl->
lock);
465 disable_parport_interrupts (dev);
466 netif_stop_queue (dev);
468 write_data (dev, 0x00);
483 plip_receive(
unsigned short nibble_timeout,
struct net_device *dev,
486 unsigned char c0, c1;
493 c0 = read_status(dev);
495 if ((c0 & 0x80) == 0) {
496 c1 = read_status(dev);
503 *data_p = (c0 >> 3) & 0x0f;
504 write_data (dev, 0x10);
510 c0 = read_status(dev);
513 c1 = read_status(dev);
520 *data_p |= (c0 << 1) & 0xf0;
521 write_data (dev, 0x00);
546 skb_reset_mac_header(skb);
574 if (*(
unsigned short *)rawp == 0xFFFF)
588 unsigned short nibble_timeout = nl->
nibble;
591 switch (rcv->
state) {
595 disable_parport_interrupts (dev);
596 write_data (dev, 0x01);
604 if (plip_receive(nl->
trigger, dev,
611 enable_parport_interrupts (dev);
616 if (plip_receive(nibble_timeout, dev,
623 if (plip_receive(nibble_timeout, dev,
637 skb_reserve(rcv->
skb, 2);
645 lbuf = rcv->
skb->data;
647 if (plip_receive(nibble_timeout, dev,
657 if (plip_receive(nibble_timeout, dev,
661 dev->
stats.rx_crc_errors++;
670 rcv->
skb->protocol=plip_type_trans(rcv->
skb, dev);
673 dev->
stats.rx_packets++;
679 write_data (dev, 0x00);
680 spin_lock_irq(&nl->
lock);
683 spin_unlock_irq(&nl->
lock);
685 enable_parport_interrupts (dev);
690 spin_unlock_irq(&nl->
lock);
691 enable_parport_interrupts (dev);
702 plip_send(
unsigned short nibble_timeout,
struct net_device *dev,
710 write_data (dev, data & 0x0f);
714 write_data (dev, 0x10 | (data & 0x0f));
717 c0 = read_status(dev);
718 if ((c0 & 0x80) == 0)
724 write_data (dev, 0x10 | (data >> 4));
728 write_data (dev, (data >> 4));
731 c0 = read_status(dev);
749 unsigned short nibble_timeout = nl->
nibble;
761 switch (snd->
state) {
763 if ((read_status(dev) & 0xf8) != 0x80)
767 write_data (dev, 0x08);
771 spin_lock_irq(&nl->
lock);
773 spin_unlock_irq(&nl->
lock);
775 dev->
stats.collisions++;
778 c0 = read_status(dev);
780 spin_unlock_irq(&nl->
lock);
791 dev->
stats.collisions++;
794 disable_parport_interrupts (dev);
802 spin_unlock_irq(&nl->
lock);
804 write_data (dev, 0x00);
810 if (plip_send(nibble_timeout, dev,
816 if (plip_send(nibble_timeout, dev,
825 if (plip_send(nibble_timeout, dev,
835 if (plip_send(nibble_timeout, dev,
839 dev->
stats.tx_bytes += snd->
skb->len;
840 dev_kfree_skb(snd->
skb);
841 dev->
stats.tx_packets++;
846 write_data (dev, 0x00);
853 enable_parport_interrupts (dev);
864 spin_lock_irq(&nl->
lock);
867 netif_wake_queue (dev);
869 spin_unlock_irq(&nl->
lock);
884 status = read_status(dev);
885 if ((status & 0xf8) == 0x80) {
890 netif_start_queue (dev);
891 enable_parport_interrupts (dev);
893 netif_wake_queue (dev);
904 plip_interrupt(
void *
dev_id)
912 nl = netdev_priv(dev);
917 c0 = read_status(dev);
918 if ((c0 & 0xf8) != 0xc0) {
921 spin_unlock_irqrestore (&nl->
lock, flags);
930 netif_wake_queue (dev);
950 spin_unlock_irqrestore(&nl->
lock, flags);
959 if (netif_queue_stopped(dev))
969 netif_stop_queue (dev);
973 netif_start_queue (dev);
980 spin_lock_irq(&nl->
lock);
989 spin_unlock_irq(&nl->
lock);
997 const struct in_device *in_dev;
1000 in_dev = __in_dev_get_rcu(dev);
1003 const struct in_ifaddr *ifa = in_dev->ifa_list;
1016 const void *
saddr,
unsigned len)
1020 ret =
eth_header(skb, dev, type, daddr, saddr, len);
1022 plip_rewrite_address (dev, (
struct ethhdr *)skb->
data);
1027 static int plip_hard_header_cache(
const struct neighbour *neigh,
1038 plip_rewrite_address (neigh->
dev, eth);
1053 struct net_local *nl = netdev_priv(dev);
1054 struct in_device *in_dev;
1065 write_data (dev, 0x00);
1068 enable_parport_interrupts (dev);
1093 in_dev=__in_dev_get_rtnl(dev);
1098 struct in_ifaddr *ifa=in_dev->ifa_list;
1104 netif_start_queue (dev);
1113 struct net_local *nl = netdev_priv(dev);
1117 netif_stop_queue (dev);
1140 dev_kfree_skb(snd->
skb);
1157 plip_preempt(
void *
handle)
1160 struct net_local *nl = netdev_priv(dev);
1173 plip_wakeup(
void *handle)
1176 struct net_local *nl = netdev_priv(dev);
1195 write_data (dev, 0x00);
1202 struct net_local *nl = netdev_priv(dev);
1230 MODULE_PARM_DESC(parport,
"List of parport device numbers to use by plip");
1235 plip_searchfor(
int list[],
int a)
1238 for (i = 0; i <
PLIP_MAX && list[
i] != -1; i++) {
1239 if (list[i] == a)
return 1;
1246 static void plip_attach (
struct parport *port)
1253 if ((parport[0] == -1 && (!timid || !port->
devices)) ||
1254 plip_searchfor(parport, port->
number)) {
1260 sprintf(name,
"plip%d", unit);
1261 dev = alloc_etherdev(
sizeof(
struct net_local));
1269 if (port->
irq == -1) {
1271 "which is fairly inefficient!\n", port->
name);
1274 nl = netdev_priv(dev);
1277 plip_wakeup, plip_interrupt,
1285 plip_init_netdev(dev);
1289 goto err_parport_unregister;
1301 dev_plip[unit++] =
dev;
1305 err_parport_unregister:
1313 static void plip_detach (
struct parport *port)
1320 .attach = plip_attach,
1321 .detach = plip_detach
1324 static void __exit plip_cleanup_module (
void)
1332 if ((dev = dev_plip[i])) {
1333 struct net_local *nl = netdev_priv(dev);
1346 static int parport_ptr;
1355 if (!
strncmp(str,
"parport", 7)) {
1357 if (parport_ptr < PLIP_MAX)
1358 parport[parport_ptr++] =
n;
1362 }
else if (!
strcmp(str,
"timid")) {
1365 if (ints[0] == 0 || ints[1] == 0) {
1380 static int __init plip_init (
void)
1382 if (parport[0] == -2)
1385 if (parport[0] != -1 && timid) {
1386 printk(
KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");