27 #include <linux/bitops.h>
28 #include <linux/errno.h>
29 #include <linux/fcntl.h>
30 #include <linux/hdlc.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/netdevice.h>
40 #include <linux/string.h>
41 #include <linux/types.h>
43 #include <asm/uaccess.h>
/* Per-channel register bases: the SCA exposes two identical channels
 * (MSCI + RX/TX DMAC register sets) at different offsets; phy_node()
 * selects channel 0 or channel 1 for the given port.
 * Macro arguments parenthesized per CERT PRE01-C.
 */
#define get_msci(port)	  (phy_node((port)) ? MSCI1_OFFSET : MSCI0_OFFSET)
#define get_dmac_rx(port) (phy_node((port)) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
#define get_dmac_tx(port) (phy_node((port)) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
/* Interrupt-status bit masks: channel (node) 0 uses the low nibble,
 * channel 1 the high nibble, one bit each for MSCI, RX DMAC and TX DMAC.
 * BUG FIX: the macro parameter was expanded unparenthesized, so an
 * argument containing a low-precedence operator (e.g. an assignment)
 * would bind to the conditional expression instead of being evaluated
 * first.  Parenthesized per CERT PRE01-C.
 */
#define SCA_INTR_MSCI(node)    ((node) ? 0x10 : 0x01)
#define SCA_INTR_DMAC_RX(node) ((node) ? 0x20 : 0x02)
#define SCA_INTR_DMAC_TX(node) ((node) ? 0x40 : 0x04)
60 static inline int sca_intr_status(
card_t *
card)
85 return dev_to_hdlc(dev)->priv;
90 return (desc + 1) % (transmit ?
port_to_card(port)->tx_ring_buffers
95 static inline u16 desc_abs_number(
port_t *port,
u16 desc,
int transmit)
100 desc %= (transmit ? tx_buffs : rx_buffs);
101 return log_node(port) * (rx_buffs + tx_buffs) +
102 transmit * rx_buffs + desc;
106 static inline u16 desc_offset(
port_t *port,
u16 desc,
int transmit)
109 return desc_abs_number(port, desc, transmit) *
sizeof(
pkt_desc);
116 #ifdef PAGE0_ALWAYS_MAPPED
118 + desc_offset(port, desc, transmit));
121 + desc_offset(port, desc, transmit));
126 static inline u32 buffer_offset(
port_t *port,
u16 desc,
int transmit)
133 static inline void sca_set_carrier(
port_t *port)
151 static void sca_init_port(
port_t *port)
160 #ifndef PAGE0_ALWAYS_MAPPED
164 for (transmit = 0; transmit < 2; transmit++) {
166 u16 buffs = transmit ? card->tx_ring_buffers
167 : card->rx_ring_buffers;
169 for (i = 0; i < buffs; i++) {
171 u16 chain_off = desc_offset(port, i + 1, transmit);
172 u32 buff_off = buffer_offset(port, i, transmit);
174 writew(chain_off, &desc->cp);
175 writel(buff_off, &desc->bp);
189 sca_outw(desc_offset(port, 0, transmit), dmac +
CDAL, card);
191 sca_outw(desc_offset(port, buffs - 1, transmit),
217 sca_set_carrier(port);
221 #ifdef NEED_SCA_MSCI_INTR
223 static inline void sca_msci_intr(
port_t *port)
239 sca_set_carrier(port);
254 len =
readw(&desc->len);
255 skb = dev_alloc_skb(len);
257 dev->
stats.rx_dropped++;
261 buff = buffer_offset(port, rxin, 0);
270 openwin(card, page + 1);
275 #ifndef PAGE0_ALWAYS_MAPPED
283 dev->
stats.rx_packets++;
285 skb->
protocol = hdlc_type_trans(skb, dev);
291 static inline void sca_rx_intr(
port_t *port)
304 dev->
stats.rx_over_errors++;
307 u32 desc_off = desc_offset(port, port->rxin, 0);
311 if ((cda >= desc_off) && (cda < desc_off +
sizeof(
pkt_desc)))
314 desc = desc_address(port, port->rxin, 0);
315 stat =
readb(&desc->stat);
319 dev->
stats.rx_errors++;
321 dev->
stats.rx_fifo_errors++;
324 dev->
stats.rx_frame_errors++;
326 dev->
stats.rx_crc_errors++;
327 if (stat & ST_RX_EOM)
330 sca_rx(card, port, desc, port->rxin);
334 port->rxin =
next_desc(port, port->rxin, 0);
343 static inline void sca_tx_intr(
port_t *port)
350 spin_lock(&port->
lock);
361 u32 desc_off = desc_offset(port, port->txlast, 1);
363 if ((cda >= desc_off) && (cda < desc_off +
sizeof(
pkt_desc)))
366 desc = desc_address(port, port->txlast, 1);
367 dev->
stats.tx_packets++;
370 port->txlast =
next_desc(port, port->txlast, 1);
373 netif_wake_queue(dev);
374 spin_unlock(&port->
lock);
384 u8 page = sca_get_page(card);
386 while((stat = sca_intr_status(card)) != 0) {
388 for (i = 0; i < 2; i++) {
408 static void sca_set_port(
port_t *port)
413 unsigned int tmc, br = 10, brv = 1024;
416 if (port->settings.clock_rate > 0) {
423 tmc =
CLOCK_BASE / brv / port->settings.clock_rate;
424 }
while (br > 1 && tmc <= 128);
430 }
else if (tmc > 255)
433 port->settings.clock_rate =
CLOCK_BASE / brv / tmc;
437 port->settings.clock_rate =
CLOCK_BASE / (256 * 512);
451 if (port->settings.loopback)
468 switch(port->encoding) {
476 if (port->settings.loopback)
479 switch(port->parity) {
504 sca_set_carrier(port);
521 netif_start_queue(dev);
539 netif_stop_queue(dev);
543 static int sca_attach(
struct net_device *dev,
unsigned short encoding,
544 unsigned short parity)
567 static void sca_dump_rings(
struct net_device *dev)
572 #ifndef PAGE0_ALWAYS_MAPPED
573 u8 page = sca_get_page(card);
583 for (cnt = 0; cnt <
port_to_card(port)->rx_ring_buffers; cnt++)
584 pr_cont(
" %02X",
readb(&(desc_address(port, cnt, 0)->stat)));
594 for (cnt = 0; cnt <
port_to_card(port)->tx_ring_buffers; cnt++)
595 pr_cont(
" %02X",
readb(&(desc_address(port, cnt, 1)->stat)));
599 " FST: %02x CST: %02x %02x\n",
614 #ifndef PAGE0_ALWAYS_MAPPED
630 spin_lock_irq(&port->
lock);
632 desc = desc_address(port, port->txin + 1, 1);
640 desc = desc_address(port, port->txin, 1);
641 buff = buffer_offset(port, port->txin, 1);
650 openwin(card, page + 1);
655 #ifndef PAGE0_ALWAYS_MAPPED
661 port->txin =
next_desc(port, port->txin, 1);
662 sca_outw(desc_offset(port, port->txin, 1),
663 get_dmac_tx(port) +
EDAL, card);
667 desc = desc_address(port, port->txin + 1, 1);
668 if (
readb(&desc->stat))
669 netif_stop_queue(dev);
671 spin_unlock_irq(&port->
lock);
678 #ifdef NEED_DETECT_RAM
683 u32 i = ramsize &= ~3;
686 openwin(card, (i - 4) / size);
690 if ((i + 4) % size == 0)
691 openwin(card, i / size);
692 writel(i ^ 0x12345678, rambase + i % size);
695 for (i = 0; i < ramsize ; i += 4) {
697 openwin(card, i / size);
699 if (
readl(rambase + i % size) != (i ^ 0x12345678))