#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>

#include <linux/hdlc.h>
static const char version[] =
        "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
#ifdef CONFIG_DSCC4_PCI_RST
static u32 dscc4_pci_config_store[16];
#endif

#define DRV_NAME "dscc4"
#define DUMMY_SKB_SIZE 64
#define TX_RING_SIZE 32
#define RX_RING_SIZE 32
#define TX_TOTAL_SIZE (TX_RING_SIZE*sizeof(struct TxFD))
#define RX_TOTAL_SIZE (RX_RING_SIZE*sizeof(struct RxFD))
#define IRQ_RING_SIZE 64
#define TX_TIMEOUT (HZ/10)
#define DSCC4_HZ_MAX 33000000
#define BRR_DIVIDER_MAX (64*0x00004000)
#define dev_per_card 4
#define SCC_REGISTERS_MAX 23
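
/*
 * Illustration (inferred from the constants above, not an original
 * comment): each of the dev_per_card (4) channels gets its own 32-entry
 * Tx and Rx descriptor ring, so a single card needs four blocks of
 * TX_TOTAL_SIZE bytes plus four blocks of RX_TOTAL_SIZE bytes for
 * descriptors alone.
 */
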
#define SOURCE_ID(flags) (((flags) >> 28) & 0x03)
#define TO_SIZE(state) (((state) >> 16) & 0x1fff)
#define TO_STATE_TX(len) cpu_to_le32(((len) & TxSizeMax) << 16)
#define TO_STATE_RX(len) cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
#define RX_MAX(len) ((((len) >> 5) + 1) << 5)
#define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET)
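
/*
 * Worked example (added for illustration, not from the original source):
 * RX_MAX() rounds a length up to the next 32-byte boundary, e.g.
 * RX_MAX(1500) = ((1500 >> 5) + 1) << 5 = 1504.  TO_STATE_TX() and
 * TO_STATE_RX() then place the bounded length into bits 16..28 of the
 * little-endian descriptor state word, which TO_SIZE() reads back.
 */
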
#define SCC_START 0x0100
#define SCC_OFFSET 0x80
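
/*
 * Illustration (not an original comment): with SCC_START 0x0100 and
 * SCC_OFFSET 0x80, SCC_REG_START() places the register windows of the
 * four channels (dev_id 0..3) at 0x0100, 0x0180, 0x0200 and 0x0280.
 */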

#define GPDATA 0x0404
#define EncodingMask 0x00700000
#define CrcMask 0x00000003

#define IntRxScc0 0x10000000
#define IntTxScc0 0x01000000

#define TxPollCmd 0x00000400
#define RxActivate 0x08000000
#define MTFi 0x04000000
#define Rdr 0x00400000
#define Rdt 0x00200000
#define Idr 0x00100000
#define Idt 0x00080000
#define TxSccRes 0x01000000
#define RxSccRes 0x00010000
#define TxSizeMax 0x1fff
#define RxSizeMax 0x1ffc

#define Ccr0ClockMask 0x0000003f
#define Ccr1LoopMask 0x00000200
#define IsrMask 0x000fffff
#define BrrExpMask 0x00000f00
#define BrrMultMask 0x0000003f
#define EncodingMask 0x00700000
#define Hold cpu_to_le32(0x40000000)
#define SccBusy 0x10000000
#define PowerUp 0x80000000
#define Vis 0x00001000
#define FrameOk (FrameVfr | FrameCrc)
#define FrameVfr 0x80
#define FrameRdo 0x40
#define FrameCrc 0x20
#define FrameRab 0x10
#define FrameAborted cpu_to_le32(0x00000200)
#define FrameEnd cpu_to_le32(0x80000000)
#define DataComplete cpu_to_le32(0x40000000)
#define LengthCheck 0x00008000
#define SccEvt 0x02000000
#define NoAck 0x00000200
#define Action 0x00000001
#define HiDesc cpu_to_le32(0x20000000)

/* SCC events */
#define RxEvt 0xf0000000
#define TxEvt 0x0f000000
#define Alls 0x00040000
#define Xdu 0x00010000
#define Cts 0x00004000
#define Xmr 0x00002000
#define Xpr 0x00001000
#define Rdo 0x00000080
#define Rfs 0x00000040
#define Cd 0x00000004
#define Rfo 0x00000002
#define Flex 0x00000001

/* DMA core events */
#define Cfg 0x00200000
#define Hi 0x00040000
#define Fi 0x00020000
#define Err 0x00010000
#define Arf 0x00000002
#define ArAck 0x00000001

/* State flags */
#define Ready 0x00000000
#define NeedIDR 0x00000001
#define NeedIDT 0x00000002
#define RdoSet 0x00000004
#define FakeReset 0x00000008
#ifdef DSCC4_POLLING
#define EventsMask 0xfffeef7f
#else
#define EventsMask 0xfffa8f7a
#endif
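
/*
 * Note added for illustration (not an original comment): the two variants
 * differ only in the Alls, Cts, Xmr, Cd and Flex bits, i.e.
 * 0xfffeef7f ^ 0xfffa8f7a == 0x00046005.
 */
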
static int dscc4_init_ring(struct net_device *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
        return dev_to_hdlc(dev)->priv;
        state = dpriv->scc_regs[offset >> 2];

        return dpriv->scc_regs[offset >> 2];
static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)

static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
                                              struct net_device *dev)
        if (state & 0x0df80c00) {
                        dev->name, msg, state);
static void dscc4_tx_print(struct net_device *dev,
                           struct dscc4_dev_priv *dpriv,
                           char *msg)

        dev_kfree_skb(*skbuff);

        dev_kfree_skb(*skbuff);

        skb = dev_alloc_skb(len);

                skb->protocol = hdlc_type_trans(skb, dev);
        if (!(scc_readl_star(dpriv, dev) & SccBusy)) {

        netdev_err(dev, "%s timeout\n", msg);

        return (i >= 0) ? i : -EAGAIN;
static int dscc4_do_action(struct net_device *dev, char *msg)
{
        void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
        } else if (state & Arf) {
                netdev_err(dev, "%s failed\n", msg);

        netdev_err(dev, "%s timeout\n", msg);

        return (i >= 0) ? i : -EAGAIN;
        spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);

        scc_writel(0x00050000, dpriv, dev, CCR2);

        while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {

        if (dscc4_do_action(dev, "Rdt") < 0)
                netdev_err(dev, "Tx reset failed\n");
                dev->stats.rx_packets++;

                if (netif_running(dev))
                        skb->protocol = hdlc_type_trans(skb, dev);

                        dev->stats.rx_fifo_errors++;

                        dev->stats.rx_crc_errors++;

                        dev->stats.rx_length_errors++;
                dev->stats.rx_errors++;

        if (try_get_rx_skb(dpriv, dev) < 0)

        dscc4_rx_update(dpriv, dev);
        rx_fd->state2 = 0x00000000;
static void dscc4_free1(struct pci_dev *pdev)

        ppriv = pci_get_drvdata(pdev);

        pci_set_drvdata(pdev, NULL);
                pr_err("can't reserve MMIO region (regs)\n");

                pr_err("can't reserve MMIO region (lbi)\n");
                goto err_free_mmio_region_1;

                pr_err("cannot remap MMIO region %llx @ %llx\n",

                goto err_free_mmio_regions_2;

        printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",

        rc = dscc4_found1(pdev, ioaddr);

        priv = pci_get_drvdata(pdev);
                dpriv = priv->root + i;

                        goto err_free_iqtx_6;

                dpriv = priv->root + i;

                        goto err_free_iqrx_7;

                dpriv = priv->root + i;

                dpriv = priv->root + i;

err_free_mmio_regions_2:

err_free_mmio_region_1:
        scc_writel(0x00000000, dpriv, dev, CCR0);

        scc_writel(0x02408000, dpriv, dev, CCR1);

static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
        .ndo_open       = dscc4_open,
        .ndo_stop       = dscc4_close,

        .ndo_do_ioctl   = dscc4_ioctl,
        .ndo_tx_timeout = dscc4_tx_timeout,
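
/*
 * Background note (general kernel behaviour, not from the original
 * source): .ndo_do_ioctl is the hook the core calls for device ioctls
 * such as the SIOCWANDEV/sethdlc requests handled by dscc4_ioctl(), and
 * .ndo_tx_timeout is invoked by the Tx watchdog once the queue has been
 * stalled longer than the watchdog timeout (cf. TX_TIMEOUT above).
 */
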
static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)

        root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);

                hdlc->xmit = dscc4_start_xmit;
                hdlc->attach = dscc4_hdlc_attach;

                dscc4_init_registers(dpriv, d);

                ret = dscc4_init_ring(d);

                        pr_err("unable to register\n");
                        dscc4_release_ring(dpriv);

        ret = dscc4_set_quartz(root, quartz);

        pci_set_drvdata(pdev, ppriv);

                dscc4_release_ring(root + i);
static void dscc4_timer(unsigned long data)

static void dscc4_tx_timeout(struct net_device *dev)

        struct net_device *dev = dscc4_to_dev(dpriv);

                netdev_info(dev, "loopback requires clock\n");
#ifdef CONFIG_DSCC4_PCI_RST

        /* Save the first 16 dwords (64 bytes) of PCI config space */
        for (i = 0; i < 16; i++)
                pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

        /* Restore the saved PCI config space */
        for (i = 0; i < 16; i++)
                pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);

#define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
static int dscc4_open(struct net_device *dev)

        if (dscc4_loopback_check(dpriv) < 0)

        scc_patchl(0, 0x00050000, dpriv, dev, CCR2);

        netdev_info(dev, "up again\n");

        if (scc_readl_star(dpriv, dev) & SccBusy) {
                netdev_err(dev, "busy - try later\n");

                netdev_info(dev, "available - good\n");

        if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
                goto err_disable_scc_events;

        if ((ret = dscc4_xpr_ack(dpriv)) < 0) {

                goto err_disable_scc_events;

                dscc4_tx_print(dev, dpriv, "Open");

        netif_start_queue(dev);

        dpriv->timer.function = dscc4_timer;

err_disable_scc_events:
        scc_writel(0xffffffff, dpriv, dev, IMR);
#ifdef DSCC4_POLLING

#ifdef DSCC4_POLLING
        spin_lock(&dpriv->lock);
        while (dscc4_tx_poll(dpriv, dev));
        spin_unlock(&dpriv->lock);

        dscc4_tx_print(dev, dpriv, "Xmit");

        netif_stop_queue(dev);

        if (dscc4_tx_quiescent(dpriv, dev))
                dscc4_do_tx(dpriv, dev);
static int dscc4_close(struct net_device *dev)

        netif_stop_queue(dev);

        scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
        scc_writel(0xffffffff, dpriv, dev, IMR);

static inline int dscc4_check_clock_ability(int port)
#ifdef CONFIG_DSCC4_PCISYNC

                u32 n = 0, m = 0, divider;

                if (dscc4_check_clock_ability(dpriv->dev_id) < 0)

                divider = xtal / *bps;

                        *state |= 0x00000036;

                        *state |= 0x00000037;
                if (divider >> 22) {

                } else if (divider) {

                        while (0xffffffc0 & divider) {

                if (!(*state & 0x00000001))

                *bps = xtal / divider;

        scc_writel(brr, dpriv, dev, BRR);
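
/*
 * Rough worked example (added for illustration, not from the original
 * comments): BRR takes a 6-bit mantissa (BrrMultMask) and a 4-bit
 * exponent (BrrExpMask), so the raw divider xtal / *bps is shifted right
 * until it fits in six bits, the shift count becoming the exponent.
 * Assuming a 33 MHz quartz and *bps = 64000, the raw divider 515 reduces
 * to mantissa 32 / exponent 4, i.e. an effective divider of 32 << 4 = 512,
 * and *bps is recomputed from that effective divider (~64.45 kbit/s).
 */
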
        switch (ifr->ifr_settings.type) {

                if (ifr->ifr_settings.size < size) {
                        ifr->ifr_settings.size = size;

                netdev_info(dev, "please reset the device before this command\n");

                ret = dscc4_set_iface(dpriv, dev);
static int dscc4_match(const struct thingie *p, int value)

        for (i = 0; p[i].define != -1; i++) {
                if (value == p[i].define)

        if (p[i].define == -1)
        state = scc_readl(dpriv, CCR0);
        if (dscc4_set_clock(dev, &bps, &state) < 0)

        scc_writel(state, dpriv, dev, CCR0);
static const struct thingie encoding[] = {

        i = dscc4_match(encoding, dpriv->encoding);

        state = scc_readl(dpriv, CCR1);

                state |= 0x00000100;

                state &= ~0x00000100;

        scc_writel(state, dpriv, dev, CCR1);

        i = dscc4_match(crc, dpriv->parity);

        scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
        } *p, do_setting[] = {
                { dscc4_encoding_setting },
                { dscc4_clock_setting },
                { dscc4_loopback_setting },
                { dscc4_crc_setting },

        for (p = do_setting; p->action; p++) {
                if ((ret = p->action(dpriv, dev)) < 0)
        unsigned long flags;

        dev = dscc4_to_dev(root);

                netdev_err(dev, "failure (Arf). Harass the maintainer\n");
                        netdev_err(dev, "CFG failed\n");
                if (!(state &= ~Cfg))

        if (state & RxEvt) {
                i = dev_per_card - 1;

                        dscc4_rx_irq(priv, root + i);

        if (state & TxEvt) {
                i = dev_per_card - 1;

                        dscc4_tx_irq(priv, root + i);

        spin_unlock_irqrestore(&priv->lock, flags);
        struct net_device *dev = dscc4_to_dev(dpriv);

        if ((debug > 1) && (loop > 1))

        if (loop && netif_queue_stopped(dev))

                netif_wake_queue(dev);

        if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
            !dscc4_tx_done(dpriv))
                dscc4_do_tx(dpriv, dev);
        if (state_check(state, dpriv, dev, "Tx") < 0)

                        dscc4_tx_print(dev, dpriv, "Alls");

                        dev->stats.tx_packets++;

                        netdev_err(dev, "Tx: NULL skb %d\n",

                if (!(state &= ~Alls))

                netdev_err(dev, "Tx Data Underrun. Ask maintainer\n");

                netdev_info(dev, "CTS transition\n");
                if (!(state &= ~Cts))

                netdev_err(dev, "Tx ReTx. Ask maintainer\n");
                if (!(state &= ~Xmr))
                for (i = 1; i; i <<= 1) {
                        if (!(scc_readl_star(dpriv, dev) & SccBusy))

                netdev_info(dev, "busy in irq\n");

                        dscc4_tx_print(dev, dpriv, "Xpr");

                                sizeof(struct TxFD);

                        dscc4_do_tx(dpriv, dev);

                        if (dscc4_do_action(dev, "IDT") < 0)

                                sizeof(struct RxFD);

                        dscc4_rx_update(dpriv, dev);

                        if (dscc4_do_action(dev, "IDR") < 0)

                scc_writel(0x08050008, dpriv, dev, CCR2);

                if (!(state &= ~Xpr))

                netdev_info(dev, "CD transition\n");
                if (!(state &= ~Cd))

#ifdef DSCC4_POLLING
                while (!dscc4_tx_poll(dpriv, dev));

                netdev_info(dev, "Tx Hi\n");

                netdev_info(dev, "Tx ERR\n");
                dev->stats.tx_errors++;
        struct net_device *dev = dscc4_to_dev(dpriv);

        if (state_check(state, dpriv, dev, "Rx") < 0)

        if (!(state & SccEvt)) {

                state &= 0x00ffffff;

                if (!(cur = cur % RX_RING_SIZE))
                        rx_fd = dpriv->rx_fd;

                try_get_rx_skb(dpriv, dev);

                rx_fd->state2 = 0x00000000;

                dscc4_rx_skb(dpriv, dev);

                netdev_info(dev, "Rx Hi\n");
        const char *irq_name;

        { 0x00008000, "TIN" },
        { 0x00000020, "RSC" },
        { 0x00000010, "PCE" },
        { 0x00000008, "PLLA" },

                if (state & evt->mask) {

                        if (!(state &= ~evt->mask))

                if (!(state &= ~0x0000c03c))

                netdev_info(dev, "CTS transition\n");
                if (!(state &= ~Cts))
                        dev->stats.rx_over_errors++;

                rx_fd->state2 = 0x00000000;

                dscc4_rx_skb(dpriv, dev);

                        "%s: no RDO in Rx data\n", DRV_NAME);

#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY

#warning "FIXME: CH0BRDA"

                if (dscc4_do_action(dev, "RDR") < 0) {
                        netdev_err(dev, "RDO recovery failed (RDR)\n");

                if (dscc4_do_action(dev, "IDR") < 0) {
                        netdev_err(dev, "RDO recovery failed (IDR)\n");

                netdev_info(dev, "CD transition\n");
                if (!(state &= ~Cd))

                if (!(state &= ~Flex))
        struct TxFD *tx_fd = dpriv->tx_fd + last;

        skb_copy_to_linear_data(skb, version,

static int dscc4_init_ring(struct net_device *dev)

        dpriv->rx_fd = rx_fd = (struct RxFD *) ring;

                goto err_free_dma_rx;
        dpriv->tx_fd = tx_fd = (struct TxFD *) ring;

                        (++i%TX_RING_SIZE)*sizeof(*tx_fd));
        } while (i < TX_RING_SIZE);

        if (!dscc4_init_dummy_skb(dpriv))
                goto err_free_dma_tx;
                rx_fd->state2 = 0x00000000;

                if (try_get_rx_skb(dpriv, dev) >= 0)

                        (++i%RX_RING_SIZE)*sizeof(*rx_fd));
        } while (i < RX_RING_SIZE);
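
/*
 * Note added for clarity (an inference from the fragments above, not an
 * original comment): both rings are built the same way - each TxFD/RxFD
 * entry is linked to the DMA address of the following descriptor, and the
 * (++i % RING_SIZE) * sizeof(*fd) offset makes the final entry wrap back
 * to the first, giving the controller a circular descriptor list.
 */
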
        ppriv = pci_get_drvdata(pdev);

        dscc4_release_ring(dpriv);

static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
                             unsigned short parity)

static int __init dscc4_setup(char *str)

__setup("dscc4.setup=", dscc4_setup);

        .id_table = dscc4_pci_tbl,
        .probe    = dscc4_init_one,
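
/*
 * Not part of this excerpt: a struct pci_driver like this is normally
 * registered from the module init path with pci_register_driver() and
 * unregistered again with pci_unregister_driver().
 */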