Linux Kernel 3.7.1
z85230.c
/*
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *      (c) Copyright 2000, 2001 Red Hat Inc
 *
 *      Development of this driver was funded by Equiinet Ltd
 *                      http://www.equiinet.com
 *
 *      ChangeLog:
 *
 *      Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
 *      unification of all the Z85x30 asynchronous drivers for real.
 *
 *      DMA now uses get_free_page as kmalloc buffers may span a 64K
 *      boundary.
 *
 *      Modified for SMP safety and SMP locking by Alan Cox
 *                      <alan@lxorguk.ukuu.org.uk>
 *
 *      Performance
 *
 *      Z85230:
 *      Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
 *      X.25 is not unrealistic on all machines. DMA mode can in theory
 *      handle T1/E1 quite nicely. In practice the limit seems to be about
 *      512Kbit->1Mbit depending on motherboard.
 *
 *      Z85C30:
 *      64K will take DMA, 9600 baud X.25 should be ok.
 *
 *      Z8530:
 *      Synchronous mode without DMA is unlikely to pass about 2400 baud.
 */
38 
39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 
41 #include <linux/module.h>
42 #include <linux/kernel.h>
43 #include <linux/mm.h>
44 #include <linux/net.h>
45 #include <linux/skbuff.h>
46 #include <linux/netdevice.h>
47 #include <linux/if_arp.h>
48 #include <linux/delay.h>
49 #include <linux/hdlc.h>
50 #include <linux/ioport.h>
51 #include <linux/init.h>
52 #include <linux/gfp.h>
53 #include <asm/dma.h>
54 #include <asm/io.h>
55 #define RT_LOCK
56 #define RT_UNLOCK
57 #include <linux/spinlock.h>
58 
59 #include "z85230.h"
60 
61 
/*
 * Architecture specific port I/O. Some boards need a 5uS recovery delay
 * between accesses; that is flagged by Z8530_PORT_SLEEP in the port value.
 */
static inline int z8530_read_port(unsigned long p)
{
        u8 r=inb(Z8530_PORT_OF(p));
        if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
                udelay(5);
        return r;
}

static inline void z8530_write_port(unsigned long p, u8 d)
{
        outb(d,Z8530_PORT_OF(p));
        if(p&Z8530_PORT_SLEEP)
                udelay(5);
}



static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);


/*
 * Read a Z8530 register: select it via the control port first, except
 * for register 0 which can be read directly.
 */
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
        if(reg)
                z8530_write_port(c->ctrlio, reg);
        return z8530_read_port(c->ctrlio);
}

static inline u8 read_zsdata(struct z8530_channel *c)
{
        u8 r;
        r=z8530_read_port(c->dataio);
        return r;
}

static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
        if(reg)
                z8530_write_port(c->ctrlio, reg);
        z8530_write_port(c->ctrlio, val);
}

static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
        z8530_write_port(c->ctrlio, val);
}

static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
        z8530_write_port(c->dataio, val);
}

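/*
 * Editor's note (illustrative, not part of the driver): the port value
 * passed to the helpers above packs the raw I/O port in its low bits
 * (recovered by Z8530_PORT_OF()) and may carry the Z8530_PORT_SLEEP
 * flag, which requests the 5uS recovery delay for boards whose Z8530
 * cannot take back-to-back bus cycles. A board driver might set a
 * channel up roughly like this ("io_base" is an assumed example value):
 */
#if 0
        dev->chanA.ctrlio = io_base + 1;        /* fast board: no delay */
        dev->chanA.dataio = io_base + 3;
        dev->chanB.ctrlio = (io_base + 0) | Z8530_PORT_SLEEP;   /* slow chip */
        dev->chanB.dataio = (io_base + 2) | Z8530_PORT_SLEEP;
#endif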
/*
 *      Register loading parameters for a dead port
 */

u8 z8530_dead_port[]=
{
        255
};

EXPORT_SYMBOL(z8530_dead_port);

/*
 *      Register loading parameters for currently supported circuit types
 */


/*
 *      Data clocked by telco end. This is the correct data for the UK
 *      "kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
        4,      SYNC_ENAB|SDLC|X1CLK,
        2,      0,      /* No vector */
        1,      0,
        3,      ENT_HM|RxCRC_ENAB|Rx8,
        5,      TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
        9,      0,      /* Disable interrupts */
        6,      0xFF,
        7,      FLAG,
        10,     ABUNDER|NRZ|CRCPS,      /* MARKIDLE ?? */
        11,     TCTRxCP,
        14,     DISDPLL,
        15,     DCDIE,  /* DCD interrupt only */
        1,      EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
        9,      NV|MIE|NORESET,
        255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);

/*
 *      As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
        4,      SYNC_ENAB|SDLC|X1CLK,
        2,      0,      /* No vector */
        1,      0,
        3,      ENT_HM|RxCRC_ENAB|Rx8,
        5,      TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
        9,      0,      /* Disable interrupts */
        6,      0xFF,
        7,      FLAG,
        10,     ABUNDER|NRZ|CRCPS,      /* MARKIDLE?? */
        11,     TCTRxCP,
        14,     DISDPLL,
        15,     DCDIE,  /* DCD interrupt only */
        1,      EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
        9,      NV|MIE|NORESET,
        23,     3,      /* Extended mode AUTO TX and EOM */

        255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);

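/*
 * Editor's note: the tables above are flat (register, value) byte pairs
 * consumed by z8530_channel_load() further down; an index above 0x0F
 * selects the enhanced-register bank via the low bit of R15, and a
 * single 255 terminates the table. A minimal custom table would look
 * like this (illustrative only, not used by this driver):
 */
#if 0
        static u8 my_quiet_port[] = {
                9, 0,           /* R9: all chip interrupts off */
                255             /* end of table */
        };
#endif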
/*
 * Drain the receive FIFO: four reads for the 8530/85C30, eight for the
 * deeper FIFO on the 85230.
 */
static void z8530_flush_fifo(struct z8530_channel *c)
{
        read_zsreg(c, R1);
        read_zsreg(c, R1);
        read_zsreg(c, R1);
        read_zsreg(c, R1);
        if(c->dev->type==Z85230)
        {
                read_zsreg(c, R1);
                read_zsreg(c, R1);
                read_zsreg(c, R1);
                read_zsreg(c, R1);
        }
}

/*
 * Raise or drop RTS and DTR on the channel, tracking the change in the
 * register shadow.
 */
static void z8530_rtsdtr(struct z8530_channel *c, int set)
{
        if (set)
                c->regs[5] |= (RTS | DTR);
        else
                c->regs[5] &= ~(RTS | DTR);
        write_zsreg(c, R5, c->regs[5]);
}

/*
 * PIO receive interrupt handler: drain the FIFO into the current buffer
 * and handle end-of-frame and error conditions.
 */
static void z8530_rx(struct z8530_channel *c)
{
        u8 ch,stat;

        while(1)
        {
                /* FIFO empty ? */
                if(!(read_zsreg(c, R0)&1))
                        break;
                ch=read_zsdata(c);
                stat=read_zsreg(c, R1);

                /*
                 * Overrun ?
                 */
                if(c->count < c->max)
                {
                        *c->dptr++=ch;
                        c->count++;
                }

                if(stat&END_FR)
                {
                        /*
                         * Error ?
                         */
                        if(stat&(Rx_OVR|CRC_ERR))
                        {
                                /* Rewind the buffer and return */
                                if(c->skb)
                                        c->dptr=c->skb->data;
                                c->count=0;
                                if(stat&Rx_OVR)
                                {
                                        pr_warn("%s: overrun\n", c->dev->name);
                                        c->rx_overrun++;
                                }
                                if(stat&CRC_ERR)
                                {
                                        c->rx_crc_err++;
                                        /* printk("crc error\n"); */
                                }
                                /* Shove the frame upstream */
                        }
                        else
                        {
                                /*
                                 * Drop the lock for RX processing, or
                                 * there are deadlocks
                                 */
                                z8530_rx_done(c);
                                write_zsctrl(c, RES_Rx_CRC);
                        }
                }
        }
        /*
         * Clear irq
         */
        write_zsctrl(c, ERR_RES);
        write_zsctrl(c, RES_H_IUS);
}


/*
 * PIO transmit interrupt handler: keep the transmit FIFO fed, arranging
 * a deliberate underrun at end of frame so the chip appends the CRC.
 */
static void z8530_tx(struct z8530_channel *c)
{
        while(c->txcount) {
                /* FIFO full ? */
                if(!(read_zsreg(c, R0)&4))
                        return;
                c->txcount--;
                /*
                 * Shovel out the byte
                 */
                write_zsreg(c, R8, *c->tx_ptr++);
                write_zsctrl(c, RES_H_IUS);
                /* We are about to underflow */
                if(c->txcount==0)
                {
                        write_zsctrl(c, RES_EOM_L);
                        write_zsreg(c, R10, c->regs[10]&~ABUNDER);
                }
        }


        /*
         * End of frame TX - fire another one
         */

        write_zsctrl(c, RES_Tx_P);

        z8530_tx_done(c);
        write_zsctrl(c, RES_H_IUS);
}

/*
 * External/status interrupt: handle transmit underruns and track DCD
 * transitions, propagating carrier state to the network layer.
 */
static void z8530_status(struct z8530_channel *chan)
{
        u8 status, altered;

        status = read_zsreg(chan, R0);
        altered = chan->status ^ status;

        chan->status = status;

        if (status & TxEOM) {
                /* printk("%s: Tx underrun.\n", chan->dev->name); */
                chan->netdevice->stats.tx_fifo_errors++;
                write_zsctrl(chan, ERR_RES);
                z8530_tx_done(chan);
        }

        if (altered & chan->dcdcheck)
        {
                if (status & chan->dcdcheck) {
                        pr_info("%s: DCD raised\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
                        if (chan->netdevice)
                                netif_carrier_on(chan->netdevice);
                } else {
                        pr_info("%s: DCD lost\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
                        z8530_flush_fifo(chan);
                        if (chan->netdevice)
                                netif_carrier_off(chan->netdevice);
                }

        }
        write_zsctrl(chan, RES_EXT_INT);
        write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_sync =
{
        z8530_rx,
        z8530_tx,
        z8530_status
};

EXPORT_SYMBOL(z8530_sync);

/*
 * Receive interrupt while in DMA mode: only special conditions reach us
 * here; the data itself moves by DMA.
 */
static void z8530_dma_rx(struct z8530_channel *chan)
{
        if(chan->rxdma_on)
        {
                /* Special condition check only */
                u8 status;

                read_zsreg(chan, R7);
                read_zsreg(chan, R6);

                status=read_zsreg(chan, R1);

                if(status&END_FR)
                {
                        z8530_rx_done(chan);    /* Fire up the next one */
                }
                write_zsctrl(chan, ERR_RES);
                write_zsctrl(chan, RES_H_IUS);
        }
        else
        {
                /* DMA is off right now, drain the slow way */
                z8530_rx(chan);
        }
}

/*
 * Transmit interrupt while in DMA mode: should not normally happen, so
 * complain and fall back to PIO.
 */
static void z8530_dma_tx(struct z8530_channel *chan)
{
        if(!chan->dma_tx)
        {
                pr_warn("Hey who turned the DMA off?\n");
                z8530_tx(chan);
                return;
        }
        /* This shouldn't occur in DMA mode */
        pr_err("DMA tx - bogus event!\n");
        z8530_tx(chan);
}

/*
 * External/status interrupt in DMA mode: complete a finished DMA
 * transmit and track DCD transitions as in the PIO case.
 */
static void z8530_dma_status(struct z8530_channel *chan)
{
        u8 status, altered;

        status=read_zsreg(chan, R0);
        altered=chan->status^status;

        chan->status=status;


        if(chan->dma_tx)
        {
                if(status&TxEOM)
                {
                        unsigned long flags;

                        flags=claim_dma_lock();
                        disable_dma(chan->txdma);
                        clear_dma_ff(chan->txdma);
                        chan->txdma_on=0;
                        release_dma_lock(flags);
                        z8530_tx_done(chan);
                }
        }

        if (altered & chan->dcdcheck)
        {
                if (status & chan->dcdcheck) {
                        pr_info("%s: DCD raised\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
                        if (chan->netdevice)
                                netif_carrier_on(chan->netdevice);
                } else {
                        pr_info("%s: DCD lost\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
                        z8530_flush_fifo(chan);
                        if (chan->netdevice)
                                netif_carrier_off(chan->netdevice);
                }
        }

        write_zsctrl(chan, RES_EXT_INT);
        write_zsctrl(chan, RES_H_IUS);
}

static struct z8530_irqhandler z8530_dma_sync = {
        z8530_dma_rx,
        z8530_dma_tx,
        z8530_dma_status
};

static struct z8530_irqhandler z8530_txdma_sync = {
        z8530_rx,
        z8530_dma_tx,
        z8530_dma_status
};

static void z8530_rx_clear(struct z8530_channel *c)
{
        /*
         * Data and status bytes
         */
        u8 stat;

        read_zsdata(c);
        stat=read_zsreg(c, R1);

        if(stat&END_FR)
                write_zsctrl(c, RES_Rx_CRC);
        /*
         * Clear irq
         */
        write_zsctrl(c, ERR_RES);
        write_zsctrl(c, RES_H_IUS);
}

static void z8530_tx_clear(struct z8530_channel *c)
{
        write_zsctrl(c, RES_Tx_P);
        write_zsctrl(c, RES_H_IUS);
}

static void z8530_status_clear(struct z8530_channel *chan)
{
        u8 status=read_zsreg(chan, R0);
        if(status&TxEOM)
                write_zsctrl(chan, ERR_RES);
        write_zsctrl(chan, RES_EXT_INT);
        write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_nop =
{
        z8530_rx_clear,
        z8530_tx_clear,
        z8530_status_clear
};


EXPORT_SYMBOL(z8530_nop);

/*
 * Chip-level interrupt dispatcher. The pending bits are read from
 * channel A (they cover the whole chip) and each event is handed to the
 * owning channel's current handler set.
 */
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
        struct z8530_dev *dev=dev_id;
        u8 intr;
        static volatile int locker=0;
        int work=0;
        struct z8530_irqhandler *irqs;

        if(locker)
        {
                pr_err("IRQ re-enter\n");
                return IRQ_NONE;
        }
        locker=1;

        spin_lock(&dev->lock);

        while(++work<5000)
        {
                intr = read_zsreg(&dev->chanA, R3);
                if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
                        break;

                /* This holds the IRQ status. On the 8530 you must read it
                   from chan A even though it applies to the whole chip */

                /* Now walk the chip and see what it is wanting - it may be
                   an IRQ for someone else remember */

                irqs=dev->chanA.irqs;

                if(intr & (CHARxIP|CHATxIP|CHAEXT))
                {
                        if(intr&CHARxIP)
                                irqs->rx(&dev->chanA);
                        if(intr&CHATxIP)
                                irqs->tx(&dev->chanA);
                        if(intr&CHAEXT)
                                irqs->status(&dev->chanA);
                }

                irqs=dev->chanB.irqs;

                if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
                {
                        if(intr&CHBRxIP)
                                irqs->rx(&dev->chanB);
                        if(intr&CHBTxIP)
                                irqs->tx(&dev->chanB);
                        if(intr&CHBEXT)
                                irqs->status(&dev->chanB);
                }
        }
        spin_unlock(&dev->lock);
        if(work==5000)
                pr_err("%s: interrupt jammed - abort(0x%X)!\n",
                       dev->name, intr);
        /* Ok all done */
        locker=0;
        return IRQ_HANDLED;
}

EXPORT_SYMBOL(z8530_interrupt);

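/*
 * Editor's sketch (not from this file): z8530_interrupt() is what a
 * card driver hands straight to request_irq(), with its struct
 * z8530_dev as the cookie; the in-tree users such as hostess_sv11.c
 * follow this pattern. "irq" and "dev" are the caller's values.
 */
#if 0
        if (request_irq(irq, z8530_interrupt, IRQF_SHARED,
                        "z85230", dev)) {
                pr_err("IRQ %d already in use\n", irq);
                return -EBUSY;
        }
        dev->irq = irq;
#endif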
static const u8 reg_init[16]=
{
        0,0,0,0,
        0,0,0,0,
        0,0,0,0,
        0x55,0,0,0
};


/*
 * Open a channel for PIO (interrupt driven) synchronous operation: set
 * up the receive double buffer, install the PIO handler set and enable
 * the receiver.
 */
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
        unsigned long flags;

        spin_lock_irqsave(c->lock, flags);

        c->sync = 1;
        c->mtu = dev->mtu+64;
        c->count = 0;
        c->skb = NULL;
        c->skb2 = NULL;
        c->irqs = &z8530_sync;

        /* This loads the double buffer up */
        z8530_rx_done(c);       /* Load the frame ring */
        z8530_rx_done(c);       /* Load the backup frame */
        z8530_rtsdtr(c,1);
        c->dma_tx = 0;
        c->regs[R1]|=TxINT_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        write_zsreg(c, R3, c->regs[R3]|RxENABLE);

        spin_unlock_irqrestore(c->lock, flags);
        return 0;
}


EXPORT_SYMBOL(z8530_sync_open);

/*
 * Close a PIO channel: install the NOP handlers and drop RTS/DTR.
 */
int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
        u8 chk;
        unsigned long flags;

        spin_lock_irqsave(c->lock, flags);
        c->irqs = &z8530_nop;
        c->max = 0;
        c->sync = 0;

        chk=read_zsreg(c,R0);
        write_zsreg(c, R3, c->regs[R3]);
        z8530_rtsdtr(c,0);

        spin_unlock_irqrestore(c->lock, flags);
        return 0;
}

EXPORT_SYMBOL(z8530_sync_close);

/*
 * Open a channel for full DMA operation: both transmit and receive move
 * over the ISA DMA channels.
 */
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
        unsigned long cflags, dflags;

        c->sync = 1;
        c->mtu = dev->mtu+64;
        c->count = 0;
        c->skb = NULL;
        c->skb2 = NULL;
        /*
         * Load the DMA interfaces up
         */
        c->rxdma_on = 0;
        c->txdma_on = 0;

        /*
         * Allocate the DMA flip buffers. Limit by page size.
         * Everyone runs 1500 mtu or less on wan links so this
         * should be fine.
         */

        if(c->mtu > PAGE_SIZE/2)
                return -EMSGSIZE;

        c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if(c->rx_buf[0]==NULL)
                return -ENOBUFS;
        c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

        c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if(c->tx_dma_buf[0]==NULL)
        {
                free_page((unsigned long)c->rx_buf[0]);
                c->rx_buf[0]=NULL;
                return -ENOBUFS;
        }
        c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

        c->tx_dma_used=0;
        c->dma_tx = 1;
        c->dma_num=0;
        c->dma_ready=1;

        /*
         * Enable DMA control mode
         */

        spin_lock_irqsave(c->lock, cflags);

        /*
         * TX DMA via DIR/REQ
         */

        c->regs[R14]|= DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);

        c->regs[R1]&= ~TxINT_ENAB;
        write_zsreg(c, R1, c->regs[R1]);

        /*
         * RX DMA via W/Req
         */

        c->regs[R1]|= WT_FN_RDYFN;
        c->regs[R1]|= WT_RDY_RT;
        c->regs[R1]|= INT_ERR_Rx;
        c->regs[R1]&= ~TxINT_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R1]|= WT_RDY_ENAB;
        write_zsreg(c, R1, c->regs[R1]);

        /*
         * DMA interrupts
         */

        /*
         * Set up the DMA configuration
         */

        dflags=claim_dma_lock();

        disable_dma(c->rxdma);
        clear_dma_ff(c->rxdma);
        set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
        set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
        set_dma_count(c->rxdma, c->mtu);
        enable_dma(c->rxdma);

        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);
        set_dma_mode(c->txdma, DMA_MODE_WRITE);
        disable_dma(c->txdma);

        release_dma_lock(dflags);

        /*
         * Select the DMA interrupt handlers
         */

        c->rxdma_on = 1;
        c->txdma_on = 1;
        c->tx_dma_used = 1;

        c->irqs = &z8530_dma_sync;
        z8530_rtsdtr(c,1);
        write_zsreg(c, R3, c->regs[R3]|RxENABLE);

        spin_unlock_irqrestore(c->lock, cflags);

        return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);

/*
 * Close a full-DMA channel: shut both PC DMA channels down, leave DMA
 * control mode and free the flip buffers.
 */
int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
        u8 chk;
        unsigned long flags;

        c->irqs = &z8530_nop;
        c->max = 0;
        c->sync = 0;

        /*
         * Disable the PC DMA channels
         */

        flags=claim_dma_lock();
        disable_dma(c->rxdma);
        clear_dma_ff(c->rxdma);

        c->rxdma_on = 0;

        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);
        release_dma_lock(flags);

        c->txdma_on = 0;
        c->tx_dma_used = 0;

        spin_lock_irqsave(c->lock, flags);

        /*
         * Disable DMA control mode
         */

        c->regs[R1]&= ~WT_RDY_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
        c->regs[R1]|= INT_ALL_Rx;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R14]&= ~DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);

        if(c->rx_buf[0])
        {
                free_page((unsigned long)c->rx_buf[0]);
                c->rx_buf[0]=NULL;
        }
        if(c->tx_dma_buf[0])
        {
                free_page((unsigned long)c->tx_dma_buf[0]);
                c->tx_dma_buf[0]=NULL;
        }
        chk=read_zsreg(c,R0);
        write_zsreg(c, R3, c->regs[R3]);
        z8530_rtsdtr(c,0);

        spin_unlock_irqrestore(c->lock, flags);

        return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_close);

/*
 * Open a channel with DMA transmit and PIO receive.
 */
int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
{
        unsigned long cflags, dflags;

        printk("Opening sync interface for TX-DMA\n");
        c->sync = 1;
        c->mtu = dev->mtu+64;
        c->count = 0;
        c->skb = NULL;
        c->skb2 = NULL;

        /*
         * Allocate the DMA flip buffers. Limit by page size.
         * Everyone runs 1500 mtu or less on wan links so this
         * should be fine.
         */

        if(c->mtu > PAGE_SIZE/2)
                return -EMSGSIZE;

        c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if(c->tx_dma_buf[0]==NULL)
                return -ENOBUFS;

        c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;


        spin_lock_irqsave(c->lock, cflags);

        /*
         * Load the PIO receive ring
         */

        z8530_rx_done(c);
        z8530_rx_done(c);

        /*
         * Load the DMA interfaces up
         */

        c->rxdma_on = 0;
        c->txdma_on = 0;

        c->tx_dma_used=0;
        c->dma_num=0;
        c->dma_ready=1;
        c->dma_tx = 1;

        /*
         * Enable DMA control mode
         */

        /*
         * TX DMA via DIR/REQ
         */
        c->regs[R14]|= DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);

        c->regs[R1]&= ~TxINT_ENAB;
        write_zsreg(c, R1, c->regs[R1]);

        /*
         * Set up the DMA configuration
         */

        dflags = claim_dma_lock();

        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);
        set_dma_mode(c->txdma, DMA_MODE_WRITE);
        disable_dma(c->txdma);

        release_dma_lock(dflags);

        /*
         * Select the DMA interrupt handlers
         */

        c->rxdma_on = 0;
        c->txdma_on = 1;
        c->tx_dma_used = 1;

        c->irqs = &z8530_txdma_sync;
        z8530_rtsdtr(c,1);
        write_zsreg(c, R3, c->regs[R3]|RxENABLE);
        spin_unlock_irqrestore(c->lock, cflags);

        return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_open);

/*
 * Close a TX-DMA channel: shut the transmit DMA channel down, leave DMA
 * control mode and free the transmit flip buffer.
 */
int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
{
        unsigned long dflags, cflags;
        u8 chk;


        spin_lock_irqsave(c->lock, cflags);

        c->irqs = &z8530_nop;
        c->max = 0;
        c->sync = 0;

        /*
         * Disable the PC DMA channels
         */

        dflags = claim_dma_lock();

        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);
        c->txdma_on = 0;
        c->tx_dma_used = 0;

        release_dma_lock(dflags);

        /*
         * Disable DMA control mode
         */

        c->regs[R1]&= ~WT_RDY_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
        c->regs[R1]|= INT_ALL_Rx;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R14]&= ~DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);

        if(c->tx_dma_buf[0])
        {
                free_page((unsigned long)c->tx_dma_buf[0]);
                c->tx_dma_buf[0]=NULL;
        }
        chk=read_zsreg(c,R0);
        write_zsreg(c, R3, c->regs[R3]);
        z8530_rtsdtr(c,0);

        spin_unlock_irqrestore(c->lock, cflags);
        return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_close);


/*
 *      Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
 *      it exists...
 */

static const char *z8530_type_name[]={
        "Z8530",
        "Z85C30",
        "Z85230"
};

/*
 * Report the chip a board driver found, in standard Linux device style.
 */
void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
        pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
                dev->name,
                z8530_type_name[dev->type],
                mapping,
                Z8530_PORT_OF(io),
                dev->irq);
}

EXPORT_SYMBOL(z8530_describe);

/*
 *      Locked operation part of the z8530 init code
 */

static inline int do_z8530_init(struct z8530_dev *dev)
{
        /* NOP the interrupt handlers first - we might get a
           floating IRQ transition when we reset the chip */
        dev->chanA.irqs=&z8530_nop;
        dev->chanB.irqs=&z8530_nop;
        dev->chanA.dcdcheck=DCD;
        dev->chanB.dcdcheck=DCD;

        /* Reset the chip */
        write_zsreg(&dev->chanA, R9, 0xC0);
        udelay(200);
        /* Now check it's valid */
        write_zsreg(&dev->chanA, R12, 0xAA);
        if(read_zsreg(&dev->chanA, R12)!=0xAA)
                return -ENODEV;
        write_zsreg(&dev->chanA, R12, 0x55);
        if(read_zsreg(&dev->chanA, R12)!=0x55)
                return -ENODEV;

        dev->type=Z8530;

        /*
         * See the application note.
         */

        write_zsreg(&dev->chanA, R15, 0x01);

        /*
         * If we can set the low bit of R15 then
         * the chip is enhanced.
         */

        if(read_zsreg(&dev->chanA, R15)==0x01)
        {
                /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
                /* Put a char in the fifo */
                write_zsreg(&dev->chanA, R8, 0);
                if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
                        dev->type = Z85230;     /* Has a FIFO */
                else
                        dev->type = Z85C30;     /* Z85C30, 1 byte FIFO */
        }

        /*
         * The code assumes R7' and friends are
         * off. Use write_zsext() for these and keep
         * this bit clear.
         */

        write_zsreg(&dev->chanA, R15, 0);

        /*
         * At this point it looks like the chip is behaving
         */

        memcpy(dev->chanA.regs, reg_init, 16);
        memcpy(dev->chanB.regs, reg_init, 16);

        return 0;
}

/*
 * Set up a Z8530 device: initialise the chip-level lock, then reset and
 * probe the chip under that lock.
 */
int z8530_init(struct z8530_dev *dev)
{
        unsigned long flags;
        int ret;

        /* Set up the chip level lock */
        spin_lock_init(&dev->lock);
        dev->chanA.lock = &dev->lock;
        dev->chanB.lock = &dev->lock;

        spin_lock_irqsave(&dev->lock, flags);
        ret = do_z8530_init(dev);
        spin_unlock_irqrestore(&dev->lock, flags);

        return ret;
}


EXPORT_SYMBOL(z8530_init);

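/*
 * Editor's sketch of a hypothetical probe path (names such as "board"
 * and "io_base" are illustrative): a card driver fills in the port
 * cookies, lets z8530_init() reset and identify the chip, then
 * announces what it found.
 */
#if 0
        board->chanA.ctrlio = io_base + 1;
        board->chanA.dataio = io_base + 3;
        board->chanA.dev = board;
        board->chanB.dev = board;

        if (z8530_init(board)) {
                pr_err("Z8530 family chip not responding\n");
                return -ENODEV;
        }
        z8530_describe(board, "I/O", io_base);
#endif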
/*
 * Shut the chip down: NOP the handlers and issue a hardware reset.
 */
int z8530_shutdown(struct z8530_dev *dev)
{
        unsigned long flags;
        /* Reset the chip */

        spin_lock_irqsave(&dev->lock, flags);
        dev->chanA.irqs=&z8530_nop;
        dev->chanB.irqs=&z8530_nop;
        write_zsreg(&dev->chanA, R9, 0xC0);
        /* We must lock the udelay, the chip is offlimits here */
        udelay(100);
        spin_unlock_irqrestore(&dev->lock, flags);
        return 0;
}

EXPORT_SYMBOL(z8530_shutdown);

/*
 * Load a (register, value) table of the kind defined at the top of this
 * file into a channel, then reset the channel state and enable the
 * receiver.
 */
int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
        unsigned long flags;

        spin_lock_irqsave(c->lock, flags);

        while(*rtable!=255)
        {
                int reg=*rtable++;
                if(reg>0x0F)
                        write_zsreg(c, R15, c->regs[15]|1);
                write_zsreg(c, reg&0x0F, *rtable);
                if(reg>0x0F)
                        write_zsreg(c, R15, c->regs[15]&~1);
                c->regs[reg]=*rtable++;
        }
        c->rx_function=z8530_null_rx;
        c->skb=NULL;
        c->tx_skb=NULL;
        c->tx_next_skb=NULL;
        c->mtu=1500;
        c->max=0;
        c->count=0;
        c->status=read_zsreg(c, R0);
        c->sync=1;
        write_zsreg(c, R3, c->regs[R3]|RxENABLE);

        spin_unlock_irqrestore(c->lock, flags);
        return 0;
}

EXPORT_SYMBOL(z8530_channel_load);

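/*
 * Editor's sketch: after a successful z8530_init() a board driver loads
 * one of the tables defined near the top of this file into each channel,
 * picking the enhanced table when the probe detected a Z85230, and
 * parking an unused channel with z8530_dead_port ("board" is
 * illustrative):
 */
#if 0
        if (board->type == Z85230)
                z8530_channel_load(&board->chanA, z8530_hdlc_kilostream_85230);
        else
                z8530_channel_load(&board->chanA, z8530_hdlc_kilostream);
        z8530_channel_load(&board->chanB, z8530_dead_port);
#endif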
/*
 * Promote the queued next frame to the live transmit slot and start it,
 * by DMA or by priming the FIFO for PIO.
 */
static void z8530_tx_begin(struct z8530_channel *c)
{
        unsigned long flags;
        if(c->tx_skb)
                return;

        c->tx_skb=c->tx_next_skb;
        c->tx_next_skb=NULL;
        c->tx_ptr=c->tx_next_ptr;

        if(c->tx_skb==NULL)
        {
                /* Idle on */
                if(c->dma_tx)
                {
                        flags=claim_dma_lock();
                        disable_dma(c->txdma);
                        /*
                         * Check if we crapped out.
                         */
                        if (get_dma_residue(c->txdma))
                        {
                                c->netdevice->stats.tx_dropped++;
                                c->netdevice->stats.tx_fifo_errors++;
                        }
                        release_dma_lock(flags);
                }
                c->txcount=0;
        }
        else
        {
                c->txcount=c->tx_skb->len;


                if(c->dma_tx)
                {
                        /*
                         * FIXME. DMA is broken for the original 8530,
                         * on the older parts we need to set a flag and
                         * wait for a further TX interrupt to fire this
                         * stage off
                         */

                        flags=claim_dma_lock();
                        disable_dma(c->txdma);

                        /*
                         * These two are needed by the 8530/85C30
                         * and must be issued when idling.
                         */

                        if(c->dev->type!=Z85230)
                        {
                                write_zsctrl(c, RES_Tx_CRC);
                                write_zsctrl(c, RES_EOM_L);
                        }
                        write_zsreg(c, R10, c->regs[10]&~ABUNDER);
                        clear_dma_ff(c->txdma);
                        set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
                        set_dma_count(c->txdma, c->txcount);
                        enable_dma(c->txdma);
                        release_dma_lock(flags);
                        write_zsctrl(c, RES_EOM_L);
                        write_zsreg(c, R5, c->regs[R5]|TxENAB);
                }
                else
                {
                        /* ABUNDER off */
                        write_zsreg(c, R10, c->regs[10]);
                        write_zsctrl(c, RES_Tx_CRC);

                        while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
                        {
                                write_zsreg(c, R8, *c->tx_ptr++);
                                c->txcount--;
                        }

                }
        }
        /*
         * Since we emptied tx_skb we can ask for more
         */
        netif_wake_queue(c->netdevice);
}

/*
 * A frame completed: account for it, free the buffer and kick off the
 * next transmit.
 */
static void z8530_tx_done(struct z8530_channel *c)
{
        struct sk_buff *skb;

        /* Actually this can happen.*/
        if (c->tx_skb == NULL)
                return;

        skb = c->tx_skb;
        c->tx_skb = NULL;
        z8530_tx_begin(c);
        c->netdevice->stats.tx_packets++;
        c->netdevice->stats.tx_bytes += skb->len;
        dev_kfree_skb_irq(skb);
}

/*
 * Default receive handler: discard the frame. Board drivers replace
 * this with their own rx_function.
 */
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);

static void z8530_rx_done(struct z8530_channel *c)
{
        struct sk_buff *skb;
        int ct;

        /*
         * Is our receive engine in DMA mode
         */

        if(c->rxdma_on)
        {
                /*
                 * Save the ready state and the buffer currently
                 * being used as the DMA target
                 */

                int ready=c->dma_ready;
                unsigned char *rxb=c->rx_buf[c->dma_num];
                unsigned long flags;

                /*
                 * Complete this DMA. Necessary to find the length
                 */

                flags=claim_dma_lock();

                disable_dma(c->rxdma);
                clear_dma_ff(c->rxdma);
                c->rxdma_on=0;
                ct=c->mtu-get_dma_residue(c->rxdma);
                if(ct<0)
                        ct=2;   /* Shit happens.. */
                c->dma_ready=0;

                /*
                 * Normal case: the other slot is free, start the next DMA
                 * into it immediately.
                 */

                if(ready)
                {
                        c->dma_num^=1;
                        set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
                        set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
                        set_dma_count(c->rxdma, c->mtu);
                        c->rxdma_on = 1;
                        enable_dma(c->rxdma);
                        /* Stop any frames that we missed the head of
                           from passing */
                        write_zsreg(c, R0, RES_Rx_CRC);
                }
                else
                        /* Can't occur as we dont reenable the DMA irq until
                           after the flip is done */
                        netdev_warn(c->netdevice, "DMA flip overrun!\n");

                release_dma_lock(flags);

                /*
                 * Shove the old buffer into an sk_buff. We can't DMA
                 * directly into one on a PC - it might be above the 16Mb
                 * boundary. Optimisation - we could check to see if we
                 * can avoid the copy. Optimisation 2 - make the memcpy
                 * a copychecksum.
                 */

                skb = dev_alloc_skb(ct);
                if (skb == NULL) {
                        c->netdevice->stats.rx_dropped++;
                        netdev_warn(c->netdevice, "Memory squeeze\n");
                } else {
                        skb_put(skb, ct);
                        skb_copy_to_linear_data(skb, rxb, ct);
                        c->netdevice->stats.rx_packets++;
                        c->netdevice->stats.rx_bytes += ct;
                }
                c->dma_ready = 1;
        } else {
                RT_LOCK;
                skb = c->skb;

                /*
                 * The game we play for non DMA is similar. We want to
                 * get the controller set up for the next packet as fast
                 * as possible. We potentially only have one byte + the
                 * fifo length for this. Thus we want to flip to the new
                 * buffer and then mess around copying and allocating
                 * things. For the current case it doesn't matter but
                 * if you build a system where the sync irq isn't blocked
                 * by the kernel IRQ disable then you need only block the
                 * sync IRQ for the RT_LOCK area.
                 *
                 */
                ct=c->count;

                c->skb = c->skb2;
                c->count = 0;
                c->max = c->mtu;
                if (c->skb) {
                        c->dptr = c->skb->data;
                        c->max = c->mtu;
                } else {
                        c->count = 0;
                        c->max = 0;
                }
                RT_UNLOCK;

                c->skb2 = dev_alloc_skb(c->mtu);
                if (c->skb2 == NULL)
                        netdev_warn(c->netdevice, "memory squeeze\n");
                else
                        skb_put(c->skb2, c->mtu);
                c->netdevice->stats.rx_packets++;
                c->netdevice->stats.rx_bytes += ct;
        }
        /*
         * If we received a frame we must now process it.
         */
        if (skb) {
                skb_trim(skb, ct);
                c->rx_function(c, skb);
        } else {
                c->netdevice->stats.rx_dropped++;
                netdev_err(c->netdevice, "Lost a frame\n");
        }
}

static inline int spans_boundary(struct sk_buff *skb)
{
        unsigned long a=(unsigned long)skb->data;
        a^=(a+skb->len);
        if(a&0x00010000)        /* If the 64K bit is different.. */
                return 1;
        return 0;
}

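/*
 * Editor's worked example of the test above: a buffer at 0x0000FF00 of
 * length 0x200 ends at 0x00010100, and 0x0000FF00 ^ 0x00010100 =
 * 0x0001FE00. Bit 16 (0x00010000) is set, so the buffer crosses a 64K
 * boundary, which the ISA DMA controller's 16-bit address counter plus
 * fixed page register cannot follow in a single transfer.
 */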
/*
 * Queue a frame for transmission, bouncing it through a flip buffer when
 * ISA DMA cannot reach it directly.
 */
netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
        unsigned long flags;

        netif_stop_queue(c->netdevice);
        if(c->tx_next_skb)
                return NETDEV_TX_BUSY;


        /* PC SPECIFIC - DMA limits */

        /*
         * If we will DMA the transmit and it's gone over the ISA bus
         * limit, then copy to the flip buffer
         */

        if(c->dma_tx &&
           ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 ||
            spans_boundary(skb)))
        {
                /*
                 * Send the flip buffer, and flip the flippy bit.
                 * We don't care which is used when just so long as
                 * we never use the same buffer twice in a row. Since
                 * only one buffer can be going out at a time the other
                 * has to be safe.
                 */
                c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
                c->tx_dma_used^=1;      /* Flip temp buffer */
                skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
        }
        else
                c->tx_next_ptr=skb->data;
        RT_LOCK;
        c->tx_next_skb=skb;
        RT_UNLOCK;

        spin_lock_irqsave(c->lock, flags);
        z8530_tx_begin(c);
        spin_unlock_irqrestore(c->lock, flags);

        return NETDEV_TX_OK;
}

EXPORT_SYMBOL(z8530_queue_xmit);

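/*
 * Editor's sketch: a board driver's ndo_start_xmit hook is normally a
 * thin wrapper that resolves its channel and defers here, as the
 * in-tree users do. "dev_to_chan" is a hypothetical private helper.
 */
#if 0
static netdev_tx_t foosync_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        return z8530_queue_xmit(dev_to_chan(dev), skb);
}
#endif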
/*
 *      Module support
 */
static const char banner[] __initconst =
        KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

static int __init z85230_init_driver(void)
{
        printk(banner);
        return 0;
}
module_init(z85230_init_driver);

static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);

MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");