#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "via-ircc.h"
#define VIA_MODULE_NAME "via-ircc"
#define CHIP_IO_EXTENT 0x40
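/* Module parameters: qos_mtt_bits encodes the minimum turn-around time in
 * the IrDA QoS bit format; dongle_id selects the IR transceiver wired to
 * the controller. */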
static int qos_mtt_bits = 0x07;
static int dongle_id = 0;
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id);
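/* Crude bus-cycle delay: a read of port 0x80 takes roughly 1 us on
 * PC-class hardware, a common idiom for short busy-waits. */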
static void iodelay(int udelay)
{
	int i;

	for (i = 0; i < udelay; i++)
		inb(0x80);
}
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= via_remove_one,
};
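/* Module init only registers the PCI driver; per-adapter setup happens
 * in the probe callback, via_init_one(). */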
static int __init via_ircc_init(void)
{
	int rc;

	rc = pci_register_driver(&via_driver);
	if (rc < 0) {
		IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
			   __func__, rc);
		return -ENODEV;
	}
	return 0;
}
static int via_init_one(struct pci_dev *pcidev,
			const struct pci_device_id *id)
{
	int rc;
	u8 temp, oldPCI_40, oldPCI_44, bTmp, bTmp1;
	u16 Chipset, FirDRQ1, FirDRQ0, FirIRQ, FirIOBase;
	chipio_t info;
	rc = pci_enable_device(pcidev);
	if (rc) {
		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
		return -ENODEV;
	}

	if (ReadLPCReg(0x20) != 0x3C)
		Chipset = 0x3096;
	else
		Chipset = 0x3076;
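	/* The two supported families differ in where the FIR core's
	 * resources live: the 0x3076 (VT1211 class) is configured through
	 * Super-I/O (LPC) registers, the 0x3096 class through PCI
	 * configuration space. */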
	if (Chipset == 0x3076) {
		IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);

		WriteLPCReg(7, 0x0c);
		temp = ReadLPCReg(0x30);

		WriteLPCReg(0x1d, 0x82);
		WriteLPCReg(0x23, 0x18);
		temp = ReadLPCReg(0xF0);
		if ((temp & 0x01) == 0) {
			temp = ReadLPCReg(0x74) & 0x03;		/* DMA */
			FirDRQ0 = temp + 4;
			temp = (ReadLPCReg(0x74) & 0x0C) >> 2;
			FirDRQ1 = temp + 4;
		} else {
			temp = (ReadLPCReg(0x74) & 0x0C) >> 2;	/* DMA */
			FirDRQ0 = temp + 4;
			FirDRQ1 = FirDRQ0;
		}
		FirIRQ = ReadLPCReg(0x70) & 0x0f;		/* IRQ */
		FirIOBase = ReadLPCReg(0x60) << 8;	/* I/O base, high byte */
		FirIOBase = FirIOBase | ReadLPCReg(0x61);	/* low byte */
		info.fir_base = FirIOBase;
		info.irq = FirIRQ;
		info.dma = FirDRQ1;
		info.dma2 = FirDRQ0;

		pci_read_config_byte(pcidev, 0x40, &bTmp);
		pci_write_config_byte(pcidev, 0x40, (bTmp | 0x08) & 0xfe);
		pci_read_config_byte(pcidev, 0x42, &bTmp);
		pci_write_config_byte(pcidev, 0x42, bTmp | 0xf0);
		pci_write_config_byte(pcidev, 0x5a, 0xc0);
		WriteLPCReg(0x28, 0x70);
		if (via_ircc_open(pcidev, &info, 0x3076) == 0)
			rc = 0;
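	/* 0x3096 family: DMA channels, IRQ and I/O base are read from the
	 * south bridge's PCI configuration registers instead. */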
	} else {
		IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);

		pci_read_config_byte(pcidev, 0x67, &bTmp);

		pci_read_config_byte(pcidev, 0x42, &oldPCI_40);
		pci_write_config_byte(pcidev, 0x42, oldPCI_40 | 0x80);
		pci_read_config_byte(pcidev, 0x40, &oldPCI_40);
		pci_write_config_byte(pcidev, 0x40, oldPCI_40 & 0xf7);
		pci_read_config_byte(pcidev, 0x44, &oldPCI_44);
		pci_write_config_byte(pcidev, 0x44, 0x4e);

		if ((bTmp & 0x02) == 0) {
			pci_read_config_byte(pcidev, 0x44, &bTmp1); /* DMA */
			FirDRQ0 = (bTmp1 & 0x30) >> 4;
			pci_read_config_byte(pcidev, 0x44, &bTmp1);
			FirDRQ1 = (bTmp1 & 0xc0) >> 6;
		} else {
			pci_read_config_byte(pcidev, 0x44, &bTmp1); /* DMA */
			FirDRQ0 = (bTmp1 & 0x30) >> 4;
			FirDRQ1 = 0;
		}

		pci_read_config_byte(pcidev, 0x47, &bTmp1);	/* IRQ */
		FirIRQ = bTmp1 & 0x0f;

		pci_read_config_byte(pcidev, 0x69, &bTmp);
		FirIOBase = bTmp << 8;			/* high byte */
		pci_read_config_byte(pcidev, 0x68, &bTmp);
		FirIOBase = (FirIOBase | bTmp) & 0xfff0;

		info.fir_base = FirIOBase;
		info.irq = FirIRQ;
		info.dma = FirDRQ1;
		info.dma2 = FirDRQ0;
		if (via_ircc_open(pcidev, &info, 0x3096) == 0)
			rc = 0;
	}

	IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
	return rc;
}
static void __exit via_ircc_cleanup(void)
{
	pci_unregister_driver(&via_driver);
}
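/* Two net_device_ops variants that differ only in the xmit handler:
 * one byte-stuffs frames for SIR, the other feeds the FIR DMA engine. */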
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};

static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
			 unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL)
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	self->chip_id = id;
	spin_lock_init(&self->lock);

	pci_set_drvdata(pdev, self);

	/* Initialize resources from the probed configuration */
	self->io.fir_base = info->fir_base;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.irq = info->irq;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;

	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext,
			    VIA_MODULE_NAME)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* The only dongle that supports the higher speeds is 0x0d */
	switch (self->io.dongle_id) {
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	self->qos.min_turn_time.bits = qos_mtt_bits;
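	/* The Rx/Tx bounce buffers are sized for the largest IrDA frame
	 * plus slack, and allocated DMA-coherent so the chip can access
	 * them directly. */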
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	self->rx_buff.head =
		dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;

	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Boot the adapter at SIR 9600 baud */
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;

 err_out4:
	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return err;
}
static void via_remove_one(struct pci_dev *pdev)
{
	struct via_ircc_cb *self = pci_get_drvdata(pdev);
	int iobase;

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);

	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
	pci_set_drvdata(pdev, NULL);

	free_netdev(self->netdev);
}
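/* Bring the chip into a known state: maximum Rx frame size, SIR at
 * 9600 baud, FIFOs cleared, then program the dongle for that speed. */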
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	SetMaxRxPacketSize(iobase, 0x0fff);	/* set to max: 4095 */

	if (ReadLPCReg(0x20) == 0x3c)	/* VT1211 only */
		WriteLPCReg(0xF0, 0);

	ResetChip(iobase, 5);

	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);
}
static int via_ircc_read_dongle_id(int iobase)
{
	int dongle_id = 9;	/* Default to IBM */

	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
	return dongle_id;
}
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id)
{
	u8 mode = 0;

	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
		   __func__, speed, iobase, dongle_id);
		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n",
			   __func__);

		IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n",
			   __func__);

		SI_SetMode(iobase, mode);

		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
			   __func__, dongle_id);
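/* via_ircc_change_speed() below reprograms both the controller (baud
 * divisor, pulse width, preamble) and the dongle, then swaps between
 * the SIR and FIR transmit paths. */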
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accepted speed */
	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	/* SIR: program the UART-style divisor from the 115200 base rate. */
	value = (115200 / speed) - 1;

	/* MIR/FIR: fixed pulse setup and a longer preamble. */
	SetPulseWidth(iobase, 0);
	SetSendPreambleCount(iobase, 14);

	bTmp = ReadReg(iobase, I_CF_H_1) & 0x03;

	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	if (speed > 115200) {
		/* Install the FIR xmit handler and start DMA reception. */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install the SIR xmit handler. */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
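/* SIR transmit path: the frame is byte-stuffed into the Tx bounce
 * buffer with async_wrap_skb() and sent by DMA in one shot. */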
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* An empty frame carries only a speed-change request. */
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		self->new_speed = speed;
	}

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with the old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);

	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;
}
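/* FIR transmit path: frames are appended raw to a small multi-frame
 * Tx FIFO and sent back-to-back by via_ircc_dma_xmit(). */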
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	if (self->st_fifo.len)
		return NETDEV_TX_OK;
	if (self->chip_id == 0x3076)
		iodelay(1500);
	else
		udelay(1500);

	netif_stop_queue(dev);
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		self->new_speed = speed;
	}

	spin_lock_irqsave(&self->lock, flags);
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

	self->tx_fifo.tail += skb->len;
	dev->stats.tx_bytes += skb->len;
	skb_copy_from_linear_data(skb,
				  self->tx_fifo.queue[self->tx_fifo.free].start,
				  skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;

	via_ircc_dma_xmit(self, iobase);

	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;
}
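/* Kick the DMA engine for the frame at tx_fifo.ptr, the head of the
 * software Tx queue. */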
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	/* The frame's start pointer is converted to a bus address
	 * relative to the coherent Tx buffer. */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len,
		       DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);

	return 0;
}
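/* Tx-complete handling: account for errors or success, apply a pending
 * speed change, then recycle the Tx FIFO bookkeeping. */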
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	u8 Tx_status;

	iobase = self->io.fir_base;

	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (self->tx_fifo.len) {
		IRDA_DEBUG(1,
			   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
			   __func__,
			   self->tx_fifo.len, self->tx_fifo.ptr,
			   self->tx_fifo.free);
	}
	/* Reset Tx FIFO info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	netif_wake_queue(self->netdev);

	return TRUE;
}
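/* Arm the receiver: reset all software queue state and the chip's
 * FIFOs before (re)starting Rx DMA. */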
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	/* Reset Tx FIFO info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;

	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
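/* Rx-complete handling. Below 4 Mb/s the chip delivers one frame per DMA
 * run; note that the 0x3076 stores each data byte in every other buffer
 * position, hence the i * 2 copy. */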
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len, i;

	iobase = self->io.fir_base;
	st_fifo = &self->st_fifo;

	if (self->io.speed < 4000000) {	/* SIR and MIR speeds */
		len = GetRecvByte(iobase, self);
		skb = dev_alloc_skb(len + 1);
		if (skb == NULL)
			return FALSE;
		/* Make sure IP header gets aligned */
		skb_reserve(skb, 1);
		skb_put(skb, len - 2);
		if (self->chip_id == 0x3076) {
			for (i = 0; i < len - 2; i++)
				skb->data[i] = self->rx_buff.data[i * 2];
		} else {
			if (self->chip_id == 0x3096) {
				for (i = 0; i < len - 2; i++)
					skb->data[i] =
					    self->rx_buff.data[i];
			}
		}
		/* Move to next frame */
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
	} else {
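		/* FIR (4 Mb/s): received frames carry a 4-byte CRC-32
		 * trailer, hence the recurring "len - 4" below. */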
		len = GetRecvByte(iobase, self);

		if (((len - 4) < 2) || ((len - 4) > 2048)) {
			IRDA_DEBUG(1,
				   "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
				   __func__, len, RxCurCount(iobase, self),
				   self->RxLastCount);
			hwreset(self);
			return FALSE;
		}
		IRDA_DEBUG(2,
			   "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
			   __func__,
			   st_fifo->len, len - 4, RxCurCount(iobase, self));

		self->RxDataReady = 0;

		skb = dev_alloc_skb(len + 1 - 4);

		if ((skb == NULL) || (self->rx_buff.data == NULL) ||
		    (len < 6)) {
			self->netdev->stats.rx_dropped++;
			return TRUE;
		}
		skb_reserve(skb, 1);
		skb_put(skb, len - 4);

		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
			   len - 4, self->rx_buff.data);

		/* Move to next frame */
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
	}
	return TRUE;
}
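/* upload_rxdata(): push the data received so far up the stack; used
 * from the Rx timer path below. */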
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;

	st_fifo = &self->st_fifo;

	len = GetRecvByte(iobase, self);

	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

	if ((len - 4) < 2) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	skb_reserve(skb, 1);
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);

	/* Move to next frame */
	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);

	return TRUE;
}
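/* Rx timer: while the hardware still signals incoming data we keep
 * re-arming the timer; once it goes quiet we flush every frame queued
 * in the status FIFO. */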
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		/* Data is still arriving: re-arm the timer and wait. */
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	}
	self->RetryCount++;

	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
	    (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {
			/* Pop the next completed frame off the status FIFO. */
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);

			if ((skb == NULL) ||
			    (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 1);
			skb_put(skb, len - 4);
			skb_copy_to_linear_data(skb, self->rx_buff.data,
						len - 4);

			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
				   len - 4, st_fifo->head);

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
		self->RetryCount = 0;

		IRDA_DEBUG(2,
			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			   __func__, GetHostStatus(iobase),
			   GetRXStatus(iobase));

		if (RxCurCount(iobase, self) != self->RxLastCount) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	} else
		SetTimer(iobase, 4);

	return TRUE;
}
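/* Interrupt service routine: one host status register multiplexes
 * timer (0x40), Tx (0x20) and Rx (0x10) events. */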
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);
	if ((iHostIntType & 0x40) != 0) {	/* Timer event */
		self->EventFlag.TimeOut++;

		if (self->io.direction == IO_XMIT) {
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/* A frame has been held too long: reset the chip,
			 * otherwise keep polling for more data. */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				RxTimerHandler(self, iobase);
			}
		}
	}
	if ((iHostIntType & 0x20) != 0) {	/* Tx event */
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}
	}
	if ((iHostIntType & 0x10) != 0) {	/* Rx event */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err." : "",
			   (iRxIntType & 0x40) ? "CRC err" : "",
			   (iRxIntType & 0x20) ? "FIFO overr." : "",
			   (iRxIntType & 0x10) ? "EOF" : "",
			   (iRxIntType & 0x08) ? "RxData" : "",
			   (iRxIntType & 0x02) ? "RxMaxLen" : "",
			   (iRxIntType & 0x01) ? "SIR bad" : "");

		if (iRxIntType & 0x10) {	/* end of frame */
			if (via_ircc_dma_receive_complete(self, iobase)) {
				via_ircc_dma_receive(self);
			}
		} else {	/* receive error */
			IRDA_DEBUG(4,
				   "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	/* FIFO overrun */
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			}
			if (iRxIntType != 0x08)
				hwreset(self);
			via_ircc_dma_receive(self);
		}
	}
	spin_unlock(&self->lock);
	return IRQ_RETVAL(iHostIntType);
}
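/* Full hardware reset used on Tx/Rx error paths; restores the current
 * link speed afterwards. */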
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);

	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	/* Restore the current speed on top of the 9600 baud default. */
	via_ircc_change_speed(self, self->io.speed);

	self->st_fifo.len = 0;
}
static int via_ircc_is_receiving(struct via_ircc_cb *self)
{
	int status = FALSE;
	int iobase;

	iobase = self->io.fir_base;
	if (CkRxRecv(iobase, self))
		status = TRUE;

	IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);

	return status;
}
static int via_ircc_net_open(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;
	char hwname[32];

	self = netdev_priv(dev);
	dev->stats.rx_packets = 0;

	iobase = self->io.fir_base;
	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name,
			dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ, and clean up on
	 * failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}
	if (self->io.dma2 != self->io.dma) {
		if (request_dma(self->io.dma2, dev->name)) {
			free_irq(self->io.irq, dev);
			free_dma(self->io.dma);
			return -EAGAIN;
		}
	}

	via_ircc_dma_receive(self);

	/* Ready to play! */
	netif_start_queue(dev);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	sprintf(hwname, "VIA @ 0x%x", iobase);
	self->irlap = irlap_open(dev, &self->qos, hwname);

	self->RxLastCount = 0;

	return 0;
}
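/* Stop the device and release the IRQ and DMA resources. */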
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	self = netdev_priv(dev);

	/* Stop device */
	netif_stop_queue(dev);
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	iobase = self->io.fir_base;
	DisableDmaChannel(self->io.dma);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}
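/* Standard IrDA ioctls: set the link speed and report whether a
 * reception is currently in progress. */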
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct via_ircc_cb *self;
	unsigned long flags;
	int ret = 0;

	self = netdev_priv(dev);

	spin_lock_irqsave(&self->lock, flags);
	switch (cmd) {
	case SIOCSBANDWIDTH:	/* Set bandwidth */
		via_ircc_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCGRECEIVING:	/* Check if we are receiving right now */
		irq->ifr_receiving = via_ircc_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}