#include <linux/module.h>

#define DRIVER_NAME "sh_irda"

#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
#define __IRDARAM_LEN	0x13FF
#else
#define __IRDARAM_LEN	0x1039
#endif
#define MFIRISR		0x1F70	/* MIR/FIR mode interrupt source */
#define MFIRIMR		0x1F72	/* MIR/FIR mode interrupt mask */
#define MFIRICR		0x1F74	/* MIR/FIR mode interrupt clear */

#define FIFORSMSK	0x1FC4	/* FIFO receive status mask */
#define FIFORSOR	0x1FC6	/* FIFO receive status OR */
#define FIFOSEL		0x1FC8	/* FIFO select */
#define FIFORFL		0x1FCC	/* FIFO receive frame length */
#define FIFORAMCP	0x1FCE	/* FIFO RAM current pointer */
#define FIFORAMFP	0x1FD0	/* FIFO RAM follow pointer */

#define IRDARAM		0x0000	/* IrDA buffer RAM */
#define IRDARAM_LEN	__IRDARAM_LEN
/* IRTMR */
#define TMD_MASK	(0x3 << 14)	/* transfer mode */
#define TMD_SIR		(0x0 << 14)
#define TMD_MIR		(0x3 << 14)
#define TMD_FIR		(0x2 << 14)

#define FIFORIM		(1 << 8)	/* FIFO receive interrupt mask */
#define xIM_MASK	(FIFORIM | MIM | SIM)
#define RTO		(0x3 << RTO_SHIFT)	/* receive timeout */

#define ARMOD		(1 << 15)	/* auto receive mode */

#define RFL_MASK	(0x1FFF)	/* receive frame length mask */
/* SIRISR/SIRIMR/SIRICR, MFIRISR/MFIRIMR/MFIRICR */
#define FRE		(1 << 15)	/* frame receive end */
#define TROV		(1 << 11)	/* transfer area overflow */
#define xIR_9		(1 << 9)	/* SIR: timeout, MIR/FIR: abort detect */
#define xIR_8		(1 << 8)	/* SIR: framing error, MIR/FIR: CRC error */
#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)

#define BRC_MASK	(0x3F)		/* baud rate count mask */

#define CRC_RST		(1 << 15)	/* CRC engine reset */
#define CRC_CT_MASK	0x0FFF		/* CRC engine input data count mask */

#define CRC_IN_MASK	0x0FFF		/* CRC engine input data mask */
	spin_unlock_irqrestore(&self->lock, flags);

	ret = ioread16(self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
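	/* read-modify-write value: clear the bits selected by 'mask', then
	 * OR in 'data'; all other register bits are preserved */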
	old = ioread16(self->membase + offset);
	new = (old & ~mask) | data;

	spin_unlock_irqrestore(&self->lock, flags);
	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
	dev_err(dev, "unsupported timeout interval\n");
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "it is not SIR mode\n");
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
static int sh_irda_get_rcv_length(struct sh_irda_self *self)
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame recv\n");

	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: buffer ram over\n");

	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: time over\n");

	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: framing error\n");

	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame transmit end\n");
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};
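/*
 *		MIR/FIR MODE
 *
 * MIR/FIR handling is not implemented here: the table below simply
 * reuses the "none mode" stub handlers above.
 */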
static struct sh_irda_xir_func sh_irda_mfir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};
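/*
 *		SIR MODE
 */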
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8 *data = (u8 *)&data16;
	int len = sh_irda_get_rcv_length(self);
	int i, j;
	dev_dbg(dev, "frame recv length = %d\n", len);
	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	sh_irda_rcv_ctrl(self, 1);
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "buffer ram over\n");
	sh_irda_rcv_ctrl(self, 1);
	struct device *dev = &self->ndev->dev;

	sh_irda_set_baudrate(self, 9600);
	sh_irda_rcv_ctrl(self, 1);
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "framing error\n");
	sh_irda_rcv_ctrl(self, 1);
	struct device *dev = &self->ndev->dev;

	dev_dbg(dev, "frame transmit end\n");
	netif_wake_queue(self->ndev);
static struct sh_irda_xir_func sh_irda_sir_func = {
	.xir_fre	= sh_irda_sir_fre,
	.xir_trov	= sh_irda_sir_trov,
	.xir_9		= sh_irda_sir_tot,
	.xir_8		= sh_irda_sir_fer,
	.xir_fte	= sh_irda_sir_fte,
};
	struct device *dev = &self->ndev->dev;
	struct sh_irda_xir_func *func;

		func = &sh_irda_sir_func;

		func = &sh_irda_mfir_func;

		func = &sh_irda_mfir_func;

		func = &sh_irda_xir_func;

	self->xir_func = func;
	dev_dbg(dev, "switch to %s mode", name);
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
	switch (self->mode) {

	sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
	sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
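/*
 * Interrupt handler: read the status bits, acknowledge them, and dispatch
 * to the per-mode handlers installed in self->xir_func.
 */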
	struct sh_irda_xir_func *func = self->xir_func;

	sh_irda_write(self, SIRICR, isr);
static void sh_irda_crc_reset(struct sh_irda_self *self)

	return sh_irda_read(self, CRCOR);
	struct device *dev = &self->ndev->dev;

	sh_irda_crc_reset(self);
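	/*
	 * Known-answer self test: push a fixed four-byte pattern through the
	 * CRC engine, then check that the engine counted four input bytes and
	 * produced the expected result.
	 */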
	sh_irda_crc_add(self, 0xCC);
	sh_irda_crc_add(self, 0xF5);
	sh_irda_crc_add(self, 0xF1);
	sh_irda_crc_add(self, 0xA7);

	val = sh_irda_crc_cnt(self);
		dev_err(dev, "CRC count error %x\n", val);

	val = sh_irda_crc_out(self);
		dev_err(dev, "CRC result error %x\n", val);

	sh_irda_crc_reset(self);
static void sh_irda_remove_iobuf(struct sh_irda_self *self)

	kfree(self->rx_buff.head);

	self->tx_buff.head = NULL;
	self->tx_buff.data = NULL;
	self->rx_buff.head = NULL;
	self->rx_buff.data = NULL;
static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)

	if (self->rx_buff.head || self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff has already existed.");
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize = rxsize;
	self->rx_buff.in_frame = FALSE;
	self->rx_buff.data = self->rx_buff.head;
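	/*
	 * The transmit buffer is not allocated: tx_buff.head points straight
	 * at the memory-mapped IRDARAM, so wrapped frames are written
	 * directly into the controller's buffer RAM.
	 */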
	self->tx_buff.head = self->membase + IRDARAM;
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		goto sh_irda_hard_xmit_end;
	self->tx_buff.len = 0;
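	/*
	 * SIR-wrap the skb directly into tx_buff.head, which (see
	 * sh_irda_init_iobuf) is the memory-mapped IRDARAM, then program the
	 * frame length register (IRTFLR).
	 */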
	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.head,
					   self->tx_buff.truesize);
	spin_unlock_irqrestore(&self->lock, flags);
	if (self->tx_buff.len > self->tx_buff.truesize)
		self->tx_buff.len = self->tx_buff.truesize;
	sh_irda_write(self, IRTFLR, self->tx_buff.len);

	goto sh_irda_hard_xmit_end;
sh_irda_hard_xmit_end:
	sh_irda_set_baudrate(self, 9600);
	netif_wake_queue(self->ndev);
	sh_irda_rcv_ctrl(self, 1);
	return &self->ndev->stats;
static int sh_irda_open(struct net_device *ndev)
	pm_runtime_get_sync(&self->pdev->dev);
	err = sh_irda_crc_init(self);

	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);

	pm_runtime_put_sync(&self->pdev->dev);
static int sh_irda_stop(struct net_device *ndev)

	netif_stop_queue(ndev);
	pm_runtime_put_sync(&self->pdev->dev);
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
	self = netdev_priv(ndev);
	if (!self->membase) {

	self->qos.baud_rate.bits &= IR_9600;	/* advertise 9600 baud only */
	self->qos.min_turn_time.bits = 1;	/* 10 ms or more */
	platform_set_drvdata(pdev, ndev);
		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");

	pm_runtime_disable(&pdev->dev);
	sh_irda_remove_iobuf(self);
	struct net_device *ndev = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	sh_irda_remove_iobuf(self);

	platform_set_drvdata(pdev, NULL);
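/*
 * Shared no-op runtime PM callback: the hardware is fully reprogrammed on
 * open (after pm_runtime_get_sync()), so there is no device state to save
 * or restore on runtime suspend/resume.
 */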
static int sh_irda_runtime_nop(struct device *dev)
static const struct dev_pm_ops sh_irda_pm_ops = {
	.runtime_suspend	= sh_irda_runtime_nop,
	.runtime_resume		= sh_irda_runtime_nop,
};
	.probe	= sh_irda_probe,
	.pm	= &sh_irda_pm_ops,