Linux Kernel 3.7.1
sh_irda.c
/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Kuninori Morimoto <[email protected]>
 *
 * Based on sh_sir.c
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * CAUTION
 *
 * This driver is very simple; it does not yet support:
 * - MIR/FIR
 * - DMA transfer
 * - FIFO mode
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#define DRIVER_NAME "sh_irda"

#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
#define __IRDARAM_LEN	0x13FF
#else
#define __IRDARAM_LEN	0x1039
#endif

#define IRTMR		0x1F00 /* Transfer mode */
#define IRCFR		0x1F02 /* Configuration */
#define IRCTR		0x1F04 /* IR control */
#define IRTFLR		0x1F20 /* Transmit frame length */
#define IRTCTR		0x1F22 /* Transmit control */
#define IRRFLR		0x1F40 /* Receive frame length */
#define IRRCTR		0x1F42 /* Receive control */
#define SIRISR		0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR		0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR		0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR		0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR		0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR		0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR		0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR		0x1F80 /* CRC engine control */
#define CRCIR		0x1F86 /* CRC engine input data */
#define CRCCR		0x1F8A /* CRC engine calculation */
#define CRCOR		0x1F8E /* CRC engine output data */
#define FIFOCP		0x1FC0 /* FIFO current pointer */
#define FIFOFP		0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK	0x1FC4 /* FIFO receive status mask */
#define FIFORSOR	0x1FC6 /* FIFO receive status OR */
#define FIFOSEL		0x1FC8 /* FIFO select */
#define FIFORS		0x1FCA /* FIFO receive status */
#define FIFORFL		0x1FCC /* FIFO receive frame length */
#define FIFORAMCP	0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP	0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL		0x1FD2 /* BUS interface control */
#define IRDARAM		0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN	__IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
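/*
 * All of the above are 16-bit register offsets from the ioremapped
 * register window (self->membase); they are accessed with
 * ioread16()/iowrite16() via the sh_irda_read()/sh_irda_write()
 * helpers below.
 */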

/* IRTMR */
#define TMD_MASK	(0x3 << 14) /* Transfer Mode */
#define TMD_SIR		(0x0 << 14)
#define TMD_MIR		(0x3 << 14)
#define TMD_FIR		(0x2 << 14)

#define FIFORIM		(1 << 8) /* FIFO receive interrupt mask */
#define MIM		(1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM		(1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK	(FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT	8 /* shift for Receive Timeout */
#define RTO		(0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD		(1 << 15) /* Auto-Receive Mode */
#define TE		(1 << 0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK	(0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE		(1 << 0) /* Receive Enable */

/*
 * SIRISR, SIRIMR, SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE		(1 << 15) /* Frame Receive End */
#define TROV		(1 << 11) /* Transfer Area Overflow */
#define xIR_9		(1 << 9)
#define TOT		xIR_9 /* for SIR Timeout */
#define ABTD		xIR_9 /* for MIR/FIR Abort Detection */
#define xIR_8		(1 << 8)
#define FER		xIR_8 /* for SIR Framing Error */
#define CRCER		xIR_8 /* for MIR/FIR CRC error */
#define FTE		(1 << 7) /* Frame Transmit End */
#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK	(0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK	0x0FFF /* mask for CRC Engine Input Data */

/************************************************************************


			enum / structure


************************************************************************/
enum sh_irda_mode {
	SH_IRDA_NONE = 0,
	SH_IRDA_SIR,
	SH_IRDA_MIR,
	SH_IRDA_FIR,
};

struct sh_irda_self;
struct sh_irda_xir_func {
	int (*xir_fre)	(struct sh_irda_self *self);
	int (*xir_trov)	(struct sh_irda_self *self);
	int (*xir_9)	(struct sh_irda_self *self);
	int (*xir_8)	(struct sh_irda_self *self);
	int (*xir_fte)	(struct sh_irda_self *self);
};

struct sh_irda_self {
	void __iomem		*membase;
	unsigned int		irq;
	struct platform_device	*pdev;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	enum sh_irda_mode	mode;
	spinlock_t		lock;

	struct sh_irda_xir_func	*xir_func;
};

/************************************************************************


			common function


************************************************************************/
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	iowrite16(data, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}

static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&self->lock, flags);
	ret = ioread16(self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);

	return ret;
}

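/*
 * Read-modify-write helper: only the bits in @mask are replaced by @data,
 * e.g. sh_irda_update_bits(self, IRTMR, TMD_MASK, TMD_SIR) changes the
 * transfer-mode field without disturbing the interrupt-mask bits.
 */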
static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
				u16 mask, u16 data)
{
	unsigned long flags;
	u16 old, new;

	spin_lock_irqsave(&self->lock, flags);
	old = ioread16(self->membase + offset);
	new = (old & ~mask) | data;
	if (old != new)
		iowrite16(new, self->membase + offset); /* write back the merged value, not just @data */
	spin_unlock_irqrestore(&self->lock, flags);
}

/************************************************************************


			mode function


************************************************************************/
/*=====================================
 *
 *		common
 *
 *=====================================*/
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
	struct device *dev = &self->ndev->dev;

	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
	struct device *dev = &self->ndev->dev;

	if (SH_IRDA_SIR != self->mode)
		interval = 0;

	if (interval < 0 || interval > 2) {
		dev_err(dev, "unsupported timeout interval\n");
		return -EINVAL;
	}

	sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
	return 0;
}

static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
	struct device *dev = &self->ndev->dev;
	u16 val;

	if (baudrate < 0)
		return 0;

	if (SH_IRDA_SIR != self->mode) {
		dev_err(dev, "it is not SIR mode\n");
		return -EINVAL;
	}

	/*
	 * Baud rate (bits/s) =
	 *	(48 MHz / 26) / ((baud rate counter value + 1) x 16)
	 */
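	/*
	 * Worked example (integer division): for 9600 baud,
	 * 48000000 / 26 / 16 / 9600 = 12, so val = 12 - 1 = 11 (0x0B).
	 */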
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);

	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

	return 0;
}

static int sh_irda_get_rcv_length(struct sh_irda_self *self)
{
	return RFL_MASK & sh_irda_read(self, IRRFLR);
}

/*=====================================
 *
 *		NONE MODE
 *
 *=====================================*/
static int sh_irda_xir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame recv\n");
	return 0;
}

static int sh_irda_xir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: buffer ram over\n");
	return 0;
}

static int sh_irda_xir_9(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: time over\n");
	return 0;
}

static int sh_irda_xir_8(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: framing error\n");
	return 0;
}

static int sh_irda_xir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame transmit end\n");
	return 0;
}

static struct sh_irda_xir_func sh_irda_xir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};

/*=====================================
 *
 *		MIR/FIR MODE
 *
 * MIR/FIR are not supported yet
 *=====================================*/
static struct sh_irda_xir_func sh_irda_mfir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};

/*=====================================
 *
 *		SIR MODE
 *
 *=====================================*/
static int sh_irda_sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8  *data = (u8 *)&data16;
	int len = sh_irda_get_rcv_length(self);
	int i, j;

	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

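	/*
	 * IRDARAM is only 16 bits wide, so read one word per even offset
	 * and feed its two bytes to the SIR unwrapper one at a time.
	 */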
	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	sh_irda_rcv_ctrl(self, 1);

	return 0;
}

static int sh_irda_sir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "buffer ram over\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_tot(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "time over\n");
	sh_irda_set_baudrate(self, 9600);
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_fer(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "framing error\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_dbg(dev, "frame transmit end\n");
	netif_wake_queue(self->ndev);

	return 0;
}

static struct sh_irda_xir_func sh_irda_sir_func = {
	.xir_fre	= sh_irda_sir_fre,
	.xir_trov	= sh_irda_sir_trov,
	.xir_9		= sh_irda_sir_tot,
	.xir_8		= sh_irda_sir_fer,
	.xir_fte	= sh_irda_sir_fte,
};

static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
	struct device *dev = &self->ndev->dev;
	struct sh_irda_xir_func *func;
	const char *name;
	u16 data;

	switch (mode) {
	case SH_IRDA_SIR:
		name = "SIR";
		data = TMD_SIR;
		func = &sh_irda_sir_func;
		break;
	case SH_IRDA_MIR:
		name = "MIR";
		data = TMD_MIR;
		func = &sh_irda_mfir_func;
		break;
	case SH_IRDA_FIR:
		name = "FIR";
		data = TMD_FIR;
		func = &sh_irda_mfir_func;
		break;
	default:
		name = "NONE";
		data = 0;
		func = &sh_irda_xir_func;
		break;
	}

	self->mode = mode;
	self->xir_func = func;
	sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

	dev_dbg(dev, "switch to %s mode\n", name);
}

/************************************************************************


			irq function


************************************************************************/
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;
	u16 xir_reg;

	/* set all mask */
	sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear irq */
	sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* open mask */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}

static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func *func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR);

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	if (isr & FRE)
		func->xir_fre(self);
	if (isr & TROV)
		func->xir_trov(self);
	if (isr & xIR_9)
		func->xir_9(self);
	if (isr & xIR_8)
		func->xir_8(self);
	if (isr & FTE)
		func->xir_fte(self);

	return IRQ_HANDLED;
}
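/*
 * Note: only SIRISR is read and cleared here, which matches the SIR-only
 * support noted in the CAUTION comment at the top of this file.
 */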

/************************************************************************


			CRC function


************************************************************************/
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
	sh_irda_write(self, CRCCTR, CRC_RST);
}

static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}

static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}

static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
	return sh_irda_read(self, CRCOR);
}

static int sh_irda_crc_init(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_irda_crc_reset(self);

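	/*
	 * Known-answer self-test: feed the bytes CC F5 F1 A7 into the CRC
	 * engine, then verify that it counted 4 inputs and output 0x51DF.
	 */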
	sh_irda_crc_add(self, 0xCC);
	sh_irda_crc_add(self, 0xF5);
	sh_irda_crc_add(self, 0xF1);
	sh_irda_crc_add(self, 0xA7);

	val = sh_irda_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_irda_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:

	sh_irda_crc_reset(self);
	return ret;
}

/************************************************************************


			iobuf function


************************************************************************/
static void sh_irda_remove_iobuf(struct sh_irda_self *self)
{
	kfree(self->rx_buff.head);

	self->tx_buff.head = NULL;
	self->tx_buff.data = NULL;
	self->rx_buff.head = NULL;
	self->rx_buff.data = NULL;
}

static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff already exists\n");
		return -EINVAL;
	}

	/* rx_buff */
	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize	= rxsize;
	self->rx_buff.in_frame	= FALSE;
	self->rx_buff.state	= OUTSIDE_FRAME;
	self->rx_buff.data	= self->rx_buff.head;

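	/*
	 * tx_buff points straight at the device's IRDARAM, so
	 * sh_irda_hard_xmit() wraps frames directly into the chip's
	 * transmit RAM (and sh_irda_remove_iobuf() must not kfree it).
	 */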
	/* tx_buff */
	self->tx_buff.head	= self->membase + IRDARAM;
	self->tx_buff.truesize	= IRDARAM_LEN;

	return 0;
}

/************************************************************************


			net_device_ops function


************************************************************************/
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	int ret;

	dev_dbg(dev, "hard xmit\n");

	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		goto sh_irda_hard_xmit_end;

	self->tx_buff.len = 0;
	if (skb->len) {
		unsigned long flags;

		spin_lock_irqsave(&self->lock, flags);
		self->tx_buff.len = async_wrap_skb(skb,
						   self->tx_buff.head,
						   self->tx_buff.truesize);
		spin_unlock_irqrestore(&self->lock, flags);

		if (self->tx_buff.len > self->tx_buff.truesize)
			self->tx_buff.len = self->tx_buff.truesize;

		sh_irda_write(self, IRTFLR, self->tx_buff.len);
		sh_irda_write(self, IRTCTR, ARMOD | TE);
	} else
		goto sh_irda_hard_xmit_end;

	dev_kfree_skb(skb);

	return 0;

sh_irda_hard_xmit_end:
	sh_irda_set_baudrate(self, 9600);
	netif_wake_queue(self->ndev);
	sh_irda_rcv_ctrl(self, 1);
	dev_kfree_skb(skb);

	return ret;
}

static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * This function is required by the IrDA framework,
	 * but there is nothing to do here yet.
	 */
	return 0;
}

static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}

static int sh_irda_open(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	int err;

	pm_runtime_get_sync(&self->pdev->dev);
	err = sh_irda_crc_init(self);
	if (err)
		goto open_err;

	sh_irda_set_mode(self, SH_IRDA_SIR);
	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);

	dev_info(&ndev->dev, "opened\n");

	return 0;

open_err:
	pm_runtime_put_sync(&self->pdev->dev);

	return err;
}

static int sh_irda_stop(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(ndev);
	pm_runtime_put_sync(&self->pdev->dev);

	dev_info(&ndev->dev, "stopped\n");

	return 0;
}

static const struct net_device_ops sh_irda_ndo = {
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};

/************************************************************************


			platform_driver function


************************************************************************/
static int __devinit sh_irda_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_irda_self *self;
	struct resource *res;
	int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	self->pdev = pdev;
	pm_runtime_enable(&pdev->dev);

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops	= &sh_irda_ndo;
	ndev->irq		= irq;

	self->ndev			= ndev;
	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */
	spin_lock_init(&self->lock);

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);
	err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
	if (err) {
		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
		goto err_irq; /* unregister first: the netdev is already live */
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");

	goto exit;

err_irq:
	unregister_netdev(ndev);
err_mem_4:
	pm_runtime_disable(&pdev->dev);
	sh_irda_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}
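
/*
 * Usage sketch (hypothetical values): a board file would register this
 * driver with one MEM resource covering IRDARAM plus the 0x1Fxx register
 * block and one IRQ, e.g.:
 *
 *	static struct resource sh_irda_resources[] = {
 *		DEFINE_RES_MEM(0xe6d00000, 0x2000),	// hypothetical base
 *		DEFINE_RES_IRQ(95),			// hypothetical IRQ
 *	};
 *	platform_device_register_simple("sh_irda", -1,
 *					sh_irda_resources,
 *					ARRAY_SIZE(sh_irda_resources));
 */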

static int __devexit sh_irda_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_irda_self *self = netdev_priv(ndev);

	if (!self)
		return 0;

	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	sh_irda_remove_iobuf(self);
	iounmap(self->membase);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int sh_irda_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_irda_pm_ops = {
	.runtime_suspend	= sh_irda_runtime_nop,
	.runtime_resume		= sh_irda_runtime_nop,
};

static struct platform_driver sh_irda_driver = {
	.probe	= sh_irda_probe,
	.remove	= __devexit_p(sh_irda_remove),
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &sh_irda_pm_ops,
	},
};

module_platform_driver(sh_irda_driver);

MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");