Linux Kernel 3.7.1
au1k_ir.c
/*
 * Alchemy Semi Au1000 IrDA driver
 *
 * Copyright 2001 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/types.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include <asm/mach-au1x00/au1000.h>

/* registers */
#define IR_RING_PTR_STATUS	0x00
#define IR_RING_BASE_ADDR_H	0x04
#define IR_RING_BASE_ADDR_L	0x08
#define IR_RING_SIZE		0x0C
#define IR_RING_PROMPT		0x10
#define IR_RING_ADDR_CMPR	0x14
#define IR_INT_CLEAR		0x18
#define IR_CONFIG_1		0x20
#define IR_SIR_FLAGS		0x24
#define IR_STATUS		0x28
#define IR_READ_PHY_CONFIG	0x2C
#define IR_WRITE_PHY_CONFIG	0x30
#define IR_MAX_PKT_LEN		0x34
#define IR_RX_BYTE_CNT		0x38
#define IR_CONFIG_2		0x3C
#define IR_ENABLE		0x40

/* Config1 */
#define IR_RX_INVERT_LED	(1 << 0)
#define IR_TX_INVERT_LED	(1 << 1)
#define IR_ST			(1 << 2)
#define IR_SF			(1 << 3)
#define IR_SIR			(1 << 4)
#define IR_MIR			(1 << 5)
#define IR_FIR			(1 << 6)
#define IR_16CRC		(1 << 7)
#define IR_TD			(1 << 8)
#define IR_RX_ALL		(1 << 9)
#define IR_DMA_ENABLE		(1 << 10)
#define IR_RX_ENABLE		(1 << 11)
#define IR_TX_ENABLE		(1 << 12)
#define IR_LOOPBACK		(1 << 14)
#define IR_SIR_MODE		(IR_SIR | IR_DMA_ENABLE | \
				 IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
				 IR_16CRC)

/* ir_status */
#define IR_RX_STATUS		(1 << 9)
#define IR_TX_STATUS		(1 << 10)
#define IR_PHYEN		(1 << 15)

/* ir_write_phy_config */
#define IR_BR(x)	(((x) & 0x3f) << 10)	/* baud rate */
#define IR_PW(x)	(((x) & 0x1f) << 5)	/* pulse width */
#define IR_P(x)		((x) & 0x1f)		/* preamble bits */

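/*
 * Observed pattern, not documented in this file: for the SIR rates
 * programmed in au1k_irda_set_speed() below, the IR_BR() divisor always
 * works out to (115200 / speed) - 1, e.g. IR_BR(11) for 9600 and
 * IR_BR(5) for 19200. A hedged helper capturing that assumption:
 *
 *	static inline unsigned long sir_phy_config(int speed)
 *	{
 *		return IR_BR(115200 / speed - 1) | IR_PW(12);
 *	}
 */
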
/* Config2 */
#define IR_MODE_INV		(1 << 0)
#define IR_ONE_PIN		(1 << 1)
#define IR_PHYCLK_40MHZ		(0 << 2)
#define IR_PHYCLK_48MHZ		(1 << 2)
#define IR_PHYCLK_56MHZ		(2 << 2)
#define IR_PHYCLK_64MHZ		(3 << 2)
#define IR_DP			(1 << 4)
#define IR_DA			(1 << 5)
#define IR_FLT_HIGH		(0 << 6)
#define IR_FLT_MEDHI		(1 << 6)
#define IR_FLT_MEDLO		(2 << 6)
#define IR_FLT_LO		(3 << 6)
#define IR_IEN			(1 << 8)

/* ir_enable */
#define IR_HC			(1 << 3)	/* divide SBUS clock by 2 */
#define IR_CE			(1 << 2)	/* clock enable */
#define IR_C			(1 << 1)	/* coherency bit */
#define IR_BE			(1 << 0)	/* set in big endian mode */

#define NUM_IR_DESC	64
#define RING_SIZE_4	0x0
#define RING_SIZE_16	0x3
#define RING_SIZE_64	0xF
#define MAX_NUM_IR_DESC	64
#define MAX_BUF_SIZE	2048

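/*
 * NUM_IR_DESC must stay a power of two: the ring indices below advance
 * with "(idx + 1) & (NUM_IR_DESC - 1)", which only wraps correctly for
 * power-of-two ring sizes. RING_SIZE_64 (0xF) is the encoding written
 * to the IR_RING_SIZE register for a 64-entry ring.
 */
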
/* Ring descriptor flags */
#define AU_OWN			(1 << 7)	/* tx,rx */
#define IR_DIS_CRC		(1 << 6)	/* tx */
#define IR_BAD_CRC		(1 << 5)	/* tx */
#define IR_NEED_PULSE		(1 << 4)	/* tx */
#define IR_FORCE_UNDER		(1 << 3)	/* tx */
#define IR_DISABLE_TX		(1 << 2)	/* tx */
#define IR_HW_UNDER		(1 << 0)	/* tx */
#define IR_TX_ERROR		(IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)

#define IR_PHY_ERROR		(1 << 6)	/* rx */
#define IR_CRC_ERROR		(1 << 5)	/* rx */
#define IR_MAX_LEN		(1 << 4)	/* rx */
#define IR_FIFO_OVER		(1 << 3)	/* rx */
#define IR_SIR_ERROR		(1 << 2)	/* rx */
#define IR_RX_ERROR		(IR_PHY_ERROR | IR_CRC_ERROR | \
				 IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)

struct db_dest {
	struct db_dest *pnext;
	volatile u32 *vaddr;
	dma_addr_t dma_addr;
};

struct ring_dest {
	u8 count_0;	/* 7:0  */
	u8 count_1;	/* 12:8 */
	u8 reserved;
	u8 flags;
	u8 addr_0;	/* 7:0 */
	u8 addr_1;	/* 15:8 */
	u8 addr_2;	/* 23:16 */
	u8 addr_3;	/* 31:24 */
};
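/*
 * Each descriptor is 8 bytes: a 13-bit length split across count_0/1,
 * the flags byte, and a 32-bit physical buffer address split across
 * addr_0..addr_3, least significant byte first. For example, storing
 * dma_addr 0x01234567 sets addr_0 = 0x67, addr_1 = 0x45, addr_2 = 0x23
 * and addr_3 = 0x01, exactly as the setup code in
 * au1k_irda_net_init() does below.
 */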

/* Private data for each instance */
struct au1k_private {
	void __iomem *iobase;
	int irq_rx, irq_tx;

	struct db_dest *pDBfree;
	struct db_dest db[2 * NUM_IR_DESC];
	volatile struct ring_dest *rx_ring[NUM_IR_DESC];
	volatile struct ring_dest *tx_ring[NUM_IR_DESC];
	struct db_dest *rx_db_inuse[NUM_IR_DESC];
	struct db_dest *tx_db_inuse[NUM_IR_DESC];
	u32 rx_head;
	u32 tx_head;
	u32 tx_tail;
	u32 tx_full;

	iobuff_t rx_buff;

	struct net_device *netdev;
	struct timeval stamp;
	struct timeval now;
	struct qos_info qos;
	struct irlap_cb *irlap;

	u8 open;
	u32 speed;
	u32 newspeed;

	struct timer_list timer;

	struct resource *ioarea;
	struct au1k_irda_platform_data *platdata;
};

static int qos_mtt_bits = 0x07;	/* 1 ms or more */

#define RUN_AT(x) (jiffies + (x))

static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
{
	if (p->platdata && p->platdata->set_phy_mode)
		p->platdata->set_phy_mode(mode);
}
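/*
 * The hook above comes from board code via platform data. A minimal
 * sketch of the expected shape (the actual definition lives in
 * <asm/mach-au1x00/au1000.h>; the field name follows the usage above):
 *
 *	struct au1k_irda_platform_data {
 *		void (*set_phy_mode)(int mode);
 *	};
 *
 * with mode one of AU1000_IRDA_PHY_MODE_OFF/SIR/FIR.
 */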

static inline unsigned long irda_read(struct au1k_private *p,
				      unsigned long ofs)
{
	/*
	 * IrDA peripheral bug. You have to read the register
	 * twice to get the right value.
	 */
	(void)__raw_readl(p->iobase + ofs);
	return __raw_readl(p->iobase + ofs);
}

static inline void irda_write(struct au1k_private *p, unsigned long ofs,
			      unsigned long val)
{
	__raw_writel(val, p->iobase + ofs);
	wmb();
}
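/*
 * Every register update in this driver is a read-modify-write built
 * from these two accessors, e.g. setting one bit without disturbing
 * the rest of IR_CONFIG_1:
 *
 *	irda_write(aup, IR_CONFIG_1,
 *		   irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
 */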

/*
 * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and dma address of a buffer suitable for
 * both receive and transmit operations.
 */
static struct db_dest *GetFreeDB(struct au1k_private *aup)
{
	struct db_dest *db;
	db = aup->pDBfree;

	if (db)
		aup->pDBfree = db->pnext;
	return db;
}
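/*
 * There is no matching "put" routine in this version of the driver;
 * buffers stay attached to their descriptors for the device lifetime.
 * If one were needed, the free list is a simple LIFO push (a sketch,
 * not present in the original source):
 *
 *	static void ReturnDB(struct au1k_private *aup, struct db_dest *db)
 *	{
 *		db->pnext = aup->pDBfree;
 *		aup->pDBfree = db;
 *	}
 */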

/*
 * DMA memory allocation, derived from pci_alloc_consistent.
 * However, the Au1000 data cache is coherent (when programmed
 * so), therefore we return KSEG0 address, not KSEG1.
 */
static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
{
	void *ret;
	int gfp = GFP_ATOMIC | GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
		ret = (void *)KSEG0ADDR(ret);
	}
	return ret;
}

static void dma_free(void *vaddr, size_t size)
{
	vaddr = (void *)KSEG0ADDR(vaddr);
	free_pages((unsigned long) vaddr, get_order(size));
}
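/*
 * On MIPS32, KSEG0 (0x80000000-0x9fffffff) is the cached unmapped
 * window and KSEG1 (0xa0000000-0xbfffffff) the uncached one; both
 * alias the same physical memory. KSEG0ADDR() just rewrites the top
 * bits, so e.g. physical 0x00100000 becomes 0x80100000. Returning a
 * KSEG0 address here is safe only because the Au1000 data cache can
 * run coherently, as the comment above notes.
 */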


static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
	int i;
	for (i = 0; i < NUM_IR_DESC; i++) {
		aup->rx_ring[i] = (volatile struct ring_dest *)
			(rx_base + sizeof(struct ring_dest) * i);
	}
	for (i = 0; i < NUM_IR_DESC; i++) {
		aup->tx_ring[i] = (volatile struct ring_dest *)
			(tx_base + sizeof(struct ring_dest) * i);
	}
}

static int au1k_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state = OUTSIDE_FRAME;
		io->data = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}

/*
 * Set the IrDA communications speed.
 */
static int au1k_irda_set_speed(struct net_device *dev, int speed)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *ptxd;
	unsigned long control;
	int ret = 0, timeout = 10, i;

	if (speed == aup->speed)
		return ret;

	/* disable PHY first */
	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);

	/* disable RX/TX */
	irda_write(aup, IR_CONFIG_1,
	    irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
	msleep(20);
	while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
		msleep(20);
		if (!timeout--) {
			printk(KERN_ERR "%s: rx/tx disable timeout\n",
					dev->name);
			break;
		}
	}

	/* disable DMA */
	irda_write(aup, IR_CONFIG_1,
		   irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
	msleep(20);

	/* After we disable tx/rx, the index pointers go back to zero. */
	aup->tx_head = aup->tx_tail = aup->rx_head = 0;
	for (i = 0; i < NUM_IR_DESC; i++) {
		ptxd = aup->tx_ring[i];
		ptxd->flags = 0;
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
	}

	for (i = 0; i < NUM_IR_DESC; i++) {
		ptxd = aup->rx_ring[i];
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		ptxd->flags = AU_OWN;
	}

	if (speed == 4000000)
		au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
	else
		au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);

	switch (speed) {
	case 9600:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 19200:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 38400:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 57600:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 115200:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
		break;
	case 4000000:
		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
		irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
				IR_RX_ENABLE);
		break;
	default:
		printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
		ret = -EINVAL;
		break;
	}

	aup->speed = speed;
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);

	control = irda_read(aup, IR_STATUS);
	irda_write(aup, IR_RING_PROMPT, 0);

	if (control & (1 << 14)) {
		printk(KERN_ERR "%s: configuration error\n", dev->name);
	} else {
		if (control & (1 << 11))
			printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
		if (control & (1 << 12))
			printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
		if (control & (1 << 13))
			printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
		if (control & (1 << 10))
			printk(KERN_DEBUG "%s TX enabled\n", dev->name);
		if (control & (1 << 9))
			printk(KERN_DEBUG "%s RX enabled\n", dev->name);
	}

	return ret;
}
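/*
 * The unnamed IR_STATUS bits tested above decode, going by the
 * printouts, as: bit 14 = configuration error, bits 13/12/11 = valid
 * FIR/MIR/SIR config, bit 10 = TX enabled, bit 9 = RX enabled. (Names
 * inferred from the messages; this file defines no macros for them.)
 */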

static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
{
	struct net_device_stats *ps = &dev->stats;

	ps->rx_packets++;

	if (status & IR_RX_ERROR) {
		ps->rx_errors++;
		if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
			ps->rx_missed_errors++;
		if (status & IR_MAX_LEN)
			ps->rx_length_errors++;
		if (status & IR_CRC_ERROR)
			ps->rx_crc_errors++;
	} else
		ps->rx_bytes += count;
}

static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
	struct net_device_stats *ps = &dev->stats;

	ps->tx_packets++;
	ps->tx_bytes += pkt_len;

	if (status & IR_TX_ERROR) {
		ps->tx_errors++;
		ps->tx_aborted_errors++;
	}
}

static void au1k_tx_ack(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *ptxd;

	ptxd = aup->tx_ring[aup->tx_tail];
	while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
		update_tx_stats(dev, ptxd->flags,
				(ptxd->count_1 << 8) | ptxd->count_0);
		ptxd->count_0 = 0;
		ptxd->count_1 = 0;
		wmb();
		aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
		ptxd = aup->tx_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	if (aup->tx_tail == aup->tx_head) {
		if (aup->newspeed) {
			au1k_irda_set_speed(dev, aup->newspeed);
			aup->newspeed = 0;
		} else {
			irda_write(aup, IR_CONFIG_1,
			    irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
			irda_write(aup, IR_CONFIG_1,
			    irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
			irda_write(aup, IR_RING_PROMPT, 0);
		}
	}
}

static int au1k_irda_rx(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	volatile struct ring_dest *prxd;
	struct sk_buff *skb;
	struct db_dest *pDB;
	u32 flags, count;

	prxd = aup->rx_ring[aup->rx_head];
	flags = prxd->flags;

	while (!(flags & AU_OWN)) {
		pDB = aup->rx_db_inuse[aup->rx_head];
		count = (prxd->count_1 << 8) | prxd->count_0;
		if (!(flags & IR_RX_ERROR)) {
			/* good frame */
			update_rx_stats(dev, flags, count);
			skb = alloc_skb(count + 1, GFP_ATOMIC);
			if (skb == NULL) {
				/*
				 * Out of memory: count the drop but still
				 * fall through so the descriptor is handed
				 * back below; a bare "continue" here would
				 * spin on the same descriptor forever.
				 */
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 1);
				if (aup->speed == 4000000)
					skb_put(skb, count);
				else
					skb_put(skb, count - 2);
				skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
							count - 2);
				skb->dev = dev;
				skb_reset_mac_header(skb);
				skb->protocol = htons(ETH_P_IRDA);
				netif_rx(skb);
				prxd->count_0 = 0;
				prxd->count_1 = 0;
			}
		}
		/* return the descriptor to the controller */
		prxd->flags |= AU_OWN;
		aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
		irda_write(aup, IR_RING_PROMPT, 0);

		/* next descriptor */
		prxd = aup->rx_ring[aup->rx_head];
		flags = prxd->flags;
	}
	return 0;
}

static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct au1k_private *aup = netdev_priv(dev);

	irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */

	au1k_irda_rx(dev);
	au1k_tx_ack(dev);

	return IRQ_HANDLED;
}

static int au1k_init(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	u32 enable, ring_address;
	int i;

	enable = IR_HC | IR_CE | IR_C;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	enable |= IR_BE;
#endif
	aup->tx_head = 0;
	aup->tx_tail = 0;
	aup->rx_head = 0;

	for (i = 0; i < NUM_IR_DESC; i++)
		aup->rx_ring[i]->flags = AU_OWN;

	irda_write(aup, IR_ENABLE, enable);
	msleep(20);

	/* disable PHY */
	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
	msleep(20);

	irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);

	ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
	irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
	irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);

	irda_write(aup, IR_RING_SIZE,
				(RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));

	irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN);
	irda_write(aup, IR_RING_ADDR_CMPR, 0);

	au1k_irda_set_speed(dev, 9600);
	return 0;
}
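/*
 * The ring base must be 1 KB aligned: IR_RING_BASE_ADDR_L takes
 * physical address bits 25:10 and IR_RING_BASE_ADDR_H the bits above
 * that, so bits 9:0 are implicitly zero. For example, a ring at
 * physical 0x00402400 is programmed as BASE_ADDR_H = 0x0 and
 * BASE_ADDR_L = 0x1009 (0x00402400 >> 10).
 */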

static int au1k_irda_start(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	char hwname[32];
	int retval;

	retval = au1k_init(dev);
	if (retval) {
		printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
		return retval;
	}

	retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n",
				dev->name, aup->irq_tx);
		return retval;
	}
	retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		free_irq(aup->irq_tx, dev);
		printk(KERN_ERR "%s: unable to get IRQ %d\n",
				dev->name, aup->irq_rx);
		return retval;
	}

	/* Give self a hardware name */
	sprintf(hwname, "Au1000 SIR/FIR");
	aup->irlap = irlap_open(dev, &aup->qos, hwname);
	netif_start_queue(dev);

	/* int enable */
	irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);

	/* power up */
	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);

	aup->timer.expires = RUN_AT((3 * HZ));
	aup->timer.data = (unsigned long)dev;
	return 0;
}

static int au1k_irda_stop(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);

	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);

	/* disable interrupts */
	irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
	irda_write(aup, IR_CONFIG_1, 0);
	irda_write(aup, IR_ENABLE, 0);	/* disable clock */

	if (aup->irlap) {
		irlap_close(aup->irlap);
		aup->irlap = NULL;
	}

	netif_stop_queue(dev);
	del_timer(&aup->timer);

	/* disable the interrupt */
	free_irq(aup->irq_tx, dev);
	free_irq(aup->irq_rx, dev);

	return 0;
}

/*
 * Au1000 transmit routine.
 */
static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);
	volatile struct ring_dest *ptxd;
	struct db_dest *pDB;
	u32 len, flags;

	if (speed != aup->speed && speed != -1)
		aup->newspeed = speed;

	if ((skb->len == 0) && (aup->newspeed)) {
		if (aup->tx_tail == aup->tx_head) {
			au1k_irda_set_speed(dev, speed);
			aup->newspeed = 0;
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	ptxd = aup->tx_ring[aup->tx_head];
	flags = ptxd->flags;

	if (flags & AU_OWN) {
		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	}

	pDB = aup->tx_db_inuse[aup->tx_head];

#if 0
	if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
		printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
				irda_read(aup, IR_RX_BYTE_CNT));
	}
#endif

	if (aup->speed == 4000000) {
		/* FIR */
		skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
		ptxd->count_0 = skb->len & 0xff;
		ptxd->count_1 = (skb->len >> 8) & 0xff;
	} else {
		/* SIR */
		len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
		ptxd->count_0 = len & 0xff;
		ptxd->count_1 = (len >> 8) & 0xff;
		ptxd->flags |= IR_DIS_CRC;
	}
	ptxd->flags |= AU_OWN;
	wmb();

	irda_write(aup, IR_CONFIG_1,
		   irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
	irda_write(aup, IR_RING_PROMPT, 0);

	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
	return NETDEV_TX_OK;
}
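/*
 * In SIR mode the frame cannot be DMA'd raw: async_wrap_skb() renders
 * it into the buffer in IrLAP async framing (BOF 0xC0, byte-stuffed
 * payload plus 16-bit FCS, EOF 0xC1) and returns the wrapped length,
 * which is why the descriptor count comes from "len" rather than
 * skb->len. Presumably IR_DIS_CRC (going by its name) then keeps the
 * hardware from appending a second CRC on top of the software FCS.
 */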

/*
 * The Tx ring has been full longer than the watchdog timeout
 * value. The transmitter is probably hung.
 */
static void au1k_tx_timeout(struct net_device *dev)
{
	u32 speed;
	struct au1k_private *aup = netdev_priv(dev);

	printk(KERN_ERR "%s: tx timeout\n", dev->name);
	speed = aup->speed;
	aup->speed = 0;
	au1k_irda_set_speed(dev, speed);
	aup->tx_full = 0;
	netif_wake_queue(dev);
}

static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct au1k_private *aup = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (aup->open)
				ret = au1k_irda_set_speed(dev,
						rq->ifr_baudrate);
			else {
				printk(KERN_ERR "%s ioctl: !netif_running\n",
						dev->name);
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		rq->ifr_receiving = 0;
		break;
	default:
		break;
	}
	return ret;
}

static const struct net_device_ops au1k_irda_netdev_ops = {
	.ndo_open		= au1k_irda_start,
	.ndo_stop		= au1k_irda_stop,
	.ndo_start_xmit		= au1k_irda_hard_xmit,
	.ndo_tx_timeout		= au1k_tx_timeout,
	.ndo_do_ioctl		= au1k_irda_ioctl,
};

static int __devinit au1k_irda_net_init(struct net_device *dev)
{
	struct au1k_private *aup = netdev_priv(dev);
	struct db_dest *pDB, *pDBfree;
	int i, err, retval = 0;
	dma_addr_t temp;

	err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
	if (err)
		goto out1;

	dev->netdev_ops = &au1k_irda_netdev_ops;

	irda_init_max_qos_capabilies(&aup->qos);

	/* The only value we must override is the baudrate */
	aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
		IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);

	aup->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&aup->qos);

	retval = -ENOMEM;

	/* Tx ring follows rx ring + 512 bytes */
	/* we need a 1k aligned buffer */
	aup->rx_ring[0] = (struct ring_dest *)
		dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
			  &temp);
	if (!aup->rx_ring[0])
		goto out2;

	/* allocate the data buffers */
	aup->db[0].vaddr =
		dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
	if (!aup->db[0].vaddr)
		goto out3;

	setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);

	pDBfree = NULL;
	pDB = aup->db;
	for (i = 0; i < (2 * NUM_IR_DESC); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr =
			(u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	/* attach a data buffer to each descriptor */
	for (i = 0; i < NUM_IR_DESC; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB)
			goto out3;
		aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
		aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
		aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
		aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
		aup->rx_db_inuse[i] = pDB;
	}
	for (i = 0; i < NUM_IR_DESC; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB)
			goto out3;
		aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
		aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
		aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
		aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
		aup->tx_ring[i]->count_0 = 0;
		aup->tx_ring[i]->count_1 = 0;
		aup->tx_ring[i]->flags = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	return 0;

out3:
	dma_free((void *)aup->rx_ring[0],
		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
out2:
	kfree(aup->rx_buff.head);
out1:
	printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
	return retval;
}
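/*
 * Layout sanity check for the "+ 512" above: one ring is
 * MAX_NUM_IR_DESC * sizeof(struct ring_dest) = 64 * 8 = 512 bytes, so
 * the tx ring starts exactly one ring past the rx ring inside the
 * single 1024-byte allocation, and dma_alloc()'s page-granular
 * allocation also guarantees the 1 KB alignment the hardware needs.
 */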

static int __devinit au1k_irda_probe(struct platform_device *pdev)
{
	struct au1k_private *aup;
	struct net_device *dev;
	struct resource *r;
	int err;

	dev = alloc_irdadev(sizeof(struct au1k_private));
	if (!dev)
		return -ENOMEM;

	aup = netdev_priv(dev);

	aup->platdata = pdev->dev.platform_data;

	err = -EINVAL;
	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r)
		goto out;

	aup->irq_tx = r->start;

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (!r)
		goto out;

	aup->irq_rx = r->start;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		goto out;

	err = -EBUSY;
	aup->ioarea = request_mem_region(r->start, r->end - r->start + 1,
					 pdev->name);
	if (!aup->ioarea)
		goto out;

	aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1);
	if (!aup->iobase)
		goto out2;

	dev->irq = aup->irq_rx;

	err = au1k_irda_net_init(dev);
	if (err)
		goto out3;
	err = register_netdev(dev);
	if (err)
		goto out4;

	platform_set_drvdata(pdev, dev);

	printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
	return 0;

out4:
	dma_free((void *)aup->db[0].vaddr,
		 MAX_BUF_SIZE * 2 * NUM_IR_DESC);
	dma_free((void *)aup->rx_ring[0],
		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
	kfree(aup->rx_buff.head);
out3:
	iounmap(aup->iobase);
out2:
	release_resource(aup->ioarea);
	kfree(aup->ioarea);
out:
	free_netdev(dev);
	return err;
}
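/*
 * A hedged sketch of the board-side registration this probe expects
 * (resource order matters: TX IRQ, RX IRQ, then the register block;
 * the numbers below are placeholders, not real Au1000 values):
 *
 *	static struct resource au1k_irda_res[] = {
 *		{ .start = 95, .end = 95, .flags = IORESOURCE_IRQ },
 *		{ .start = 96, .end = 96, .flags = IORESOURCE_IRQ },
 *		{ .start = 0x10300000, .end = 0x103000ff,
 *		  .flags = IORESOURCE_MEM },
 *	};
 *
 *	static struct platform_device au1k_irda_dev = {
 *		.name		= "au1000-irda",
 *		.id		= -1,
 *		.resource	= au1k_irda_res,
 *		.num_resources	= ARRAY_SIZE(au1k_irda_res),
 *	};
 */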

static int __devexit au1k_irda_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct au1k_private *aup = netdev_priv(dev);

	unregister_netdev(dev);

	dma_free((void *)aup->db[0].vaddr,
		 MAX_BUF_SIZE * 2 * NUM_IR_DESC);
	dma_free((void *)aup->rx_ring[0],
		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
	kfree(aup->rx_buff.head);

	iounmap(aup->iobase);
	release_resource(aup->ioarea);
	kfree(aup->ioarea);

	free_netdev(dev);

	return 0;
}

static struct platform_driver au1k_irda_driver = {
	.driver	= {
		.name	= "au1000-irda",
		.owner	= THIS_MODULE,
	},
	.probe		= au1k_irda_probe,
	.remove		= __devexit_p(au1k_irda_remove),
};

static int __init au1k_irda_load(void)
{
	return platform_driver_register(&au1k_irda_driver);
}

static void __exit au1k_irda_unload(void)
{
	platform_driver_unregister(&au1k_irda_driver);
}

MODULE_AUTHOR("Pete Popov <[email protected]>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");

module_init(au1k_irda_load);
module_exit(au1k_irda_unload);