Linux Kernel 3.7.1
spi-davinci.c

/*
 * Copyright (C) 2009 Texas Instruments.
 * Copyright (C) 2010 EF Johnson Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <linux/platform_data/spi-davinci.h>

#define SPI_NO_RESOURCE		((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT	2

#define CS_DEFAULT		0xFF

#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F
#define SPIINT_MASKINT		0x0000015F
#define SPI_INTLVL_1		0x000001FF
#define SPI_INTLVL_0		0x00000000

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_POWERDOWN_MASK	BIT(8)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
				| SPIFLG_OVRRUN_MASK)

#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller registers */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50

/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;
	struct clk		*clk;

	u8			version;
	resource_size_t		pbase;
	void __iomem		*base;
	u32			irq;
	struct completion	done;

	const void		*tx;
	void			*rx;
	int			rcount;
	int			wcount;

	struct dma_chan		*dma_rx;
	struct dma_chan		*dma_tx;
	int			dma_rx_chnum;
	int			dma_tx_chnum;

	struct davinci_spi_platform_data *pdata;

	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	u8			bytes_per_word[SPI_MAX_CHIPSELECT];
};

static struct davinci_spi_config davinci_spi_default_cfg;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u8 *rx = dspi->rx;
		*rx++ = (u8)data;
		dspi->rx = rx;
	}
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
{
	if (dspi->rx) {
		u16 *rx = dspi->rx;
		*rx++ = (u16)data;
		dspi->rx = rx;
	}
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
{
	u32 data = 0;
	if (dspi->tx) {
		const u8 *tx = dspi->tx;
		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
{
	u32 data = 0;
	if (dspi->tx) {
		const u16 *tx = dspi->tx;
		data = *tx++;
		dspi->tx = tx;
	}
	return data;
}

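/*
 * Worked example (illustrative, not part of the original source): for an
 * 8-bit transfer of the two bytes { 0xA5, 0x5A }, successive calls to
 * davinci_spi_tx_buf_u8() return 0xA5 and then 0x5A, advancing dspi->tx by
 * one byte each time; davinci_spi_rx_buf_u8() stores the low byte of each
 * received word and advances dspi->rx the same way.  The u16 variants step
 * the pointers two bytes at a time, which is why
 * davinci_spi_setup_transfer() below records 1 or 2 in bytes_per_word[]
 * for the chip select in use.
 */
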
static inline void set_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v |= bits;
	iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
	u32 v = ioread32(addr);

	v &= ~bits;
	iowrite32(v, addr);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	u8 chip_sel = spi->chip_select;
	u16 spidat1 = CS_DEFAULT;
	bool gpio_chipsel = false;

	dspi = spi_master_get_devdata(spi->master);
	pdata = dspi->pdata;

	if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
				pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
		gpio_chipsel = true;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (gpio_chipsel) {
		if (value == BITBANG_CS_ACTIVE)
			gpio_set_value(pdata->chip_sel[chip_sel], 0);
		else
			gpio_set_value(pdata->chip_sel[chip_sel], 1);
	} else {
		if (value == BITBANG_CS_ACTIVE) {
			spidat1 |= SPIDAT1_CSHOLD_MASK;
			spidat1 &= ~(0x1 << chip_sel);
		}

		iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
	}
}

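/*
 * Example (an illustrative sketch, not taken from the original file): the
 * routine above drives a board GPIO when pdata->chip_sel[] names one, and
 * otherwise clears that chip select's bit in the CS field held in the
 * upper half of SPIDAT1 so the controller asserts its native CS line.  A
 * board might describe one native and one GPIO chip select roughly like
 * this, where the GPIO number 88 is purely hypothetical:
 *
 *	static u8 board_spi0_cs[] = { SPI_INTERN_CS, 88 };
 *
 *	static struct davinci_spi_platform_data board_spi0_pdata = {
 *		.version	= SPI_VERSION_2,
 *		.num_chipselect	= ARRAY_SIZE(board_spi0_cs),
 *		.chip_sel	= board_spi0_cs,
 *	};
 */
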
static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
							u32 max_speed_hz)
{
	int ret;

	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);

	if (ret < 3 || ret > 256)
		return -EINVAL;

	return ret - 1;
}

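/*
 * Worked example (illustrative): with a hypothetical 150 MHz functional
 * clock on dspi->clk and max_speed_hz = 10 MHz,
 * DIV_ROUND_UP(150000000, 10000000) = 15, so the function returns 14.
 * The controller divides its functional clock by PRESCALE + 1, which is
 * why the divider is returned minus one; programming 14 into the SPIFMTn
 * PRESCALE field then yields exactly 10 MHz.  Dividers outside 3..256 are
 * rejected, so the usable bit clock runs from roughly clk/256 up to clk/3.
 */
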
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	struct davinci_spi_config *spicfg;
	u8 bits_per_word = 0;
	u32 hz = 0, spifmt = 0;
	int prescale;

	dspi = spi_master_get_devdata(spi->master);
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then use the default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8 && bits_per_word >= 2) {
		dspi->get_rx = davinci_spi_rx_buf_u8;
		dspi->get_tx = davinci_spi_tx_buf_u8;
		dspi->bytes_per_word[spi->chip_select] = 1;
	} else if (bits_per_word <= 16 && bits_per_word >= 2) {
		dspi->get_rx = davinci_spi_rx_buf_u16;
		dspi->get_tx = davinci_spi_tx_buf_u16;
		dspi->bytes_per_word[spi->chip_select] = 2;
	} else
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	/* Set up SPIFMTn register, unique to this chipselect. */

	prescale = davinci_spi_get_prescale(dspi, hz);
	if (prescale < 0)
		return prescale;

	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);

	if (spi->mode & SPI_LSB_FIRST)
		spifmt |= SPIFMT_SHIFTDIR_MASK;

	if (spi->mode & SPI_CPOL)
		spifmt |= SPIFMT_POLARITY_MASK;

	if (!(spi->mode & SPI_CPHA))
		spifmt |= SPIFMT_PHASE_MASK;

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */

	if (dspi->version == SPI_VERSION_2) {

		u32 delay = 0;

		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
						& SPIFMT_WDELAY_MASK);

		if (spicfg->odd_parity)
			spifmt |= SPIFMT_ODD_PARITY_MASK;

		if (spicfg->parity_enable)
			spifmt |= SPIFMT_PARITYENA_MASK;

		if (spicfg->timer_disable) {
			spifmt |= SPIFMT_DISTIMER_MASK;
		} else {
			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
						& SPIDELAY_C2TDELAY_MASK;
			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
						& SPIDELAY_T2CDELAY_MASK;
		}

		if (spi->mode & SPI_READY) {
			spifmt |= SPIFMT_WAITENA_MASK;
			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
						& SPIDELAY_T2EDELAY_MASK;
			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
						& SPIDELAY_C2EDELAY_MASK;
		}

		iowrite32(delay, dspi->base + SPIDELAY);
	}

	iowrite32(spifmt, dspi->base + SPIFMT0);

	return 0;
}

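/*
 * Example (an illustrative sketch, not taken from the original file): the
 * per-device options consumed above come from a struct davinci_spi_config,
 * declared in the DaVinci SPI platform-data header and handed in through
 * spi_board_info.controller_data.  The values below are arbitrary; only
 * the field names are taken from the references in
 * davinci_spi_setup_transfer() and davinci_spi_bufs():
 *
 *	static struct davinci_spi_config board_codec_spicfg = {
 *		.io_type	= SPI_IO_TYPE_DMA,
 *		.wdelay		= 0,
 *		.parity_enable	= 0,
 *		.timer_disable	= 0,
 *		.c2tdelay	= 8,
 *		.t2cdelay	= 8,
 *	};
 */
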
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval = 0;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;

	dspi = spi_master_get_devdata(spi->master);
	pdata = dspi->pdata;

	/* if bits per word length is zero then set it to the default of 8 */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!(spi->mode & SPI_NO_CS)) {
		if ((pdata->chip_sel == NULL) ||
		    (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
			set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);

	}

	if (spi->mode & SPI_READY)
		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);

	if (spi->mode & SPI_LOOP)
		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
	else
		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);

	return retval;
}

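/*
 * Example (illustrative sketch): the mode bits handled above come from the
 * client device's board description.  A hypothetical registration using
 * the davinci_spi_config sketched after davinci_spi_setup_transfer() might
 * look like this ("some-spi-codec" is a made-up modalias):
 *
 *	static struct spi_board_info board_spi0_info[] = {
 *		{
 *			.modalias	 = "some-spi-codec",
 *			.controller_data = &board_codec_spicfg,
 *			.mode		 = SPI_MODE_0 | SPI_READY,
 *			.max_speed_hz	 = 10000000,
 *			.bus_num	 = 0,
 *			.chip_select	 = 1,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi0_info,
 *				ARRAY_SIZE(board_spi0_info));
 *
 * SPI_READY is only advertised on SPI_VERSION_2 controllers (see the probe
 * routine), while SPI_LOOP and SPI_NO_CS map onto the loopback and
 * internal-chip-select handling above.
 */
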
static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
{
	struct device *sdev = dspi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_dbg(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_dbg(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_dbg(sdev, "SPI Bit error\n");
		return -EIO;
	}

	if (dspi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_dbg(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_dbg(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_dbg(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_dbg(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}

static int davinci_spi_process_events(struct davinci_spi *dspi)
{
	u32 buf, status, errors = 0, spidat1;

	buf = ioread32(dspi->base + SPIBUF);

	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
		dspi->get_rx(buf & 0xFFFF, dspi);
		dspi->rcount--;
	}

	status = ioread32(dspi->base + SPIFLG);

	if (unlikely(status & SPIFLG_ERROR_MASK)) {
		errors = status & SPIFLG_ERROR_MASK;
		goto out;
	}

	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
		spidat1 = ioread32(dspi->base + SPIDAT1);
		dspi->wcount--;
		spidat1 &= ~0xFFFF;
		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
		iowrite32(spidat1, dspi->base + SPIDAT1);
	}

out:
	return errors;
}

static void davinci_spi_dma_rx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->rcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

static void davinci_spi_dma_tx_callback(void *data)
{
	struct davinci_spi *dspi = (struct davinci_spi *)data;

	dspi->wcount = 0;

	if (!dspi->wcount && !dspi->rcount)
		complete(&dspi->done);
}

static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *dspi;
	int data_type, ret = -ENOMEM;
	u32 tx_data, spidat1;
	u32 errors = 0;
	struct davinci_spi_config *spicfg;
	struct davinci_spi_platform_data *pdata;
	unsigned uninitialized_var(rx_buf_count);
	void *dummy_buf = NULL;
	struct scatterlist sg_rx, sg_tx;

	dspi = spi_master_get_devdata(spi->master);
	pdata = dspi->pdata;
	spicfg = (struct davinci_spi_config *)spi->controller_data;
	if (!spicfg)
		spicfg = &davinci_spi_default_cfg;

	/* convert len to words based on bits_per_word */
	data_type = dspi->bytes_per_word[spi->chip_select];

	dspi->tx = t->tx_buf;
	dspi->rx = t->rx_buf;
	dspi->wcount = t->len / data_type;
	dspi->rcount = dspi->wcount;

	spidat1 = ioread32(dspi->base + SPIDAT1);

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	INIT_COMPLETION(dspi->done);

	if (spicfg->io_type == SPI_IO_TYPE_INTR)
		set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if (spicfg->io_type != SPI_IO_TYPE_DMA) {
		/* start the transfer */
		dspi->wcount--;
		tx_data = dspi->get_tx(dspi);
		spidat1 &= 0xFFFF0000;
		spidat1 |= tx_data & 0xFFFF;
		iowrite32(spidat1, dspi->base + SPIDAT1);
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = (unsigned long)dspi->pbase + SPIBUF,
			.src_addr_width = data_type,
			.src_maxburst = 1,
		};
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
			.dst_addr_width = data_type,
			.dst_maxburst = 1,
		};
		struct dma_async_tx_descriptor *rxdesc;
		struct dma_async_tx_descriptor *txdesc;
		void *buf;

		dummy_buf = kzalloc(t->len, GFP_KERNEL);
		if (!dummy_buf)
			goto err_alloc_dummy_buf;

		dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
		dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);

		sg_init_table(&sg_rx, 1);
		if (!t->rx_buf)
			buf = dummy_buf;
		else
			buf = t->rx_buf;
		t->rx_dma = dma_map_single(&spi->dev, buf,
				t->len, DMA_FROM_DEVICE);
		if (!t->rx_dma) {
			ret = -EFAULT;
			goto err_rx_map;
		}
		sg_dma_address(&sg_rx) = t->rx_dma;
		sg_dma_len(&sg_rx) = t->len;

		sg_init_table(&sg_tx, 1);
		if (!t->tx_buf)
			buf = dummy_buf;
		else
			buf = (void *)t->tx_buf;
		t->tx_dma = dma_map_single(&spi->dev, buf,
				t->len, DMA_TO_DEVICE);
		if (!t->tx_dma) {
			ret = -EFAULT;
			goto err_tx_map;
		}
		sg_dma_address(&sg_tx) = t->tx_dma;
		sg_dma_len(&sg_tx) = t->len;

		rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
				&sg_rx, 1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto err_desc;

		txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
				&sg_tx, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto err_desc;

		rxdesc->callback = davinci_spi_dma_rx_callback;
		rxdesc->callback_param = (void *)dspi;
		txdesc->callback = davinci_spi_dma_tx_callback;
		txdesc->callback_param = (void *)dspi;

		if (pdata->cshold_bug)
			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);

		dmaengine_submit(rxdesc);
		dmaengine_submit(txdesc);

		dma_async_issue_pending(dspi->dma_rx);
		dma_async_issue_pending(dspi->dma_tx);

		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
	}

	/* Wait for the transfer to complete */
	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
		wait_for_completion_interruptible(&(dspi->done));
	} else {
		while (dspi->rcount > 0 || dspi->wcount > 0) {
			errors = davinci_spi_process_events(dspi);
			if (errors)
				break;
			cpu_relax();
		}
	}

	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);

		dma_unmap_single(&spi->dev, t->rx_dma,
				t->len, DMA_FROM_DEVICE);
		dma_unmap_single(&spi->dev, t->tx_dma,
				t->len, DMA_TO_DEVICE);
		kfree(dummy_buf);
	}

	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	if (errors) {
		ret = davinci_spi_check_error(dspi, errors);
		WARN(!ret, "%s: error reported but no error found!\n",
							dev_name(&spi->dev));
		return ret;
	}

	if (dspi->rcount != 0 || dspi->wcount != 0) {
		dev_err(&spi->dev, "SPI data transfer error\n");
		return -EIO;
	}

	return t->len;

err_desc:
	dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
err_tx_map:
	dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
err_rx_map:
	kfree(dummy_buf);
err_alloc_dummy_buf:
	return ret;
}

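/*
 * Example (illustrative): client drivers never call this routine directly;
 * the probe code installs it as the spi_bitbang txrx_bufs hook, so an
 * ordinary synchronous transfer from a protocol driver ends up here.  A
 * minimal full-duplex exchange, assuming a hypothetical struct spi_device
 * pointer "spi" owned by that driver, would be:
 *
 *	u8 txbuf[4] = { 0x01, 0x02, 0x03, 0x04 };
 *	u8 rxbuf[4];
 *	struct spi_transfer xfer = {
 *		.tx_buf	= txbuf,
 *		.rx_buf	= rxbuf,
 *		.len	= sizeof(txbuf),
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 *
 * On success spi_sync() returns 0; a shortfall in rcount/wcount is turned
 * into -EIO above, and SPIFLG error bits are decoded by
 * davinci_spi_check_error().
 */
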
static irqreturn_t davinci_spi_irq(s32 irq, void *data)
{
	struct davinci_spi *dspi = data;
	int status;

	status = davinci_spi_process_events(dspi);
	if (unlikely(status != 0))
		clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);

	if ((!dspi->rcount && !dspi->wcount) || status)
		complete(&dspi->done);

	return IRQ_HANDLED;
}

static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
	dma_cap_mask_t mask;
	struct device *sdev = dspi->bitbang.master->dev.parent;
	int r;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
					&dspi->dma_rx_chnum);
	if (!dspi->dma_rx) {
		dev_err(sdev, "request RX DMA channel failed\n");
		r = -ENODEV;
		goto rx_dma_failed;
	}

	dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
					&dspi->dma_tx_chnum);
	if (!dspi->dma_tx) {
		dev_err(sdev, "request TX DMA channel failed\n");
		r = -ENODEV;
		goto tx_dma_failed;
	}

	return 0;

tx_dma_failed:
	dma_release_channel(dspi->dma_rx);
rx_dma_failed:
	return r;
}

static int __devinit davinci_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct davinci_spi *dspi;
	struct davinci_spi_platform_data *pdata;
	struct resource *r, *mem;
	resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
	resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
	int i = 0, ret = 0;
	u32 spipc0;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		ret = -ENODEV;
		goto err;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
	if (master == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(&pdev->dev, master);

	dspi = spi_master_get_devdata(master);
	if (dspi == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENOENT;
		goto free_master;
	}

	dspi->pbase = r->start;
	dspi->pdata = pdata;

	mem = request_mem_region(r->start, resource_size(r), pdev->name);
	if (mem == NULL) {
		ret = -EBUSY;
		goto free_master;
	}

	dspi->base = ioremap(r->start, resource_size(r));
	if (dspi->base == NULL) {
		ret = -ENOMEM;
		goto release_region;
	}

	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		ret = -EINVAL;
		goto unmap_io;
	}

	ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev),
									dspi);
	if (ret)
		goto unmap_io;

	dspi->bitbang.master = spi_master_get(master);
	if (dspi->bitbang.master == NULL) {
		ret = -ENODEV;
		goto irq_free;
	}

	dspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(dspi->clk)) {
		ret = -ENODEV;
		goto put_master;
	}
	clk_enable(dspi->clk);

	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->setup = davinci_spi_setup;

	dspi->bitbang.chipselect = davinci_spi_chipselect;
	dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;

	dspi->version = pdata->version;

	dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
	if (dspi->version == SPI_VERSION_2)
		dspi->bitbang.flags |= SPI_READY;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r)
		dma_rx_chan = r->start;
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r)
		dma_tx_chan = r->start;

	dspi->bitbang.txrx_bufs = davinci_spi_bufs;
	if (dma_rx_chan != SPI_NO_RESOURCE &&
	    dma_tx_chan != SPI_NO_RESOURCE) {
		dspi->dma_rx_chnum = dma_rx_chan;
		dspi->dma_tx_chnum = dma_tx_chan;

		ret = davinci_spi_request_dma(dspi);
		if (ret)
			goto free_clk;

		dev_info(&pdev->dev, "DMA: supported\n");
		dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
				"event queue: %d\n", dma_rx_chan, dma_tx_chan,
				pdata->dma_event_q);
	}

	dspi->get_rx = davinci_spi_rx_buf_u8;
	dspi->get_tx = davinci_spi_tx_buf_u8;

	init_completion(&dspi->done);

	/* Reset In/OUT SPI module */
	iowrite32(0, dspi->base + SPIGCR0);
	udelay(100);
	iowrite32(1, dspi->base + SPIGCR0);

	/* Set up SPIPC0.  CS and ENA init is done in davinci_spi_setup */
	spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
	iowrite32(spipc0, dspi->base + SPIPC0);

	/* initialize chip selects */
	if (pdata->chip_sel) {
		for (i = 0; i < pdata->num_chipselect; i++) {
			if (pdata->chip_sel[i] != SPI_INTERN_CS)
				gpio_direction_output(pdata->chip_sel[i], 1);
		}
	}

	if (pdata->intr_line)
		iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
	else
		iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);

	iowrite32(CS_DEFAULT, dspi->base + SPIDEF);

	/* master mode default */
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);

	ret = spi_bitbang_start(&dspi->bitbang);
	if (ret)
		goto free_dma;

	dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);

	return ret;

free_dma:
	dma_release_channel(dspi->dma_rx);
	dma_release_channel(dspi->dma_tx);
free_clk:
	clk_disable(dspi->clk);
	clk_put(dspi->clk);
put_master:
	spi_master_put(master);
irq_free:
	free_irq(dspi->irq, dspi);
unmap_io:
	iounmap(dspi->base);
release_region:
	release_mem_region(dspi->pbase, resource_size(r));
free_master:
	kfree(master);
err:
	return ret;
}

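/*
 * Example (an illustrative sketch, not taken from the original file): the
 * probe routine above looks for the register block as memory resource 0,
 * the interrupt as IRQ resource 0 and, optionally, the receive and
 * transmit EDMA channels as IORESOURCE_DMA entries 0 and 1.  On real
 * boards this is provided by the mach-davinci platform code; a
 * hypothetical registration with made-up addresses and channel numbers,
 * reusing the board_spi0_pdata sketched earlier, could look like:
 *
 *	static struct resource board_spi0_resources[] = {
 *		{
 *			.start	= 0x01c66000,
 *			.end	= 0x01c66fff,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 20,
 *			.end	= 20,
 *			.flags	= IORESOURCE_IRQ,
 *		}, {
 *			.start	= 14,
 *			.flags	= IORESOURCE_DMA,
 *		}, {
 *			.start	= 15,
 *			.flags	= IORESOURCE_DMA,
 *		},
 *	};
 *
 *	static struct platform_device board_spi0_device = {
 *		.name		= "spi_davinci",
 *		.id		= 0,
 *		.resource	= board_spi0_resources,
 *		.num_resources	= ARRAY_SIZE(board_spi0_resources),
 *		.dev		= {
 *			.platform_data = &board_spi0_pdata,
 *		},
 *	};
 *
 *	platform_device_register(&board_spi0_device);
 *
 * Entries 2 and 3 carry the RX and TX EDMA channel numbers that end up in
 * dma_rx_chnum/dma_tx_chnum and are matched by edma_filter_fn.
 */
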
static int __devexit davinci_spi_remove(struct platform_device *pdev)
{
	struct davinci_spi *dspi;
	struct spi_master *master;
	struct resource *r;

	master = dev_get_drvdata(&pdev->dev);
	dspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&dspi->bitbang);

	clk_disable(dspi->clk);
	clk_put(dspi->clk);
	spi_master_put(master);
	free_irq(dspi->irq, dspi);
	iounmap(dspi->base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(dspi->pbase, resource_size(r));

	return 0;
}

static struct platform_driver davinci_spi_driver = {
	.driver = {
		.name = "spi_davinci",
		.owner = THIS_MODULE,
	},
	.probe = davinci_spi_probe,
	.remove = __devexit_p(davinci_spi_remove),
};
module_platform_driver(davinci_spi_driver);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");