dma_lib.c (Linux Kernel 3.7.1)
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Common functions for DMA access on PA Semi PWRficient
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/sched.h>

#include <asm/pasemi_dma.h>

#define MAX_TXCH 64
#define MAX_RXCH 64
#define MAX_FLAGS 64
#define MAX_FUN 8

static struct pasdma_status *dma_status;

static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

static int base_hw_irq;

static int num_txch, num_rxch;

static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */

static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
static DECLARE_BITMAP(flags_free, MAX_FLAGS);
static DECLARE_BITMAP(fun_free, MAX_FUN);

/* pasemi_read_iob_reg - read IOB register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
	return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);

/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
	out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);

/* pasemi_read_mac_reg - read MAC register
 * @intf: MAC interface
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
	return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);

/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
	out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);

/* pasemi_read_dma_reg - read DMA register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
	return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);

/* pasemi_write_dma_reg - write DMA register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
	out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);

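/*
 * Editor's illustrative sketch (not part of the original file): the accessor
 * wrappers above are plain little-endian MMIO reads/writes into the mapped
 * register windows. PAS_DMA_COM_CFG is a register offset used later in this
 * file; the read-modify-write pattern and the example_ function name here are
 * only an illustration, not an existing helper.
 */
static __maybe_unused void example_rmw_dma_cfg(u32 set_bits)
{
	u32 cfg;

	/* read the current DMA common config and OR in the requested bits */
	cfg = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
	pasemi_write_dma_reg(PAS_DMA_COM_CFG, cfg | set_bits);
}
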
static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
	int bit;
	int start, limit;

	switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
	case TXCHAN_EVT0:
		start = 0;
		limit = 10;
		break;
	case TXCHAN_EVT1:
		start = 10;
		limit = MAX_TXCH;
		break;
	default:
		start = 0;
		limit = MAX_TXCH;
		break;
	}
retry:
	bit = find_next_bit(txch_free, MAX_TXCH, start);
	if (bit >= limit)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, txch_free))
		goto retry;

	return bit;
}

static void pasemi_free_tx_chan(int chan)
{
	BUG_ON(test_bit(chan, txch_free));
	set_bit(chan, txch_free);
}

static int pasemi_alloc_rx_chan(void)
{
	int bit;
retry:
	bit = find_first_bit(rxch_free, MAX_RXCH);
	if (bit >= MAX_RXCH)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, rxch_free))
		goto retry;

	return bit;
}

static void pasemi_free_rx_chan(int chan)
{
	BUG_ON(test_bit(chan, rxch_free));
	set_bit(chan, rxch_free);
}

/* pasemi_dma_alloc_chan - Allocate a DMA channel
 * @type: Type of channel to allocate
 * @total_size: Total size of structure to allocate (to allow for more
 *		room behind the structure to be used by the client)
 * @offset: Offset in bytes from start of the total structure to the beginning
 *	    of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
 *	    not the first member of the client structure.
 *
 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
 * type argument specifies whether it's an RX or TX channel, and in the case
 * of TX channels which group it needs to belong to (if any).
 *
 * Returns a pointer to the total structure allocated on success, NULL
 * on failure.
 */
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
			    int total_size, int offset)
{
	void *buf;
	struct pasemi_dmachan *chan;
	int chno;

	BUG_ON(total_size < sizeof(struct pasemi_dmachan));

	buf = kzalloc(total_size, GFP_KERNEL);

	if (!buf)
		return NULL;
	chan = buf + offset;

	chan->priv = buf;

	switch (type & (TXCHAN|RXCHAN)) {
	case RXCHAN:
		chno = pasemi_alloc_rx_chan();
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL,
					       base_hw_irq + num_txch + chno);
		chan->status = &dma_status->rx_sta[chno];
		break;
	case TXCHAN:
		chno = pasemi_alloc_tx_chan(type);
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
		chan->status = &dma_status->tx_sta[chno];
		break;
	}

	chan->chan_type = type;

	return chan;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);

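/*
 * Editor's illustrative sketch (not part of the original file): how a
 * hypothetical client might embed struct pasemi_dmachan in its own per-ring
 * structure and allocate it through pasemi_dma_alloc_chan(). The
 * example_ring type, its count field and the example_ function name are
 * made up; only the pasemi_dma_* call and the RXCHAN type come from this
 * library.
 */
struct example_ring {
	u64 count;			/* hypothetical client-private field */
	struct pasemi_dmachan chan;	/* not first, hence the offset below */
};

static __maybe_unused struct example_ring *example_alloc_ring_struct(void)
{
	struct pasemi_dmachan *chan;

	/* total_size covers the whole client struct, offset locates chan */
	chan = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct example_ring),
				     offsetof(struct example_ring, chan));
	if (!chan)
		return NULL;

	/* container_of() recovers the embedding structure from the channel */
	return container_of(chan, struct example_ring, chan);
}
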
/* pasemi_dma_free_chan - Free a previously allocated channel
 * @chan: Channel to free
 *
 * Frees a previously allocated channel. It will also deallocate any
 * descriptor ring associated with the channel, if allocated.
 */
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
	if (chan->ring_virt)
		pasemi_dma_free_ring(chan);

	switch (chan->chan_type & (RXCHAN|TXCHAN)) {
	case RXCHAN:
		pasemi_free_rx_chan(chan->chno);
		break;
	case TXCHAN:
		pasemi_free_tx_chan(chan->chno);
		break;
	}

	kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);

/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
 * @chan: Channel for which to allocate
 * @ring_size: Ring size in 64-bit (8-byte) words
 *
 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
 * on failure. The passed in struct pasemi_dmachan is updated with the
 * virtual and DMA addresses of the ring.
 */
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
	BUG_ON(chan->ring_virt);

	chan->ring_size = ring_size;

	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
					     ring_size * sizeof(u64),
					     &chan->ring_dma, GFP_KERNEL);

	if (!chan->ring_virt)
		return -ENOMEM;

	memset(chan->ring_virt, 0, ring_size * sizeof(u64));

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);

/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
 * @chan: Channel for which to free the descriptor ring
 *
 * Frees a previously allocated descriptor ring for a channel.
 */
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
	BUG_ON(!chan->ring_virt);

	dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
			  chan->ring_virt, chan->ring_dma);
	chan->ring_virt = NULL;
	chan->ring_size = 0;
	chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);

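/*
 * Editor's illustrative sketch (not part of the original file): pairing
 * pasemi_dma_alloc_ring()/pasemi_dma_free_ring() on a channel obtained from
 * pasemi_dma_alloc_chan(). EXAMPLE_RING_SIZE and the example_ function name
 * are made up; ring_size is in 64-bit words, so 512 one-word descriptors
 * here.
 */
#define EXAMPLE_RING_SIZE 512

static __maybe_unused int example_ring_lifecycle(void)
{
	struct pasemi_dmachan *chan;
	int err;

	/* channel struct used directly, so offset is 0 */
	chan = pasemi_dma_alloc_chan(TXCHAN, sizeof(*chan), 0);
	if (!chan)
		return -ENOMEM;

	err = pasemi_dma_alloc_ring(chan, EXAMPLE_RING_SIZE);
	if (err) {
		pasemi_dma_free_chan(chan);
		return err;
	}

	/* ... fill chan->ring_virt[] with descriptors, run the channel ... */

	/* pasemi_dma_free_chan() would also free the ring; shown explicitly */
	pasemi_dma_free_ring(chan);
	pasemi_dma_free_chan(chan);
	return 0;
}
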
/* pasemi_dma_start_chan - Start a DMA channel
 * @chan: Channel to start
 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
 *
 * Enables (starts) a DMA channel with optional additional arguments.
 */
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
	if (chan->chan_type == RXCHAN)
		pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
	else
		pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);

/* pasemi_dma_stop_chan - Stop a DMA channel
 * @chan: Channel to stop
 *
 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
 * CMDSTA register and waiting on the ACT (active) bit to clear, then
 * finally disabling the whole channel.
 *
 * This function will only try for a short while for the channel to stop;
 * if it doesn't, it will return failure.
 *
 * Returns 1 on success, 0 on failure.
 */
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
	int reg, retries;
	u32 sta;

	if (chan->chan_type == RXCHAN) {
		reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	} else {
		reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	}

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);

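/*
 * Editor's illustrative sketch (not part of the original file): starting a
 * channel and stopping it again. Note that pasemi_dma_stop_chan() returns 1
 * when the channel stopped in time and 0 otherwise, the opposite of the
 * usual 0-on-success kernel convention. The example_ name is made up.
 */
static __maybe_unused int example_start_stop(struct pasemi_dmachan *chan)
{
	/* no extra CCMDSTA/TCMDSTA bits beyond the enable bit */
	pasemi_dma_start_chan(chan, 0);

	/* ... channel is live; descriptors are being processed ... */

	if (!pasemi_dma_stop_chan(chan)) {
		pr_err("example: channel %d did not stop\n", chan->chno);
		return -EBUSY;
	}
	return 0;
}
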
/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
			   dma_addr_t *handle)
{
	return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);

/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
			 dma_addr_t *handle)
{
	dma_free_coherent(&dma_pdev->dev, size, handle, *handle);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);

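/*
 * Editor's illustrative sketch (not part of the original file): allocating
 * and freeing a DMA-coherent data buffer through the wrappers above. The
 * 4096-byte size is arbitrary and the example_ name, buf and handle are
 * local illustrative choices.
 */
static __maybe_unused int example_buffer(struct pasemi_dmachan *chan)
{
	dma_addr_t handle;
	void *buf;

	buf = pasemi_dma_alloc_buf(chan, 4096, &handle);
	if (!buf)
		return -ENOMEM;

	/* ... point descriptors at "handle" and let the engine DMA to "buf" ... */

	pasemi_dma_free_buf(chan, 4096, &handle);
	return 0;
}
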
/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
 *
 * Allocates a flag for use with channel synchronization (event descriptors).
 * Returns allocated flag (0-63), < 0 on error.
 */
int pasemi_dma_alloc_flag(void)
{
	int bit;

retry:
	bit = find_next_bit(flags_free, MAX_FLAGS, 0);
	if (bit >= MAX_FLAGS)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, flags_free))
		goto retry;

	return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_flag);


/* pasemi_dma_free_flag - Deallocates a flag (event)
 * @flag: Flag number to deallocate
 *
 * Frees up a flag so it can be reused for other purposes.
 */
void pasemi_dma_free_flag(int flag)
{
	BUG_ON(test_bit(flag, flags_free));
	BUG_ON(flag >= MAX_FLAGS);
	set_bit(flag, flags_free);
}
EXPORT_SYMBOL(pasemi_dma_free_flag);


/* pasemi_dma_set_flag - Sets a flag (event) to 1
 * @flag: Flag number to set active
 *
 * Sets the flag provided to 1.
 */
void pasemi_dma_set_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	if (flag < 32)
		pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
	else
		pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << flag);
}
EXPORT_SYMBOL(pasemi_dma_set_flag);

/* pasemi_dma_clear_flag - Sets a flag (event) to 0
 * @flag: Flag number to set inactive
 *
 * Sets the flag provided to 0.
 */
void pasemi_dma_clear_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	if (flag < 32)
		pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
	else
		pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << flag);
}
EXPORT_SYMBOL(pasemi_dma_clear_flag);

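/*
 * Editor's illustrative sketch (not part of the original file): allocating a
 * synchronization flag, toggling it from the CPU side and releasing it.
 * Event descriptors that set or wait on this flag number are out of scope
 * here; the example_ name is made up.
 */
static __maybe_unused int example_flag(void)
{
	int flag;

	flag = pasemi_dma_alloc_flag();
	if (flag < 0)
		return flag;

	pasemi_dma_clear_flag(flag);	/* start with the event deasserted */
	/* ... queue descriptors that set or wait on this flag ... */
	pasemi_dma_set_flag(flag);	/* assert the event from software */

	pasemi_dma_free_flag(flag);
	return 0;
}
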
/* pasemi_dma_alloc_fun - Allocate a function engine
 *
 * Allocates a function engine to use for crypto/checksum offload.
 * Returns allocated engine (0-7), < 0 on error.
 */
int pasemi_dma_alloc_fun(void)
{
	int bit;

retry:
	bit = find_next_bit(fun_free, MAX_FUN, 0);
	if (bit >= MAX_FUN)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, fun_free))
		goto retry;

	return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_fun);


/* pasemi_dma_free_fun - Deallocates a function engine
 * @fun: Engine number to deallocate
 *
 * Frees up a function engine so it can be used for other purposes.
 */
void pasemi_dma_free_fun(int fun)
{
	BUG_ON(test_bit(fun, fun_free));
	BUG_ON(fun >= MAX_FUN);
	set_bit(fun, fun_free);
}
EXPORT_SYMBOL(pasemi_dma_free_fun);

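/*
 * Editor's illustrative sketch (not part of the original file): reserving a
 * crypto/checksum function engine number and releasing it again. How the
 * engine number gets encoded into function descriptors is left to the
 * (hypothetical) caller; the example_ name is made up.
 */
static __maybe_unused int example_fun(void)
{
	int fun;

	fun = pasemi_dma_alloc_fun();
	if (fun < 0)
		return fun;

	/* ... build function descriptors that reference engine "fun" ... */

	pasemi_dma_free_fun(fun);
	return 0;
}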

static void *map_onedev(struct pci_dev *p, int index)
{
	struct device_node *dn;
	void __iomem *ret;

	dn = pci_device_to_OF_node(p);
	if (!dn)
		goto fallback;

	ret = of_iomap(dn, index);
	if (!ret)
		goto fallback;

	return ret;
fallback:
	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
	static DEFINE_SPINLOCK(init_lock);
	struct pci_dev *iob_pdev;
	struct pci_dev *pdev;
	struct resource res;
	struct device_node *dn;
	int i, intf, err = 0;
	unsigned long timeout;
	u32 tmp;

	if (!machine_is(pasemi))
		return -ENODEV;

	spin_lock(&init_lock);

	/* Make sure we haven't already initialized */
	if (dma_pdev)
		goto out;

	iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!iob_pdev) {
		BUG();
		printk(KERN_WARNING "Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out;
	}
	iob_regs = map_onedev(iob_pdev, 0);

	dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!dma_pdev) {
		BUG();
		printk(KERN_WARNING "Can't find DMA controller\n");
		err = -ENODEV;
		goto out;
	}
	dma_regs = map_onedev(dma_pdev, 0);
	base_hw_irq = virq_to_hw(dma_pdev->irq);

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
	num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
	num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

	intf = 0;
	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	dn = pci_device_to_OF_node(iob_pdev);
	if (dn)
		err = of_address_to_resource(dn, 1, &res);
	if (!dn || err) {
		/* Fallback for old firmware */
		res.start = 0xfd800000;
		res.end = res.start + 0x1000;
	}
	dma_status = __ioremap(res.start, resource_size(&res), 0);
	pci_dev_put(iob_pdev);

	for (i = 0; i < MAX_TXCH; i++)
		__set_bit(i, txch_free);

	for (i = 0; i < MAX_RXCH; i++)
		__set_bit(i, rxch_free);

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warning("Warning: Could not disable RX section\n");
			break;
		}
	}

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warning("Warning: Could not disable TX section\n");
			break;
		}
	}

	/* setup resource allocations for the different DMA sections */
	tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
	pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

	/* enable tx section */
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	/* enable rx section */
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	for (i = 0; i < MAX_FLAGS; i++)
		__set_bit(i, flags_free);

	for (i = 0; i < MAX_FUN; i++)
		__set_bit(i, fun_free);

	/* clear all status flags */
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);

	printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
		"(%d tx, %d rx channels)\n", num_txch, num_rxch);

out:
	spin_unlock(&init_lock);
	return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
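
/*
 * Editor's illustrative sketch (not part of the original file): a client
 * driver would call pasemi_dma_init() before any other call into this
 * library. The function guards against double initialization (the dma_pdev
 * check above), so repeated calls are harmless. The example_probe name is
 * made up for illustration.
 */
static __maybe_unused int example_probe(void)
{
	int err;

	err = pasemi_dma_init();
	if (err)
		return err;

	/* library is ready: channels, rings, buffers and flags can be allocated */
	return 0;
}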