pci_dma.c (Linux Kernel 3.7.1)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

static int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}

int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);

static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flags,
				   struct dma_attrs *attrs)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = alloc_pages_exact_node(node,
						flags, get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */

	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}

static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
				 dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	void *cpu_addr = page_address(page) + offset;
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	if (dmabarr)
		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
							size, SN_DMA_ADDR_PHYS);
	else
		dma_addr = provider->dma_map(pdev, phys_addr, size,
					     SN_DMA_ADDR_PHYS);

	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}
	return dma_addr;
}

static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, dir);
}

static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
			    int nhwentries, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	struct scatterlist *sg;

	BUG_ON(dev->bus != &pci_bus_type);

	for_each_sg(sgl, sg, nhwentries, i) {
		provider->dma_unmap(pdev, sg->dma_address, dir);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}

static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
			 int nhwentries, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sgl, *sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for_each_sg(sgl, sg, nhwentries, i) {
		dma_addr_t dma_addr;
		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (dmabarr)
			dma_addr = provider->dma_map_consistent(pdev,
								phys_addr,
								sg->length,
								SN_DMA_ADDR_PHYS);
		else
			dma_addr = provider->dma_map(pdev, phys_addr,
						     sg->length,
						     SN_DMA_ADDR_PHYS);

		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}

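/*
 * DMA on SN platforms is cache coherent in hardware, so the dma_sync_*()
 * operations below only need to sanity-check that the device really is a
 * PCI device; no cache flushing or invalidation is required.
 */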
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				       size_t size, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
					  size_t size,
					  enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				   int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				      int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

u64 sn_dma_get_required_mask(struct device *dev)
{
	return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 0, /* read */
		 port, size, __pa(val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work round PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;

	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

	if (ret == 2)
		return -EINVAL;

	if (ret == 1)
		*val = -1;

	return size;
}

int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 1, /* write */
		 port, size, __pa(&val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work round PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
 out:
	return ret;
}

static struct dma_map_ops sn_dma_ops = {
	.alloc = sn_dma_alloc_coherent,
	.free = sn_dma_free_coherent,
	.map_page = sn_dma_map_page,
	.unmap_page = sn_dma_unmap_page,
	.map_sg = sn_dma_map_sg,
	.unmap_sg = sn_dma_unmap_sg,
	.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
	.sync_single_for_device = sn_dma_sync_single_for_device,
	.sync_sg_for_device = sn_dma_sync_sg_for_device,
	.mapping_error = sn_dma_mapping_error,
	.dma_supported = sn_dma_supported,
};

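/*
 * Install these operations as the global ia64 dma_ops during SN
 * platform initialization.
 */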
void sn_dma_init(void)
{
	dma_ops = &sn_dma_ops;
}
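
As the file header notes, drivers do not call these sn_* routines directly; they use the generic DMA API described in Documentation/DMA-API.txt, and the dma_map_ops table installed by sn_dma_init() routes those calls here. The sketch below is illustrative only and is not part of pci_dma.c: the driver function names and buffer are hypothetical, but the DMA API calls are the standard ones that end up in sn_dma_supported(), sn_dma_map_page(), sn_dma_mapping_error() and sn_dma_unmap_page() on this platform.

/* Illustrative driver-side usage; not part of pci_dma.c. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	/* dma_supported() is backed by sn_dma_supported() on this platform. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return -EIO;

	/* dma_map_single() reaches sn_dma_map_page() via the .map_page op. */
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *handle))	/* -> sn_dma_mapping_error() */
		return -ENOMEM;

	return 0;
}

static void example_unmap_buffer(struct pci_dev *pdev, dma_addr_t handle,
				 size_t len)
{
	/* dma_unmap_single() reaches sn_dma_unmap_page() via .unmap_page. */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
}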