Linux Kernel 3.7.1
dma-swiotlb.c
/*
 * Contains routines needed to support swiotlb for ppc.
 *
 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
 * Author: Becky Bruce
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

unsigned int ppc_swiotlb_enable;

static u64 swiotlb_powerpc_get_required(struct device *dev)
{
	u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr;

	end = memblock_end_of_DRAM();
	if (max_direct_dma_addr && end > max_direct_dma_addr)
		end = max_direct_dma_addr;
	end += get_dma_offset(dev);

	/* Smallest 2^n - 1 mask that covers the highest DMA address 'end' */
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM. Since
 * we don't ever call anything other than map_sg, unmap_sg,
 * map_page, and unmap_page on highmem, use normal dma_ops
 * for everything else.
 */
struct dma_map_ops swiotlb_dma_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.mapping_error = swiotlb_dma_mapping_error,
	.get_required_mask = swiotlb_powerpc_get_required,
};
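
/*
 * These ops are wired up in two places below: ppc_swiotlb_bus_notify()
 * installs them per platform device with set_dma_ops(), and
 * swiotlb_late_init() makes them the default for PCI devices with
 * set_pci_dma_ops() once swiotlb is enabled.
 */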

void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
	struct pci_controller *hose;
	struct dev_archdata *sd;

	hose = pci_bus_to_host(pdev->bus);
	sd = &pdev->dev.archdata;
	/* DMA beyond the host bridge's inbound window must be bounced */
	sd->max_direct_dma_addr =
		hose->dma_window_base_cur + hose->dma_window_size;
}

static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct dev_archdata *sd;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	sd = &dev->archdata;
	sd->max_direct_dma_addr = 0;

	/* May need to bounce if the device can't address all of DRAM */
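	/*
	 * e.g. a device left with the default 32-bit mask has
	 * dma_get_mask(dev) + 1 == 0x100000000ULL, so any RAM above
	 * 4 GiB makes the comparison true and the swiotlb ops are used.
	 */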
	if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
		set_dma_ops(dev, &swiotlb_dma_ops);

	return NOTIFY_DONE;
}

static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
	.notifier_call = ppc_swiotlb_bus_notify,
	.priority = 0,
};

int __init swiotlb_setup_bus_notifier(void)
{
	bus_register_notifier(&platform_bus_type,
			      &ppc_swiotlb_plat_bus_notifier);
	return 0;
}

/* Enable swiotlb only when some DRAM lies above the 32-bit boundary */
void __init swiotlb_detect_4g(void)
{
	if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
		ppc_swiotlb_enable = 1;
}

static int __init swiotlb_late_init(void)
{
	if (ppc_swiotlb_enable) {
		swiotlb_print_info();
		set_pci_dma_ops(&swiotlb_dma_ops);
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
	} else {
		swiotlb_free();
	}

	return 0;
}
subsys_initcall(swiotlb_late_init);
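
The required-mask computation in swiotlb_powerpc_get_required() can be checked on its own. The following is a minimal userspace sketch, not kernel code: fls64() is open-coded with the GCC/Clang builtin __builtin_clzll, and the helper names and example addresses are illustrative only.

/*
 * Mirror of the mask arithmetic in swiotlb_powerpc_get_required():
 * take the highest DMA'able address, keep its top bit, then fill in
 * every lower bit to form a 2^n - 1 style DMA mask.
 */
#include <stdio.h>
#include <stdint.h>

static int fls64_demo(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;	/* 1-based index of top set bit */
}

static uint64_t required_mask(uint64_t end)
{
	uint64_t mask = 1ULL << (fls64_demo(end) - 1);	/* top bit of 'end' */

	mask += mask - 1;	/* set every bit below it as well */
	return mask;
}

int main(void)
{
	/* DRAM ending at 3 GiB is covered by a 32-bit mask ... */
	printf("%#llx -> %#llx\n", 0xc0000000ULL,
	       (unsigned long long)required_mask(0xc0000000ULL));
	/* ... while DRAM ending at exactly 4 GiB needs a 33-bit mask. */
	printf("%#llx -> %#llx\n", 0x100000000ULL,
	       (unsigned long long)required_mask(0x100000000ULL));
	return 0;
}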