Linux Kernel 3.7.1
apbio.c
/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
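/*
 * APB register access helpers for Tegra. Accesses go either through
 * plain MMIO or, on Tegra20-based platforms, through the APB DMA
 * controller; the DMA path is believed to work around a Tegra20
 * hardware limitation with direct CPU accesses to some APB registers.
 * The dispatch between the two is set up in tegra_apb_io_init() below.
 */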

#include <linux/kernel.h>
#include <linux/io.h>
#include <mach/iomap.h>
#include <linux/of.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>

#include "apbio.h"

#if defined(CONFIG_TEGRA20_APB_DMA)
static DEFINE_MUTEX(tegra_apb_dma_lock);
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);

static struct dma_chan *tegra_apb_dma_chan;
static struct dma_slave_config dma_sconfig;

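/*
 * Lazily grab a slave DMA channel and a single-word coherent bounce
 * buffer. The accessors below call this without holding the lock, so
 * the mutex and the re-check of tegra_apb_dma_chan handle two callers
 * racing to initialize.
 */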
bool tegra_apb_dma_init(void)
{
	dma_cap_mask_t mask;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma_chan)
		goto skip_init;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!tegra_apb_dma_chan) {
		/*
		 * This is common until the device is probed, so don't
		 * shout about it.
		 */
		pr_debug("%s: cannot allocate dma channel\n", __func__);
		goto err_dma_alloc;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
			&tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: cannot allocate bounce buffer\n", __func__);
		goto err_buff_alloc;
	}

	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.slave_id = TEGRA_DMA_REQ_SEL_CNTR;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

skip_init:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

err_buff_alloc:
	dma_release_channel(tegra_apb_dma_chan);
	tegra_apb_dma_chan = NULL;

err_dma_alloc:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

static void apb_dma_complete(void *args)
{
	complete(&tegra_apb_wait);
}

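/*
 * Move one 32-bit word between the bounce buffer and the APB register
 * at apb_add, then block (for at most 50 ms) until the DMA engine
 * signals completion. Callers serialize on tegra_apb_dma_lock.
 */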
static int do_dma_transfer(unsigned long apb_add,
		enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *dma_desc;
	int ret;

	if (dir == DMA_DEV_TO_MEM)
		dma_sconfig.src_addr = apb_add;
	else
		dma_sconfig.dst_addr = apb_add;

	ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
	if (ret)
		return ret;

	dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
			tegra_apb_bb_phys, sizeof(u32), dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return -EINVAL;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

	INIT_COMPLETION(tegra_apb_wait);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(tegra_apb_dma_chan);
	ret = wait_for_completion_timeout(&tegra_apb_wait,
			msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb dma transfer timed out")) {
		dmaengine_terminate_all(tegra_apb_dma_chan);
		return -EFAULT;
	}
	return 0;
}

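/*
 * Read via the bounce buffer. If no DMA channel is available yet (e.g.
 * the dmaengine driver has not probed), fall back to direct MMIO.
 */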
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
	if (ret < 0) {
		pr_err("error in reading offset 0x%08lx using dma\n", offset);
		*(u32 *)tegra_apb_bb = 0;
	}
	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}

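/* Write via the bounce buffer, with the same direct-MMIO fallback. */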
static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
	if (ret < 0)
		pr_err("error in writing offset 0x%08lx using dma\n", offset);
	mutex_unlock(&tegra_apb_dma_lock);
}
#else
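/* Without the Tegra20 APB DMA driver, the "dma" accessors are plain MMIO. */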
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif

typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;

static u32 tegra_apb_readl_direct(unsigned long offset)
{
	return readl(IO_ADDRESS(offset));
}

static void tegra_apb_writel_direct(u32 value, unsigned long offset)
{
	writel(value, IO_ADDRESS(offset));
}

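/*
 * Pick the access method once at init time: the DMA path only on
 * Tegra20 (or when there is no populated device tree to check),
 * direct MMIO otherwise.
 */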
void tegra_apb_io_init(void)
{
	/* Need to use dma only when it is a Tegra20-based platform */
	if (of_machine_is_compatible("nvidia,tegra20") ||
			!of_have_populated_dt()) {
		apbio_read = tegra_apb_readl_using_dma;
		apbio_write = tegra_apb_writel_using_dma;
	} else {
		apbio_read = tegra_apb_readl_direct;
		apbio_write = tegra_apb_writel_direct;
	}
}

u32 tegra_apb_readl(unsigned long offset)
{
	return apbio_read(offset);
}

void tegra_apb_writel(u32 value, unsigned long offset)
{
	apbio_write(value, offset);
}
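
/*
 * Usage sketch (illustrative only, not part of the kernel file): a
 * Tegra driver reads and writes APB registers through the helpers
 * above instead of raw readl()/writel(), so the Tegra20 DMA workaround
 * is applied transparently. This assumes tegra_apb_io_init() has
 * already run, since it installs the apbio_read/apbio_write pointers.
 * EXAMPLE_APB_REG is a made-up placeholder, not a real register offset.
 */
#if 0	/* example only, never compiled */
#define EXAMPLE_APB_REG		0x7000f800UL	/* hypothetical offset */

static u32 example_set_bit0(void)
{
	u32 val = tegra_apb_readl(EXAMPLE_APB_REG);	/* DMA-backed on Tegra20 */

	tegra_apb_writel(val | 0x1, EXAMPLE_APB_REG);
	return val;
}
#endif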