memory.c (Linux Kernel 3.7.1)
/*
 *	Functions to handle I2O memory
 *
 *	Pulled from the inlines in i2o headers and uninlined
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 */

#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "core.h"

/* Protects our 32/64bit mask switching */
static DEFINE_MUTEX(mem_lock);

/**
 *	i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
 *	@c: I2O controller for which the calculation should be done
 *	@body_size: maximum body size used for message in 32-bit words
 *
 *	Returns the maximum number of SG elements in a SG list.
 */
u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
	i2o_status_block *sb = c->status_block.virt;
	u16 sg_count =
	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
	    body_size;

	if (c->pae_support) {
		/*
		 * for 64-bit a SG attribute element must be added and each
		 * SG element needs 12 bytes instead of 8.
		 */
		sg_count -= 2;
		sg_count /= 3;
	} else
		sg_count /= 2;

	if (c->short_req && (sg_count > 8))
		sg_count = 8;

	return sg_count;
}
EXPORT_SYMBOL_GPL(i2o_sg_tablesize);

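/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical OSM sizing its SG table. Assuming a 128-word inbound frame
 * and the 4-word (16-byte) message header, a 6-word body leaves
 * (128 - 4) - 6 = 118 words: 59 two-word SG elements on a 32-bit
 * controller, or (118 - 2) / 3 = 38 on a PAE one; short_req controllers
 * are capped at 8 either way.
 */
#if 0
static u16 example_sg_tablesize(struct i2o_controller *c)
{
	/* body_size is in 32-bit words; 6 is an assumed example value */
	return i2o_sg_tablesize(c, 6);
}
#endif
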
/**
 *	i2o_dma_map_single - Map pointer to controller and fill in I2O message
 *	@c: I2O controller
 *	@ptr: pointer to the data which should be mapped
 *	@size: size of data in bytes
 *	@direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 *	@sg_ptr: pointer to the SG list inside the I2O message
 *
 *	Does all necessary DMA handling and writes the I2O SGL elements into
 *	the I2O message. The pointer sg_ptr is only modified if the mapping
 *	was successful.
 *
 *	Returns DMA address which must be checked for failures using
 *	dma_mapping_error().
 */
dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
			      size_t size,
			      enum dma_data_direction direction,
			      u32 **sg_ptr)
{
	u32 sg_flags;
	u32 *mptr = *sg_ptr;
	dma_addr_t dma_addr;

	switch (direction) {
	case DMA_TO_DEVICE:
		sg_flags = 0xd4000000;
		break;
	case DMA_FROM_DEVICE:
		sg_flags = 0xd0000000;
		break;
	default:
		return 0;
	}

	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
	if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		/* 64-bit SGLs need a leading SG attribute element */
		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
			*mptr++ = cpu_to_le32(0x7C020002);
			*mptr++ = cpu_to_le32(PAGE_SIZE);
		}
#endif

		*mptr++ = cpu_to_le32(sg_flags | size);
		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
		*sg_ptr = mptr;
	}
	return dma_addr;
}
EXPORT_SYMBOL_GPL(i2o_dma_map_single);

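/*
 * Usage sketch (illustrative only, not part of the original file): map a
 * single buffer into an outgoing message. The SGL position msg->body[0]
 * is an assumption; real callers compute the SGL offset from the message
 * layout. mptr is advanced past the written SG element on success.
 */
#if 0
static int example_map_single(struct i2o_controller *c,
			      struct i2o_message *msg, void *buf, size_t len)
{
	u32 *mptr = &msg->body[0];	/* assumed SGL position */
	dma_addr_t addr;

	addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, &mptr);
	if (dma_mapping_error(&c->pdev->dev, addr))
		return -ENOMEM;
	/* ... fill in the rest of the message and post it ... */
	return 0;
}
#endif
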
/**
 *	i2o_dma_map_sg - Map a SG List to controller and fill in I2O message
 *	@c: I2O controller
 *	@sg: SG list to be mapped
 *	@sg_count: number of elements in the SG list
 *	@direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 *	@sg_ptr: pointer to the SG list inside the I2O message
 *
 *	Does all necessary DMA handling and writes the I2O SGL elements into
 *	the I2O message. The pointer sg_ptr is only modified if the mapping
 *	was successful.
 *
 *	Returns 0 on failure or 1 on success.
 */
int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg,
		   int sg_count, enum dma_data_direction direction,
		   u32 **sg_ptr)
{
	u32 sg_flags;
	u32 *mptr = *sg_ptr;

	switch (direction) {
	case DMA_TO_DEVICE:
		sg_flags = 0x14000000;
		break;
	case DMA_FROM_DEVICE:
		sg_flags = 0x10000000;
		break;
	default:
		return 0;
	}

	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
	if (!sg_count)
		return 0;

#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
		*mptr++ = cpu_to_le32(0x7C020002);
		*mptr++ = cpu_to_le32(PAGE_SIZE);
	}
#endif

	while (sg_count-- > 0) {
		/* mark the last SG element as end of list */
		if (!sg_count)
			sg_flags |= 0xC0000000;
		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
		sg = sg_next(sg);
	}
	*sg_ptr = mptr;

	return 1;
}
EXPORT_SYMBOL_GPL(i2o_dma_map_sg);

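/*
 * Usage sketch (illustrative only, not part of the original file): map a
 * scatterlist, e.g. one taken from a block request. sg_count must not
 * exceed what i2o_sg_tablesize() reported for this controller; the SGL
 * position msg->body[0] is again an assumption.
 */
#if 0
static int example_map_sg(struct i2o_controller *c, struct i2o_message *msg,
			  struct scatterlist *sg, int sg_count)
{
	u32 *mptr = &msg->body[0];	/* assumed SGL position */

	if (!i2o_dma_map_sg(c, sg, sg_count, DMA_FROM_DEVICE, &mptr))
		return -ENOMEM;
	/* mptr now points past the last SG element written */
	return 0;
}
#endif
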
/**
 *	i2o_dma_alloc - Allocate DMA memory
 *	@dev: struct device pointer to the PCI device of the I2O controller
 *	@addr: i2o_dma struct which should get the DMA buffer
 *	@len: length of the new DMA memory
 *
 *	Allocate a coherent DMA memory and write the pointers into addr.
 *
 *	Returns 0 on success or -ENOMEM on failure.
 */
int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int dma_64 = 0;

	mutex_lock(&mem_lock);
	/* temporarily drop to a 32-bit mask so the buffer lands below 4GB */
	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) {
		dma_64 = 1;
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
			mutex_unlock(&mem_lock);
			return -ENOMEM;
		}
	}

	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);

	if ((sizeof(dma_addr_t) > 4) && dma_64)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
			printk(KERN_WARNING "i2o: unable to set 64-bit DMA\n");
	mutex_unlock(&mem_lock);

	if (!addr->virt)
		return -ENOMEM;

	memset(addr->virt, 0, len);
	addr->len = len;

	return 0;
}
EXPORT_SYMBOL_GPL(i2o_dma_alloc);

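/*
 * Usage sketch (illustrative only, not part of the original file):
 * allocate a zeroed coherent buffer for a controller table. The i2o_dma
 * struct keeps virt, phys and len together so the buffer can be freed
 * later without extra bookkeeping; 4096 is an assumed example size.
 */
#if 0
static int example_alloc_table(struct i2o_controller *c)
{
	struct i2o_dma table;	/* hypothetical local buffer descriptor */

	if (i2o_dma_alloc(&c->pdev->dev, &table, 4096))
		return -ENOMEM;
	/* ... hand table.phys to the controller, use table.virt locally ... */
	i2o_dma_free(&c->pdev->dev, &table);
	return 0;
}
#endif
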
/**
 *	i2o_dma_free - Free DMA memory
 *	@dev: struct device pointer to the PCI device of the I2O controller
 *	@addr: i2o_dma struct which contains the DMA buffer
 *
 *	Free a coherent DMA memory and set virtual address of addr to NULL.
 */
void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
{
	if (addr->virt) {
		if (addr->phys)
			dma_free_coherent(dev, addr->len, addr->virt,
					  addr->phys);
		else
			kfree(addr->virt);
		addr->virt = NULL;
	}
}
EXPORT_SYMBOL_GPL(i2o_dma_free);

/**
 *	i2o_dma_realloc - Realloc DMA memory
 *	@dev: struct device pointer to the PCI device of the I2O controller
 *	@addr: pointer to a i2o_dma struct DMA buffer
 *	@len: new length of memory
 *
 *	If there was something allocated in the addr, free it first. If
 *	len > 0 then try to allocate it and write the addresses back to the
 *	addr structure. If len == 0 set the virtual address to NULL.
 *
 *	Returns 0 on success or negative error code on failure.
 */
int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
	i2o_dma_free(dev, addr);

	if (len)
		return i2o_dma_alloc(dev, addr, len);

	return 0;
}
EXPORT_SYMBOL_GPL(i2o_dma_realloc);

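/*
 * Usage sketch (illustrative only, not part of the original file): grow a
 * previously allocated buffer. Note that realloc frees first and then
 * allocates, so the old contents are NOT preserved; a zero length just
 * frees the buffer. addr->len is read before the call, so doubling works.
 */
#if 0
static int example_resize(struct device *dev, struct i2o_dma *addr)
{
	return i2o_dma_realloc(dev, addr, addr->len * 2);
}
#endif
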
/*
 *	i2o_pool_alloc - Allocate a slab cache and mempool
 *	@pool: pointer to struct i2o_pool to write data into
 *	@name: name which is used to identify cache
 *	@size: size of each object
 *	@min_nr: minimum number of objects
 *
 *	First allocates a slab cache with name and size. Then allocates a
 *	mempool which uses the slab cache for allocation and freeing.
 *
 *	Returns 0 on success or negative error code on failure.
 */
int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
		   size_t size, int min_nr)
{
	pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
	if (!pool->name)
		goto exit;
	strcpy(pool->name, name);

	pool->slab =
	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!pool->slab)
		goto free_name;

	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
	if (!pool->mempool)
		goto free_slab;

	return 0;

free_slab:
	kmem_cache_destroy(pool->slab);

free_name:
	kfree(pool->name);

exit:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(i2o_pool_alloc);

/*
 *	i2o_pool_free - Free slab cache and mempool again
 *	@pool: pointer to struct i2o_pool which should be freed
 *
 *	Note that you have to return all objects to the mempool again before
 *	calling i2o_pool_free().
 */
void i2o_pool_free(struct i2o_pool *pool)
{
	mempool_destroy(pool->mempool);
	kmem_cache_destroy(pool->slab);
	kfree(pool->name);
}
EXPORT_SYMBOL_GPL(i2o_pool_free);
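
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * intended lifecycle of an i2o_pool. Objects come from the mempool and
 * must all be returned before i2o_pool_free(); the name, object size and
 * minimum count below are assumed example values.
 */
#if 0
static int example_pool(void)
{
	struct i2o_pool pool;
	void *obj;

	if (i2o_pool_alloc(&pool, "i2o:example", 64, 4))
		return -ENOMEM;

	obj = mempool_alloc(pool.mempool, GFP_KERNEL);
	if (obj)
		mempool_free(obj, pool.mempool);

	i2o_pool_free(&pool);
	return 0;
}
#endif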