Linux Kernel 3.7.1
alloc.c
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
	mlx4_bitmap_free_range(bitmap, obj, 1);
}

u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
	u32 obj;

	if (likely(cnt == 1 && align == 1))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
					 bitmap->last, cnt, align - 1);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
						 0, cnt, align - 1);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}

u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}

void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}

int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top = 0;
	bitmap->max = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof (long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}
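
/*
 * Usage sketch (illustrative; not part of the original file).  A
 * consumer sets up a power-of-two ID space, here 256 entries with the
 * bottom 8 reserved, then hands IDs out and back:
 *
 *	struct mlx4_bitmap map;
 *	u32 id;
 *
 *	if (mlx4_bitmap_init(&map, 256, 255, 8, 0))
 *		return -ENOMEM;
 *
 *	id = mlx4_bitmap_alloc(&map);	// first free ID, 8 in this case
 *	if (id != (u32)-1)
 *		mlx4_bitmap_free(&map, id);
 *
 *	mlx4_bitmap_cleanup(&map);
 *
 * With mask == num - 1 (255 here) bitmap->top stays 0 and IDs come
 * straight from the table; a larger mask gives reused IDs a rotating
 * generation prefix in their high bits.
 */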

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
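
/*
 * Illustrative example (not part of the original file): asking for an
 * 8 KB queue while capping contiguous memory at 4 KB takes the paged
 * path below, so with 4 KB pages the caller ends up with buf.nbufs == 2
 * and, on 64-bit, one contiguous virtual mapping stitched together by
 * vmap():
 *
 *	struct mlx4_buf buf;
 *
 *	if (mlx4_buf_alloc(dev, 8192, 4096, &buf))
 *		return -ENOMEM;
 *	...
 *	mlx4_buf_free(dev, 8192, &buf);
 */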

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs = 1;
		buf->npages = 1;
		buf->page_shift = get_order(size) + PAGE_SHIFT;
		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf = NULL;
		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					 GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
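
/*
 * Doorbell records are carved buddy-style out of DMA-coherent "pgdir"
 * pages: each page holds MLX4_DB_PER_PAGE 32-bit records, tracked by
 * two bitmaps in which a set bit means "free" -- order0 for single
 * records, order1 for naturally aligned pairs.  An order-0 allocation
 * may split an order-1 pair, parking the buddy in order0; freeing
 * re-merges buddies and releases the DMA page once order1 is full
 * again.
 */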

static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index = i;
	db->db = pgdir->db_page + db->index;
	db->dma = pgdir->db_dma + db->index * 4;
	db->order = order;

	return 0;
}

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);
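
/*
 * Usage sketch (illustrative): grab one 32-bit doorbell record, clear
 * it before handing its DMA address to the hardware, and release it
 * when done; order 1 would request a naturally aligned pair instead.
 *
 *	struct mlx4_db db;
 *
 *	if (mlx4_db_alloc(dev, &db, 0))
 *		return -ENOMEM;
 *	*db.db = 0;
 *	...
 *	mlx4_db_free(dev, &db);
 */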

void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
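
/*
 * Usage sketch (illustrative; "num_entries" and "entry_size" are
 * made-up names): a queue owner bundles buffer, MTT, and doorbell
 * through these helpers rather than allocating each piece by hand:
 *
 *	struct mlx4_hwq_resources wqres;
 *	int size = num_entries * entry_size;
 *
 *	if (mlx4_alloc_hwq_res(dev, &wqres, size, 2 * PAGE_SIZE))
 *		return -ENOMEM;
 *	...
 *	mlx4_free_hwq_res(dev, &wqres, size);
 */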