Linux Kernel 3.7.1
cache.c (drivers/infiniband/core/cache.c)
/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

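/*
 * Per-port software copies of the P_Key and GID tables.  Each uses a
 * zero-length trailing array, so a table is allocated as one chunk
 * sized for the port's actual table length (see ib_cache_update()).
 */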
struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};

struct ib_gid_cache {
        int             table_len;
        union ib_gid    table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};

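/*
 * Switches expose a single management port numbered 0, while HCAs and
 * routers number physical ports starting at 1; these helpers return
 * the valid port range for either node type.
 */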
static inline int start_port(struct ib_device *device)
{
        return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline int end_port(struct ib_device *device)
{
        return (device->node_type == RDMA_NODE_IB_SWITCH) ?
                0 : device->phys_port_cnt;
}

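/*
 * ib_get_cached_gid() fetches one GID table entry from the software
 * cache instead of querying the hardware.  The cache lock is an
 * irqsave rwlock, so the accessor is usable from any context.
 */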
int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid)
{
        struct ib_gid_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.gid_cache[port_num - start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *gid = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);

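/*
 * ib_find_cached_gid() scans every port's cached GID table and
 * reports the first port/index whose entry matches *gid, or -ENOENT
 * if no port has it.
 */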
int ib_find_cached_gid(struct ib_device *device,
                       union ib_gid    *gid,
                       u8              *port_num,
                       u16             *index)
{
        struct ib_gid_cache *cache;
        unsigned long flags;
        int p, i;
        int ret = -ENOENT;

        *port_num = -1;
        if (index)
                *index = -1;

        read_lock_irqsave(&device->cache.lock, flags);

        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                cache = device->cache.gid_cache[p];
                for (i = 0; i < cache->table_len; ++i) {
                        if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
                                *port_num = p + start_port(device);
                                if (index)
                                        *index = i;
                                ret = 0;
                                goto found;
                        }
                }
        }
found:
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);

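/*
 * ib_get_cached_pkey() fetches one P_Key table entry from the cache;
 * the locking and bounds checking mirror ib_get_cached_gid() above.
 */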
int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

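/*
 * A P_Key is 15 bits of key plus a membership bit: bit 15 set means
 * full member, clear means partial member.  ib_find_cached_pkey()
 * matches on the low 15 bits and prefers a full-member entry, falling
 * back to a partial-member index only when no full-member entry
 * exists.
 */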
int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

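/*
 * Hypothetical usage sketch (not part of the original file): a
 * consumer that wants the pkey index of the default full-membership
 * P_Key (0xffff) on a port could call
 *
 *      u16 pkey_index;
 *      int err = ib_find_cached_pkey(device, port_num, 0xffff,
 *                                    &pkey_index);
 *
 * and, on success, program pkey_index into its QP attributes.
 *
 * ib_find_exact_cached_pkey() below differs in that it compares all
 * 16 bits, membership bit included.
 */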
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

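/*
 * The LID mask count (LMC) is the number of low-order LID bits the
 * port masks for path selection; it is cached per port alongside the
 * tables above.
 */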
int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

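/*
 * ib_cache_update() refreshes one port's cached state.  The new pkey
 * and gid tables are queried and filled with no locks held (the
 * queries sleep), then swapped in under the write lock; the old
 * tables are freed afterwards, so readers only ever wait for the
 * pointer swap.
 */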
static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr  *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache  *gid_cache = NULL, *old_gid_cache;
        int                   i;
        int                   ret;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
                       ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
                            sizeof *gid_cache->table, GFP_KERNEL);
        if (!gid_cache)
                goto err;

        gid_cache->table_len = tprops->gid_tbl_len;

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        for (i = 0; i < gid_cache->table_len; ++i) {
                ret = ib_query_gid(device, port, i, gid_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
        old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

        device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
        device->cache.gid_cache [port - start_port(device)] = gid_cache;

        device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(old_pkey_cache);
        kfree(old_gid_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

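/*
 * Work item body: runs on ib_wq in process context, where
 * ib_cache_update() is free to sleep in ib_query_port() and friends.
 */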
static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}

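/*
 * Event handler: may run in atomic context, so it only allocates the
 * work item (with GFP_ATOMIC) and defers the sleeping cache refresh
 * to the workqueue.  A failed allocation is silently dropped; the
 * cache then stays stale until the next event.
 */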
static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}

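/*
 * Per-device setup: allocate the per-port cache pointer arrays, prime
 * every port's cache synchronously, then register for port events so
 * the cache tracks later changes.
 */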
static void ib_cache_setup_one(struct ib_device *device)
{
        int p;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kmalloc(sizeof *device->cache.pkey_cache *
                        (end_port(device) - start_port(device) + 1), GFP_KERNEL);
        device->cache.gid_cache =
                kmalloc(sizeof *device->cache.gid_cache *
                        (end_port(device) - start_port(device) + 1), GFP_KERNEL);

        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (end_port(device) -
                                           start_port(device) + 1),
                                          GFP_KERNEL);

        if (!device->cache.pkey_cache || !device->cache.gid_cache ||
            !device->cache.lmc_cache) {
                printk(KERN_WARNING "Couldn't allocate cache for %s\n",
                       device->name);
                goto err;
        }

        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                device->cache.pkey_cache[p] = NULL;
                device->cache.gid_cache [p] = NULL;
                ib_cache_update(device, p + start_port(device));
        }

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        if (ib_register_event_handler(&device->cache.event_handler))
                goto err_cache;

        return;

err_cache:
        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }

err:
        kfree(device->cache.pkey_cache);
        kfree(device->cache.gid_cache);
        kfree(device->cache.lmc_cache);
}

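/*
 * Per-device teardown: unhook the event handler first, then flush
 * ib_wq so no queued update can still touch the tables being freed.
 */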
static void ib_cache_cleanup_one(struct ib_device *device)
{
        int p;

        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);

        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }

        kfree(device->cache.pkey_cache);
        kfree(device->cache.gid_cache);
        kfree(device->cache.lmc_cache);
}

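/*
 * Registering as an IB client makes the core invoke
 * ib_cache_setup_one() / ib_cache_cleanup_one() for every device as
 * it is added or removed.
 */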
static struct ib_client cache_client = {
        .name   = "cache",
        .add    = ib_cache_setup_one,
        .remove = ib_cache_cleanup_one
};

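/*
 * Module-level hooks, called from the ib_core init/exit path in
 * device.c via core_priv.h.
 */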
int __init ib_cache_setup(void)
{
        return ib_register_client(&cache_client);
}

void __exit ib_cache_cleanup(void)
{
        ib_unregister_client(&cache_client);
}