Linux Kernel 3.7.1
ipoib_main.c
1 /*
2  * Copyright (c) 2004 Topspin Communications. All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses. You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  * Redistribution and use in source and binary forms, with or
13  * without modification, are permitted provided that the following
14  * conditions are met:
15  *
16  * - Redistributions of source code must retain the above
17  * copyright notice, this list of conditions and the following
18  * disclaimer.
19  *
20  * - Redistributions in binary form must reproduce the above
21  * copyright notice, this list of conditions and the following
22  * disclaimer in the documentation and/or other materials
23  * provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include "ipoib.h"
36 
37 #include <linux/module.h>
38 
39 #include <linux/init.h>
40 #include <linux/slab.h>
41 #include <linux/kernel.h>
42 #include <linux/vmalloc.h>
43 
44 #include <linux/if_arp.h> /* For ARPHRD_xxx */
45 
46 #include <linux/ip.h>
47 #include <linux/in.h>
48 
49 #include <linux/jhash.h>
50 #include <net/arp.h>
51 
52 MODULE_AUTHOR("Roland Dreier");
53 MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
54 MODULE_LICENSE("Dual BSD/GPL");
55 
56 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
57 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
58 
59 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
60 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
61 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
62 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
63 
64 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
65 int ipoib_debug_level;
66 
67 module_param_named(debug_level, ipoib_debug_level, int, 0644);
68 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
69 #endif
70 
71 struct ipoib_path_iter {
72  struct net_device *dev;
73  struct ipoib_path path;
74 };
75 
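/*
 * IPoIB hardware addresses are 20 bytes: a 4-byte field carrying the
 * destination QPN (0x00ffffff here, the broadcast QPN) followed by the
 * 16-byte GID.  This is the IPv4 broadcast/all-nodes multicast GID; the
 * P_Key bytes at offsets 8 and 9 are patched in per port (see
 * ipoib_add_port() and ipoib_start_xmit()).
 */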
76 static const u8 ipv4_bcast_addr[] = {
77  0x00, 0xff, 0xff, 0xff,
78  0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
79  0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
80 };
81 
82 struct workqueue_struct *ipoib_workqueue;
83 
84 struct ib_sa_client ipoib_sa_client;
85 
86 static void ipoib_add_one(struct ib_device *device);
87 static void ipoib_remove_one(struct ib_device *device);
88 static void ipoib_neigh_reclaim(struct rcu_head *rp);
89 
90 static struct ib_client ipoib_client = {
91  .name = "ipoib",
92  .add = ipoib_add_one,
93  .remove = ipoib_remove_one
94 };
95 
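/*
 * ipoib_open() brings the interface up: mark it ADMIN_UP, open the IB
 * device resources, bring the IB side up, and propagate IFF_UP to any
 * child (P_Key) interfaces.
 */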
96 int ipoib_open(struct net_device *dev)
97 {
98  struct ipoib_dev_priv *priv = netdev_priv(dev);
99 
100  ipoib_dbg(priv, "bringing up interface\n");
101 
102  set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
103 
104  if (ipoib_pkey_dev_delay_open(dev))
105  return 0;
106 
107  if (ipoib_ib_dev_open(dev))
108  goto err_disable;
109 
110  if (ipoib_ib_dev_up(dev))
111  goto err_stop;
112 
113  if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
114  struct ipoib_dev_priv *cpriv;
115 
116  /* Bring up any child interfaces too */
117  mutex_lock(&priv->vlan_mutex);
118  list_for_each_entry(cpriv, &priv->child_intfs, list) {
119  int flags;
120 
121  flags = cpriv->dev->flags;
122  if (flags & IFF_UP)
123  continue;
124 
125  dev_change_flags(cpriv->dev, flags | IFF_UP);
126  }
127  mutex_unlock(&priv->vlan_mutex);
128  }
129 
130  netif_start_queue(dev);
131 
132  return 0;
133 
134 err_stop:
135  ipoib_ib_dev_stop(dev, 1);
136 
137 err_disable:
138  clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
139 
140  return -EINVAL;
141 }
142 
143 static int ipoib_stop(struct net_device *dev)
144 {
145  struct ipoib_dev_priv *priv = netdev_priv(dev);
146 
147  ipoib_dbg(priv, "stopping interface\n");
148 
149  clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
150 
151  netif_stop_queue(dev);
152 
153  ipoib_ib_dev_down(dev, 1);
154  ipoib_ib_dev_stop(dev, 0);
155 
156  if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
157  struct ipoib_dev_priv *cpriv;
158 
159  /* Bring down any child interfaces too */
160  mutex_lock(&priv->vlan_mutex);
161  list_for_each_entry(cpriv, &priv->child_intfs, list) {
162  int flags;
163 
164  flags = cpriv->dev->flags;
165  if (!(flags & IFF_UP))
166  continue;
167 
168  dev_change_flags(cpriv->dev, flags & ~IFF_UP);
169  }
170  mutex_unlock(&priv->vlan_mutex);
171  }
172 
173  return 0;
174 }
175 
176 static void ipoib_uninit(struct net_device *dev)
177 {
178  ipoib_dev_cleanup(dev);
179 }
180 
181 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
182 {
183  struct ipoib_dev_priv *priv = netdev_priv(dev);
184 
185  if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
186  features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
187 
188  return features;
189 }
190 
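/*
 * In connected mode the MTU may be raised up to the CM maximum reported
 * by ipoib_cm_max_mtu(); in datagram mode it is limited by the port's IB
 * MTU (IPOIB_UD_MTU) and the effective MTU is clamped to the multicast
 * group MTU.
 */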
191 static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
192 {
193  struct ipoib_dev_priv *priv = netdev_priv(dev);
194 
195  /* dev->mtu > 2K ==> connected mode */
196  if (ipoib_cm_admin_enabled(dev)) {
197  if (new_mtu > ipoib_cm_max_mtu(dev))
198  return -EINVAL;
199 
200  if (new_mtu > priv->mcast_mtu)
201  ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
202  priv->mcast_mtu);
203 
204  dev->mtu = new_mtu;
205  return 0;
206  }
207 
208  if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
209  return -EINVAL;
210 
211  priv->admin_mtu = new_mtu;
212 
213  dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
214 
215  return 0;
216 }
217 
218 int ipoib_set_mode(struct net_device *dev, const char *buf)
219 {
220  struct ipoib_dev_priv *priv = netdev_priv(dev);
221 
222  /* flush paths if we switch modes so that connections are restarted */
223  if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
224  set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
225  ipoib_warn(priv, "enabling connected mode "
226  "will cause multicast packet drops\n");
227  netdev_update_features(dev);
228  rtnl_unlock();
229  priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
230 
231  ipoib_flush_paths(dev);
232  rtnl_lock();
233  return 0;
234  }
235 
236  if (!strcmp(buf, "datagram\n")) {
237  clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
238  netdev_update_features(dev);
239  dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
240  rtnl_unlock();
241  ipoib_flush_paths(dev);
242  rtnl_lock();
243  return 0;
244  }
245 
246  return -EINVAL;
247 }
248 
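/*
 * Path records are cached in an rb-tree keyed by destination GID
 * (memcmp() order) and also linked on priv->path_list for iteration
 * and bulk flushing.
 */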
249 static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
250 {
251  struct ipoib_dev_priv *priv = netdev_priv(dev);
252  struct rb_node *n = priv->path_tree.rb_node;
253  struct ipoib_path *path;
254  int ret;
255 
256  while (n) {
257  path = rb_entry(n, struct ipoib_path, rb_node);
258 
259  ret = memcmp(gid, path->pathrec.dgid.raw,
260  sizeof (union ib_gid));
261 
262  if (ret < 0)
263  n = n->rb_left;
264  else if (ret > 0)
265  n = n->rb_right;
266  else
267  return path;
268  }
269 
270  return NULL;
271 }
272 
273 static int __path_add(struct net_device *dev, struct ipoib_path *path)
274 {
275  struct ipoib_dev_priv *priv = netdev_priv(dev);
276  struct rb_node **n = &priv->path_tree.rb_node;
277  struct rb_node *pn = NULL;
278  struct ipoib_path *tpath;
279  int ret;
280 
281  while (*n) {
282  pn = *n;
283  tpath = rb_entry(pn, struct ipoib_path, rb_node);
284 
285  ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
286  sizeof (union ib_gid));
287  if (ret < 0)
288  n = &pn->rb_left;
289  else if (ret > 0)
290  n = &pn->rb_right;
291  else
292  return -EEXIST;
293  }
294 
295  rb_link_node(&path->rb_node, pn, n);
296  rb_insert_color(&path->rb_node, &priv->path_tree);
297 
298  list_add_tail(&path->list, &priv->path_list);
299 
300  return 0;
301 }
302 
303 static void path_free(struct net_device *dev, struct ipoib_path *path)
304 {
305  struct sk_buff *skb;
306 
307  while ((skb = __skb_dequeue(&path->queue)))
308  dev_kfree_skb_irq(skb);
309 
310  ipoib_dbg(netdev_priv(dev), "path_free\n");
311 
312  /* remove all neigh connected to this path */
313  ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
314 
315  if (path->ah)
316  ipoib_put_ah(path->ah);
317 
318  kfree(path);
319 }
320 
321 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
322 
323 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
324 {
325  struct ipoib_path_iter *iter;
326 
327  iter = kmalloc(sizeof *iter, GFP_KERNEL);
328  if (!iter)
329  return NULL;
330 
331  iter->dev = dev;
332  memset(iter->path.pathrec.dgid.raw, 0, 16);
333 
334  if (ipoib_path_iter_next(iter)) {
335  kfree(iter);
336  return NULL;
337  }
338 
339  return iter;
340 }
341 
342 int ipoib_path_iter_next(struct ipoib_path_iter *iter)
343 {
344  struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
345  struct rb_node *n;
346  struct ipoib_path *path;
347  int ret = 1;
348 
349  spin_lock_irq(&priv->lock);
350 
351  n = rb_first(&priv->path_tree);
352 
353  while (n) {
354  path = rb_entry(n, struct ipoib_path, rb_node);
355 
356  if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
357  sizeof (union ib_gid)) < 0) {
358  iter->path = *path;
359  ret = 0;
360  break;
361  }
362 
363  n = rb_next(n);
364  }
365 
366  spin_unlock_irq(&priv->lock);
367 
368  return ret;
369 }
370 
371 void ipoib_path_iter_read(struct ipoib_path_iter *iter,
372  struct ipoib_path *path)
373 {
374  *path = iter->path;
375 }
376 
377 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
378 
379 void ipoib_mark_paths_invalid(struct net_device *dev)
380 {
381  struct ipoib_dev_priv *priv = netdev_priv(dev);
382  struct ipoib_path *path, *tp;
383 
384  spin_lock_irq(&priv->lock);
385 
386  list_for_each_entry_safe(path, tp, &priv->path_list, list) {
387  ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
388  be16_to_cpu(path->pathrec.dlid),
389  path->pathrec.dgid.raw);
390  path->valid = 0;
391  }
392 
393  spin_unlock_irq(&priv->lock);
394 }
395 
396 void ipoib_flush_paths(struct net_device *dev)
397 {
398  struct ipoib_dev_priv *priv = netdev_priv(dev);
399  struct ipoib_path *path, *tp;
400  LIST_HEAD(remove_list);
401  unsigned long flags;
402 
403  netif_tx_lock_bh(dev);
404  spin_lock_irqsave(&priv->lock, flags);
405 
406  list_splice_init(&priv->path_list, &remove_list);
407 
408  list_for_each_entry(path, &remove_list, list)
409  rb_erase(&path->rb_node, &priv->path_tree);
410 
411  list_for_each_entry_safe(path, tp, &remove_list, list) {
412  if (path->query)
413  ib_sa_cancel_query(path->query_id, path->query);
414  spin_unlock_irqrestore(&priv->lock, flags);
415  netif_tx_unlock_bh(dev);
416  wait_for_completion(&path->done);
417  path_free(dev, path);
418  netif_tx_lock_bh(dev);
419  spin_lock_irqsave(&priv->lock, flags);
420  }
421 
422  spin_unlock_irqrestore(&priv->lock, flags);
423  netif_tx_unlock_bh(dev);
424 }
425 
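/*
 * SA path record query completion: on success build an address handle,
 * hand it to every neighbour waiting on this path, and re-queue any
 * skbs that were buffered while the lookup was in progress.
 */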
426 static void path_rec_completion(int status,
427  struct ib_sa_path_rec *pathrec,
428  void *path_ptr)
429 {
430  struct ipoib_path *path = path_ptr;
431  struct net_device *dev = path->dev;
432  struct ipoib_dev_priv *priv = netdev_priv(dev);
433  struct ipoib_ah *ah = NULL;
434  struct ipoib_ah *old_ah = NULL;
435  struct ipoib_neigh *neigh, *tn;
436  struct sk_buff_head skqueue;
437  struct sk_buff *skb;
438  unsigned long flags;
439 
440  if (!status)
441  ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
442  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
443  else
444  ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
445  status, path->pathrec.dgid.raw);
446 
447  skb_queue_head_init(&skqueue);
448 
449  if (!status) {
450  struct ib_ah_attr av;
451 
452  if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
453  ah = ipoib_create_ah(dev, priv->pd, &av);
454  }
455 
456  spin_lock_irqsave(&priv->lock, flags);
457 
458  if (!IS_ERR_OR_NULL(ah)) {
459  path->pathrec = *pathrec;
460 
461  old_ah = path->ah;
462  path->ah = ah;
463 
464  ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
465  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);
466 
467  while ((skb = __skb_dequeue(&path->queue)))
468  __skb_queue_tail(&skqueue, skb);
469 
470  list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
471  if (neigh->ah) {
472  WARN_ON(neigh->ah != old_ah);
473  /*
474  * Dropping the ah reference inside
475  * priv->lock is safe here, because we
476  * will hold one more reference from
477  * the original value of path->ah (ie
478  * old_ah).
479  */
480  ipoib_put_ah(neigh->ah);
481  }
482  kref_get(&path->ah->ref);
483  neigh->ah = path->ah;
484 
485  if (ipoib_cm_enabled(dev, neigh->daddr)) {
486  if (!ipoib_cm_get(neigh))
487  ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
488  path,
489  neigh));
490  if (!ipoib_cm_get(neigh)) {
491  list_del(&neigh->list);
492  ipoib_neigh_free(neigh);
493  continue;
494  }
495  }
496 
497  while ((skb = __skb_dequeue(&neigh->queue)))
498  __skb_queue_tail(&skqueue, skb);
499  }
500  path->valid = 1;
501  }
502 
503  path->query = NULL;
504  complete(&path->done);
505 
506  spin_unlock_irqrestore(&priv->lock, flags);
507 
508  if (old_ah)
509  ipoib_put_ah(old_ah);
510 
511  while ((skb = __skb_dequeue(&skqueue))) {
512  skb->dev = dev;
513  if (dev_queue_xmit(skb))
514  ipoib_warn(priv, "dev_queue_xmit failed "
515  "to requeue packet\n");
516  }
517 }
518 
519 static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
520 {
521  struct ipoib_dev_priv *priv = netdev_priv(dev);
522  struct ipoib_path *path;
523 
524  if (!priv->broadcast)
525  return NULL;
526 
527  path = kzalloc(sizeof *path, GFP_ATOMIC);
528  if (!path)
529  return NULL;
530 
531  path->dev = dev;
532 
533  skb_queue_head_init(&path->queue);
534 
535  INIT_LIST_HEAD(&path->neigh_list);
536 
537  memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
538  path->pathrec.sgid = priv->local_gid;
539  path->pathrec.pkey = cpu_to_be16(priv->pkey);
540  path->pathrec.numb_path = 1;
541  path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
542 
543  return path;
544 }
545 
546 static int path_rec_start(struct net_device *dev,
547  struct ipoib_path *path)
548 {
549  struct ipoib_dev_priv *priv = netdev_priv(dev);
550 
551  ipoib_dbg(priv, "Start path record lookup for %pI6\n",
552  path->pathrec.dgid.raw);
553 
554  init_completion(&path->done);
555 
556  path->query_id =
557  ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
558  &path->pathrec,
559  IB_SA_PATH_REC_DGID |
560  IB_SA_PATH_REC_SGID |
561  IB_SA_PATH_REC_NUMB_PATH |
562  IB_SA_PATH_REC_TRAFFIC_CLASS |
563  IB_SA_PATH_REC_PKEY,
564  1000, GFP_ATOMIC,
565  path_rec_completion,
566  path, &path->query);
567  if (path->query_id < 0) {
568  ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
569  path->query = NULL;
570  complete(&path->done);
571  return path->query_id;
572  }
573 
574  return 0;
575 }
576 
577 static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
578  struct net_device *dev)
579 {
580  struct ipoib_dev_priv *priv = netdev_priv(dev);
581  struct ipoib_path *path;
582  struct ipoib_neigh *neigh;
583  unsigned long flags;
584 
585  spin_lock_irqsave(&priv->lock, flags);
586  neigh = ipoib_neigh_alloc(daddr, dev);
587  if (!neigh) {
588  spin_unlock_irqrestore(&priv->lock, flags);
589  ++dev->stats.tx_dropped;
590  dev_kfree_skb_any(skb);
591  return;
592  }
593 
594  path = __path_find(dev, daddr + 4);
595  if (!path) {
596  path = path_rec_create(dev, daddr + 4);
597  if (!path)
598  goto err_path;
599 
600  __path_add(dev, path);
601  }
602 
603  list_add_tail(&neigh->list, &path->neigh_list);
604 
605  if (path->ah) {
606  kref_get(&path->ah->ref);
607  neigh->ah = path->ah;
608 
609  if (ipoib_cm_enabled(dev, neigh->daddr)) {
610  if (!ipoib_cm_get(neigh))
611  ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
612  if (!ipoib_cm_get(neigh)) {
613  list_del(&neigh->list);
614  ipoib_neigh_free(neigh);
615  goto err_drop;
616  }
617  if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
618  __skb_queue_tail(&neigh->queue, skb);
619  else {
620  ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
621  skb_queue_len(&neigh->queue));
622  goto err_drop;
623  }
624  } else {
625  spin_unlock_irqrestore(&priv->lock, flags);
626  ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
627  ipoib_neigh_put(neigh);
628  return;
629  }
630  } else {
631  neigh->ah = NULL;
632 
633  if (!path->query && path_rec_start(dev, path))
634  goto err_list;
635 
636  __skb_queue_tail(&neigh->queue, skb);
637  }
638 
639  spin_unlock_irqrestore(&priv->lock, flags);
640  ipoib_neigh_put(neigh);
641  return;
642 
643 err_list:
644  list_del(&neigh->list);
645 
646 err_path:
647  ipoib_neigh_free(neigh);
648 err_drop:
649  ++dev->stats.tx_dropped;
650  dev_kfree_skb_any(skb);
651 
652  spin_unlock_irqrestore(&priv->lock, flags);
653  ipoib_neigh_put(neigh);
654 }
655 
656 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
657  struct ipoib_cb *cb)
658 {
659  struct ipoib_dev_priv *priv = netdev_priv(dev);
660  struct ipoib_path *path;
661  unsigned long flags;
662 
663  spin_lock_irqsave(&priv->lock, flags);
664 
665  path = __path_find(dev, cb->hwaddr + 4);
666  if (!path || !path->valid) {
667  int new_path = 0;
668 
669  if (!path) {
670  path = path_rec_create(dev, cb->hwaddr + 4);
671  new_path = 1;
672  }
673  if (path) {
674  __skb_queue_tail(&path->queue, skb);
675 
676  if (!path->query && path_rec_start(dev, path)) {
677  spin_unlock_irqrestore(&priv->lock, flags);
678  if (new_path)
679  path_free(dev, path);
680  return;
681  } else
682  __path_add(dev, path);
683  } else {
684  ++dev->stats.tx_dropped;
685  dev_kfree_skb_any(skb);
686  }
687 
688  spin_unlock_irqrestore(&priv->lock, flags);
689  return;
690  }
691 
692  if (path->ah) {
693  ipoib_dbg(priv, "Send unicast ARP to %04x\n",
694  be16_to_cpu(path->pathrec.dlid));
695 
696  spin_unlock_irqrestore(&priv->lock, flags);
697  ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
698  return;
699  } else if ((path->query || !path_rec_start(dev, path)) &&
700  skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
701  __skb_queue_tail(&path->queue, skb);
702  } else {
703  ++dev->stats.tx_dropped;
704  dev_kfree_skb_any(skb);
705  }
706 
707  spin_unlock_irqrestore(&priv->lock, flags);
708 }
709 
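/*
 * Transmit entry point.  The destination hardware address was stashed
 * in skb->cb by ipoib_hard_header(); hwaddr[4] == 0xff marks multicast.
 * IP/IPv6 unicast goes through the neighbour hash, while ARP/RARP
 * always triggers a path record lookup.
 */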
710 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
711 {
712  struct ipoib_dev_priv *priv = netdev_priv(dev);
713  struct ipoib_neigh *neigh;
714  struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
715  struct ipoib_header *header;
716  unsigned long flags;
717 
718  header = (struct ipoib_header *) skb->data;
719 
720  if (unlikely(cb->hwaddr[4] == 0xff)) {
721  /* multicast, arrange "if" according to probability */
722  if ((header->proto != htons(ETH_P_IP)) &&
723  (header->proto != htons(ETH_P_IPV6)) &&
724  (header->proto != htons(ETH_P_ARP)) &&
725  (header->proto != htons(ETH_P_RARP))) {
726  /* ethertype not supported by IPoIB */
727  ++dev->stats.tx_dropped;
728  dev_kfree_skb_any(skb);
729  return NETDEV_TX_OK;
730  }
731  /* Add in the P_Key for multicast */
732  cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
733  cb->hwaddr[9] = priv->pkey & 0xff;
734 
735  neigh = ipoib_neigh_get(dev, cb->hwaddr);
736  if (likely(neigh))
737  goto send_using_neigh;
738  ipoib_mcast_send(dev, cb->hwaddr, skb);
739  return NETDEV_TX_OK;
740  }
741 
742  /* unicast, arrange "switch" according to probability */
743  switch (header->proto) {
744  case htons(ETH_P_IP):
745  case htons(ETH_P_IPV6):
746  neigh = ipoib_neigh_get(dev, cb->hwaddr);
747  if (unlikely(!neigh)) {
748  neigh_add_path(skb, cb->hwaddr, dev);
749  return NETDEV_TX_OK;
750  }
751  break;
752  case htons(ETH_P_ARP):
753  case htons(ETH_P_RARP):
754  /* for unicast ARP and RARP, always perform a path lookup */
755  unicast_arp_send(skb, dev, cb);
756  return NETDEV_TX_OK;
757  default:
758  /* ethertype not supported by IPoIB */
759  ++dev->stats.tx_dropped;
760  dev_kfree_skb_any(skb);
761  return NETDEV_TX_OK;
762  }
763 
764 send_using_neigh:
765  /* note we now hold a ref to neigh */
766  if (ipoib_cm_get(neigh)) {
767  if (ipoib_cm_up(neigh)) {
768  ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
769  goto unref;
770  }
771  } else if (neigh->ah) {
772  ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
773  goto unref;
774  }
775 
776  if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
777  spin_lock_irqsave(&priv->lock, flags);
778  __skb_queue_tail(&neigh->queue, skb);
779  spin_unlock_irqrestore(&priv->lock, flags);
780  } else {
781  ++dev->stats.tx_dropped;
782  dev_kfree_skb_any(skb);
783  }
784 
785 unref:
786  ipoib_neigh_put(neigh);
787 
788  return NETDEV_TX_OK;
789 }
790 
791 static void ipoib_timeout(struct net_device *dev)
792 {
793  struct ipoib_dev_priv *priv = netdev_priv(dev);
794 
795  ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
796  jiffies_to_msecs(jiffies - dev->trans_start));
797  ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
798  netif_queue_stopped(dev),
799  priv->tx_head, priv->tx_tail);
800  /* XXX reset QP, etc. */
801 }
802 
803 static int ipoib_hard_header(struct sk_buff *skb,
804  struct net_device *dev,
805  unsigned short type,
806  const void *daddr, const void *saddr, unsigned len)
807 {
808  struct ipoib_header *header;
809  struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
810 
811  header = (struct ipoib_header *) skb_push(skb, sizeof *header);
812 
813  header->proto = htons(type);
814  header->reserved = 0;
815 
816  /*
817  * we don't rely on dst_entry structure, always stuff the
818  * destination address into skb->cb so we can figure out where
819  * to send the packet later.
820  */
821  memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
822 
823  return 0;
824 }
825 
826 static void ipoib_set_mcast_list(struct net_device *dev)
827 {
828  struct ipoib_dev_priv *priv = netdev_priv(dev);
829 
830  if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
831  ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
832  return;
833  }
834 
835  queue_work(ipoib_workqueue, &priv->restart_task);
836 }
837 
838 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
839 {
840  /*
841  * Use only the address parts that contribute to spreading.
842  * The subnet prefix is not used, as one cannot connect to the
843  * same remote port (GUID) using the same remote QPN via two
844  * different subnets.
845  */
846  /* qpn octets[1:4) & port GUID octets[12:20) */
847  u32 *daddr_32 = (u32 *) daddr;
848  u32 hv;
849 
850  hv = jhash_3words(daddr_32[3], daddr_32[4], 0xFFFFFF & daddr_32[0], 0);
851  return hv & htbl->mask;
852 }
853 
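/*
 * Lock-free neighbour lookup under rcu_read_lock_bh(); the reference is
 * taken with atomic_inc_not_zero() so an entry racing with deletion is
 * simply skipped.
 */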
854 struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
855 {
856  struct ipoib_dev_priv *priv = netdev_priv(dev);
857  struct ipoib_neigh_table *ntbl = &priv->ntbl;
858  struct ipoib_neigh_hash *htbl;
859  struct ipoib_neigh *neigh = NULL;
860  u32 hash_val;
861 
862  rcu_read_lock_bh();
863 
864  htbl = rcu_dereference_bh(ntbl->htbl);
865 
866  if (!htbl)
867  goto out_unlock;
868 
869  hash_val = ipoib_addr_hash(htbl, daddr);
870  for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
871  neigh != NULL;
872  neigh = rcu_dereference_bh(neigh->hnext)) {
873  if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
874  /* found, take one ref on behalf of the caller */
875  if (!atomic_inc_not_zero(&neigh->refcnt)) {
876  /* deleted */
877  neigh = NULL;
878  goto out_unlock;
879  }
880  neigh->alive = jiffies;
881  goto out_unlock;
882  }
883  }
884 
885 out_unlock:
886  rcu_read_unlock_bh();
887  return neigh;
888 }
889 
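/*
 * Periodic neighbour garbage collection: entries idle for two ARP GC
 * intervals are unlinked under priv->lock and freed via call_rcu().
 */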
890 static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
891 {
892  struct ipoib_neigh_table *ntbl = &priv->ntbl;
893  struct ipoib_neigh_hash *htbl;
894  unsigned long neigh_obsolete;
895  unsigned long dt;
896  unsigned long flags;
897  int i;
898 
899  if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
900  return;
901 
902  spin_lock_irqsave(&priv->lock, flags);
903 
904  htbl = rcu_dereference_protected(ntbl->htbl,
905  lockdep_is_held(&priv->lock));
906 
907  if (!htbl)
908  goto out_unlock;
909 
910  /* neigh is obsolete if it was idle for two GC periods */
911  dt = 2 * arp_tbl.gc_interval;
912  neigh_obsolete = jiffies - dt;
913  /* handle possible race condition */
914  if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
915  goto out_unlock;
916 
917  for (i = 0; i < htbl->size; i++) {
918  struct ipoib_neigh *neigh;
919  struct ipoib_neigh __rcu **np = &htbl->buckets[i];
920 
921  while ((neigh = rcu_dereference_protected(*np,
922  lockdep_is_held(&priv->lock))) != NULL) {
923  /* was the neigh idle for two GC periods */
924  if (time_after(neigh_obsolete, neigh->alive)) {
925  rcu_assign_pointer(*np,
926  rcu_dereference_protected(neigh->hnext,
927  lockdep_is_held(&priv->lock)));
928  /* remove from path/mc list */
929  list_del(&neigh->list);
930  call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
931  } else {
932  np = &neigh->hnext;
933  }
934 
935  }
936  }
937 
938 out_unlock:
939  spin_unlock_irqrestore(&priv->lock, flags);
940 }
941 
942 static void ipoib_reap_neigh(struct work_struct *work)
943 {
944  struct ipoib_dev_priv *priv =
945  container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
946 
947  __ipoib_reap_neigh(priv);
948 
949  if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
950  queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
951  arp_tbl.gc_interval);
952 }
953 
954 
955 static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
956  struct net_device *dev)
957 {
958  struct ipoib_neigh *neigh;
959 
960  neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
961  if (!neigh)
962  return NULL;
963 
964  neigh->dev = dev;
965  memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
966  skb_queue_head_init(&neigh->queue);
967  INIT_LIST_HEAD(&neigh->list);
968  ipoib_cm_set(neigh, NULL);
969  /* one ref on behalf of the caller */
970  atomic_set(&neigh->refcnt, 1);
971 
972  return neigh;
973 }
974 
975 struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
976  struct net_device *dev)
977 {
978  struct ipoib_dev_priv *priv = netdev_priv(dev);
979  struct ipoib_neigh_table *ntbl = &priv->ntbl;
980  struct ipoib_neigh_hash *htbl;
981  struct ipoib_neigh *neigh;
982  u32 hash_val;
983 
984  htbl = rcu_dereference_protected(ntbl->htbl,
985  lockdep_is_held(&priv->lock));
986  if (!htbl) {
987  neigh = NULL;
988  goto out_unlock;
989  }
990 
991  /* need to add a new neigh, but maybe some other thread succeeded?
992  * recalc hash, maybe hash resize took place so we do a search
993  */
994  hash_val = ipoib_addr_hash(htbl, daddr);
995  for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
996  lockdep_is_held(&priv->lock));
997  neigh != NULL;
998  neigh = rcu_dereference_protected(neigh->hnext,
999  lockdep_is_held(&priv->lock))) {
1000  if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1001  /* found, take one ref on behalf of the caller */
1002  if (!atomic_inc_not_zero(&neigh->refcnt)) {
1003  /* deleted */
1004  neigh = NULL;
1005  break;
1006  }
1007  neigh->alive = jiffies;
1008  goto out_unlock;
1009  }
1010  }
1011 
1012  neigh = ipoib_neigh_ctor(daddr, dev);
1013  if (!neigh)
1014  goto out_unlock;
1015 
1016  /* one ref on behalf of the hash table */
1017  atomic_inc(&neigh->refcnt);
1018  neigh->alive = jiffies;
1019  /* put in hash */
1020  rcu_assign_pointer(neigh->hnext,
1021  rcu_dereference_protected(htbl->buckets[hash_val],
1022  lockdep_is_held(&priv->lock)));
1023  rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1024  atomic_inc(&ntbl->entries);
1025 
1026 out_unlock:
1027 
1028  return neigh;
1029 }
1030 
1031 void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
1032 {
1033  /* neigh reference count was dropped to zero */
1034  struct net_device *dev = neigh->dev;
1035  struct ipoib_dev_priv *priv = netdev_priv(dev);
1036  struct sk_buff *skb;
1037  if (neigh->ah)
1038  ipoib_put_ah(neigh->ah);
1039  while ((skb = __skb_dequeue(&neigh->queue))) {
1040  ++dev->stats.tx_dropped;
1041  dev_kfree_skb_any(skb);
1042  }
1043  if (ipoib_cm_get(neigh))
1044  ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
1045  ipoib_dbg(netdev_priv(dev),
1046  "neigh free for %06x %pI6\n",
1047  IPOIB_QPN(neigh->daddr),
1048  neigh->daddr + 4);
1049  kfree(neigh);
1050  if (atomic_dec_and_test(&priv->ntbl.entries)) {
1051  if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
1052  complete(&priv->ntbl.flushed);
1053  }
1054 }
1055 
1056 static void ipoib_neigh_reclaim(struct rcu_head *rp)
1057 {
1058  /* Called as a result of removal from hash table */
1059  struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
1060  /* note TX context may hold another ref */
1061  ipoib_neigh_put(neigh);
1062 }
1063 
1064 void ipoib_neigh_free(struct ipoib_neigh *neigh)
1065 {
1066  struct net_device *dev = neigh->dev;
1067  struct ipoib_dev_priv *priv = netdev_priv(dev);
1068  struct ipoib_neigh_table *ntbl = &priv->ntbl;
1069  struct ipoib_neigh_hash *htbl;
1070  struct ipoib_neigh __rcu **np;
1071  struct ipoib_neigh *n;
1072  u32 hash_val;
1073 
1074  htbl = rcu_dereference_protected(ntbl->htbl,
1075  lockdep_is_held(&priv->lock));
1076  if (!htbl)
1077  return;
1078 
1079  hash_val = ipoib_addr_hash(htbl, neigh->daddr);
1080  np = &htbl->buckets[hash_val];
1081  for (n = rcu_dereference_protected(*np,
1082  lockdep_is_held(&priv->lock));
1083  n != NULL;
1084  n = rcu_dereference_protected(*np,
1085  lockdep_is_held(&priv->lock))) {
1086  if (n == neigh) {
1087  /* found */
1088  rcu_assign_pointer(*np,
1089  rcu_dereference_protected(neigh->hnext,
1090  lockdep_is_held(&priv->lock)));
1091  call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1092  return;
1093  } else {
1094  np = &n->hnext;
1095  }
1096  }
1097 }
1098 
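/*
 * The neighbour hash is sized to the next power of two above
 * arp_tbl.gc_thresh3, so htbl->mask = size - 1 can be used to reduce
 * the jhash value in ipoib_addr_hash().
 */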
1099 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
1100 {
1101  struct ipoib_neigh_table *ntbl = &priv->ntbl;
1102  struct ipoib_neigh_hash *htbl;
1103  struct ipoib_neigh **buckets;
1104  u32 size;
1105 
1106  clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1107  ntbl->htbl = NULL;
1108  htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
1109  if (!htbl)
1110  return -ENOMEM;
1111  set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1112  size = roundup_pow_of_two(arp_tbl.gc_thresh3);
1113  buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
1114  if (!buckets) {
1115  kfree(htbl);
1116  return -ENOMEM;
1117  }
1118  htbl->size = size;
1119  htbl->mask = (size - 1);
1120  htbl->buckets = buckets;
1121  ntbl->htbl = htbl;
1122  htbl->ntbl = ntbl;
1123  atomic_set(&ntbl->entries, 0);
1124 
1125  /* start garbage collection */
1126  clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1127  queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
1128  arp_tbl.gc_interval);
1129 
1130  return 0;
1131 }
1132 
1133 static void neigh_hash_free_rcu(struct rcu_head *head)
1134 {
1135  struct ipoib_neigh_hash *htbl = container_of(head,
1136  struct ipoib_neigh_hash,
1137  rcu);
1138  struct ipoib_neigh __rcu **buckets = htbl->buckets;
1139  struct ipoib_neigh_table *ntbl = htbl->ntbl;
1140 
1141  kfree(buckets);
1142  kfree(htbl);
1143  complete(&ntbl->deleted);
1144 }
1145 
1146 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
1147 {
1148  struct ipoib_dev_priv *priv = netdev_priv(dev);
1149  struct ipoib_neigh_table *ntbl = &priv->ntbl;
1150  struct ipoib_neigh_hash *htbl;
1151  unsigned long flags;
1152  int i;
1153 
1154  /* remove all neigh connected to a given path or mcast */
1155  spin_lock_irqsave(&priv->lock, flags);
1156 
1157  htbl = rcu_dereference_protected(ntbl->htbl,
1158  lockdep_is_held(&priv->lock));
1159 
1160  if (!htbl)
1161  goto out_unlock;
1162 
1163  for (i = 0; i < htbl->size; i++) {
1164  struct ipoib_neigh *neigh;
1165  struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1166 
1167  while ((neigh = rcu_dereference_protected(*np,
1168  lockdep_is_held(&priv->lock))) != NULL) {
1169  /* delete neighs belonging to this parent */
1170  if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
1171  rcu_assign_pointer(*np,
1172  rcu_dereference_protected(neigh->hnext,
1173  lockdep_is_held(&priv->lock)));
1174  /* remove from parent list */
1175  list_del(&neigh->list);
1176  call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1177  } else {
1178  np = &neigh->hnext;
1179  }
1180 
1181  }
1182  }
1183 out_unlock:
1184  spin_unlock_irqrestore(&priv->lock, flags);
1185 }
1186 
1187 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1188 {
1189  struct ipoib_neigh_table *ntbl = &priv->ntbl;
1190  struct ipoib_neigh_hash *htbl;
1191  unsigned long flags;
1192  int i, wait_flushed = 0;
1193 
1194  init_completion(&priv->ntbl.flushed);
1195 
1196  spin_lock_irqsave(&priv->lock, flags);
1197 
1198  htbl = rcu_dereference_protected(ntbl->htbl,
1199  lockdep_is_held(&priv->lock));
1200  if (!htbl)
1201  goto out_unlock;
1202 
1203  wait_flushed = atomic_read(&priv->ntbl.entries);
1204  if (!wait_flushed)
1205  goto free_htbl;
1206 
1207  for (i = 0; i < htbl->size; i++) {
1208  struct ipoib_neigh *neigh;
1209  struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1210 
1211  while ((neigh = rcu_dereference_protected(*np,
1212  lockdep_is_held(&priv->lock))) != NULL) {
1213  rcu_assign_pointer(*np,
1214  rcu_dereference_protected(neigh->hnext,
1215  lockdep_is_held(&priv->lock)));
1216  /* remove from path/mc list */
1217  list_del(&neigh->list);
1218  call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1219  }
1220  }
1221 
1222 free_htbl:
1223  rcu_assign_pointer(ntbl->htbl, NULL);
1224  call_rcu(&htbl->rcu, neigh_hash_free_rcu);
1225 
1226 out_unlock:
1227  spin_unlock_irqrestore(&priv->lock, flags);
1228  if (wait_flushed)
1229  wait_for_completion(&priv->ntbl.flushed);
1230 }
1231 
1232 static void ipoib_neigh_hash_uninit(struct net_device *dev)
1233 {
1234  struct ipoib_dev_priv *priv = netdev_priv(dev);
1235  int stopped;
1236 
1237  ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
1238  init_completion(&priv->ntbl.deleted);
1239  set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1240 
1241  /* Stop GC; if called after an init failure, the work must be cancelled */
1242  stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1243  if (!stopped)
1244  cancel_delayed_work(&priv->neigh_reap_task);
1245 
1246  ipoib_flush_neighs(priv);
1247 
1248  wait_for_completion(&priv->ntbl.deleted);
1249 }
1250 
1251 
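/*
 * Per-port initialization: the receive ring is a plain kzalloc(), the
 * (larger) send ring uses vzalloc(), then ipoib_ib_dev_init() sets up
 * the IB resources for the port.
 */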
1252 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
1253 {
1254  struct ipoib_dev_priv *priv = netdev_priv(dev);
1255 
1256  if (ipoib_neigh_hash_init(priv) < 0)
1257  goto out;
1258  /* Allocate RX/TX "rings" to hold queued skbs */
1259  priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
1260  GFP_KERNEL);
1261  if (!priv->rx_ring) {
1262  printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
1263  ca->name, ipoib_recvq_size);
1264  goto out_neigh_hash_cleanup;
1265  }
1266 
1267  priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
1268  if (!priv->tx_ring) {
1269  printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
1270  ca->name, ipoib_sendq_size);
1271  goto out_rx_ring_cleanup;
1272  }
1273 
1274  /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
1275 
1276  if (ipoib_ib_dev_init(dev, ca, port))
1277  goto out_tx_ring_cleanup;
1278 
1279  return 0;
1280 
1281 out_tx_ring_cleanup:
1282  vfree(priv->tx_ring);
1283 
1284 out_rx_ring_cleanup:
1285  kfree(priv->rx_ring);
1286 
1287 out_neigh_hash_cleanup:
1288  ipoib_neigh_hash_uninit(dev);
1289 out:
1290  return -ENOMEM;
1291 }
1292 
1293 void ipoib_dev_cleanup(struct net_device *dev)
1294 {
1295  struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
1296  LIST_HEAD(head);
1297 
1298  ASSERT_RTNL();
1299 
1300  ipoib_delete_debug_files(dev);
1301 
1302  /* Delete any child interfaces first */
1303  list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
1304  /* Stop GC on child */
1305  set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
1306  cancel_delayed_work(&cpriv->neigh_reap_task);
1307  unregister_netdevice_queue(cpriv->dev, &head);
1308  }
1309  unregister_netdevice_many(&head);
1310 
1311  ipoib_ib_dev_cleanup(dev);
1312 
1313  kfree(priv->rx_ring);
1314  vfree(priv->tx_ring);
1315 
1316  priv->rx_ring = NULL;
1317  priv->tx_ring = NULL;
1318 
1319  ipoib_neigh_hash_uninit(dev);
1320 }
1321 
1322 static const struct header_ops ipoib_header_ops = {
1323  .create = ipoib_hard_header,
1324 };
1325 
1326 static const struct net_device_ops ipoib_netdev_ops = {
1327  .ndo_uninit = ipoib_uninit,
1328  .ndo_open = ipoib_open,
1329  .ndo_stop = ipoib_stop,
1330  .ndo_change_mtu = ipoib_change_mtu,
1331  .ndo_fix_features = ipoib_fix_features,
1332  .ndo_start_xmit = ipoib_start_xmit,
1333  .ndo_tx_timeout = ipoib_timeout,
1334  .ndo_set_rx_mode = ipoib_set_mcast_list,
1335 };
1336 
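/*
 * net_device initialization callback passed to alloc_netdev() by
 * ipoib_intf_alloc(): install the netdev/header ops, mark the device
 * ARPHRD_INFINIBAND with a 20-byte address, and initialize the
 * per-device locks, lists and work items.
 */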
1337 void ipoib_setup(struct net_device *dev)
1338 {
1339  struct ipoib_dev_priv *priv = netdev_priv(dev);
1340 
1341  dev->netdev_ops = &ipoib_netdev_ops;
1342  dev->header_ops = &ipoib_header_ops;
1343 
1344  ipoib_set_ethtool_ops(dev);
1345 
1346  netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
1347 
1348  dev->watchdog_timeo = HZ;
1349 
1350  dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1351 
1352  dev->hard_header_len = IPOIB_ENCAP_LEN;
1353  dev->addr_len = INFINIBAND_ALEN;
1354  dev->type = ARPHRD_INFINIBAND;
1355  dev->tx_queue_len = ipoib_sendq_size * 2;
1356  dev->features = (NETIF_F_VLAN_CHALLENGED |
1357  NETIF_F_HIGHDMA);
1358  dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1359 
1360  memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
1361 
1362  netif_carrier_off(dev);
1363 
1364  priv->dev = dev;
1365 
1366  spin_lock_init(&priv->lock);
1367 
1368  mutex_init(&priv->vlan_mutex);
1369 
1370  INIT_LIST_HEAD(&priv->path_list);
1371  INIT_LIST_HEAD(&priv->child_intfs);
1372  INIT_LIST_HEAD(&priv->dead_ahs);
1373  INIT_LIST_HEAD(&priv->multicast_list);
1374 
1375  INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
1376  INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
1377  INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
1378  INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
1379  INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
1380  INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
1381  INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
1382  INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
1383  INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
1384 }
1385 
1386 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
1387 {
1388  struct net_device *dev;
1389 
1390  dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
1391  ipoib_setup);
1392  if (!dev)
1393  return NULL;
1394 
1395  return netdev_priv(dev);
1396 }
1397 
1398 static ssize_t show_pkey(struct device *dev,
1399  struct device_attribute *attr, char *buf)
1400 {
1401  struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
1402 
1403  return sprintf(buf, "0x%04x\n", priv->pkey);
1404 }
1405 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
1406 
1407 static ssize_t show_umcast(struct device *dev,
1408  struct device_attribute *attr, char *buf)
1409 {
1410  struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
1411 
1412  return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
1413 }
1414 
1415 void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
1416 {
1417  struct ipoib_dev_priv *priv = netdev_priv(ndev);
1418 
1419  if (umcast_val > 0) {
1420  set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
1421  ipoib_warn(priv, "ignoring multicast groups joined directly "
1422  "by userspace\n");
1423  } else
1424  clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
1425 }
1426 
1427 static ssize_t set_umcast(struct device *dev,
1428  struct device_attribute *attr,
1429  const char *buf, size_t count)
1430 {
1431  unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
1432 
1433  ipoib_set_umcast(to_net_dev(dev), umcast_val);
1434 
1435  return count;
1436 }
1437 static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
1438 
1439 int ipoib_add_umcast_attr(struct net_device *dev)
1440 {
1441  return device_create_file(&dev->dev, &dev_attr_umcast);
1442 }
1443 
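/*
 * The create_child/delete_child sysfs attributes take a P_Key value and
 * add or remove a child interface on it; the full-membership bit
 * (0x8000) is forced on before the child is created.
 */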
1444 static ssize_t create_child(struct device *dev,
1445  struct device_attribute *attr,
1446  const char *buf, size_t count)
1447 {
1448  int pkey;
1449  int ret;
1450 
1451  if (sscanf(buf, "%i", &pkey) != 1)
1452  return -EINVAL;
1453 
1454  if (pkey < 0 || pkey > 0xffff)
1455  return -EINVAL;
1456 
1457  /*
1458  * Set the full membership bit, so that we join the right
1459  * broadcast group, etc.
1460  */
1461  pkey |= 0x8000;
1462 
1463  ret = ipoib_vlan_add(to_net_dev(dev), pkey);
1464 
1465  return ret ? ret : count;
1466 }
1467 static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
1468 
1469 static ssize_t delete_child(struct device *dev,
1470  struct device_attribute *attr,
1471  const char *buf, size_t count)
1472 {
1473  int pkey;
1474  int ret;
1475 
1476  if (sscanf(buf, "%i", &pkey) != 1)
1477  return -EINVAL;
1478 
1479  if (pkey < 0 || pkey > 0xffff)
1480  return -EINVAL;
1481 
1482  ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
1483 
1484  return ret ? ret : count;
1485 
1486 }
1487 static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
1488 
1489 int ipoib_add_pkey_attr(struct net_device *dev)
1490 {
1491  return device_create_file(&dev->dev, &dev_attr_pkey);
1492 }
1493 
1494 int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
1495 {
1496  struct ib_device_attr *device_attr;
1497  int result = -ENOMEM;
1498 
1499  device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
1500  if (!device_attr) {
1501  printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
1502  hca->name, sizeof *device_attr);
1503  return result;
1504  }
1505 
1506  result = ib_query_device(hca, device_attr);
1507  if (result) {
1508  printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
1509  hca->name, result);
1510  kfree(device_attr);
1511  return result;
1512  }
1513  priv->hca_caps = device_attr->device_cap_flags;
1514 
1515  kfree(device_attr);
1516 
1517  if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1518  priv->dev->hw_features = NETIF_F_SG |
1519  NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1520 
1521  if (priv->hca_caps & IB_DEVICE_UD_TSO)
1522  priv->dev->hw_features |= NETIF_F_TSO;
1523 
1524  priv->dev->features |= priv->dev->hw_features;
1525  }
1526 
1527  return 0;
1528 }
1529 
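/*
 * Bring up one IB port as an ipoib netdev: query the port MTU, P_Key
 * and GID, build the 20-byte dev_addr (QPN placeholder + GID), register
 * an IB event handler and the net_device, then create the sysfs knobs.
 */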
1530 static struct net_device *ipoib_add_port(const char *format,
1531  struct ib_device *hca, u8 port)
1532 {
1533  struct ipoib_dev_priv *priv;
1534  struct ib_port_attr attr;
1535  int result = -ENOMEM;
1536 
1537  priv = ipoib_intf_alloc(format);
1538  if (!priv)
1539  goto alloc_mem_failed;
1540 
1541  SET_NETDEV_DEV(priv->dev, hca->dma_device);
1542  priv->dev->dev_id = port - 1;
1543 
1544  if (!ib_query_port(hca, port, &attr))
1545  priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
1546  else {
1547  printk(KERN_WARNING "%s: ib_query_port %d failed\n",
1548  hca->name, port);
1549  goto device_init_failed;
1550  }
1551 
1552  /* MTU will be reset when mcast join happens */
1553  priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
1554  priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
1555 
1556  priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
1557 
1558  result = ib_query_pkey(hca, port, 0, &priv->pkey);
1559  if (result) {
1560  printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
1561  hca->name, port, result);
1562  goto device_init_failed;
1563  }
1564 
1565  if (ipoib_set_dev_features(priv, hca))
1566  goto device_init_failed;
1567 
1568  /*
1569  * Set the full membership bit, so that we join the right
1570  * broadcast group, etc.
1571  */
1572  priv->pkey |= 0x8000;
1573 
1574  priv->dev->broadcast[8] = priv->pkey >> 8;
1575  priv->dev->broadcast[9] = priv->pkey & 0xff;
1576 
1577  result = ib_query_gid(hca, port, 0, &priv->local_gid);
1578  if (result) {
1579  printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
1580  hca->name, port, result);
1581  goto device_init_failed;
1582  } else
1583  memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
1584 
1585  result = ipoib_dev_init(priv->dev, hca, port);
1586  if (result < 0) {
1587  printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
1588  hca->name, port, result);
1589  goto device_init_failed;
1590  }
1591 
1592  INIT_IB_EVENT_HANDLER(&priv->event_handler,
1593  priv->ca, ipoib_event);
1594  result = ib_register_event_handler(&priv->event_handler);
1595  if (result < 0) {
1596  printk(KERN_WARNING "%s: ib_register_event_handler failed for "
1597  "port %d (ret = %d)\n",
1598  hca->name, port, result);
1599  goto event_failed;
1600  }
1601 
1602  result = register_netdev(priv->dev);
1603  if (result) {
1604  printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
1605  hca->name, port, result);
1606  goto register_failed;
1607  }
1608 
1609  ipoib_create_debug_files(priv->dev);
1610 
1611  if (ipoib_cm_add_mode_attr(priv->dev))
1612  goto sysfs_failed;
1613  if (ipoib_add_pkey_attr(priv->dev))
1614  goto sysfs_failed;
1615  if (ipoib_add_umcast_attr(priv->dev))
1616  goto sysfs_failed;
1617  if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
1618  goto sysfs_failed;
1619  if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
1620  goto sysfs_failed;
1621 
1622  return priv->dev;
1623 
1624 sysfs_failed:
1625  ipoib_delete_debug_files(priv->dev);
1626  unregister_netdev(priv->dev);
1627 
1628 register_failed:
1629  ib_unregister_event_handler(&priv->event_handler);
1630  /* Stop GC if started before flush */
1631  set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1632  cancel_delayed_work(&priv->neigh_reap_task);
1633  flush_workqueue(ipoib_workqueue);
1634 
1635 event_failed:
1636  ipoib_dev_cleanup(priv->dev);
1637 
1638 device_init_failed:
1639  free_netdev(priv->dev);
1640 
1641 alloc_mem_failed:
1642  return ERR_PTR(result);
1643 }
1644 
1645 static void ipoib_add_one(struct ib_device *device)
1646 {
1647  struct list_head *dev_list;
1648  struct net_device *dev;
1649  struct ipoib_dev_priv *priv;
1650  int s, e, p;
1651 
1652  if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1653  return;
1654 
1655  dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
1656  if (!dev_list)
1657  return;
1658 
1659  INIT_LIST_HEAD(dev_list);
1660 
1661  if (device->node_type == RDMA_NODE_IB_SWITCH) {
1662  s = 0;
1663  e = 0;
1664  } else {
1665  s = 1;
1666  e = device->phys_port_cnt;
1667  }
1668 
1669  for (p = s; p <= e; ++p) {
1670  if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
1671  continue;
1672  dev = ipoib_add_port("ib%d", device, p);
1673  if (!IS_ERR(dev)) {
1674  priv = netdev_priv(dev);
1675  list_add_tail(&priv->list, dev_list);
1676  }
1677  }
1678 
1679  ib_set_client_data(device, &ipoib_client, dev_list);
1680 }
1681 
1682 static void ipoib_remove_one(struct ib_device *device)
1683 {
1684  struct ipoib_dev_priv *priv, *tmp;
1685  struct list_head *dev_list;
1686 
1687  if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1688  return;
1689 
1690  dev_list = ib_get_client_data(device, &ipoib_client);
1691 
1692  list_for_each_entry_safe(priv, tmp, dev_list, list) {
1693  ib_unregister_event_handler(&priv->event_handler);
1694 
1695  rtnl_lock();
1696  dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
1697  rtnl_unlock();
1698 
1699  /* Stop GC */
1700  set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
1701  cancel_delayed_work(&priv->neigh_reap_task);
1702  flush_workqueue(ipoib_workqueue);
1703 
1704  unregister_netdev(priv->dev);
1705  free_netdev(priv->dev);
1706  }
1707 
1708  kfree(dev_list);
1709 }
1710 
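/*
 * Module init: the send/receive ring sizes given as module parameters
 * are rounded up to a power of two and clamped to the supported range
 * before debugfs, the workqueue and the IB client are registered.
 */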
1711 static int __init ipoib_init_module(void)
1712 {
1713  int ret;
1714 
1715  ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
1716  ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
1717  ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
1718 
1719  ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
1720  ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
1721  ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
1722 #ifdef CONFIG_INFINIBAND_IPOIB_CM
1723  ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
1724 #endif
1725 
1726  /*
1727  * When copying small received packets, we only copy from the
1728  * linear data part of the SKB, so we rely on this condition.
1729  */
1730  BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
1731 
1732  ret = ipoib_register_debugfs();
1733  if (ret)
1734  return ret;
1735 
1736  /*
1737  * We create our own workqueue mainly because we want to be
1738  * able to flush it when devices are being removed. We can't
1739  * use schedule_work()/flush_scheduled_work() because both
1740  * unregister_netdev() and linkwatch_event take the rtnl lock,
1741  * so flush_scheduled_work() can deadlock during device
1742  * removal.
1743  */
1744  ipoib_workqueue = create_singlethread_workqueue("ipoib");
1745  if (!ipoib_workqueue) {
1746  ret = -ENOMEM;
1747  goto err_fs;
1748  }
1749 
1750  ib_sa_register_client(&ipoib_sa_client);
1751 
1752  ret = ib_register_client(&ipoib_client);
1753  if (ret)
1754  goto err_sa;
1755 
1756  ret = ipoib_netlink_init();
1757  if (ret)
1758  goto err_client;
1759 
1760  return 0;
1761 
1762 err_client:
1763  ib_unregister_client(&ipoib_client);
1764 
1765 err_sa:
1766  ib_sa_unregister_client(&ipoib_sa_client);
1767  destroy_workqueue(ipoib_workqueue);
1768 
1769 err_fs:
1770  ipoib_unregister_debugfs();
1771 
1772  return ret;
1773 }
1774 
1775 static void __exit ipoib_cleanup_module(void)
1776 {
1777  ipoib_netlink_fini();
1778  ib_unregister_client(&ipoib_client);
1779  ib_sa_unregister_client(&ipoib_sa_client);
1780  ipoib_unregister_debugfs();
1781  destroy_workqueue(ipoib_workqueue);
1782 }
1783 
1784 module_init(ipoib_init_module);
1785 module_exit(ipoib_cleanup_module);