Linux Kernel  3.7.1
cxgb3_offload.c
1 /*
2  * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses. You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  * Redistribution and use in source and binary forms, with or
11  * without modification, are permitted provided that the following
12  * conditions are met:
13  *
14  * - Redistributions of source code must retain the above
15  * copyright notice, this list of conditions and the following
16  * disclaimer.
17  *
18  * - Redistributions in binary form must reproduce the above
19  * copyright notice, this list of conditions and the following
20  * disclaimer in the documentation and/or other materials
21  * provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/list.h>
34 #include <linux/slab.h>
35 #include <net/neighbour.h>
36 #include <linux/notifier.h>
37 #include <linux/atomic.h>
38 #include <linux/proc_fs.h>
39 #include <linux/if_vlan.h>
40 #include <net/netevent.h>
41 #include <linux/highmem.h>
42 #include <linux/vmalloc.h>
43 #include <linux/export.h>
44 
45 #include "common.h"
46 #include "regs.h"
47 #include "cxgb3_ioctl.h"
48 #include "cxgb3_ctl_defs.h"
49 #include "cxgb3_defs.h"
50 #include "l2t.h"
51 #include "firmware_exports.h"
52 #include "cxgb3_offload.h"
53 
54 static LIST_HEAD(client_list);
55 static LIST_HEAD(ofld_dev_list);
56 static DEFINE_MUTEX(cxgb3_db_lock);
57 
58 static DEFINE_RWLOCK(adapter_list_lock);
59 static LIST_HEAD(adapter_list);
60 
61 static const unsigned int MAX_ATIDS = 64 * 1024;
62 static const unsigned int ATID_BASE = 0x10000;
63 
64 static void cxgb_neigh_update(struct neighbour *neigh);
65 static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
66  struct dst_entry *new, struct neighbour *new_neigh,
67  const void *daddr);
68 
69 static inline int offload_activated(struct t3cdev *tdev)
70 {
71  const struct adapter *adapter = tdev2adap(tdev);
72 
73  return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
74 }
75 
83 void cxgb3_register_client(struct cxgb3_client *client)
84 {
85  struct t3cdev *tdev;
86 
87  mutex_lock(&cxgb3_db_lock);
88  list_add_tail(&client->client_list, &client_list);
89 
90  if (client->add) {
91  list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
92  if (offload_activated(tdev))
93  client->add(tdev);
94  }
95  }
96  mutex_unlock(&cxgb3_db_lock);
97 }
98 
99 EXPORT_SYMBOL(cxgb3_register_client);
100 
108 void cxgb3_unregister_client(struct cxgb3_client *client)
109 {
110  struct t3cdev *tdev;
111 
112  mutex_lock(&cxgb3_db_lock);
113  list_del(&client->client_list);
114 
115  if (client->remove) {
116  list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
117  if (offload_activated(tdev))
118  client->remove(tdev);
119  }
120  }
121  mutex_unlock(&cxgb3_db_lock);
122 }
123 
124 EXPORT_SYMBOL(cxgb3_unregister_client);
125 
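/*
 * Illustrative sketch, not part of cxgb3_offload.c: a minimal offload client
 * built on the registration API above.  The my_ulp_* names are hypothetical;
 * struct cxgb3_client, struct t3cdev and the register/unregister calls come
 * from cxgb3_offload.h.
 */
#include <linux/module.h>
#include "cxgb3_offload.h"

static void my_ulp_add(struct t3cdev *tdev)
{
	/* Called for each already-activated offload device at registration
	 * time, and again via cxgb3_add_clients() when a device comes up. */
}

static void my_ulp_remove(struct t3cdev *tdev)
{
	/* Called when a device is deactivated (cxgb3_remove_clients()). */
}

static struct cxgb3_client my_ulp_client = {
	.name	= "my_ulp",
	.add	= my_ulp_add,
	.remove	= my_ulp_remove,
};

static int __init my_ulp_init(void)
{
	cxgb3_register_client(&my_ulp_client);
	return 0;
}
module_init(my_ulp_init);

static void __exit my_ulp_exit(void)
{
	cxgb3_unregister_client(&my_ulp_client);
}
module_exit(my_ulp_exit);

MODULE_LICENSE("GPL");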
132 void cxgb3_add_clients(struct t3cdev *tdev)
133 {
134  struct cxgb3_client *client;
135 
136  mutex_lock(&cxgb3_db_lock);
137  list_for_each_entry(client, &client_list, client_list) {
138  if (client->add)
139  client->add(tdev);
140  }
141  mutex_unlock(&cxgb3_db_lock);
142 }
143 
151 void cxgb3_remove_clients(struct t3cdev *tdev)
152 {
153  struct cxgb3_client *client;
154 
155  mutex_lock(&cxgb3_db_lock);
156  list_for_each_entry(client, &client_list, client_list) {
157  if (client->remove)
158  client->remove(tdev);
159  }
160  mutex_unlock(&cxgb3_db_lock);
161 }
162 
163 void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
164 {
165  struct cxgb3_client *client;
166 
167  mutex_lock(&cxgb3_db_lock);
168  list_for_each_entry(client, &client_list, client_list) {
169  if (client->event_handler)
170  client->event_handler(tdev, event, port);
171  }
172  mutex_unlock(&cxgb3_db_lock);
173 }
174 
175 static struct net_device *get_iff_from_mac(struct adapter *adapter,
176  const unsigned char *mac,
177  unsigned int vlan)
178 {
179  int i;
180 
181  for_each_port(adapter, i) {
182  struct net_device *dev = adapter->port[i];
183 
184  if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
185  if (vlan && vlan != VLAN_VID_MASK) {
186  rcu_read_lock();
187  dev = __vlan_find_dev_deep(dev, vlan);
188  rcu_read_unlock();
189  } else if (netif_is_bond_slave(dev)) {
190  while (dev->master)
191  dev = dev->master;
192  }
193  return dev;
194  }
195  }
196  return NULL;
197 }
198 
199 static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
200  void *data)
201 {
202  int i;
203  int ret = 0;
204  unsigned int val = 0;
205  struct ulp_iscsi_info *uiip = data;
206 
207  switch (req) {
208  case ULP_ISCSI_GET_PARAMS:
209  uiip->pdev = adapter->pdev;
210  uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
211  uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
212  uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
213 
214  val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
215  for (i = 0; i < 4; i++, val >>= 8)
216  uiip->pgsz_factor[i] = val & 0xFF;
217 
218  val = t3_read_reg(adapter, A_TP_PARA_REG7);
219  uiip->max_txsz =
220  uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
221  (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
222  /*
223  * On tx, the iscsi pdu has to be <= tx page size and has to
224  * fit into the Tx PM FIFO.
225  */
226  val = min(adapter->params.tp.tx_pg_size,
227  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
228  uiip->max_txsz = min(val, uiip->max_txsz);
229 
230  /* set MaxRxData to 16224 */
231  val = t3_read_reg(adapter, A_TP_PARA_REG2);
232  if ((val >> S_MAXRXDATA) != 0x3f60) {
233  val &= (1 << S_MAXRXDATA) - 1;
234  val |= V_MAXRXDATA(0x3f60);
235  printk(KERN_INFO
236  "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
237  adapter->name, val);
238  t3_write_reg(adapter, A_TP_PARA_REG2, val);
239  }
240 
241  /*
242  * on rx, the iscsi pdu has to be < rx page size and the
243  * max rx data length programmed in TP
244  */
245  val = min(adapter->params.tp.rx_pg_size,
246  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
247  S_MAXRXDATA) & M_MAXRXDATA);
248  uiip->max_rxsz = min(val, uiip->max_rxsz);
249  break;
250  case ULP_ISCSI_SET_PARAMS:
251  t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
252  /* program the ddp page sizes */
253  for (i = 0; i < 4; i++)
254  val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
255  if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
256  printk(KERN_INFO
257  "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
258  adapter->name, val, uiip->pgsz_factor[0],
259  uiip->pgsz_factor[1], uiip->pgsz_factor[2],
260  uiip->pgsz_factor[3]);
261  t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
262  }
263  break;
264  default:
265  ret = -EOPNOTSUPP;
266  }
267  return ret;
268 }
269 
270 /* Response queue used for RDMA events. */
271 #define ASYNC_NOTIF_RSPQ 0
272 
273 static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
274 {
275  int ret = 0;
276 
277  switch (req) {
278  case RDMA_GET_PARAMS: {
279  struct rdma_info *rdma = data;
280  struct pci_dev *pdev = adapter->pdev;
281 
282  rdma->udbell_physbase = pci_resource_start(pdev, 2);
283  rdma->udbell_len = pci_resource_len(pdev, 2);
284  rdma->tpt_base =
285  t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
286  rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
287  rdma->pbl_base =
288  t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
289  rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
290  rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
291  rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
292  rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
293  rdma->pdev = pdev;
294  break;
295  }
296  case RDMA_CQ_OP:{
297  unsigned long flags;
298  struct rdma_cq_op *rdma = data;
299 
300  /* may be called in any context */
301  spin_lock_irqsave(&adapter->sge.reg_lock, flags);
302  ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
303  rdma->credits);
304  spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
305  break;
306  }
307  case RDMA_GET_MEM:{
308  struct ch_mem_range *t = data;
309  struct mc7 *mem;
310 
311  if ((t->addr & 7) || (t->len & 7))
312  return -EINVAL;
313  if (t->mem_id == MEM_CM)
314  mem = &adapter->cm;
315  else if (t->mem_id == MEM_PMRX)
316  mem = &adapter->pmrx;
317  else if (t->mem_id == MEM_PMTX)
318  mem = &adapter->pmtx;
319  else
320  return -EINVAL;
321 
322  ret =
323  t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
324  (u64 *) t->buf);
325  if (ret)
326  return ret;
327  break;
328  }
329  case RDMA_CQ_SETUP:{
330  struct rdma_cq_setup *rdma = data;
331 
332  spin_lock_irq(&adapter->sge.reg_lock);
333  ret =
334  t3_sge_init_cqcntxt(adapter, rdma->id,
335  rdma->base_addr, rdma->size,
336  ASYNC_NOTIF_RSPQ,
337  rdma->ovfl_mode, rdma->credits,
338  rdma->credit_thres);
339  spin_unlock_irq(&adapter->sge.reg_lock);
340  break;
341  }
342  case RDMA_CQ_DISABLE:
343  spin_lock_irq(&adapter->sge.reg_lock);
344  ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
345  spin_unlock_irq(&adapter->sge.reg_lock);
346  break;
347  case RDMA_CTRL_QP_SETUP:{
348  struct rdma_ctrlqp_setup *rdma = data;
349 
350  spin_lock_irq(&adapter->sge.reg_lock);
351  ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
352  SGE_CNTXT_RDMA,
353  ASYNC_NOTIF_RSPQ,
354  rdma->base_addr, rdma->size,
355  FW_RI_TID_START, 1, 0);
356  spin_unlock_irq(&adapter->sge.reg_lock);
357  break;
358  }
359  case RDMA_GET_MIB: {
360  spin_lock(&adapter->stats_lock);
361  t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
362  spin_unlock(&adapter->stats_lock);
363  break;
364  }
365  default:
366  ret = -EOPNOTSUPP;
367  }
368  return ret;
369 }
370 
371 static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
372 {
373  struct adapter *adapter = tdev2adap(tdev);
374  struct tid_range *tid;
375  struct mtutab *mtup;
376  struct iff_mac *iffmacp;
377  struct ddp_params *ddpp;
378  struct adap_ports *ports;
379  struct ofld_page_info *rx_page_info;
380  struct tp_params *tp = &adapter->params.tp;
381  int i;
382 
383  switch (req) {
384  case GET_MAX_OUTSTANDING_WR:
385  *(unsigned int *)data = FW_WR_NUM;
386  break;
387  case GET_WR_LEN:
388  *(unsigned int *)data = WR_FLITS;
389  break;
390  case GET_TX_MAX_CHUNK:
391  *(unsigned int *)data = 1 << 20; /* 1MB */
392  break;
393  case GET_TID_RANGE:
394  tid = data;
395  tid->num = t3_mc5_size(&adapter->mc5) -
396  adapter->params.mc5.nroutes -
397  adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
398  tid->base = 0;
399  break;
400  case GET_STID_RANGE:
401  tid = data;
402  tid->num = adapter->params.mc5.nservers;
403  tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
404  adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
405  break;
406  case GET_L2T_CAPACITY:
407  *(unsigned int *)data = 2048;
408  break;
409  case GET_MTUS:
410  mtup = data;
411  mtup->size = NMTUS;
412  mtup->mtus = adapter->params.mtus;
413  break;
414  case GET_IFF_FROM_MAC:
415  iffmacp = data;
416  iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
417  iffmacp->vlan_tag &
418  VLAN_VID_MASK);
419  break;
420  case GET_DDP_PARAMS:
421  ddpp = data;
422  ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
423  ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
424  ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
425  break;
426  case GET_PORTS:
427  ports = data;
428  ports->nports = adapter->params.nports;
429  for_each_port(adapter, i)
430  ports->lldevs[i] = adapter->port[i];
431  break;
432  case ULP_ISCSI_GET_PARAMS:
433  case ULP_ISCSI_SET_PARAMS:
434  if (!offload_running(adapter))
435  return -EAGAIN;
436  return cxgb_ulp_iscsi_ctl(adapter, req, data);
437  case RDMA_GET_PARAMS:
438  case RDMA_CQ_OP:
439  case RDMA_CQ_SETUP:
440  case RDMA_CQ_DISABLE:
441  case RDMA_CTRL_QP_SETUP:
442  case RDMA_GET_MEM:
443  case RDMA_GET_MIB:
444  if (!offload_running(adapter))
445  return -EAGAIN;
446  return cxgb_rdma_ctl(adapter, req, data);
447  case GET_RX_PAGE_INFO:
448  rx_page_info = data;
449  rx_page_info->page_size = tp->rx_pg_size;
450  rx_page_info->num = tp->rx_num_pgs;
451  break;
452  case GET_ISCSI_IPV4ADDR: {
453  struct iscsi_ipv4addr *p = data;
454  struct port_info *pi = netdev_priv(p->dev);
455  p->ipv4addr = pi->iscsi_ipv4addr;
456  break;
457  }
458  case GET_EMBEDDED_INFO: {
459  struct ch_embedded_info *e = data;
460 
461  spin_lock(&adapter->stats_lock);
462  t3_get_fw_version(adapter, &e->fw_vers);
463  t3_get_tp_version(adapter, &e->tp_vers);
464  spin_unlock(&adapter->stats_lock);
465  break;
466  }
467  default:
468  return -EOPNOTSUPP;
469  }
470  return 0;
471 }
472 
473 /*
474  * Dummy handler for Rx offload packets in case we get an offload packet before
475  * proper processing is setup. This complains and drops the packet as it isn't
476  * normal to get offload packets at this stage.
477  */
478 static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
479  int n)
480 {
481  while (n--)
482  dev_kfree_skb_any(skbs[n]);
483  return 0;
484 }
485 
486 static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
487 {
488 }
489 
490 void cxgb3_set_dummy_ops(struct t3cdev *dev)
491 {
492  dev->recv = rx_offload_blackhole;
493  dev->neigh_update = dummy_neigh_update;
494 }
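/*
 * Added note (not in the original source): cxgb3_adapter_ofld() installs
 * these dummy ops when the t3cdev is first set up, so any offload packet or
 * neighbour event that arrives before cxgb3_offload_activate() has replaced
 * them with process_rx()/t3_l2t_update() is simply dropped or ignored.
 */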
495 
496 /*
497  * Free an active-open TID.
498  */
499 void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
500 {
501  struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
502  union active_open_entry *p = atid2entry(t, atid);
503  void *ctx = p->t3c_tid.ctx;
504 
505  spin_lock_bh(&t->atid_lock);
506  p->next = t->afree;
507  t->afree = p;
508  t->atids_in_use--;
509  spin_unlock_bh(&t->atid_lock);
510 
511  return ctx;
512 }
513 
514 EXPORT_SYMBOL(cxgb3_free_atid);
515 
516 /*
517  * Free a server TID and return it to the free pool.
518  */
519 void cxgb3_free_stid(struct t3cdev *tdev, int stid)
520 {
521  struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
522  union listen_entry *p = stid2entry(t, stid);
523 
524  spin_lock_bh(&t->stid_lock);
525  p->next = t->sfree;
526  t->sfree = p;
527  t->stids_in_use--;
528  spin_unlock_bh(&t->stid_lock);
529 }
530 
531 EXPORT_SYMBOL(cxgb3_free_stid);
532 
533 void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
534  void *ctx, unsigned int tid)
535 {
536  struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
537 
538  t->tid_tab[tid].client = client;
539  t->tid_tab[tid].ctx = ctx;
540  atomic_inc(&t->tids_in_use);
541 }
542 
543 EXPORT_SYMBOL(cxgb3_insert_tid);
544 
545 /*
546  * Populate a TID_RELEASE WR. The skb must be already properly sized.
547  */
548 static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
549 {
550  struct cpl_tid_release *req;
551 
552  skb->priority = CPL_PRIORITY_SETUP;
553  req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
554  req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
555  OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
556 }
557 
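/*
 * Added note (not in the original source): the worker below drains the
 * tid_release_list built up by cxgb3_queue_tid_release().  It prefers a
 * fresh GFP_KERNEL skb per CPL_TID_RELEASE, falls back to the pre-allocated
 * td->nofail_skb so at least one release always makes progress, and when
 * even that is unavailable it re-queues the entry and sets
 * td->release_list_incomplete so a later cxgb3_queue_tid_release() will
 * reschedule the work.
 */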
558 static void t3_process_tid_release_list(struct work_struct *work)
559 {
560  struct t3c_data *td = container_of(work, struct t3c_data,
561  tid_release_task);
562  struct sk_buff *skb;
563  struct t3cdev *tdev = td->dev;
564 
565 
566  spin_lock_bh(&td->tid_release_lock);
567  while (td->tid_release_list) {
568  struct t3c_tid_entry *p = td->tid_release_list;
569 
570  td->tid_release_list = p->ctx;
571  spin_unlock_bh(&td->tid_release_lock);
572 
573  skb = alloc_skb(sizeof(struct cpl_tid_release),
574  GFP_KERNEL);
575  if (!skb)
576  skb = td->nofail_skb;
577  if (!skb) {
578  spin_lock_bh(&td->tid_release_lock);
579  p->ctx = (void *)td->tid_release_list;
580  td->tid_release_list = p;
581  break;
582  }
583  mk_tid_release(skb, p - td->tid_maps.tid_tab);
584  cxgb3_ofld_send(tdev, skb);
585  p->ctx = NULL;
586  if (skb == td->nofail_skb)
587  td->nofail_skb =
588  alloc_skb(sizeof(struct cpl_tid_release),
589  GFP_KERNEL);
590  spin_lock_bh(&td->tid_release_lock);
591  }
592  td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
593  spin_unlock_bh(&td->tid_release_lock);
594 
595  if (!td->nofail_skb)
596  td->nofail_skb =
597  alloc_skb(sizeof(struct cpl_tid_release),
598  GFP_KERNEL);
599 }
600 
601 /* use ctx as a next pointer in the tid release list */
602 void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
603 {
604  struct t3c_data *td = T3C_DATA(tdev);
605  struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
606 
607  spin_lock_bh(&td->tid_release_lock);
608  p->ctx = (void *)td->tid_release_list;
609  p->client = NULL;
610  td->tid_release_list = p;
611  if (!p->ctx || td->release_list_incomplete)
612  schedule_work(&td->tid_release_task);
613  spin_unlock_bh(&td->tid_release_lock);
614 }
615 
616 EXPORT_SYMBOL(cxgb3_queue_tid_release);
617 
618 /*
619  * Remove a tid from the TID table. A client may defer processing its last
620  * CPL message if it is locked at the time it arrives, and while the message
621  * sits in the client's backlog the TID may be reused for another connection.
622  * To handle this we atomically switch the TID association if it still points
623  * to the original client context.
624  */
625 void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
626 {
627  struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
628 
629  BUG_ON(tid >= t->ntids);
630  if (tdev->type == T3A)
631  (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
632  else {
633  struct sk_buff *skb;
634 
635  skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
636  if (likely(skb)) {
637  mk_tid_release(skb, tid);
638  cxgb3_ofld_send(tdev, skb);
639  t->tid_tab[tid].ctx = NULL;
640  } else
641  cxgb3_queue_tid_release(tdev, tid);
642  }
643  atomic_dec(&t->tids_in_use);
644 }
645 
646 EXPORT_SYMBOL(cxgb3_remove_tid);
647 
648 int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
649  void *ctx)
650 {
651  int atid = -1;
652  struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
653 
654  spin_lock_bh(&t->atid_lock);
655  if (t->afree &&
656  t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
657  t->ntids) {
658  union active_open_entry *p = t->afree;
659 
660  atid = (p - t->atid_tab) + t->atid_base;
661  t->afree = p->next;
662  p->t3c_tid.ctx = ctx;
663  p->t3c_tid.client = client;
664  t->atids_in_use++;
665  }
666  spin_unlock_bh(&t->atid_lock);
667  return atid;
668 }
669 
670 EXPORT_SYMBOL(cxgb3_alloc_atid);
671 
672 int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
673  void *ctx)
674 {
675  int stid = -1;
676  struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
677 
678  spin_lock_bh(&t->stid_lock);
679  if (t->sfree) {
680  union listen_entry *p = t->sfree;
681 
682  stid = (p - t->stid_tab) + t->stid_base;
683  t->sfree = p->next;
684  p->t3c_tid.ctx = ctx;
685  p->t3c_tid.client = client;
686  t->stids_in_use++;
687  }
688  spin_unlock_bh(&t->stid_lock);
689  return stid;
690 }
691 
692 EXPORT_SYMBOL(cxgb3_alloc_stid);
693 
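/*
 * Illustrative sketch, not part of cxgb3_offload.c: the usual ordering of
 * the TID calls above for one active-open connection.  my_open_conn,
 * my_conn_established, my_conn_closed, conn_ctx and hw_tid are hypothetical;
 * the cxgb3_* functions are the ones implemented in this file.
 */
static int my_open_conn(struct t3cdev *tdev, struct cxgb3_client *my_client,
			void *conn_ctx)
{
	int atid = cxgb3_alloc_atid(tdev, my_client, conn_ctx);

	if (atid < 0)
		return -ENOMEM;		/* ATID pool exhausted */
	/* ... send the active-open request carrying atid here ... */
	return atid;
}

static void my_conn_established(struct t3cdev *tdev,
				struct cxgb3_client *my_client,
				void *conn_ctx, int atid, unsigned int hw_tid)
{
	/* CPL_ACT_ESTABLISH reported the hardware TID: switch over to it. */
	cxgb3_insert_tid(tdev, my_client, conn_ctx, hw_tid);
	cxgb3_free_atid(tdev, atid);
}

static void my_conn_closed(struct t3cdev *tdev, void *conn_ctx,
			   unsigned int hw_tid)
{
	/* Releases the hardware TID (or queues the release on OOM). */
	cxgb3_remove_tid(tdev, conn_ctx, hw_tid);
}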
694 /* Get the t3cdev associated with a net_device */
695 struct t3cdev *dev2t3cdev(struct net_device *dev)
696 {
697  const struct port_info *pi = netdev_priv(dev);
698 
699  return (struct t3cdev *)pi->adapter;
700 }
701 
702 EXPORT_SYMBOL(dev2t3cdev);
703 
704 static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
705 {
706  struct cpl_smt_write_rpl *rpl = cplhdr(skb);
707 
708  if (rpl->status != CPL_ERR_NONE)
709  printk(KERN_ERR
710  "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
711  rpl->status, GET_TID(rpl));
712 
713  return CPL_RET_BUF_DONE;
714 }
715 
716 static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
717 {
718  struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
719 
720  if (rpl->status != CPL_ERR_NONE)
721  printk(KERN_ERR
722  "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
723  rpl->status, GET_TID(rpl));
724 
725  return CPL_RET_BUF_DONE;
726 }
727 
728 static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
729 {
730  struct cpl_rte_write_rpl *rpl = cplhdr(skb);
731 
732  if (rpl->status != CPL_ERR_NONE)
733  printk(KERN_ERR
734  "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
735  rpl->status, GET_TID(rpl));
736 
737  return CPL_RET_BUF_DONE;
738 }
739 
740 static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
741 {
742  struct cpl_act_open_rpl *rpl = cplhdr(skb);
743  unsigned int atid = G_TID(ntohl(rpl->atid));
744  struct t3c_tid_entry *t3c_tid;
745 
746  t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
747  if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
748  t3c_tid->client->handlers &&
749  t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
750  return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
751  t3c_tid->
752  ctx);
753  } else {
754  printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
755  dev->name, CPL_ACT_OPEN_RPL);
756  return CPL_RET_BUF_DONE | CPL_RET_UNKNOWN_TID;
757  }
758 }
759 
760 static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
761 {
762  union opcode_tid *p = cplhdr(skb);
763  unsigned int stid = G_TID(ntohl(p->opcode_tid));
764  struct t3c_tid_entry *t3c_tid;
765 
766  t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
767  if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
768  t3c_tid->client->handlers[p->opcode]) {
769  return t3c_tid->client->handlers[p->opcode] (dev, skb,
770  t3c_tid->ctx);
771  } else {
772  printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
773  dev->name, p->opcode);
774  return CPL_RET_BUF_DONE | CPL_RET_UNKNOWN_TID;
775  }
776 }
777 
778 static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
779 {
780  union opcode_tid *p = cplhdr(skb);
781  unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
782  struct t3c_tid_entry *t3c_tid;
783 
784  t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
785  if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
786  t3c_tid->client->handlers[p->opcode]) {
787  return t3c_tid->client->handlers[p->opcode]
788  (dev, skb, t3c_tid->ctx);
789  } else {
790  printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
791  dev->name, p->opcode);
792  return CPL_RET_BUF_DONE | CPL_RET_UNKNOWN_TID;
793  }
794 }
795 
796 static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
797 {
798  struct cpl_pass_accept_req *req = cplhdr(skb);
799  unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
800  struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
801  struct t3c_tid_entry *t3c_tid;
802  unsigned int tid = GET_TID(req);
803 
804  if (unlikely(tid >= t->ntids)) {
805  printk("%s: passive open TID %u too large\n",
806  dev->name, tid);
807  t3_fatal_err(tdev2adap(dev));
808  return CPL_RET_BUF_DONE;
809  }
810 
811  t3c_tid = lookup_stid(t, stid);
812  if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
813  t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
814  return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
815  (dev, skb, t3c_tid->ctx);
816  } else {
817  printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
818  dev->name, CPL_PASS_ACCEPT_REQ);
819  return CPL_RET_BUF_DONE | CPL_RET_UNKNOWN_TID;
820  }
821 }
822 
823 /*
824  * Returns an sk_buff for a reply CPL message of size len. If the input
825  * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
826  * is allocated. The input skb must be of size at least len. Note that this
827  * operation does not destroy the original skb data even if it decides to reuse
828  * the buffer.
829  */
830 static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
831  gfp_t gfp)
832 {
833  if (likely(!skb_cloned(skb))) {
834  BUG_ON(skb->len < len);
835  __skb_trim(skb, len);
836  skb_get(skb);
837  } else {
838  skb = alloc_skb(len, gfp);
839  if (skb)
840  __skb_put(skb, len);
841  }
842  return skb;
843 }
844 
845 static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
846 {
847  union opcode_tid *p = cplhdr(skb);
848  unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
849  struct t3c_tid_entry *t3c_tid;
850 
851  t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
852  if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
853  t3c_tid->client->handlers[p->opcode]) {
854  return t3c_tid->client->handlers[p->opcode]
855  (dev, skb, t3c_tid->ctx);
856  } else {
857  struct cpl_abort_req_rss *req = cplhdr(skb);
858  struct cpl_abort_rpl *rpl;
859  struct sk_buff *reply_skb;
860  unsigned int tid = GET_TID(req);
861  u8 cmd = req->status;
862 
863  if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
864  req->status == CPL_ERR_PERSIST_NEG_ADVICE)
865  goto out;
866 
867  reply_skb = cxgb3_get_cpl_reply_skb(skb,
868  sizeof(struct
869  cpl_abort_rpl),
870  GFP_ATOMIC);
871 
872  if (!reply_skb) {
873  printk("do_abort_req_rss: couldn't get skb!\n");
874  goto out;
875  }
876  reply_skb->priority = CPL_PRIORITY_DATA;
877  __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
878  rpl = cplhdr(reply_skb);
879  rpl->wr.wr_hi =
880  htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
881  rpl->wr.wr_lo = htonl(V_WR_TID(tid));
882  OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
883  rpl->cmd = cmd;
884  cxgb3_ofld_send(dev, reply_skb);
885 out:
886  return CPL_RET_BUF_DONE;
887  }
888 }
889 
890 static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
891 {
892  struct cpl_act_establish *req = cplhdr(skb);
893  unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
894  struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
895  struct t3c_tid_entry *t3c_tid;
896  unsigned int tid = GET_TID(req);
897 
898  if (unlikely(tid >= t->ntids)) {
899  printk("%s: active establish TID %u too large\n",
900  dev->name, tid);
901  t3_fatal_err(tdev2adap(dev));
902  return CPL_RET_BUF_DONE;
903  }
904 
905  t3c_tid = lookup_atid(t, atid);
906  if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
907  t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
908  return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
909  (dev, skb, t3c_tid->ctx);
910  } else {
911  printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
912  dev->name, CPL_ACT_ESTABLISH);
913  return CPL_RET_BUF_DONE | CPL_RET_UNKNOWN_TID;
914  }
915 }
916 
917 static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
918 {
919  struct cpl_trace_pkt *p = cplhdr(skb);
920 
921  skb->protocol = htons(0xffff);
922  skb->dev = dev->lldev;
923  skb_pull(skb, sizeof(*p));
924  skb_reset_mac_header(skb);
925  netif_receive_skb(skb);
926  return 0;
927 }
928 
929 /*
930  * That skb would better have come from process_responses() where we abuse
931  * ->priority and ->csum to carry our data. NB: if we get to per-arch
932  * ->csum, the things might get really interesting here.
933  */
934 
935 static inline u32 get_hwtid(struct sk_buff *skb)
936 {
937  return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
938 }
939 
940 static inline u32 get_opcode(struct sk_buff *skb)
941 {
942  return G_OPCODE(ntohl((__force __be32)skb->csum));
943 }
944 
945 static int do_term(struct t3cdev *dev, struct sk_buff *skb)
946 {
947  unsigned int hwtid = get_hwtid(skb);
948  unsigned int opcode = get_opcode(skb);
949  struct t3c_tid_entry *t3c_tid;
950 
951  t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
952  if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
953  t3c_tid->client->handlers[opcode]) {
954  return t3c_tid->client->handlers[opcode] (dev, skb,
955  t3c_tid->ctx);
956  } else {
957  printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
958  dev->name, opcode);
959  return CPL_RET_BUF_DONE | CPL_RET_UNKNOWN_TID;
960  }
961 }
962 
963 static int nb_callback(struct notifier_block *self, unsigned long event,
964  void *ctx)
965 {
966  switch (event) {
967  case (NETEVENT_NEIGH_UPDATE):{
968  cxgb_neigh_update((struct neighbour *)ctx);
969  break;
970  }
971  case (NETEVENT_REDIRECT):{
972  struct netevent_redirect *nr = ctx;
973  cxgb_redirect(nr->old, nr->old_neigh,
974  nr->new, nr->new_neigh,
975  nr->daddr);
976  cxgb_neigh_update(nr->new_neigh);
977  break;
978  }
979  default:
980  break;
981  }
982  return 0;
983 }
984 
985 static struct notifier_block nb = {
986  .notifier_call = nb_callback
987 };
988 
989 /*
990  * Process a received packet with an unknown/unexpected CPL opcode.
991  */
992 static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
993 {
994  printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
995  *skb->data);
996  return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
997 }
998 
999 /*
1000  * Handlers for each CPL opcode
1001  */
1002 static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
1003 
1004 /*
1005  * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
1006  * to unregister an existing handler.
1007  */
1008 void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
1009 {
1010  if (opcode < NUM_CPL_CMDS)
1011  cpl_handlers[opcode] = h ? h : do_bad_cpl;
1012  else
1013  printk(KERN_ERR "T3C: handler registration for "
1014  "opcode %x failed\n", opcode);
1015 }
1016 
1017 EXPORT_SYMBOL(t3_register_cpl_handler);
1018 
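/*
 * Illustrative sketch, not part of cxgb3_offload.c: swapping a private
 * handler into the global CPL dispatch table above.  my_trace_handler and
 * the my_*_trace_handler helpers are hypothetical names; CPL_TRACE_PKT
 * (normally served by do_trace()), CPL_RET_BUF_DONE and
 * t3_register_cpl_handler() are real definitions from this driver.
 */
static int my_trace_handler(struct t3cdev *dev, struct sk_buff *skb)
{
	/* inspect or count the trace packet here ... */
	return CPL_RET_BUF_DONE;	/* process_rx() will free the skb */
}

static void my_install_trace_handler(void)
{
	t3_register_cpl_handler(CPL_TRACE_PKT, my_trace_handler);
}

static void my_restore_trace_handler(void)
{
	/* passing NULL reinstates the do_bad_cpl default for this opcode */
	t3_register_cpl_handler(CPL_TRACE_PKT, NULL);
}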
1019 /*
1020  * T3CDEV's receive method.
1021  */
1022 static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
1023 {
1024  while (n--) {
1025  struct sk_buff *skb = *skbs++;
1026  unsigned int opcode = get_opcode(skb);
1027  int ret = cpl_handlers[opcode] (dev, skb);
1028 
1029 #if VALIDATE_TID
1030  if (ret & CPL_RET_UNKNOWN_TID) {
1031  union opcode_tid *p = cplhdr(skb);
1032 
1033  printk(KERN_ERR "%s: CPL message (opcode %u) had "
1034  "unknown TID %u\n", dev->name, opcode,
1035  G_TID(ntohl(p->opcode_tid)));
1036  }
1037 #endif
1038  if (ret & CPL_RET_BUF_DONE)
1039  kfree_skb(skb);
1040  }
1041  return 0;
1042 }
1043 
1044 /*
1045  * Sends an sk_buff to a T3C driver after dealing with any active network taps.
1046  */
1047 int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
1048 {
1049  int r;
1050 
1051  local_bh_disable();
1052  r = dev->send(dev, skb);
1053  local_bh_enable();
1054  return r;
1055 }
1056 
1057 EXPORT_SYMBOL(cxgb3_ofld_send);
1058 
1059 static int is_offloading(struct net_device *dev)
1060 {
1061  struct adapter *adapter;
1062  int i;
1063 
1064  read_lock_bh(&adapter_list_lock);
1065  list_for_each_entry(adapter, &adapter_list, adapter_list) {
1066  for_each_port(adapter, i) {
1067  if (dev == adapter->port[i]) {
1068  read_unlock_bh(&adapter_list_lock);
1069  return 1;
1070  }
1071  }
1072  }
1073  read_unlock_bh(&adapter_list_lock);
1074  return 0;
1075 }
1076 
1077 static void cxgb_neigh_update(struct neighbour *neigh)
1078 {
1079  struct net_device *dev;
1080 
1081  if (!neigh)
1082  return;
1083  dev = neigh->dev;
1084  if (dev && (is_offloading(dev))) {
1085  struct t3cdev *tdev = dev2t3cdev(dev);
1086 
1087  BUG_ON(!tdev);
1088  t3_l2t_update(tdev, neigh);
1089  }
1090 }
1091 
1092 static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
1093 {
1094  struct sk_buff *skb;
1095  struct cpl_set_tcb_field *req;
1096 
1097  skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
1098  if (!skb) {
1099  printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
1100  return;
1101  }
1102  skb->priority = CPL_PRIORITY_CONTROL;
1103  req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
1104  req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1105  OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1106  req->reply = 0;
1107  req->cpu_idx = 0;
1108  req->word = htons(W_TCB_L2T_IX);
1109  req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
1110  req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
1111  tdev->send(tdev, skb);
1112 }
1113 
1114 static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
1115  struct dst_entry *new, struct neighbour *new_neigh,
1116  const void *daddr)
1117 {
1118  struct net_device *olddev, *newdev;
1119  struct tid_info *ti;
1120  struct t3cdev *tdev;
1121  u32 tid;
1122  int update_tcb;
1123  struct l2t_entry *e;
1124  struct t3c_tid_entry *te;
1125 
1126  olddev = old_neigh->dev;
1127  newdev = new_neigh->dev;
1128 
1129  if (!is_offloading(olddev))
1130  return;
1131  if (!is_offloading(newdev)) {
1132  printk(KERN_WARNING "%s: Redirect to non-offload "
1133  "device ignored.\n", __func__);
1134  return;
1135  }
1136  tdev = dev2t3cdev(olddev);
1137  BUG_ON(!tdev);
1138  if (tdev != dev2t3cdev(newdev)) {
1139  printk(KERN_WARNING "%s: Redirect to different "
1140  "offload device ignored.\n", __func__);
1141  return;
1142  }
1143 
1144  /* Add new L2T entry */
1145  e = t3_l2t_get(tdev, new, newdev, daddr);
1146  if (!e) {
1147  printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
1148  __func__);
1149  return;
1150  }
1151 
1152  /* Walk tid table and notify clients of dst change. */
1153  ti = &(T3C_DATA(tdev))->tid_maps;
1154  for (tid = 0; tid < ti->ntids; tid++) {
1155  te = lookup_tid(ti, tid);
1156  BUG_ON(!te);
1157  if (te && te->ctx && te->client && te->client->redirect) {
1158  update_tcb = te->client->redirect(te->ctx, old, new, e);
1159  if (update_tcb) {
1160  rcu_read_lock();
1161  l2t_hold(L2DATA(tdev), e);
1162  rcu_read_unlock();
1163  set_l2t_ix(tdev, tid, e);
1164  }
1165  }
1166  }
1167  l2t_release(tdev, e);
1168 }
1169 
1170 /*
1171  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1172  * The allocated memory is cleared.
1173  */
1174 void *cxgb_alloc_mem(unsigned long size)
1175 {
1176  void *p = kzalloc(size, GFP_KERNEL);
1177 
1178  if (!p)
1179  p = vzalloc(size);
1180  return p;
1181 }
1182 
1183 /*
1184  * Free memory allocated through cxgb_alloc_mem().
1185  */
1186 void cxgb_free_mem(void *addr)
1187 {
1188  if (is_vmalloc_addr(addr))
1189  vfree(addr);
1190  else
1191  kfree(addr);
1192 }
1193 
1194 /*
1195  * Allocate and initialize the TID tables. Returns 0 on success.
1196  */
1197 static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
1198  unsigned int natids, unsigned int nstids,
1199  unsigned int atid_base, unsigned int stid_base)
1200 {
1201  unsigned long size = ntids * sizeof(*t->tid_tab) +
1202  natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
1203 
1204  t->tid_tab = cxgb_alloc_mem(size);
1205  if (!t->tid_tab)
1206  return -ENOMEM;
1207 
1208  t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
1209  t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
1210  t->ntids = ntids;
1211  t->nstids = nstids;
1212  t->stid_base = stid_base;
1213  t->sfree = NULL;
1214  t->natids = natids;
1215  t->atid_base = atid_base;
1216  t->afree = NULL;
1217  t->stids_in_use = t->atids_in_use = 0;
1218  atomic_set(&t->tids_in_use, 0);
1219  spin_lock_init(&t->stid_lock);
1220  spin_lock_init(&t->atid_lock);
1221 
1222  /*
1223  * Setup the free lists for stid_tab and atid_tab.
1224  */
1225  if (nstids) {
1226  while (--nstids)
1227  t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
1228  t->sfree = t->stid_tab;
1229  }
1230  if (natids) {
1231  while (--natids)
1232  t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1233  t->afree = t->atid_tab;
1234  }
1235  return 0;
1236 }
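/*
 * Added note (not in the original source): init_tid_tabs() carves all three
 * maps out of the single cxgb_alloc_mem() block:
 *
 *	tid_tab[0 .. ntids-1]	 per-hardware-TID client/context entries
 *	stid_tab[0 .. nstids-1]	 server (listening) TIDs, chained on sfree
 *	atid_tab[0 .. natids-1]	 active-open TIDs, chained on afree
 *
 * The two while(--n) loops above link each free-list entry to its successor,
 * so cxgb3_alloc_stid()/cxgb3_alloc_atid() allocate with an O(1) list pop.
 */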
1237 
1238 static void free_tid_maps(struct tid_info *t)
1239 {
1240  cxgb_free_mem(t->tid_tab);
1241 }
1242 
1243 static inline void add_adapter(struct adapter *adap)
1244 {
1245  write_lock_bh(&adapter_list_lock);
1246  list_add_tail(&adap->adapter_list, &adapter_list);
1247  write_unlock_bh(&adapter_list_lock);
1248 }
1249 
1250 static inline void remove_adapter(struct adapter *adap)
1251 {
1252  write_lock_bh(&adapter_list_lock);
1253  list_del(&adap->adapter_list);
1254  write_unlock_bh(&adapter_list_lock);
1255 }
1256 
1257 int cxgb3_offload_activate(struct adapter *adapter)
1258 {
1259  struct t3cdev *dev = &adapter->tdev;
1260  int natids, err;
1261  struct t3c_data *t;
1262  struct tid_range stid_range, tid_range;
1263  struct mtutab mtutab;
1264  unsigned int l2t_capacity;
1265 
1266  t = kzalloc(sizeof(*t), GFP_KERNEL);
1267  if (!t)
1268  return -ENOMEM;
1269 
1270  err = -EOPNOTSUPP;
1271  if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
1272  dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
1273  dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
1274  dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
1275  dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
1276  dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
1277  goto out_free;
1278 
1279  err = -ENOMEM;
1280  RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
1281  if (!L2DATA(dev))
1282  goto out_free;
1283 
1284  natids = min(tid_range.num / 2, MAX_ATIDS);
1285  err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
1286  stid_range.num, ATID_BASE, stid_range.base);
1287  if (err)
1288  goto out_free_l2t;
1289 
1290  t->mtus = mtutab.mtus;
1291  t->nmtus = mtutab.size;
1292 
1293  INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
1294  spin_lock_init(&t->tid_release_lock);
1295  INIT_LIST_HEAD(&t->list_node);
1296  t->dev = dev;
1297 
1298  T3C_DATA(dev) = t;
1299  dev->recv = process_rx;
1300  dev->neigh_update = t3_l2t_update;
1301 
1302  /* Register netevent handler once */
1303  if (list_empty(&adapter_list))
1304  register_netevent_notifier(&nb);
1305 
1306  t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
1307  t->release_list_incomplete = 0;
1308 
1309  add_adapter(adapter);
1310  return 0;
1311 
1312 out_free_l2t:
1313  t3_free_l2t(L2DATA(dev));
1314  RCU_INIT_POINTER(dev->l2opt, NULL);
1315 out_free:
1316  kfree(t);
1317  return err;
1318 }
1319 
1320 static void clean_l2_data(struct rcu_head *head)
1321 {
1322  struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
1323  t3_free_l2t(d);
1324 }
1325 
1326 
1327 void cxgb3_offload_deactivate(struct adapter *adapter)
1328 {
1329  struct t3cdev *tdev = &adapter->tdev;
1330  struct t3c_data *t = T3C_DATA(tdev);
1331  struct l2t_data *d;
1332 
1333  remove_adapter(adapter);
1334  if (list_empty(&adapter_list))
1335  unregister_netevent_notifier(&nb);
1336 
1337  free_tid_maps(&t->tid_maps);
1338  T3C_DATA(tdev) = NULL;
1339  rcu_read_lock();
1340  d = L2DATA(tdev);
1341  rcu_read_unlock();
1342  RCU_INIT_POINTER(tdev->l2opt, NULL);
1343  call_rcu(&d->rcu_head, clean_l2_data);
1344  if (t->nofail_skb)
1345  kfree_skb(t->nofail_skb);
1346  kfree(t);
1347 }
1348 
1349 static inline void register_tdev(struct t3cdev *tdev)
1350 {
1351  static int unit;
1352 
1353  mutex_lock(&cxgb3_db_lock);
1354  snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
1355  list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
1356  mutex_unlock(&cxgb3_db_lock);
1357 }
1358 
1359 static inline void unregister_tdev(struct t3cdev *tdev)
1360 {
1361  mutex_lock(&cxgb3_db_lock);
1362  list_del(&tdev->ofld_dev_list);
1363  mutex_unlock(&cxgb3_db_lock);
1364 }
1365 
1366 static inline int adap2type(struct adapter *adapter)
1367 {
1368  int type = 0;
1369 
1370  switch (adapter->params.rev) {
1371  case T3_REV_A:
1372  type = T3A;
1373  break;
1374  case T3_REV_B:
1375  case T3_REV_B2:
1376  type = T3B;
1377  break;
1378  case T3_REV_C:
1379  type = T3C;
1380  break;
1381  }
1382  return type;
1383 }
1384 
1385 void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
1386 {
1387  struct t3cdev *tdev = &adapter->tdev;
1388 
1389  INIT_LIST_HEAD(&tdev->ofld_dev_list);
1390 
1391  cxgb3_set_dummy_ops(tdev);
1392  tdev->send = t3_offload_tx;
1393  tdev->ctl = cxgb_offload_ctl;
1394  tdev->type = adap2type(adapter);
1395 
1396  register_tdev(tdev);
1397 }
1398 
1399 void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
1400 {
1401  struct t3cdev *tdev = &adapter->tdev;
1402 
1403  tdev->recv = NULL;
1404  tdev->neigh_update = NULL;
1405 
1406  unregister_tdev(tdev);
1407 }
1408 
1409 void __init cxgb3_offload_init(void)
1410 {
1411  int i;
1412 
1413  for (i = 0; i < NUM_CPL_CMDS; ++i)
1414  cpl_handlers[i] = do_bad_cpl;
1415 
1416  t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
1417  t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
1418  t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
1419  t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
1420  t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
1421  t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
1422  t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
1423  t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
1424  t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
1425  t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
1426  t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
1427  t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
1428  t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
1429  t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
1430  t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
1431  t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
1432  t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
1433  t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
1434  t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
1435  t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
1436  t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
1437  t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
1438  t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
1439  t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
1440  t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
1441  t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
1442 }