Linux Kernel 3.7.1
iw_cxgb4.h
1 /*
2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses. You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  * Redistribution and use in source and binary forms, with or
11  * without modification, are permitted provided that the following
12  * conditions are met:
13  *
14  * - Redistributions of source code must retain the above
15  * copyright notice, this list of conditions and the following
16  * disclaimer.
17  * - Redistributions in binary form must reproduce the above
18  * copyright notice, this list of conditions and the following
19  * disclaimer in the documentation and/or other materials
20  * provided with the distribution.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29  * SOFTWARE.
30  */
31 #ifndef __IW_CXGB4_H__
32 #define __IW_CXGB4_H__
33 
34 #include <linux/mutex.h>
35 #include <linux/list.h>
36 #include <linux/spinlock.h>
37 #include <linux/idr.h>
38 #include <linux/completion.h>
39 #include <linux/netdevice.h>
40 #include <linux/sched.h>
41 #include <linux/pci.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/inet.h>
44 #include <linux/wait.h>
45 #include <linux/kref.h>
46 #include <linux/timer.h>
47 #include <linux/io.h>
48 
49 #include <asm/byteorder.h>
50 
51 #include <net/net_namespace.h>
52 
53 #include <rdma/ib_verbs.h>
54 #include <rdma/iw_cm.h>
55 
56 #include "cxgb4.h"
57 #include "cxgb4_uld.h"
58 #include "l2t.h"
59 #include "user.h"
60 
61 #define DRV_NAME "iw_cxgb4"
62 #define MOD DRV_NAME ":"
63 
64 extern int c4iw_debug;
65 #define PDBG(fmt, args...) \
66 do { \
67  if (c4iw_debug) \
68  printk(MOD fmt, ## args); \
69 } while (0)
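/*
 * Usage sketch (editor addition, not part of the original header):
 * PDBG() is compiled in unconditionally but only prints when c4iw_debug
 * is non-zero (the driver exposes it as a module parameter), and every
 * message is prefixed with "iw_cxgb4:" via MOD.  A typical call:
 *
 *	PDBG("%s qpid 0x%x\n", __func__, qpid);
 *
 * The function name and qpid here are illustrative only.
 */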
70 
71 #include "t4.h"
72 
73 #define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
74 #define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
75 
76 static inline void *cplhdr(struct sk_buff *skb)
77 {
78  return skb->data;
79 }
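/*
 * Example (editor addition; a sketch of how CPL handlers typically use
 * this): the CPL control header sits at the start of the skb payload,
 * so a handler can cast skb->data directly:
 *
 *	struct cpl_act_establish *req = cplhdr(skb);
 *	unsigned int tid = GET_TID(req);
 *
 * cpl_act_establish and GET_TID() come from the cxgb4 message headers;
 * treat the snippet as illustrative rather than a quote from this file.
 */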
80 
81 #define C4IW_ID_TABLE_F_RANDOM 1 /* Pseudo-randomize the id's returned */
82 #define C4IW_ID_TABLE_F_EMPTY 2 /* Table is initially empty */
83 
84 struct c4iw_id_table {
86  u32 start; /* logical minimal id */
87  u32 last; /* hint for find */
90  unsigned long *table;
91 };
92 
93 struct c4iw_resource {
97 };
98 
99 struct c4iw_qid_list {
100  struct list_head entry;
102 };
103 
105  struct list_head qpids;
106  struct list_head cqids;
107  struct mutex lock;
108 };
109 
111  T4_FATAL_ERROR = (1<<0),
112 };
113 
114 struct c4iw_stat {
119 };
120 
121 struct c4iw_stats {
122  struct mutex lock;
123  struct c4iw_stat qid;
124  struct c4iw_stat pd;
125  struct c4iw_stat stag;
126  struct c4iw_stat pbl;
127  struct c4iw_stat rqt;
128  struct c4iw_stat ocqp;
133 };
134 
135 struct c4iw_rdev {
137  unsigned long qpshift;
139  unsigned long cqshift;
147  unsigned long oc_mw_pa;
150 };
151 
152 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
153 {
154  return rdev->flags & T4_FATAL_ERROR;
155 }
156 
157 static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
158 {
159  return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
160 }
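/*
 * Note (editor addition, assumption): the ">> 5" above divides the
 * adapter's stag region size by 32, i.e. one stag per 32-byte TPT
 * entry, and the result is capped at T4_MAX_NUM_STAG.
 */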
161 
162 #define C4IW_WR_TO (10*HZ)
163 
164 struct c4iw_wr_wait {
166  int ret;
167 };
168 
169 static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
170 {
171  wr_waitp->ret = 0;
172  init_completion(&wr_waitp->completion);
173 }
174 
175 static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
176 {
177  wr_waitp->ret = ret;
178  complete(&wr_waitp->completion);
179 }
180 
181 static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
182  struct c4iw_wr_wait *wr_waitp,
183  u32 hwtid, u32 qpid,
184  const char *func)
185 {
186  unsigned to = C4IW_WR_TO;
187  int ret;
188 
189  do {
190  ret = wait_for_completion_timeout(&wr_waitp->completion, to);
191  if (!ret) {
192  printk(KERN_ERR MOD "%s - Device %s not responding - "
193  "tid %u qpid %u\n", func,
194  pci_name(rdev->lldi.pdev), hwtid, qpid);
195  if (c4iw_fatal_error(rdev)) {
196  wr_waitp->ret = -EIO;
197  break;
198  }
199  to = to << 2;
200  }
201  } while (!ret);
202  if (wr_waitp->ret)
203  PDBG("%s: FW reply %d tid %u qpid %u\n",
204  pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
205  return wr_waitp->ret;
206 }
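/*
 * Usage sketch (editor addition): callers pair c4iw_init_wr_wait() with
 * c4iw_wait_for_reply() around a firmware work request whose completion
 * handler calls c4iw_wake_up().  Roughly:
 *
 *	struct c4iw_wr_wait wr_wait;
 *	int ret;
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	... build the FW WR, stash &wr_wait as its completion cookie,
 *	    and hand it to c4iw_ofld_send() ...
 *	ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid, __func__);
 *
 * A non-zero return means the firmware reported an error or the device
 * was marked fatal while waiting.
 */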
207 
208 enum db_state {
209  NORMAL = 0,
212 };
213 
214 struct c4iw_dev {
215  struct ib_device ibdev;
216  struct c4iw_rdev rdev;
218  struct idr cqidr;
219  struct idr qpidr;
220  struct idr mmidr;
222  struct mutex db_mutex;
225  int qpcnt;
226 };
227 
228 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
229 {
230  return container_of(ibdev, struct c4iw_dev, ibdev);
231 }
232 
233 static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
234 {
235  return container_of(rdev, struct c4iw_dev, rdev);
236 }
237 
238 static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
239 {
240  return idr_find(&rhp->cqidr, cqid);
241 }
242 
243 static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
244 {
245  return idr_find(&rhp->qpidr, qpid);
246 }
247 
248 static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
249 {
250  return idr_find(&rhp->mmidr, mmid);
251 }
252 
253 static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
254  void *handle, u32 id, int lock)
255 {
256  int ret;
257  int newid;
258 
259  do {
260  if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
261  return -ENOMEM;
262  if (lock)
263  spin_lock_irq(&rhp->lock);
264  ret = idr_get_new_above(idr, handle, id, &newid);
265  BUG_ON(!ret && newid != id);
266  if (lock)
267  spin_unlock_irq(&rhp->lock);
268  } while (ret == -EAGAIN);
269 
270  return ret;
271 }
272 
273 static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
274  void *handle, u32 id)
275 {
276  return _insert_handle(rhp, idr, handle, id, 1);
277 }
278 
279 static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
280  void *handle, u32 id)
281 {
282  return _insert_handle(rhp, idr, handle, id, 0);
283 }
284 
285 static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
286  u32 id, int lock)
287 {
288  if (lock)
289  spin_lock_irq(&rhp->lock);
290  idr_remove(idr, id);
291  if (lock)
292  spin_unlock_irq(&rhp->lock);
293 }
294 
295 static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
296 {
297  _remove_handle(rhp, idr, id, 1);
298 }
299 
300 static inline void remove_handle_nolock(struct c4iw_dev *rhp,
301  struct idr *idr, u32 id)
302 {
303  _remove_handle(rhp, idr, id, 0);
304 }
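/*
 * Usage sketch (editor addition): these wrappers keep the idr tables in
 * c4iw_dev in step with hardware IDs.  For example, QP creation and
 * teardown bracket the object's lifetime with:
 *
 *	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *	...
 *	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 *
 * get_qhp()/get_chp()/get_mhp() above then map a hardware ID back to
 * the driver object in the event-dispatch and CQ-polling paths.
 */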
305 
306 struct c4iw_pd {
307  struct ib_pd ibpd;
309  struct c4iw_dev *rhp;
310 };
311 
312 static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
313 {
314  return container_of(ibpd, struct c4iw_pd, ibpd);
315 }
316 
317 struct tpt_attributes {
319  u64 va_fbo;
321  u32 stag;
322  u32 pdid;
323  u32 qpid;
324  u32 pbl_addr;
325  u32 pbl_size;
326  u32 state:1;
327  u32 type:2;
328  u32 rsvd:1;
330  u32 zbva:1;
332  u32 page_size:5;
333 };
334 
335 struct c4iw_mr {
336  struct ib_mr ibmr;
337  struct ib_umem *umem;
338  struct c4iw_dev *rhp;
341 };
342 
343 static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
344 {
345  return container_of(ibmr, struct c4iw_mr, ibmr);
346 }
347 
348 struct c4iw_mw {
349  struct ib_mw ibmw;
350  struct c4iw_dev *rhp;
353 };
354 
355 static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
356 {
357  return container_of(ibmw, struct c4iw_mw, ibmw);
358 }
359 
364  struct c4iw_dev *dev;
365  int size;
366 };
367 
368 static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
369  struct ib_fast_reg_page_list *ibpl)
370 {
371  return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
372 }
373 
374 struct c4iw_cq {
375  struct ib_cq ibcq;
376  struct c4iw_dev *rhp;
377  struct t4_cq cq;
382 };
383 
384 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
385 {
386  return container_of(ibcq, struct c4iw_cq, ibcq);
387 }
388 
397 };
398 
425 };
426 
427 struct c4iw_qp {
428  struct ib_qp ibqp;
429  struct c4iw_dev *rhp;
430  struct c4iw_ep *ep;
432  struct t4_wq wq;
434  struct mutex mutex;
438 };
439 
440 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
441 {
442  return container_of(ibqp, struct c4iw_qp, ibqp);
443 }
444 
450  struct list_head mmaps;
451 };
452 
453 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
454 {
455  return container_of(c, struct c4iw_ucontext, ibucontext);
456 }
457 
459  struct list_head entry;
462  unsigned len;
463 };
464 
465 static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
466  u32 key, unsigned len)
467 {
468  struct list_head *pos, *nxt;
469  struct c4iw_mm_entry *mm;
470 
471  spin_lock(&ucontext->mmap_lock);
472  list_for_each_safe(pos, nxt, &ucontext->mmaps) {
473 
474  mm = list_entry(pos, struct c4iw_mm_entry, entry);
475  if (mm->key == key && mm->len == len) {
476  list_del_init(&mm->entry);
477  spin_unlock(&ucontext->mmap_lock);
478  PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
479  key, (unsigned long long) mm->addr, mm->len);
480  return mm;
481  }
482  }
483  spin_unlock(&ucontext->mmap_lock);
484  return NULL;
485 }
486 
487 static inline void insert_mmap(struct c4iw_ucontext *ucontext,
488  struct c4iw_mm_entry *mm)
489 {
490  spin_lock(&ucontext->mmap_lock);
491  PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
492  mm->key, (unsigned long long) mm->addr, mm->len);
493  list_add_tail(&mm->entry, &ucontext->mmaps);
494  spin_unlock(&ucontext->mmap_lock);
495 }
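/*
 * Usage sketch (editor addition): queue memory is exported to userspace
 * through opaque (key, len) pairs.  The create paths record the backing
 * address with insert_mmap(), and the driver's mmap() handler later
 * claims it with remove_mmap().  Roughly (CQ creation shown):
 *
 *	mm->key = uresp.key;
 *	mm->addr = virt_to_phys(chp->cq.queue);
 *	mm->len = chp->cq.memsize;
 *	insert_mmap(ucontext, mm);
 *
 * uresp, chp and ucontext are illustrative locals from the CQ path,
 * not definitions in this header.
 */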
496 
518 };
519 
520 int c4iw_modify_qp(struct c4iw_dev *rhp,
521  struct c4iw_qp *qhp,
522  enum c4iw_qp_attr_mask mask,
523  struct c4iw_qp_attributes *attrs,
524  int internal);
525 
533 };
534 
535 static inline int c4iw_convert_state(enum ib_qp_state ib_state)
536 {
537  switch (ib_state) {
538  case IB_QPS_RESET:
539  case IB_QPS_INIT:
540  return C4IW_QP_STATE_IDLE;
541  case IB_QPS_RTS:
542  return C4IW_QP_STATE_RTS;
543  case IB_QPS_SQD:
544  return C4IW_QP_STATE_CLOSING;
545  case IB_QPS_SQE:
547  case IB_QPS_ERR:
548  return C4IW_QP_STATE_ERROR;
549  default:
550  return -1;
551  }
552 }
553 
554 static inline int to_ib_qp_state(int c4iw_qp_state)
555 {
556  switch (c4iw_qp_state) {
557  case C4IW_QP_STATE_IDLE:
558  return IB_QPS_INIT;
559  case C4IW_QP_STATE_RTS:
560  return IB_QPS_RTS;
562  return IB_QPS_SQD;
564  return IB_QPS_SQE;
565  case C4IW_QP_STATE_ERROR:
566  return IB_QPS_ERR;
567  }
568  return IB_QPS_ERR;
569 }
570 
571 static inline u32 c4iw_ib_to_tpt_access(int a)
572 {
577 }
578 
579 static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
580 {
581  return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
582  (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
583 }
584 
588 };
589 
590 #define C4IW_NODE_DESC "cxgb4 Chelsio Communications"
591 
592 #define MPA_KEY_REQ "MPA ID Req Frame"
593 #define MPA_KEY_REP "MPA ID Rep Frame"
594 
595 #define MPA_MAX_PRIVATE_DATA 256
596 #define MPA_ENHANCED_RDMA_CONN 0x10
597 #define MPA_REJECT 0x20
598 #define MPA_CRC 0x40
599 #define MPA_MARKERS 0x80
600 #define MPA_FLAGS_MASK 0xE0
601 
602 #define MPA_V2_PEER2PEER_MODEL 0x8000
603 #define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000
604 #define MPA_V2_RDMA_WRITE_RTR 0x8000
605 #define MPA_V2_RDMA_READ_RTR 0x4000
606 #define MPA_V2_IRD_ORD_MASK 0x3FFF
607 
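/*
 * Note (editor addition, assumption): the MPA v2 flag values reuse the
 * same bits because they apply to different 16-bit words of the
 * enhanced-connection parameters: MPA_V2_PEER2PEER_MODEL is set in the
 * IRD word and MPA_V2_RDMA_WRITE_RTR/MPA_V2_RDMA_READ_RTR in the ORD
 * word, with the low 14 bits (MPA_V2_IRD_ORD_MASK) carrying the actual
 * IRD/ORD values.  A peer-to-peer request is built roughly as:
 *
 *	params.ird = htons(ird | MPA_V2_PEER2PEER_MODEL);
 *	params.ord = htons(ord | MPA_V2_RDMA_WRITE_RTR);
 *
 * 'params', 'ird' and 'ord' are illustrative names.
 */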
608 #define c4iw_put_ep(ep) { \
609  PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
610  ep, atomic_read(&((ep)->kref.refcount))); \
611  WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
612  kref_put(&((ep)->kref), _c4iw_free_ep); \
613 }
614 
615 #define c4iw_get_ep(ep) { \
616  PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
617  ep, atomic_read(&((ep)->kref.refcount))); \
618  kref_get(&((ep)->kref)); \
619 }
620 void _c4iw_free_ep(struct kref *kref);
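/*
 * Usage note (editor addition): endpoints are reference counted via the
 * embedded kref, so code that stashes an ep for asynchronous use takes
 * a reference first and drops it when finished:
 *
 *	c4iw_get_ep(&ep->com);
 *	... hand the ep to a timer, work item or cm_id ...
 *	c4iw_put_ep(&ep->com);
 *
 * The final put releases the endpoint through _c4iw_free_ep().
 */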
621 
622 struct mpa_message {
623  u8 key[16];
624  u8 flags;
625  u8 revision;
627  u8 private_data[0];
628 };
629 
633 };
634 
635 struct terminate_message {
636  u8 layer_etype;
637  u8 ecode;
639  u8 len_hdrs[0];
640 };
641 
642 #define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
643 
645  LAYER_RDMAP = 0x00,
646  LAYER_DDP = 0x10,
647  LAYER_MPA = 0x20,
654  DDP_LLP = 0x03
655 };
656 
669 };
670 
675  DDPT_TO_WRAP = 0x03,
677  DDPU_INV_QN = 0x01,
680  DDPU_INV_MO = 0x04,
683 };
684 
686  MPA_CRC_ERR = 0x02,
691 };
692 
694  IDLE = 0,
706 };
707 
713 };
714 
716  struct iw_cm_id *cm_id;
717  struct c4iw_qp *qp;
718  struct c4iw_dev *dev;
720  struct kref kref;
721  struct mutex mutex;
725  unsigned long flags;
726 };
727 
730  unsigned int stid;
731  int backlog;
732 };
733 
734 struct c4iw_ep {
738  struct list_head entry;
739  unsigned int atid;
743  struct l2t_entry *l2t;
744  struct dst_entry *dst;
745  struct sk_buff *mpa_skb;
748  unsigned int mpa_pkt_len;
763 };
764 
765 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
766 {
767  return cm_id->provider_data;
768 }
769 
770 static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
771 {
772  return cm_id->provider_data;
773 }
774 
775 static inline int compute_wscale(int win)
776 {
777  int wscale = 0;
778 
779  while (wscale < 14 && (65535<<wscale) < win)
780  wscale++;
781  return wscale;
782 }
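/*
 * Worked example (editor addition): for a 256 KB receive window,
 * win = 262144; 65535 << 2 = 262140 is still below the window, so the
 * loop advances once more and compute_wscale() returns 3
 * (65535 << 3 = 524280 >= 262144).
 */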
783 
784 u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
785 void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
786 int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
787  u32 reserved, u32 flags);
788 void c4iw_id_table_free(struct c4iw_id_table *alloc);
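/*
 * Usage sketch (editor addition): the resource code builds an id table
 * over a range of hardware IDs and then hands individual IDs out and
 * back:
 *
 *	c4iw_id_table_alloc(&tbl, 0, nr_ids, 1, C4IW_ID_TABLE_F_RANDOM);
 *	id = c4iw_id_alloc(&tbl);
 *	...
 *	c4iw_id_free(&tbl, id);
 *	c4iw_id_table_free(&tbl);
 *
 * 'tbl' and 'nr_ids' are illustrative names; error checking is omitted.
 */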
789 
790 typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
791 
792 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
793  struct l2t_entry *l2t);
794 void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
795  struct c4iw_dev_ucontext *uctx);
796 u32 c4iw_get_resource(struct c4iw_id_table *id_table);
797 void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
798 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
799 int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
800 int c4iw_pblpool_create(struct c4iw_rdev *rdev);
801 int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
802 int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
803 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
804 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
805 void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
806 void c4iw_destroy_resource(struct c4iw_resource *rscp);
807 int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
808 int c4iw_register_device(struct c4iw_dev *dev);
809 void c4iw_unregister_device(struct c4iw_dev *dev);
810 int __init c4iw_cm_init(void);
811 void __exit c4iw_cm_term(void);
812 void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
813  struct c4iw_dev_ucontext *uctx);
814 void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
815  struct c4iw_dev_ucontext *uctx);
816 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
817 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
818  struct ib_send_wr **bad_wr);
819 int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
820  struct ib_recv_wr **bad_wr);
821 int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
822  struct ib_mw_bind *mw_bind);
823 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
824 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
825 int c4iw_destroy_listen(struct iw_cm_id *cm_id);
826 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
827 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
828 void c4iw_qp_add_ref(struct ib_qp *qp);
829 void c4iw_qp_rem_ref(struct ib_qp *qp);
830 void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
832  struct ib_device *device,
833  int page_list_len);
834 struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
835 int c4iw_dealloc_mw(struct ib_mw *mw);
836 struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
837 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
838  u64 length, u64 virt, int acc,
839  struct ib_udata *udata);
840 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
841 struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
842  struct ib_phys_buf *buffer_list,
843  int num_phys_buf,
844  int acc,
845  u64 *iova_start);
846 int c4iw_reregister_phys_mem(struct ib_mr *mr,
847  int mr_rereg_mask,
848  struct ib_pd *pd,
849  struct ib_phys_buf *buffer_list,
850  int num_phys_buf,
851  int acc, u64 *iova_start);
852 int c4iw_dereg_mr(struct ib_mr *ib_mr);
853 int c4iw_destroy_cq(struct ib_cq *ib_cq);
854 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
855  int vector,
856  struct ib_ucontext *ib_context,
857  struct ib_udata *udata);
858 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
859 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
860 int c4iw_destroy_qp(struct ib_qp *ib_qp);
861 struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
862  struct ib_qp_init_attr *attrs,
863  struct ib_udata *udata);
864 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
865  int attr_mask, struct ib_udata *udata);
866 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
867  int attr_mask, struct ib_qp_init_attr *init_attr);
868 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
869 u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
870 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
871 u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
872 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
873 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
874 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
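/*
 * Usage sketch (editor addition): PBL space for a memory region is
 * carved out of the pool at registration time and returned when the MR
 * is torn down (each PBL entry is 8 bytes, hence the "<< 3"):
 *
 *	mhp->attr.pbl_addr = c4iw_pblpool_alloc(rdev, npages << 3);
 *	...
 *	c4iw_pblpool_free(rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3);
 *
 * 'mhp' and 'npages' follow the memory-registration code; treat the
 * snippet as a sketch rather than a quote.
 */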
875 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
876 void c4iw_flush_hw_cq(struct t4_cq *cq);
877 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
878 void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
879 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
880 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
881 int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
882 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
883 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
884 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
885 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
886 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
887  struct c4iw_dev_ucontext *uctx);
888 u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
889 void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
890  struct c4iw_dev_ucontext *uctx);
891 void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
892 
893 extern struct cxgb4_client t4c_client;
895 extern int c4iw_max_read_depth;
896 extern int db_fc_threshold;
897 
898 
899 #endif