Linux Kernel 3.7.1
ipath_verbs.h
/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

#include "ipath_kernel.h"

#define IPATH_MAX_RDMA_ATOMIC	4

#define QPN_MAX			(1 << 24)
#define QPNMAP_ENTRIES		(QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION	2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

/* AETH NAK opcode values */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR	0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

/* Flags for checking QP state (see ib_ipath_state_ops[]) */
#define IPATH_POST_SEND_OK		0x01
#define IPATH_POST_RECV_OK		0x02
#define IPATH_PROCESS_RECV_OK		0x04
#define IPATH_PROCESS_SEND_OK		0x08
#define IPATH_PROCESS_NEXT_SEND_OK	0x10
#define IPATH_FLUSH_SEND		0x20
#define IPATH_FLUSH_RECV		0x40
#define IPATH_PROCESS_OR_FLUSH_SEND \
	(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));
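
/*
 * Worked breakdown of the byte counts above (editorial note, not part
 * of the original header; field sizes are the standard IB wire sizes):
 * LRH 8 + GRH 40 + BTH 12 + DETH 8 = 68 bytes, plus 4 bytes of
 * immediate data gives 72.  With only 56 bytes in the eager header
 * buffer, 68 - 56 = 12 (or 72 - 56 = 16) bytes land in the data buffer,
 * exactly as the comment states.
 */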

struct ipath_pio_header {
	__le32 pbc[2];
	struct ipath_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;
	struct ipath_qp *qp;
};

struct ipath_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

/*
 * This structure is used by ipath_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct ipath_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct ipath_cq_wc {
	u32 head;		/* index of next entry to fill */
	u32 tail;		/* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
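
/*
 * Illustrative sketch (not from the original ipath sources): how a
 * kernel-side consumer could drain this shared ring.  The arrays really
 * hold cqe + 1 entries, so indices run 0..cqe and head == tail means
 * the ring is empty.
 */
static inline int example_cq_wc_poll(struct ipath_cq_wc *wc, u32 cqe,
				     struct ib_wc *entry)
{
	u32 tail = wc->tail;

	if (tail == wc->head)
		return 0;			 /* nothing completed */
	*entry = wc->kqueue[tail];		 /* copy out the CQE */
	wc->tail = (tail >= cqe) ? 0 : tail + 1; /* wrap at cqe + 1 slots */
	return 1;
}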

/*
 * The completion queue structure.
 */
struct ipath_cq {
	struct ib_cq ibcq;
	struct tasklet_struct comptask;
	spinlock_t lock;
	u8 notify;
	u8 triggered;
	struct ipath_cq_wc *queue;
	struct ipath_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct ipath_seg {
	void *vaddr;
	size_t length;
};

/* The number of ipath_segs that fit in a page. */
#define IPATH_SEGSZ	(PAGE_SIZE / sizeof(struct ipath_seg))

struct ipath_segarray {
	struct ipath_seg segs[IPATH_SEGSZ];
};

struct ipath_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of ipath_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	struct ipath_segarray *map[0];	/* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct ipath_sge {
	struct ipath_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct ipath_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ipath_mregion mr;	/* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct ipath_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct ipath_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct ipath_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct ipath_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct ipath_rwqe wq[0];
};

struct ipath_rq {
	struct ipath_rwq *wq;
	spinlock_t lock;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
};

struct ipath_srq {
	struct ib_srq ibsrq;
	struct ipath_rq rq;
	struct ipath_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct ipath_sge_state {
	struct ipath_sge *sg_list;	/* next SGE to be used if any */
	struct ipath_sge sge;		/* progress state for the current SGE */
	u8 num_sge;
	u8 static_rate;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct ipath_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	union {
		struct ipath_sge_state rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order, which only happens in modify_qp() or when changing the QP
 * 'state'.
 */
struct ipath_qp {
	struct ib_qp ibqp;
	struct ipath_qp *next;		/* link list for QPN hash table */
	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
	struct ipath_qp *pio_next;	/* link for ipath_ib_piobufavail() */
	struct list_head piowait;	/* link for wait PIO buf */
	struct list_head timerwait;	/* link for waiting for timeouts */
	struct ib_ah_attr remote_ah_attr;
	struct ipath_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	wait_queue_head_t wait_dma;
	struct tasklet_struct s_task;
	struct ipath_mmap_info *ip;
	struct ipath_sge_state *s_cur_sge;
	struct ipath_verbs_txreq *s_tx;
	struct ipath_sge_state s_sge;	/* current send request data */
	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
	struct ipath_sge_state s_ack_rdma_sge;
	struct ipath_sge_state s_rdma_read_sge;
	struct ipath_sge_state r_sge;	/* current receive data */
	spinlock_t s_lock;
	atomic_t s_dma_busy;
	u16 s_pkt_delay;
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	unsigned long r_aflags;
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 r_flags;
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 qp_access_flags;
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_flags;
	u8 s_dmult;
	u8 s_draining;
	u8 timeout;		/* Timeout for this QP */
	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_last;		/* last un-ACK'ed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct ipath_swqe *s_wq;	/* send work queue */
	struct ipath_swqe *s_wqe;
	struct ipath_sge *r_ud_sg_list;
	struct ipath_rq r_rq;		/* receive work queue */
	struct ipath_sge r_sg_list[0];	/* verified SGEs */
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define IPATH_R_WRID_VALID	0

/*
 * Bit definitions for r_flags.
 */
#define IPATH_R_REUSE_SGE	0x01
#define IPATH_R_RDMAR_SEQ	0x02

/*
 * Bit definitions for s_flags.
 *
 * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
 *			   before processing the next SWQE
 * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
 *			   before processing the next SWQE
 * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
 * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *		      the next send completion entry not via send DMA.
 */
#define IPATH_S_SIGNAL_REQ_WR	0x01
#define IPATH_S_FENCE_PENDING	0x02
#define IPATH_S_RDMAR_PENDING	0x04
#define IPATH_S_ACK_PENDING	0x08
#define IPATH_S_BUSY		0x10
#define IPATH_S_WAITING		0x20
#define IPATH_S_WAIT_SSN_CREDIT	0x40
#define IPATH_S_WAIT_DMA	0x80

#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
	IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)

#define IPATH_PSN_CREDIT	512

/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
 * struct ipath_qp.s_wq.  This function does the array index computation.
 */
static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
					      unsigned n)
{
	return (struct ipath_swqe *)((char *)qp->s_wq +
				     (sizeof(struct ipath_swqe) +
				      qp->s_max_sge *
				      sizeof(struct ipath_sge)) * n);
}
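
/*
 * Illustrative sketch (not from the original ipath sources): the send
 * queue is used as a ring of s_size entries, so a walker wraps its
 * index manually before calling get_swqe_ptr(), e.g.:
 */
static inline struct ipath_swqe *example_next_swqe(struct ipath_qp *qp,
						   u32 *np)
{
	if (++*np >= qp->s_size)	/* wrap the ring index */
		*np = 0;
	return get_swqe_ptr(qp, *np);
}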

/*
 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
 * struct ipath_rwq.wq.  This function does the array index computation.
 */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
					      unsigned n)
{
	return (struct ipath_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct ipath_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	atomic_t n_free;
	void *page;
};
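
/*
 * Illustrative sketch (not from the original ipath sources; the real
 * allocator lives in ipath_qp.c): each map page covers PAGE_SIZE *
 * BITS_PER_BYTE QPNs, so a QPN splits into a map index plus a bit
 * offset, and a still-NULL page simply means "all of those QPNs free".
 */
static inline int example_qpn_in_use(struct qpn_map *maps, u32 qpn)
{
	u32 bits_per_page = PAGE_SIZE * BITS_PER_BYTE;
	struct qpn_map *map = &maps[qpn / bits_per_page];

	return map->page ?
		test_bit(qpn % bits_per_page, (unsigned long *)map->page) : 0;
}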

struct ipath_qp_table {
	spinlock_t lock;
	u32 last;		/* last QP number allocated */
	u32 max;		/* size of the hash table */
	u32 nmaps;		/* size of the map table */
	struct ipath_qp **table;
	/* bit map of free numbers */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct ipath_lkey_table {
	spinlock_t lock;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct ipath_mregion **table;
};

struct ipath_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct ipath_ibdev {
	struct ib_device ibdev;
	struct ipath_devdata *dd;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;
	u32 mmap_offset;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
	u8 mkeyprot;
	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;

	/* The following fields are really per port. */
	struct ipath_qp_table qp_table;
	struct ipath_lkey_table lk_table;
	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
	struct list_head piowait;	/* list for wait PIO buf */
	struct list_head txreq_free;
	void *txreq_bufs;
	/* list of QPs waiting for RNR timer */
	struct list_head rnrwait;
	spinlock_t pending_lock;
	__be64 sys_image_guid;	/* in network order */
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;

	u32 n_pds_allocated;	/* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;	/* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

	u64 ipath_sword;	/* total dwords sent (sample result) */
	u64 ipath_rword;	/* total dwords received (sample result) */
	u64 ipath_spkts;	/* total packets sent (sample result) */
	u64 ipath_rpkts;	/* total packets received (sample result) */
	/* # of ticks no data sent (sample result) */
	u64 ipath_xmit_wait;
	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
	u64 z_symbol_error_counter;		/* starting count for PMA */
	u64 z_link_error_recovery_counter;	/* starting count for PMA */
	u64 z_link_downed_counter;		/* starting count for PMA */
	u64 z_port_rcv_errors;			/* starting count for PMA */
	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
	u64 z_port_xmit_discards;		/* starting count for PMA */
	u64 z_port_xmit_data;			/* starting count for PMA */
	u64 z_port_rcv_data;			/* starting count for PMA */
	u64 z_port_xmit_packets;		/* starting count for PMA */
	u64 z_port_rcv_packets;			/* starting count for PMA */
	u32 z_pkey_violations;			/* starting count for PMA */
	u32 z_local_link_integrity_errors;	/* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
	u32 z_vl15_dropped;			/* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_timeouts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_wqe_errs;
	u32 n_rdma_dup_busy;
	u32 n_piowait;
	u32 n_unaligned;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 pending_index;	/* which pending queue is active */
	u8 pma_sample_status;
	u8 subnet_timeout;
	u8 vl_high_limit;
	struct ipath_opcode_stats opstats[128];
};

struct ipath_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

struct ipath_verbs_txreq {
	struct ipath_qp		*qp;
	struct ipath_swqe	*wqe;
	u32			 map_len;
	u32			 len;
	struct ipath_sge_state	*ss;
	struct ipath_pio_header	 hdr;
	struct ipath_sdma_txreq	 txreq;
};

static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ipath_mr, ibmr);
}

static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ipath_pd, ibpd);
}

static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ipath_ah, ibah);
}

static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ipath_cq, ibcq);
}

static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ipath_srq, ibsrq);
}

static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ipath_qp, ibqp);
}

static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ipath_ibdev, ibdev);
}

/*
 * This must be called with s_lock held.
 */
static inline void ipath_schedule_send(struct ipath_qp *qp)
{
	if (qp->s_flags & IPATH_S_ANY_WAIT)
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
	if (!(qp->s_flags & IPATH_S_BUSY))
		tasklet_hi_schedule(&qp->s_task);
}
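
/*
 * Illustrative sketch (not from the original ipath sources): a typical
 * caller honors the locking rule above by taking s_lock around the
 * call, roughly like this:
 */
static inline void example_kick_send(struct ipath_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	ipath_schedule_send(qp);	/* s_lock held, as required */
	spin_unlock_irqrestore(&qp->s_lock, flags);
}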

int ipath_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int ipath_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
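
/*
 * Illustrative note (not from the original ipath sources): PSNs are 24
 * bits wide, so the difference fits in 25 bits and the << 8 moves bit
 * 23 of the difference into the sign bit; the result therefore orders
 * PSNs correctly even across 24-bit wraparound.  A typical (assumed)
 * duplicate check:
 */
static inline int example_psn_is_old(struct ipath_qp *qp, u32 psn)
{
	return ipath_cmp24(psn, qp->r_psn) < 0;
}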

struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);

int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait);

int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs);

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_mcast_tree_empty(void);

__be32 ipath_compute_aeth(struct ipath_qp *qp);

struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);

struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);

int ipath_destroy_qp(struct ib_qp *ibqp);

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata);

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);

int ipath_init_qp_table(struct ipath_ibdev *idev, int size);

void ipath_get_credit(struct ipath_qp *qp, u32 aeth);

unsigned ipath_ib_rate_to_mult(enum ib_rate rate);

int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);

void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);

void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_restart_rc(struct ipath_qp *qp, u32 psn);

void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
		     struct ipath_mregion *mr);

void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);

int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);

int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr);

struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata);

int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata);

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int ipath_destroy_srq(struct ib_srq *ibsrq);

void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq,
			enum ib_cq_notify_flags notify_flags);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata);

int ipath_dereg_mr(struct ib_mr *ibmr);

struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr);

int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova);

int ipath_unmap_fmr(struct list_head *fmr_list);

int ipath_dealloc_fmr(struct ib_fmr *ibfmr);

void ipath_release_mmap_info(struct kref *ref);

struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj);

void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj);

int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

void ipath_insert_rnr_queue(struct ipath_qp *qp);

int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
		   u32 *lengthp, struct ipath_sge_state *ss);

int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords);

void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
			   struct ipath_other_headers *ohdr,
			   u32 bth0, u32 bth2);

void ipath_do_send(unsigned long data);

void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
			 enum ib_wc_status status);

int ipath_make_rc_req(struct ipath_qp *qp);

int ipath_make_uc_req(struct ipath_qp *qp);

int ipath_make_ud_req(struct ipath_qp *qp);

int ipath_register_ib_device(struct ipath_devdata *);

void ipath_unregister_ib_device(struct ipath_ibdev *);

void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);

int ipath_ib_piobufavail(struct ipath_ibdev *);

unsigned ipath_get_npkeys(struct ipath_devdata *);

u32 ipath_get_cr_errpkey(struct ipath_devdata *);

unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);

extern const enum ib_wc_opcode ib_ipath_wc_opcode[];

/*
 * Below converts HCA-specific LinkTrainingState to IB PhysPortState
 * values.
 */
extern const u8 ipath_cvt_physportstate[];
#define IB_PHYSPORTSTATE_SLEEP			1
#define IB_PHYSPORTSTATE_POLL			2
#define IB_PHYSPORTSTATE_DISABLED		3
#define IB_PHYSPORTSTATE_CFG_TRAIN		4
#define IB_PHYSPORTSTATE_LINKUP			5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER	6

extern const int ib_ipath_state_ops[];

extern unsigned int ib_ipath_lkey_table_size;

extern unsigned int ib_ipath_max_cqes;

extern unsigned int ib_ipath_max_cqs;

extern unsigned int ib_ipath_max_qp_wrs;

extern unsigned int ib_ipath_max_qps;

extern unsigned int ib_ipath_max_sges;

extern unsigned int ib_ipath_max_mcast_grps;

extern unsigned int ib_ipath_max_mcast_qp_attached;

extern unsigned int ib_ipath_max_srqs;

extern unsigned int ib_ipath_max_srq_sges;

extern unsigned int ib_ipath_max_srq_wrs;

extern const u32 ib_ipath_rnr_table[];

extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;

#endif /* IPATH_VERBS_H */