Linux Kernel 3.7.1
qib_verbs.h

/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
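
/*
 * Worked example (assuming 4 KiB pages): QPN_MAX is 1 << 24 QP numbers,
 * so a bitmap of free QPNs needs (1 << 24) / 8 = 2 MiB of bitmap, i.e.
 * 2 MiB / 4 KiB = 512 map pages; hence QPNMAP_ENTRIES == 512.
 */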

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK      (3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
	(QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
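
/*
 * Usage sketch (illustrative, not part of the original header): callers
 * index ib_qib_state_ops[] (declared near the end of this file) by QP
 * state and test these bits, e.g.
 *
 *	if (!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))
 *		return -EINVAL;		(QP not ready for a new send)
 */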

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B

#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}
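
/*
 * Usage sketch (illustrative): a VLCap/OperationalVLs field decoded from
 * PortInfo maps to a data VL count, e.g. qib_num_vls(IB_VL_VL0_3) == 4.
 */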

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct qib_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct qib_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct qib_other_headers oth;
		} l;
		struct qib_other_headers oth;
	} u;
} __attribute__ ((packed));

struct qib_pio_header {
	__le32 pbc[2];
	struct qib_ib_header hdr;
} __attribute__ ((packed));
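
/*
 * Compile-time sanity sketch (an illustration, not part of the original
 * header; assumes BUILD_BUG_ON() from <linux/bug.h> is visible): the
 * 68-byte figure above is LRH (8) + GRH (40) + BTH (12) + DETH (8).
 * BUILD_BUG_ON() must live inside a function, hence this hypothetical
 * helper.
 */
static inline void qib_hdr_size_checks(void)
{
	BUILD_BUG_ON(sizeof(((struct qib_ib_header *)0)->lrh) != 8);
	BUILD_BUG_ON(sizeof(struct ib_grh) != 40);
}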

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
	struct list_head list;
	struct qib_qp *qp;
};

struct qib_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct qib_pd {
	struct ib_pd ibpd;
	int user;               /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
	u32 head;               /* index of next entry to fill */
	u32 tail;               /* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};

/*
 * The completion queue structure.
 */
struct qib_cq {
	struct ib_cq ibcq;
	struct work_struct comptask;
	spinlock_t lock;        /* protect changes in this struct */
	u8 notify;
	u8 triggered;
	struct qib_cq_wc *queue;
	struct qib_mmap_info *ip;
};
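
/*
 * Minimal consumer sketch (an illustration, not part of the original
 * header): pop one completion from the shared ring.  head == tail means
 * the ring is empty, and the ring holds ibcq.cqe + 1 slots so the head
 * can always advance without overwriting the tail.  Real callers
 * serialize with cq->lock.
 */
static inline int qib_cq_pop_sketch(struct qib_cq *cq, struct ib_wc *wc)
{
	struct qib_cq_wc *q = cq->queue;

	if (q->tail == q->head)
		return 0;                       /* nothing completed */
	*wc = q->kqueue[q->tail];               /* kernel-mapped CQ */
	q->tail = (q->tail == cq->ibcq.cqe) ? 0 : q->tail + 1;
	return 1;
}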

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
	void *vaddr;
	size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
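
/*
 * Worked example (assuming a 64-bit kernel with 4 KiB pages): struct
 * qib_seg is a pointer plus a size_t, 16 bytes, so QIB_SEGSZ is
 * 4096 / 16 = 256 segments per map page.
 */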

struct qib_segarray {
	struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;           /* number of qib_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	u8  page_shift;         /* 0 - non-uniform/non-power-of-2 sizes */
	u8  lkey_published;     /* in global table */
	struct completion comp; /* complete when refcount goes to zero */
	struct rcu_head list;
	atomic_t refcount;
	struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
	struct qib_mregion *mr;
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct qib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
	struct ib_send_wr wr;   /* don't use wr.sg_list */
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct qib_rwqe wq[0];
};

struct qib_rq {
	struct qib_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	spinlock_t lock /* protect changes in this struct */
		____cacheline_aligned_in_smp;
};

struct qib_srq {
	struct ib_srq ibsrq;
	struct qib_rq rq;
	struct qib_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct qib_sge_state {
	struct qib_sge *sg_list;        /* next SGE to be used if any */
	struct qib_sge sge;     /* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct qib_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct qib_sge rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct qib_qp {
	struct ib_qp ibqp;
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct qib_qp __rcu *next;      /* link list for QPN hash table */
	struct qib_swqe *s_wq;  /* send work queue */
	struct qib_mmap_info *ip;
	struct qib_ib_header *s_hdr;    /* next packet header to send */
	unsigned long timeout_jiffies;  /* computed from timeout */

	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 pmtu;               /* decoded from path_mtu */
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

	u8 state;               /* QP state */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */

	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_psn;              /* expected rcv packet sequence number */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */

	struct list_head rspwait;       /* link for waiting to respond */

	struct qib_sge_state r_sge;     /* current receive data */
	struct qib_rq r_rq;             /* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct qib_sge_state *s_cur_sge;
	u32 s_flags;
	struct qib_verbs_txreq *s_tx;
	struct qib_swqe *s_wqe;
	struct qib_sge_state s_sge;     /* current send request data */
	struct qib_mregion *s_rdma_mr;
	atomic_t s_dma_busy;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_head;             /* new entries added here */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_ssn;              /* SSN of tail entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct qib_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct list_head iowait;        /* link for wait PIO buf */

	struct work_struct s_work;

	wait_queue_head_t wait_dma;

	struct qib_sge r_sg_list[0]     /* verified SGEs */
		____cacheline_aligned_in_smp;
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
					    unsigned n)
{
	return (struct qib_swqe *)((char *)qp->s_wq +
				   (sizeof(struct qib_swqe) +
				    qp->s_max_sge *
				    sizeof(struct qib_sge)) * n);
}
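
/*
 * Iteration sketch (an illustration, not part of the original header):
 * because each entry also carries s_max_sge struct qib_sge slots, the
 * ring must be walked with get_swqe_ptr() rather than array indexing.
 */
static inline void qib_swq_walk_sketch(struct qib_qp *qp)
{
	u32 n = qp->s_acked;

	while (n != qp->s_head) {
		struct qib_swqe *wqe = get_swqe_ptr(qp, n);

		(void)wqe;      /* e.g. inspect wqe->psn..wqe->lpsn */
		if (++n == qp->s_size)
			n = 0;
	}
}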

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
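
/*
 * Example (illustrative): with rq->max_sge == 2, consecutive RWQEs sit
 * sizeof(struct qib_rwqe) + 2 * sizeof(struct ib_sge) bytes apart, so
 * get_rwqe_ptr(rq, rq->wq->tail) locates the next entry to receive into.
 */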

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct qib_qpn_table {
	spinlock_t lock;        /* protect changes in this struct */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u16 mask;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
	spinlock_t lock;        /* protect changes in this struct */
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
	u32 max;                /* size of the table */
	struct qib_mregion __rcu **table;
};

struct qib_opcode_stats {
	u64 n_packets;          /* number of packets */
	u64 n_bytes;            /* total number of bytes */
};

struct qib_ibport {
	struct qib_qp __rcu *qp0;
	struct qib_qp __rcu *qp1;
	struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
	struct qib_ah *sm_ah;
	struct qib_ah *smi_ah;
	struct rb_root mcast_tree;
	spinlock_t lock;        /* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;
	__be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
	u64 tid;                /* TID for traps */
	u64 n_unicast_xmit;     /* total unicast packets sent */
	u64 n_unicast_rcv;      /* total unicast packets received */
	u64 n_multicast_xmit;   /* total multicast packets sent */
	u64 n_multicast_rcv;    /* total multicast packets received */
	u64 z_symbol_error_counter;             /* starting count for PMA */
	u64 z_link_error_recovery_counter;      /* starting count for PMA */
	u64 z_link_downed_counter;              /* starting count for PMA */
	u64 z_port_rcv_errors;                  /* starting count for PMA */
	u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
	u64 z_port_xmit_discards;               /* starting count for PMA */
	u64 z_port_xmit_data;                   /* starting count for PMA */
	u64 z_port_rcv_data;                    /* starting count for PMA */
	u64 z_port_xmit_packets;                /* starting count for PMA */
	u64 z_port_rcv_packets;                 /* starting count for PMA */
	u32 z_local_link_integrity_errors;      /* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
	u32 z_vl15_dropped;                     /* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_rc_delayed_comp;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_loop_pkts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_rc_timeouts;
	u32 n_dmawait;
	u32 n_unaligned;
	u32 n_rc_dupreq;
	u32 n_rc_seqnak;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 sm_lid;
	u16 repress_traps;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;
	u8 sl_to_vl[16];

	struct qib_opcode_stats opstats[128];
};

struct qib_ibdev {
	struct ib_device ibdev;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;    /* protect mmap_offset */
	u32 mmap_offset;
	struct qib_mregion __rcu *dma_mr;

	/* QP numbers are shared by all IB ports */
	struct qib_qpn_table qpn_table;
	struct qib_lkey_table lk_table;
	struct list_head piowait;       /* list for wait PIO buf */
	struct list_head dmawait;       /* list for wait DMA */
	struct list_head txwait;        /* list for wait qib_verbs_txreq */
	struct list_head memwait;       /* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_qp __rcu **qp_table;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;
	/* list of QPs waiting for RNR timer */
	spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
	u32 qp_table_size;      /* size of the hash table */
	u32 qp_rnd;             /* random bytes for hash */
	spinlock_t qpt_lock;

	u32 n_piowait;
	u32 n_txwait;

	u32 n_pds_allocated;    /* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;    /* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;    /* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;    /* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;   /* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;
};

struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * a RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

extern struct workqueue_struct *qib_cq_wq;

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member.
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
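
/*
 * Examples (illustrative): qib_pkey_ok(0xFFFF, 0x7FFF) is true - the low
 * 15 bits match and 0xFFFF is a full member (bit 15 set, so the __s16
 * cast is negative).  qib_pkey_ok(0x7FFF, 0x7FFF) is false: two limited
 * members may not communicate.
 */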

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
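
/*
 * Example (illustrative): the shift makes bit 23 the sign bit, so the
 * comparison works modulo 2^24 - e.g. qib_cmp24(0x000001, 0xFFFFFF) > 0,
 * i.e. PSN 1 is "after" PSN 0xffffff across the wrap.
 */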

struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
		  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);

void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
				struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

static inline void qib_get_mr(struct qib_mregion *mr)
{
	atomic_inc(&mr->refcount);
}

void mr_rcu_callback(struct rcu_head *list);

static inline void qib_put_mr(struct qib_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		call_rcu(&mr->list, mr_rcu_callback);
}

static inline void qib_put_ss(struct qib_sge_state *ss)
{
	while (ss->num_sge) {
		qib_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}
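
/*
 * Usage sketch (illustrative): every SGE reachable from an active
 * qib_sge_state holds a qib_get_mr() reference, so error paths can drop
 * them all at once, e.g.:
 *
 *	qib_put_ss(&qp->r_sge);
 */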

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
					   struct ib_ucontext *context,
					   void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
			  u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * HCA-independent IB PhysPortState values, as returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif                          /* QIB_VERBS_H */