Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
cxio_wr.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses. You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  * Redistribution and use in source and binary forms, with or
11  * without modification, are permitted provided that the following
12  * conditions are met:
13  *
14  * - Redistributions of source code must retain the above
15  * copyright notice, this list of conditions and the following
16  * disclaimer.
17  *
18  * - Redistributions in binary form must reproduce the above
19  * copyright notice, this list of conditions and the following
20  * disclaimer in the documentation and/or other materials
21  * provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #ifndef __CXIO_WR_H__
33 #define __CXIO_WR_H__
34 
35 #include <asm/io.h>
36 #include <linux/pci.h>
37 #include <linux/timer.h>
38 #include "firmware_exports.h"
39 
40 #define T3_MAX_SGE 4
41 #define T3_MAX_INLINE 64
42 #define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
43 #define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
44 #define T3_STAG0_PAGE_SHIFT 15
45 
/*
 * Ring-buffer index helpers.  Queues use free-running 32-bit read/write
 * pointers and hold 2^size_log2 entries.  All macro parameters are fully
 * parenthesized so callers may pass arbitrary expressions.
 */
#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
			    ((rptr)!=(wptr)) )
/* Generation bit flips each time the free-running pointer wraps the queue. */
#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>(size_log2))&0x1))
#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<(size_log2))-((wptr)-(rptr)))
#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
/* Mask a free-running pointer down to an index within the queue. */
#define Q_PTR2IDX(ptr,size_log2) ((ptr) & ((1UL<<(size_log2))-1))
53 
54 static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
55 {
56  writel(((1<<31) | qpid), doorbell);
57 }
58 
59 #define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
60 
62  T3_COMPLETION_FLAG = 0x01,
63  T3_NOTIFY_FLAG = 0x02,
65  T3_READ_FENCE_FLAG = 0x08,
66  T3_LOCAL_FENCE_FLAG = 0x10
67 } __attribute__ ((packed));
68 
80 } __attribute__ ((packed));
81 
83  T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
91  T3_RDMA_INIT, /* CHELSIO RI specific ... */
92  T3_BIND_MW,
95  T3_QP_MOD,
96  T3_BYPASS,
98 } __attribute__ ((packed));
99 
100 static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
101 {
102  switch (wrop) {
103  case T3_WR_BP: return T3_BYPASS;
104  case T3_WR_SEND: return T3_SEND;
105  case T3_WR_WRITE: return T3_RDMA_WRITE;
106  case T3_WR_READ: return T3_READ_REQ;
107  case T3_WR_INV_STAG: return T3_LOCAL_INV;
108  case T3_WR_BIND: return T3_BIND_MW;
109  case T3_WR_INIT: return T3_RDMA_INIT;
110  case T3_WR_QP_MOD: return T3_QP_MOD;
111  case T3_WR_FASTREG: return T3_FAST_REGISTER;
112  default: break;
113  }
114  return -1;
115 }
116 
117 
118 /* Work request id */
119 union t3_wrid {
120  struct {
123  } id0;
125 };
126 
127 #define WRID(wrid) (wrid.id1)
128 #define WRID_GEN(wrid) (wrid.id0.wr_gen)
129 #define WRID_IDX(wrid) (wrid.id0.wr_idx)
130 #define WRID_LO(wrid) (wrid.id0.wr_lo)
131 
132 struct fw_riwrh {
135 };
136 
137 #define S_FW_RIWR_OP 24
138 #define M_FW_RIWR_OP 0xff
139 #define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
140 #define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
141 
142 #define S_FW_RIWR_SOPEOP 22
143 #define M_FW_RIWR_SOPEOP 0x3
144 #define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
145 
146 #define S_FW_RIWR_FLAGS 8
147 #define M_FW_RIWR_FLAGS 0x3fffff
148 #define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
149 #define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
150 
151 #define S_FW_RIWR_TID 8
152 #define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
153 
154 #define S_FW_RIWR_LEN 0
155 #define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
156 
157 #define S_FW_RIWR_GEN 31
158 #define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
159 
160 struct t3_sge {
164 };
165 
166 /* If num_sgle is zero, flit 5+ contains immediate data.*/
167 struct t3_send_wr {
168  struct fw_riwrh wrh; /* 0 */
169  union t3_wrid wrid; /* 1 */
170 
171  u8 rdmaop; /* 2 */
174  __be32 plen; /* 3 */
176  struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
177 };
178 
179 #define T3_MAX_FASTREG_DEPTH 10
180 #define T3_MAX_FASTREG_FRAG 10
181 
183  struct fw_riwrh wrh; /* 0 */
184  union t3_wrid wrid; /* 1 */
185  __be32 stag; /* 2 */
191  __be64 pbl_addrs[0]; /* 5+ */
192 };
193 
/*
 * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like
 * this: a bare WR header followed by up to 14 more PBL addresses.
 */
struct t3_pbl_frag {
	struct fw_riwrh wrh;	/* 0 */
	__be64 pbl_addrs[14];	/* 1..14 */
};
201 
202 #define S_FR_PAGE_COUNT 24
203 #define M_FR_PAGE_COUNT 0xff
204 #define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
205 #define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
206 
207 #define S_FR_PAGE_SIZE 16
208 #define M_FR_PAGE_SIZE 0x1f
209 #define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
210 #define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
211 
212 #define S_FR_TYPE 8
213 #define M_FR_TYPE 0x1
214 #define V_FR_TYPE(x) ((x) << S_FR_TYPE)
215 #define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
216 
217 #define S_FR_PERMS 0
218 #define M_FR_PERMS 0xff
219 #define V_FR_PERMS(x) ((x) << S_FR_PERMS)
220 #define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
221 
223  struct fw_riwrh wrh; /* 0 */
224  union t3_wrid wrid; /* 1 */
225  __be32 stag; /* 2 */
227 };
228 
230  struct fw_riwrh wrh; /* 0 */
231  union t3_wrid wrid; /* 1 */
232  u8 rdmaop; /* 2 */
235  __be64 to_sink; /* 3 */
236  __be32 plen; /* 4 */
238  struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
239 };
240 
242  struct fw_riwrh wrh; /* 0 */
243  union t3_wrid wrid; /* 1 */
244  u8 rdmaop; /* 2 */
248  __be64 rem_to; /* 3 */
251  __be64 local_to; /* 5 */
252 };
253 
255  struct fw_riwrh wrh; /* 0 */
256  union t3_wrid wrid; /* 1 */
257  u16 reserved; /* 2 */
261  __be32 mw_stag; /* 3 */
263  __be64 mw_va; /* 4 */
267 };
268 
270  struct fw_riwrh wrh; /* 0 */
271  union t3_wrid wrid; /* 1 */
273  __be32 num_sgle; /* 2 */
274  struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
276 };
277 
/* Bypass work request: just a WR header and a work-request id, no payload. */
struct t3_bypass_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
};
282 
284  struct fw_riwrh wrh; /* 0 */
285  union t3_wrid wrid; /* 1 */
286  __be32 flags; /* 2 */
287  __be32 quiesce; /* 2 */
288  __be32 max_ird; /* 3 */
289  __be32 max_ord; /* 3 */
290  __be64 sge_cmd; /* 4 */
291  __be64 ctx1; /* 5 */
292  __be64 ctx0; /* 6 */
293 };
294 
301 };
302 
303 
307  uP_RI_MPA_CRC_ENABLE = 0x4,
309 } __attribute__ ((packed));
314  uP_RI_QP_BIND_ENABLE = 0x04,
316  uP_RI_QP_STAG0_ENABLE = 0x10
317 } __attribute__ ((packed));
320  RTR_READ = 1,
322  RTR_SEND = 3,
323 };
324 
325 #define S_RTR_TYPE 2
326 #define M_RTR_TYPE 0x3
327 #define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
328 #define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
329 
330 #define S_CHAN 4
331 #define M_CHAN 0x3
332 #define V_CHAN(x) ((x) << S_CHAN)
333 #define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
334 
355 };
356 
358  struct fw_riwrh wrh; /* 0 */
359  union t3_wrid wrid; /* 1 */
360  __be32 qpid; /* 2 */
362  __be32 scqid; /* 3 */
364  __be32 rq_addr; /* 4 */
366  u8 mpaattrs; /* 5 */
371  __be32 ord; /* 6 */
376 };
377 
378 struct t3_genbit {
379  u64 flit[15];
381 };
382 
383 struct t3_wq_in_err {
384  u64 flit[13];
386 };
387 
389  MPA_INITIATOR = (1<<0),
390  PRIV_QP = (1<<1),
391 };
392 
393 union t3_wr {
394  struct t3_send_wr send;
408 };
409 
410 #define T3_SQ_CQE_FLIT 13
411 #define T3_SQ_COOKIE_FLIT 14
412 
413 #define T3_RQ_COOKIE_FLIT 13
414 #define T3_RQ_CQE_FLIT 14
415 
416 static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
417 {
418  return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
419 }
420 
422  T3_EOP = 1,
423  T3_SOP = 2,
425 };
426 
427 static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
428  enum t3_wr_flags flags, u8 genbit, u32 tid,
429  u8 len, u8 sopeop)
430 {
432  V_FW_RIWR_SOPEOP(sopeop) |
433  V_FW_RIWR_FLAGS(flags));
434  wmb();
435  wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
436  V_FW_RIWR_TID(tid) |
437  V_FW_RIWR_LEN(len));
438  /* 2nd gen bit... */
439  ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
440 }
441 
442 /*
443  * T3 ULP2_TX commands
444  */
448 };
449 
450 /* T3 MC7 RDMA TPT entry format */
451 
455  TPT_MW = 0x2,
457 };
458 
460  TPT_ZBTO = 0,
462 };
463 
465  TPT_MW_BIND = 0x10,
470 };
471 
472 struct tpt_entry {
475 
480 
483 };
484 
485 #define S_TPT_VALID 31
486 #define V_TPT_VALID(x) ((x) << S_TPT_VALID)
487 #define F_TPT_VALID V_TPT_VALID(1U)
488 
489 #define S_TPT_STAG_KEY 23
490 #define M_TPT_STAG_KEY 0xFF
491 #define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
492 #define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
493 
494 #define S_TPT_STAG_STATE 22
495 #define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
496 #define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
497 
498 #define S_TPT_STAG_TYPE 20
499 #define M_TPT_STAG_TYPE 0x3
500 #define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
501 #define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
502 
503 #define S_TPT_PDID 0
504 #define M_TPT_PDID 0xFFFFF
505 #define V_TPT_PDID(x) ((x) << S_TPT_PDID)
506 #define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
507 
508 #define S_TPT_PERM 28
509 #define M_TPT_PERM 0xF
510 #define V_TPT_PERM(x) ((x) << S_TPT_PERM)
511 #define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
512 
513 #define S_TPT_REM_INV_DIS 27
514 #define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
515 #define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
516 
517 #define S_TPT_ADDR_TYPE 26
518 #define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
519 #define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
520 
521 #define S_TPT_MW_BIND_ENABLE 25
522 #define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
523 #define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
524 
525 #define S_TPT_PAGE_SIZE 20
526 #define M_TPT_PAGE_SIZE 0x1F
527 #define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
528 #define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
529 
530 #define S_TPT_PBL_ADDR 0
531 #define M_TPT_PBL_ADDR 0x1FFFFFFF
532 #define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
533 #define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
534 
535 #define S_TPT_QPID 0
536 #define M_TPT_QPID 0xFFFFF
537 #define V_TPT_QPID(x) ((x) << S_TPT_QPID)
538 #define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
539 
540 #define S_TPT_PSTAG 0
541 #define M_TPT_PSTAG 0xFFFFFF
542 #define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
543 #define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
544 
545 #define S_TPT_PBL_SIZE 0
546 #define M_TPT_PBL_SIZE 0xFFFFF
547 #define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
548 #define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
549 
550 /*
551  * CQE defs
552  */
553 struct t3_cqe {
556  union {
557  struct {
560  } rcqe;
561  struct {
564  } scqe;
565  } u;
566 };
567 
#define S_CQE_OOO 31
#define M_CQE_OOO 0x1
#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
/* Historical misspelling ("CEQ") kept for compatibility with existing callers. */
#define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)
/* Correctly spelled alias, matching the V_CQE_* naming of the other fields. */
#define V_CQE_OOO(x) ((x)<<S_CQE_OOO)
572 
573 #define S_CQE_QPID 12
574 #define M_CQE_QPID 0x7FFFF
575 #define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
576 #define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
577 
578 #define S_CQE_SWCQE 11
579 #define M_CQE_SWCQE 0x1
580 #define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
581 #define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
582 
583 #define S_CQE_GENBIT 10
584 #define M_CQE_GENBIT 0x1
585 #define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
586 #define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
587 
588 #define S_CQE_STATUS 5
589 #define M_CQE_STATUS 0x1F
590 #define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
591 #define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
592 
593 #define S_CQE_TYPE 4
594 #define M_CQE_TYPE 0x1
595 #define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
596 #define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
597 
598 #define S_CQE_OPCODE 0
599 #define M_CQE_OPCODE 0xF
600 #define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
601 #define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
602 
603 #define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))
604 #define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))
605 #define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))
606 #define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))
607 #define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))
608 #define SQ_TYPE(x) (CQE_TYPE((x)))
609 #define RQ_TYPE(x) (!CQE_TYPE((x)))
610 #define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
611 #define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))
612 
613 #define CQE_SEND_OPCODE(x)( \
614  (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
615  (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
616  (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
617  (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
618 
619 #define CQE_LEN(x) (be32_to_cpu((x).len))
620 
621 /* used for RQ completion processing */
622 #define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))
623 #define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))
624 
625 /* used for SQ completion processing */
626 #define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
627 #define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
628 
629 /* generic accessor macros */
630 #define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
631 #define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
632 
/* TPT / CQE completion status codes reported by the adapter. */
#define TPT_ERR_SUCCESS 0x0
#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
			 /* STAG is off limit, being 0, */
			 /* or STAG_key mismatch */
#define TPT_ERR_PDID 0x2 /* PDID mismatch */
#define TPT_ERR_QPID 0x3 /* QPID mismatch */
#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
#define TPT_ERR_WRAP 0x5 /* Wrap error */
#define TPT_ERR_BOUND 0x6 /* base and bounds violation */
#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
					 /* shared memory region */
#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
						/* shared memory region */
#define TPT_ERR_ECC 0x9 /* ECC error detected */
#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
			      /* reading PSTAG for a MW */
			      /* Invalidate */
#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
				   /* software error */
#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
#define TPT_ERR_CRC 0x10 /* CRC error */
#define TPT_ERR_MARKER 0x11 /* Marker error */
#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
#define TPT_ERR_MSN 0x18 /* MSN error */
#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
			/* or READ_REQ */
#define TPT_ERR_MSN_GAP 0x1B
#define TPT_ERR_MSN_RANGE 0x1C
#define TPT_ERR_IRD_OVERFLOW 0x1D
#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
				    /* software error */
#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
				  /* mismatch) */
672 
673 struct t3_swsq {
675  struct t3_cqe cqe;
678  int opcode;
679  int complete;
680  int signaled;
681 };
682 
683 struct t3_swrq {
686 };
687 
688 /*
689  * A T3 WQ implements both the SQ and RQ.
690  */
691 struct t3_wq {
692  union t3_wr *queue; /* DMA accessible memory */
693  dma_addr_t dma_addr; /* DMA address for HW */
694  DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
695  u32 error; /* 1 once we go to ERROR */
697  u32 wptr; /* idx to next available WR slot */
698  u32 size_log2; /* total wq size */
699  struct t3_swsq *sq; /* SW SQ */
700  struct t3_swsq *oldest_read; /* tracks oldest pending read */
701  u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
702  u32 sq_rptr; /* pending wrs */
703  u32 sq_size_log2; /* sq size */
704  struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids */
705  u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
706  u32 rq_rptr; /* pending wrs */
707  struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
708  u32 rq_size_log2; /* rq size */
709  u32 rq_addr; /* rq adapter address */
710  void __iomem *doorbell; /* kernel db */
711  u64 udb; /* user db if any */
712  struct cxio_rdev *rdev;
713 };
714 
715 struct t3_cq {
722  struct t3_cqe *queue;
723  struct t3_cqe *sw_queue;
726 };
727 
728 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
729  CQE_GENBIT(*cqe))
730 
733 };
734 
735 static inline int cxio_cq_in_error(struct t3_cq *cq)
736 {
737  return ((struct t3_cq_status_page *)
738  &cq->queue[1 << cq->size_log2])->cq_err;
739 }
740 
741 static inline void cxio_set_cq_in_error(struct t3_cq *cq)
742 {
743  ((struct t3_cq_status_page *)
744  &cq->queue[1 << cq->size_log2])->cq_err = 1;
745 }
746 
747 static inline void cxio_set_wq_in_error(struct t3_wq *wq)
748 {
749  wq->queue->wq_in_err.err |= 1;
750 }
751 
752 static inline void cxio_disable_wq_db(struct t3_wq *wq)
753 {
754  wq->queue->wq_in_err.err |= 2;
755 }
756 
757 static inline void cxio_enable_wq_db(struct t3_wq *wq)
758 {
759  wq->queue->wq_in_err.err &= ~2;
760 }
761 
762 static inline int cxio_wq_db_enabled(struct t3_wq *wq)
763 {
764  return !(wq->queue->wq_in_err.err & 2);
765 }
766 
767 static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
768 {
769  struct t3_cqe *cqe;
770 
771  cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
772  if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
773  return cqe;
774  return NULL;
775 }
776 
777 static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
778 {
779  struct t3_cqe *cqe;
780 
781  if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
782  cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
783  return cqe;
784  }
785  return NULL;
786 }
787 
788 static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
789 {
790  struct t3_cqe *cqe;
791 
792  if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
793  cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
794  return cqe;
795  }
796  cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
797  if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
798  return cqe;
799  return NULL;
800 }
801 
802 #endif