/* drivers/infiniband/hw/cxgb3/cxio_wr.h (Linux kernel 3.7.1) */
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __CXIO_WR_H__
#define __CXIO_WR_H__

#include <asm/io.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include "firmware_exports.h"

#define T3_MAX_SGE	4
#define T3_MAX_INLINE	64
#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
#define T3_STAG0_PAGE_SHIFT 15

#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
#define Q_FULL(rptr,wptr,size_log2)  ( (((wptr)-(rptr))>>(size_log2)) && \
				       ((rptr)!=(wptr)) )
#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))

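/*
 * Usage sketch (illustrative only, not part of the driver): rptr and
 * wptr are free-running 32-bit counters that are only reduced modulo
 * the queue size when an index is needed, so counter wraparound is
 * harmless for power-of-two queue depths.  Q_GENBIT() flips each time
 * a pointer wraps the queue, which is how CQ_VLD_ENTRY() further down
 * tells fresh entries from stale ones.  The helper name below is
 * hypothetical.
 */
static inline int example_q_put(u32 *rptr, u32 *wptr, u32 size_log2)
{
	if (Q_FULL(*rptr, *wptr, size_log2))
		return -1;		/* no free slot */
	/* new entry would go at Q_PTR2IDX(*wptr, size_log2) */
	(*wptr)++;			/* publish the entry */
	return 0;
}
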
static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
{
	writel(((1<<31) | qpid), doorbell);
}

#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))

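/*
 * Note: SEQ32_GE() is serial-number arithmetic: it is true when
 * (x - y) mod 2^32 is less than 2^31, i.e. x is at or ahead of y as
 * long as the two counters stay within 2^31 of each other.  For
 * example, SEQ32_GE(0x00000001, 0xffffffff) holds across the wrap.
 */
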
enum t3_wr_flags {
	T3_COMPLETION_FLAG = 0x01,
	T3_NOTIFY_FLAG = 0x02,
	T3_SOLICITED_EVENT_FLAG = 0x04,
	T3_READ_FENCE_FLAG = 0x08,
	T3_LOCAL_FENCE_FLAG = 0x10
} __attribute__ ((packed));

enum t3_wr_opcode {
	T3_WR_BP = FW_WROPCODE_RI_BYPASS,
	T3_WR_SEND = FW_WROPCODE_RI_SEND,
	T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
	T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
	T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
	T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
	T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
	T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
	T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
	T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
} __attribute__ ((packed));

enum t3_rdma_opcode {
	T3_RDMA_WRITE,		/* IETF RDMAP v1.0 ... */
	T3_READ_REQ,
	T3_READ_RESP,
	T3_SEND,
	T3_SEND_WITH_INV,
	T3_SEND_WITH_SE,
	T3_SEND_WITH_SE_INV,
	T3_TERMINATE,
	T3_RDMA_INIT,		/* CHELSIO RI specific ... */
	T3_BIND_MW,
	T3_FAST_REGISTER,
	T3_LOCAL_INV,
	T3_QP_MOD,
	T3_BYPASS,
	T3_RDMA_READ_REQ_WITH_INV,
} __attribute__ ((packed));

static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
{
	switch (wrop) {
	case T3_WR_BP: return T3_BYPASS;
	case T3_WR_SEND: return T3_SEND;
	case T3_WR_WRITE: return T3_RDMA_WRITE;
	case T3_WR_READ: return T3_READ_REQ;
	case T3_WR_INV_STAG: return T3_LOCAL_INV;
	case T3_WR_BIND: return T3_BIND_MW;
	case T3_WR_INIT: return T3_RDMA_INIT;
	case T3_WR_QP_MOD: return T3_QP_MOD;
	case T3_WR_FASTREG: return T3_FAST_REGISTER;
	default: break;
	}
	return -1;
}

/* Work request id */
union t3_wrid {
	struct {
		u32 hi;
		u32 low;
	} id0;
	u64 id1;
};

#define WRID(wrid)	(wrid.id1)
#define WRID_GEN(wrid)	(wrid.id0.wr_gen)
#define WRID_IDX(wrid)	(wrid.id0.wr_idx)
#define WRID_LO(wrid)	(wrid.id0.wr_lo)

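/*
 * Note: WRID_GEN/WRID_IDX/WRID_LO reference wr_gen/wr_idx/wr_lo, fields
 * that union t3_wrid does not define; they appear to be stale leftovers
 * and only survive because nothing expands them.
 */
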
struct fw_riwrh {
	__be32 op_seop_flags;
	__be32 gen_tid_len;
};

#define S_FW_RIWR_OP		24
#define M_FW_RIWR_OP		0xff
#define V_FW_RIWR_OP(x)		((x) << S_FW_RIWR_OP)
#define G_FW_RIWR_OP(x)		((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)

#define S_FW_RIWR_SOPEOP	22
#define M_FW_RIWR_SOPEOP	0x3
#define V_FW_RIWR_SOPEOP(x)	((x) << S_FW_RIWR_SOPEOP)

#define S_FW_RIWR_FLAGS		8
#define M_FW_RIWR_FLAGS		0x3fffff
#define V_FW_RIWR_FLAGS(x)	((x) << S_FW_RIWR_FLAGS)
#define G_FW_RIWR_FLAGS(x)	((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)

#define S_FW_RIWR_TID		8
#define V_FW_RIWR_TID(x)	((x) << S_FW_RIWR_TID)

#define S_FW_RIWR_LEN		0
#define V_FW_RIWR_LEN(x)	((x) << S_FW_RIWR_LEN)

#define S_FW_RIWR_GEN		31
#define V_FW_RIWR_GEN(x)	((x) << S_FW_RIWR_GEN)

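/*
 * The S_/M_/V_/G_ convention used throughout this file: S_* is a field's
 * bit offset, M_* its mask after shifting, V_*(x) places a value into
 * the field, and G_*(x) extracts it.  A minimal sketch (illustrative
 * helper, not part of the driver) of building and decoding one word:
 */
static inline u8 example_riwr_op(u32 op, u32 sopeop, u32 flags)
{
	u32 word = V_FW_RIWR_OP(op) | V_FW_RIWR_SOPEOP(sopeop) |
		   V_FW_RIWR_FLAGS(flags);

	return G_FW_RIWR_OP(word);	/* recovers 'op' */
}
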
struct t3_sge {
	__be32 stag;
	__be32 len;
	__be64 to;
};

/* If num_sgle is zero, flit 5+ contains immediate data. */
struct t3_send_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */

	u8 rdmaop;		/* 2 */
	u8 reserved[3];
	__be32 rem_stag;
	__be32 plen;		/* 3 */
	__be32 num_sgle;
	struct t3_sge sgl[T3_MAX_SGE];	/* 4+ */
};

#define T3_MAX_FASTREG_DEPTH 10
#define T3_MAX_FASTREG_FRAG 10

struct t3_fastreg_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 stag;		/* 2 */
	__be32 len;
	__be32 va_base_hi;	/* 3 */
	__be32 va_base_lo_fbo;
	__be32 page_type_perms;	/* 4 */
	__be32 reserved1;
	__be64 pbl_addrs[0];	/* 5+ */
};

/*
 * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like this.
 */
struct t3_pbl_frag {
	struct fw_riwrh wrh;	/* 0 */
	__be64 pbl_addrs[14];	/* 1..14 */
};

#define S_FR_PAGE_COUNT		24
#define M_FR_PAGE_COUNT		0xff
#define V_FR_PAGE_COUNT(x)	((x) << S_FR_PAGE_COUNT)
#define G_FR_PAGE_COUNT(x)	((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)

#define S_FR_PAGE_SIZE		16
#define M_FR_PAGE_SIZE		0x1f
#define V_FR_PAGE_SIZE(x)	((x) << S_FR_PAGE_SIZE)
#define G_FR_PAGE_SIZE(x)	((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)

#define S_FR_TYPE		8
#define M_FR_TYPE		0x1
#define V_FR_TYPE(x)		((x) << S_FR_TYPE)
#define G_FR_TYPE(x)		((((x) >> S_FR_TYPE)) & M_FR_TYPE)

#define S_FR_PERMS		0
#define M_FR_PERMS		0xff
#define V_FR_PERMS(x)		((x) << S_FR_PERMS)
#define G_FR_PERMS(x)		((((x) >> S_FR_PERMS)) & M_FR_PERMS)

struct t3_local_inv_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 stag;		/* 2 */
	__be32 reserved;
};

struct t3_rdma_write_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u8 rdmaop;		/* 2 */
	u8 reserved[3];
	__be32 stag_sink;
	__be64 to_sink;		/* 3 */
	__be32 plen;		/* 4 */
	__be32 num_sgle;
	struct t3_sge sgl[T3_MAX_SGE];	/* 5+ */
};

struct t3_rdma_read_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u8 rdmaop;		/* 2 */
	u8 local_inv;
	u8 reserved[2];
	__be32 rem_stag;
	__be64 rem_to;		/* 3 */
	__be32 local_stag;	/* 4 */
	__be32 local_len;
	__be64 local_to;	/* 5 */
};

struct t3_bind_mw_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u16 reserved;		/* 2 */
	u8 type;
	u8 perms;
	__be32 mr_stag;
	__be32 mw_stag;		/* 3 */
	__be32 mw_len;
	__be64 mw_va;		/* 4 */
	__be32 mr_pbl_addr;	/* 5 */
	u8 reserved2[3];
	u8 mr_pagesz;
};

struct t3_receive_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u8 pagesz[T3_MAX_SGE];
	__be32 num_sgle;		/* 2 */
	struct t3_sge sgl[T3_MAX_SGE];	/* 3+ */
	__be32 pbl_addr[T3_MAX_SGE];
};

struct t3_bypass_wr {
	struct fw_riwrh wrh;
	union t3_wrid wrid;	/* 1 */
};

struct t3_modify_qp_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 flags;		/* 2 */
	__be32 quiesce;		/* 2 */
	__be32 max_ird;		/* 3 */
	__be32 max_ord;		/* 3 */
	__be64 sge_cmd;		/* 4 */
	__be64 ctx1;		/* 5 */
	__be64 ctx0;		/* 6 */
};

enum t3_modify_qp_flags {
	MODQP_QUIESCE = 0x01,
	MODQP_MAX_IRD = 0x02,
	MODQP_MAX_ORD = 0x04,
	MODQP_WRITE_EC = 0x08,
	MODQP_READ_EC = 0x10,
};

enum t3_mpa_attrs {
	uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
	uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
	uP_RI_MPA_CRC_ENABLE = 0x4,
	uP_RI_MPA_IETF_ENABLE = 0x8
} __attribute__ ((packed));

enum t3_qp_caps {
	uP_RI_QP_RDMA_READ_ENABLE = 0x01,
	uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
	uP_RI_QP_BIND_ENABLE = 0x04,
	uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
	uP_RI_QP_STAG0_ENABLE = 0x10
} __attribute__ ((packed));

enum rdma_init_rtr_types {
	RTR_READ = 1,
	RTR_WRITE = 2,
	RTR_SEND = 3,
};

#define S_RTR_TYPE	2
#define M_RTR_TYPE	0x3
#define V_RTR_TYPE(x)	((x) << S_RTR_TYPE)
#define G_RTR_TYPE(x)	((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)

#define S_CHAN		4
#define M_CHAN		0x3
#define V_CHAN(x)	((x) << S_CHAN)
#define G_CHAN(x)	((((x) >> S_CHAN)) & M_CHAN)

struct t3_rdma_init_attr {
	u32 tid;
	u32 qpid;
	u32 pdid;
	u32 scqid;
	u32 rcqid;
	u32 rq_addr;
	u32 rq_size;
	enum t3_mpa_attrs mpaattrs;
	enum t3_qp_caps qpcaps;
	u16 tcp_emss;
	u32 ord;
	u32 ird;
	u64 qp_dma_addr;
	u32 qp_dma_size;
	enum rdma_init_rtr_types rtr_type;
	u16 flags;
	u16 rqe_count;
	u32 irs;
	u32 chan;
};

struct t3_rdma_init_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 qpid;		/* 2 */
	__be32 pdid;
	__be32 scqid;		/* 3 */
	__be32 rcqid;
	__be32 rq_addr;		/* 4 */
	__be32 rq_size;
	u8 mpaattrs;		/* 5 */
	u8 qpcaps;
	__be16 ulpdu_size;
	__be16 flags_rtr_type;
	__be16 rqe_count;
	__be32 ord;		/* 6 */
	__be32 ird;
	__be64 qp_dma_addr;	/* 7 */
	__be32 qp_dma_size;	/* 8 */
	__be32 irs;
};

struct t3_genbit {
	u64 flit[15];
	__be64 genbit;
};

struct t3_wq_in_err {
	u64 flit[13];
	u64 err;
};

enum rdma_init_wr_flags {
	MPA_INITIATOR = (1<<0),
	PRIV_QP = (1<<1),
};

union t3_wr {
	struct t3_send_wr send;
	struct t3_rdma_write_wr write;
	struct t3_rdma_read_wr read;
	struct t3_receive_wr recv;
	struct t3_fastreg_wr fastreg;
	struct t3_pbl_frag pbl_frag;
	struct t3_local_inv_wr local_inv;
	struct t3_bind_mw_wr bind;
	struct t3_bypass_wr bypass;
	struct t3_rdma_init_wr init;
	struct t3_modify_qp_wr qp_mod;
	struct t3_genbit genbit;
	struct t3_wq_in_err wq_in_err;
	__be64 flit[16];
};

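/*
 * Every work request occupies one 16-flit (128-byte) WQE slot, which is
 * why union t3_wr ends with __be64 flit[16] and why t3_genbit places
 * its generation bit in the last flit.  A hedged compile-time check
 * (illustrative; the driver does not carry this assert, and it assumes
 * BUILD_BUG_ON from <linux/bug.h> is reachable here):
 */
static inline void example_check_wqe_size(void)
{
	BUILD_BUG_ON(sizeof(union t3_wr) != 128);	/* 16 flits * 8 bytes */
}
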
#define T3_SQ_CQE_FLIT	  13
#define T3_SQ_COOKIE_FLIT 14

#define T3_RQ_COOKIE_FLIT 13
#define T3_RQ_CQE_FLIT	  14

static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
{
	return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
}

enum t3_wr_hdr_bits {
	T3_EOP = 1,
	T3_SOP = 2,
	T3_SOPEOP = T3_EOP | T3_SOP,
};

static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
				  enum t3_wr_flags flags, u8 genbit, u32 tid,
				  u8 len, u8 sopeop)
{
	wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
					 V_FW_RIWR_SOPEOP(sopeop) |
					 V_FW_RIWR_FLAGS(flags));
	wmb();
	wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
				       V_FW_RIWR_TID(tid) |
				       V_FW_RIWR_LEN(len));
	/* 2nd gen bit... */
	((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
}

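/*
 * A minimal sketch of the assumed posting flow (modeled on how the
 * cxgb3 iWARP code combines these helpers; the function and parameter
 * names below are illustrative, not part of the driver): fill the WQE
 * body first, write the header last via build_fw_riwrh() so the
 * generation bit flips only after the payload is visible (note the
 * wmb() above), then ring the doorbell.
 */
static inline void example_post_send(union t3_wr *wqe, u32 qpid, u8 genbit,
				     u8 len_in_flits, void __iomem *db)
{
	/* ... fill wqe->send here (rdmaop, plen, num_sgle, sgl[]) ... */
	build_fw_riwrh(&wqe->send.wrh, T3_WR_SEND, T3_COMPLETION_FLAG,
		       genbit, qpid, len_in_flits, T3_SOPEOP);
	ring_doorbell(db, qpid);
}
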
/*
 * T3 ULP2_TX commands
 */
enum t3_utx_mem_op {
	T3_UTX_MEM_READ = 2,
	T3_UTX_MEM_WRITE = 3
};

/* T3 MC7 RDMA TPT entry format */

enum tpt_mem_type {
	TPT_NON_SHARED_MR = 0x0,
	TPT_SHARED_MR = 0x1,
	TPT_MW = 0x2,
	TPT_MW_RELAXED_PROTECTION = 0x3
};

enum tpt_addr_type {
	TPT_ZBTO = 0,
	TPT_VATO = 1
};

enum tpt_mem_perm {
	TPT_MW_BIND = 0x10,
	TPT_LOCAL_READ = 0x8,
	TPT_LOCAL_WRITE = 0x4,
	TPT_REMOTE_READ = 0x2,
	TPT_REMOTE_WRITE = 0x1
};

struct tpt_entry {
	__be32 valid_stag_pdid;
	__be32 flags_pagesize_qpid;

	__be32 rsvd_pbl_addr;
	__be32 len;
	__be32 va_hi;
	__be32 va_low_or_fbo;

	__be32 rsvd_bind_cnt_or_pstag;
	__be32 rsvd_pbl_size;
};

#define S_TPT_VALID		31
#define V_TPT_VALID(x)		((x) << S_TPT_VALID)
#define F_TPT_VALID		V_TPT_VALID(1U)

#define S_TPT_STAG_KEY		23
#define M_TPT_STAG_KEY		0xFF
#define V_TPT_STAG_KEY(x)	((x) << S_TPT_STAG_KEY)
#define G_TPT_STAG_KEY(x)	(((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)

#define S_TPT_STAG_STATE	22
#define V_TPT_STAG_STATE(x)	((x) << S_TPT_STAG_STATE)
#define F_TPT_STAG_STATE	V_TPT_STAG_STATE(1U)

#define S_TPT_STAG_TYPE		20
#define M_TPT_STAG_TYPE		0x3
#define V_TPT_STAG_TYPE(x)	((x) << S_TPT_STAG_TYPE)
#define G_TPT_STAG_TYPE(x)	(((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)

#define S_TPT_PDID		0
#define M_TPT_PDID		0xFFFFF
#define V_TPT_PDID(x)		((x) << S_TPT_PDID)
#define G_TPT_PDID(x)		(((x) >> S_TPT_PDID) & M_TPT_PDID)

#define S_TPT_PERM		28
#define M_TPT_PERM		0xF
#define V_TPT_PERM(x)		((x) << S_TPT_PERM)
#define G_TPT_PERM(x)		(((x) >> S_TPT_PERM) & M_TPT_PERM)

#define S_TPT_REM_INV_DIS	27
#define V_TPT_REM_INV_DIS(x)	((x) << S_TPT_REM_INV_DIS)
#define F_TPT_REM_INV_DIS	V_TPT_REM_INV_DIS(1U)

#define S_TPT_ADDR_TYPE		26
#define V_TPT_ADDR_TYPE(x)	((x) << S_TPT_ADDR_TYPE)
#define F_TPT_ADDR_TYPE		V_TPT_ADDR_TYPE(1U)

#define S_TPT_MW_BIND_ENABLE	25
#define V_TPT_MW_BIND_ENABLE(x)	((x) << S_TPT_MW_BIND_ENABLE)
#define F_TPT_MW_BIND_ENABLE	V_TPT_MW_BIND_ENABLE(1U)

#define S_TPT_PAGE_SIZE		20
#define M_TPT_PAGE_SIZE		0x1F
#define V_TPT_PAGE_SIZE(x)	((x) << S_TPT_PAGE_SIZE)
#define G_TPT_PAGE_SIZE(x)	(((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)

#define S_TPT_PBL_ADDR		0
#define M_TPT_PBL_ADDR		0x1FFFFFFF
#define V_TPT_PBL_ADDR(x)	((x) << S_TPT_PBL_ADDR)
#define G_TPT_PBL_ADDR(x)	(((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)

#define S_TPT_QPID		0
#define M_TPT_QPID		0xFFFFF
#define V_TPT_QPID(x)		((x) << S_TPT_QPID)
#define G_TPT_QPID(x)		(((x) >> S_TPT_QPID) & M_TPT_QPID)

#define S_TPT_PSTAG		0
#define M_TPT_PSTAG		0xFFFFFF
#define V_TPT_PSTAG(x)		((x) << S_TPT_PSTAG)
#define G_TPT_PSTAG(x)		(((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)

#define S_TPT_PBL_SIZE		0
#define M_TPT_PBL_SIZE		0xFFFFF
#define V_TPT_PBL_SIZE(x)	((x) << S_TPT_PBL_SIZE)
#define G_TPT_PBL_SIZE(x)	(((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)

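/*
 * Sketch of how the TPT field macros compose the first word of a TPT
 * entry (illustrative helper only; the real composition is done in
 * cxio_hal.c and may differ in which fields it sets):
 */
static inline __be32 example_tpt_word0(u8 stag_key, u32 pdid)
{
	return cpu_to_be32(F_TPT_VALID |
			   V_TPT_STAG_KEY(stag_key) |
			   F_TPT_STAG_STATE |
			   V_TPT_STAG_TYPE(TPT_NON_SHARED_MR) |
			   V_TPT_PDID(pdid));
}
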
/*
 * CQE defs
 */
struct t3_cqe {
	__be32 header;
	__be32 len;
	union {
		struct {
			__be32 stag;
			__be32 msn;
		} rcqe;
		struct {
			u32 wrid_hi;
			u32 wrid_low;
		} scqe;
	} u;
};

#define S_CQE_OOO	  31
#define M_CQE_OOO	  0x1
#define G_CQE_OOO(x)	  ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
#define V_CEQ_OOO(x)	  ((x)<<S_CQE_OOO)

#define S_CQE_QPID	  12
#define M_CQE_QPID	  0x7FFFF
#define G_CQE_QPID(x)	  ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x)	  ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE	  11
#define M_CQE_SWCQE	  0x1
#define G_CQE_SWCQE(x)	  ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x)	  ((x)<<S_CQE_SWCQE)

#define S_CQE_GENBIT	  10
#define M_CQE_GENBIT	  0x1
#define G_CQE_GENBIT(x)	  (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x)	  ((x)<<S_CQE_GENBIT)

#define S_CQE_STATUS	  5
#define M_CQE_STATUS	  0x1F
#define G_CQE_STATUS(x)	  ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x)	  ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE	  4
#define M_CQE_TYPE	  0x1
#define G_CQE_TYPE(x)	  ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x)	  ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE	  0
#define M_CQE_OPCODE	  0xF
#define G_CQE_OPCODE(x)	  ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x)	  ((x)<<S_CQE_OPCODE)

#define SW_CQE(x)	  (G_CQE_SWCQE(be32_to_cpu((x).header)))
#define CQE_OOO(x)	  (G_CQE_OOO(be32_to_cpu((x).header)))
#define CQE_QPID(x)	  (G_CQE_QPID(be32_to_cpu((x).header)))
#define CQE_GENBIT(x)	  (G_CQE_GENBIT(be32_to_cpu((x).header)))
#define CQE_TYPE(x)	  (G_CQE_TYPE(be32_to_cpu((x).header)))
#define SQ_TYPE(x)	  (CQE_TYPE((x)))
#define RQ_TYPE(x)	  (!CQE_TYPE((x)))
#define CQE_STATUS(x)	  (G_CQE_STATUS(be32_to_cpu((x).header)))
#define CQE_OPCODE(x)	  (G_CQE_OPCODE(be32_to_cpu((x).header)))

#define CQE_SEND_OPCODE(x)( \
	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))

#define CQE_LEN(x)	  (be32_to_cpu((x).len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x)  (be32_to_cpu((x).u.rcqe.stag))
#define CQE_WRID_MSN(x)	  (be32_to_cpu((x).u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_WPTR(x)	((x).u.scqe.wrid_hi)
#define CQE_WRID_WPTR(x)	((x).u.scqe.wrid_low)

/* generic accessor macros */
#define CQE_WRID_HI(x)		((x).u.scqe.wrid_hi)
#define CQE_WRID_LOW(x)		((x).u.scqe.wrid_low)

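/*
 * Decoding sketch (illustrative helper, not part of the driver): every
 * field of a completion is packed into the big-endian 'header' word,
 * so a consumer typically pulls QP id, SQ/RQ type and status out of
 * one byte-swapped read via the macros above.
 */
static inline void example_decode_cqe(const struct t3_cqe *cqe,
				      u32 *qpid, int *is_sq, u8 *status)
{
	*qpid = CQE_QPID(*cqe);		/* which QP completed */
	*is_sq = SQ_TYPE(*cqe);		/* 1 = SQ completion, 0 = RQ */
	*status = CQE_STATUS(*cqe);	/* one of the TPT_ERR_* codes below */
}
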
#define TPT_ERR_SUCCESS                     0x0
#define TPT_ERR_STAG                        0x1	 /* STAG invalid: out of     */
						 /* range, zero, or STAG_key */
						 /* mismatch                 */
#define TPT_ERR_PDID                        0x2	 /* PDID mismatch */
#define TPT_ERR_QPID                        0x3	 /* QPID mismatch */
#define TPT_ERR_ACCESS                      0x4	 /* invalid access right */
#define TPT_ERR_WRAP                        0x5	 /* wrap error */
#define TPT_ERR_BOUND                       0x6	 /* base and bounds violation */
#define TPT_ERR_INVALIDATE_SHARED_MR        0x7	 /* attempt to invalidate a  */
						 /* shared memory region     */
#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	 /* attempt to invalidate an */
						 /* MR with an MW bound to it */
#define TPT_ERR_ECC                         0x9	 /* ECC error detected */
#define TPT_ERR_ECC_PSTAG                   0xA	 /* ECC error detected when  */
						 /* reading PSTAG for a MW   */
						 /* Invalidate               */
#define TPT_ERR_PBL_ADDR_BOUND              0xB	 /* pbl addr out of bounds:  */
						 /* software error           */
#define TPT_ERR_SWFLUSH                     0xC	 /* SW FLUSHED */
#define TPT_ERR_CRC                         0x10 /* CRC error */
#define TPT_ERR_MARKER                      0x11 /* Marker error */
#define TPT_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length */
#define TPT_ERR_OUT_OF_RQE                  0x13 /* out of RQE */
#define TPT_ERR_DDP_VERSION                 0x14 /* wrong DDP version */
#define TPT_ERR_RDMA_VERSION                0x15 /* wrong RDMA version */
#define TPT_ERR_OPCODE                      0x16 /* invalid rdma opcode */
#define TPT_ERR_DDP_QUEUE_NUM               0x17 /* invalid ddp queue number */
#define TPT_ERR_MSN                         0x18 /* MSN error */
#define TPT_ERR_TBIT                        0x19 /* tag bit not set correctly */
#define TPT_ERR_MO                          0x1A /* MO not 0 for TERMINATE   */
						 /* or READ_REQ              */
#define TPT_ERR_MSN_GAP                     0x1B
#define TPT_ERR_MSN_RANGE                   0x1C
#define TPT_ERR_IRD_OVERFLOW                0x1D
#define TPT_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds:  */
						 /* software error           */
#define TPT_ERR_INTERNAL_ERR                0x1F /* internal error (opcode   */
						 /* mismatch)                */

struct t3_swsq {
	__u64			wr_id;
	struct t3_cqe		cqe;
	__u32			sq_wptr;
	__be32			read_len;
	int			opcode;
	int			complete;
	int			signaled;
};

struct t3_swrq {
	__u64			wr_id;
	__u32			pbl_addr;
};

/*
 * A T3 WQ implements both the SQ and RQ.
 */
struct t3_wq {
	union t3_wr *queue;		/* DMA accessible memory */
	dma_addr_t dma_addr;		/* DMA address for HW */
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* unmap kruft */
	u32 error;			/* 1 once we go to ERROR */
	u32 qpid;
	u32 wptr;			/* idx to next available WR slot */
	u32 size_log2;			/* total wq size */
	struct t3_swsq *sq;		/* SW SQ */
	struct t3_swsq *oldest_read;	/* tracks oldest pending read */
	u32 sq_wptr;			/* sq_wptr - sq_rptr == count of */
	u32 sq_rptr;			/* pending wrs */
	u32 sq_size_log2;		/* sq size */
	struct t3_swrq *rq;		/* SW RQ (holds consumer wr_ids) */
	u32 rq_wptr;			/* rq_wptr - rq_rptr == count of */
	u32 rq_rptr;			/* pending wrs */
	struct t3_swrq *rq_oldest_wr;	/* oldest wr on the SW RQ */
	u32 rq_size_log2;		/* rq size */
	u32 rq_addr;			/* rq adapter address */
	void __iomem *doorbell;		/* kernel db */
	u64 udb;			/* user db if any */
	struct cxio_rdev *rdev;
};

struct t3_cq {
	u32 cqid;
	u32 rptr;
	u32 wptr;
	u32 size_log2;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t3_cqe *queue;
	struct t3_cqe *sw_queue;
	u32 sw_rptr;
	u32 sw_wptr;
};

#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
					 CQE_GENBIT(*cqe))

struct t3_cq_status_page {
	u32 cq_err;
};

static inline int cxio_cq_in_error(struct t3_cq *cq)
{
	return ((struct t3_cq_status_page *)
		&cq->queue[1 << cq->size_log2])->cq_err;
}

static inline void cxio_set_cq_in_error(struct t3_cq *cq)
{
	((struct t3_cq_status_page *)
	 &cq->queue[1 << cq->size_log2])->cq_err = 1;
}

static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
	wq->queue->wq_in_err.err |= 1;
}

static inline void cxio_disable_wq_db(struct t3_wq *wq)
{
	wq->queue->wq_in_err.err |= 2;
}

static inline void cxio_enable_wq_db(struct t3_wq *wq)
{
	wq->queue->wq_in_err.err &= ~2;
}

static inline int cxio_wq_db_enabled(struct t3_wq *wq)
{
	return !(wq->queue->wq_in_err.err & 2);
}

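/*
 * Note on the helpers above: t3_wq_in_err overlays WQE 0, so its 'err'
 * member (flit index 13) acts as a small flag word.  Reading the bit
 * operations directly: bit 0 marks the WQ as in error, and bit 1, when
 * set, suppresses doorbell rings.
 */
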
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
		return cqe;
	return NULL;
}

static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
		return cqe;
	}
	return NULL;
}

static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
		return cqe;
	}
	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
		return cqe;
	return NULL;
}

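/*
 * Polling sketch (illustrative helper, not part of the driver; the real
 * consumer lives in cxio_hal.c): software CQEs drain ahead of hardware
 * CQEs, and the caller advances the read pointer of whichever queue
 * produced the entry it consumed.
 */
static inline int example_poll_one(struct t3_cq *cq, struct t3_cqe *out)
{
	struct t3_cqe *cqe = cxio_next_cqe(cq);

	if (!cqe)
		return 0;			/* nothing ready */
	*out = *cqe;
	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr))
		cq->sw_rptr++;			/* consumed a SW CQE */
	else
		cq->rptr++;			/* consumed a HW CQE */
	return 1;
}
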
#endif