Linux Kernel 3.7.1
drivers/scsi/fnic/vnic_rq.h
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"

/*
 * These defines avoid symbol clashes between fnic and enic (the Cisco 10G
 * Ethernet driver) when both are built into the kernel (CONFIG options =y).
 */
#define vnic_rq_desc_avail fnic_rq_desc_avail
#define vnic_rq_desc_used fnic_rq_desc_used
#define vnic_rq_next_desc fnic_rq_next_desc
#define vnic_rq_next_index fnic_rq_next_index
#define vnic_rq_next_buf_index fnic_rq_next_buf_index
#define vnic_rq_post fnic_rq_post
#define vnic_rq_posting_soon fnic_rq_posting_soon
#define vnic_rq_return_descs fnic_rq_return_descs
#define vnic_rq_service fnic_rq_service
#define vnic_rq_fill fnic_rq_fill
#define vnic_rq_free fnic_rq_free
#define vnic_rq_alloc fnic_rq_alloc
#define vnic_rq_init fnic_rq_init
#define vnic_rq_error_status fnic_rq_error_status
#define vnic_rq_enable fnic_rq_enable
#define vnic_rq_disable fnic_rq_disable
#define vnic_rq_clean fnic_rq_clean
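
Because fnic and enic each carry their own copy of this vNIC library, the rename-by-macro block above gives fnic's copies distinct link-time symbols. A minimal sketch of the same trick, with hypothetical names that are not part of this file:

/* --- illustrative example, not part of vnic_rq.h --- */
/* shared_lib.h is compiled into two drivers in the same kernel image;
 * each driver prefixes its copy so the global symbols cannot collide. */
#ifdef BUILDING_FOO
#define shared_init foo_shared_init     /* foo's objects emit foo_shared_init */
#endif
#ifdef BUILDING_BAR
#define shared_init bar_shared_init     /* bar's objects emit bar_shared_init */
#endif
int shared_init(void);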

/* Receive queue control */
struct vnic_rq_ctrl {
        u64 ring_base;                  /* 0x00 */
        u32 ring_size;                  /* 0x08 */
        u32 pad0;
        u32 posted_index;               /* 0x10 */
        u32 pad1;
        u32 cq_index;                   /* 0x18 */
        u32 pad2;
        u32 enable;                     /* 0x20 */
        u32 pad3;
        u32 running;                    /* 0x28 */
        u32 pad4;
        u32 fetch_index;                /* 0x30 */
        u32 pad5;
        u32 error_interrupt_enable;     /* 0x38 */
        u32 pad6;
        u32 error_interrupt_offset;     /* 0x40 */
        u32 pad7;
        u32 error_status;               /* 0x48 */
        u32 pad8;
        u32 dropped_packet_count;       /* 0x50 */
        u32 pad9;
        u32 dropped_packet_count_rc;    /* 0x58 */
        u32 pad10;
};
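
Each register occupies the low 32 bits of an 8-byte slot; the padN fields supply the upper half, which is how the layout lands on the offsets noted in the comments. A quick compile-time check of that assumption (a sketch using C11 _Static_assert; in-kernel code of this vintage would typically use BUILD_BUG_ON instead):

/* --- illustrative example, not part of vnic_rq.h --- */
#include <stddef.h>

_Static_assert(offsetof(struct vnic_rq_ctrl, posted_index) == 0x10,
               "posted_index must sit at register offset 0x10");
_Static_assert(offsetof(struct vnic_rq_ctrl, error_status) == 0x48,
               "error_status must sit at register offset 0x48");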

/* Break the vnic_rq_buf allocations into blocks of 64 entries */
#define VNIC_RQ_BUF_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_SZ \
        (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
        DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
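
For the largest supported ring of 4096 descriptors this works out to DIV_ROUND_UP(4096, 64) = 64 buffer blocks, so the bufs[] array in struct vnic_rq below never holds more than 64 block pointers.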

struct vnic_rq_buf {
        struct vnic_rq_buf *next;
        dma_addr_t dma_addr;
        void *os_buf;
        unsigned int os_buf_index;
        unsigned int len;
        unsigned int index;
        void *desc;
};

struct vnic_rq {
        unsigned int index;
        struct vnic_dev *vdev;
        struct vnic_rq_ctrl __iomem *ctrl;      /* memory-mapped */
        struct vnic_dev_ring ring;
        struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
        struct vnic_rq_buf *to_use;
        struct vnic_rq_buf *to_clean;
        void *os_buf_head;
        unsigned int buf_index;
        unsigned int pkts_outstanding;
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
        /* how many does SW own? */
        return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
        /* how many does HW own? */
        return rq->ring.desc_count - rq->ring.desc_avail - 1;
}

static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
        return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
        return rq->to_use->index;
}

static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
{
        return rq->buf_index++;
}
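
The -1 in vnic_rq_desc_used() reflects one ring slot that is never handed to hardware (note that vnic_rq_fill() below likewise stops posting while only one free descriptor remains); keeping one slot empty is the usual way to tell a full ring from an empty one. As a worked example, assuming a freshly initialized 16-entry ring starts with desc_avail = 15: desc_used = 16 - 15 - 1 = 0, i.e. hardware owns nothing yet.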

static inline void vnic_rq_post(struct vnic_rq *rq,
        void *os_buf, unsigned int os_buf_index,
        dma_addr_t dma_addr, unsigned int len)
{
        struct vnic_rq_buf *buf = rq->to_use;

        buf->os_buf = os_buf;
        buf->os_buf_index = os_buf_index;
        buf->dma_addr = dma_addr;
        buf->len = len;

        buf = buf->next;
        rq->to_use = buf;
        rq->ring.desc_avail--;

        /* Move the posted_index every nth descriptor */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf         /* keep 2^n - 1 */
#endif

        if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
                /* Adding write memory barrier prevents compiler and/or CPU
                 * reordering, thus avoiding descriptor posting before
                 * descriptor is initialized. Otherwise, hardware can read
                 * stale descriptor fields.
                 */
                wmb();
                iowrite32(buf->index, &rq->ctrl->posted_index);
        }
}
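
Batching the posted_index update behind VNIC_RQ_RETURN_RATE (every 16th descriptor here) trades a little posting latency for far fewer MMIO writes. A caller typically formats the device descriptor returned by vnic_rq_next_desc() and then publishes it with vnic_rq_post(). The sketch below shows that pattern for an sk_buff-backed queue; struct example_priv, the 4096-byte length, and the error handling are assumptions, while rq_enet_desc_enc() and RQ_ENET_TYPE_ONLY_SOP come from the companion rq_enet_desc.h:

/* --- illustrative example, not part of vnic_rq.h --- */
#include <linux/skbuff.h>
#include "rq_enet_desc.h"       /* rq_enet_desc_enc(), RQ_ENET_TYPE_ONLY_SOP */

struct example_priv {                   /* hypothetical driver state */
        struct pci_dev *pdev;
};

/* Hypothetical fill callback: allocate an skb, DMA-map it, format the
 * descriptor in place, then publish it to the ring. */
static int example_rq_alloc_frame(struct vnic_rq *rq)
{
        struct example_priv *priv = vnic_dev_priv(rq->vdev);
        unsigned int len = 4096;        /* hypothetical frame size */
        struct sk_buff *skb = dev_alloc_skb(len);
        dma_addr_t pa;

        if (!skb)
                return -ENOMEM;

        pa = pci_map_single(priv->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(priv->pdev, pa)) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        /* format the hardware descriptor in place, then publish it */
        rq_enet_desc_enc(vnic_rq_next_desc(rq), pa,
                         RQ_ENET_TYPE_ONLY_SOP, len);
        vnic_rq_post(rq, skb, 0, pa, len);

        return 0;
}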

static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
{
        return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
}

static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
        rq->ring.desc_avail += count;
}

enum desc_return_options {
        VNIC_RQ_RETURN_DESC,
        VNIC_RQ_DEFER_RETURN_DESC,
};

static inline void vnic_rq_service(struct vnic_rq *rq,
        struct cq_desc *cq_desc, u16 completed_index,
        int desc_return, void (*buf_service)(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque), void *opaque)
{
        struct vnic_rq_buf *buf;
        int skipped;

        buf = rq->to_clean;
        while (1) {

                skipped = (buf->index != completed_index);

                (*buf_service)(rq, cq_desc, buf, skipped, opaque);

                if (desc_return == VNIC_RQ_RETURN_DESC)
                        rq->ring.desc_avail++;

                rq->to_clean = buf->next;

                if (!skipped)
                        break;

                buf = rq->to_clean;
        }
}
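
vnic_rq_service() walks the ring from to_clean up to the completed_index reported by the completion queue, invoking buf_service on each buffer; skipped is nonzero for buffers the hardware passed over before the one that completed. A sketch of a matching callback, paired with the example fill function above (all example_* names are hypothetical):

/* --- illustrative example, not part of vnic_rq.h --- */
/* Hypothetical completion callback: unmap the buffer and hand the skb up,
 * or drop it if the hardware skipped this descriptor. */
static void example_rq_buf_service(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
{
        struct example_priv *priv = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb = buf->os_buf;

        pci_unmap_single(priv->pdev, buf->dma_addr, buf->len,
                         PCI_DMA_FROMDEVICE);
        buf->os_buf = NULL;

        if (skipped) {
                dev_kfree_skb_any(skb);
                return;
        }

        example_deliver_frame(priv, skb, cq_desc);      /* hypothetical */
}

The completion path would then call something like vnic_rq_service(rq, cq_desc, completed_index, VNIC_RQ_RETURN_DESC, example_rq_buf_service, NULL), or pass VNIC_RQ_DEFER_RETURN_DESC and return descriptors in bulk later via vnic_rq_return_descs().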

static inline int vnic_rq_fill(struct vnic_rq *rq,
        int (*buf_fill)(struct vnic_rq *rq))
{
        int err;

        while (vnic_rq_desc_avail(rq) > 1) {

                err = (*buf_fill)(rq);
                if (err)
                        return err;
        }

        return 0;
}
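
vnic_rq_fill() repeatedly invokes the supplied allocator until only one free descriptor remains (see the note on vnic_rq_desc_used() above). With the hypothetical callback from the earlier sketch, a driver would replenish the ring after setup or after servicing completions with something like:

        err = vnic_rq_fill(rq, example_rq_alloc_frame);
        if (err)
                return err;     /* out of memory; caller retries later */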

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
        void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));

#endif /* _VNIC_RQ_H_ */
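
Taken together, the exported functions define the queue lifecycle: alloc, init, fill, and enable on the way up; disable, clean, and free on the way down. A sketch of that sequence, with the index, count, and size arguments chosen for illustration and example_rq_buf_clean() assumed as a hypothetical per-buffer cleanup callback:

/* --- illustrative example, not part of vnic_rq.h --- */
static int example_rq_setup(struct vnic_dev *vdev, struct vnic_rq *rq)
{
        int err;

        /* queue 0, 4096-entry ring of 16-byte descriptors (illustrative) */
        err = vnic_rq_alloc(vdev, rq, 0, 4096, 16);
        if (err)
                return err;

        /* completion queue 0, error interrupts disabled (illustrative) */
        vnic_rq_init(rq, 0, 0, 0);

        err = vnic_rq_fill(rq, example_rq_alloc_frame);
        if (err)
                goto err_free;

        vnic_rq_enable(rq);
        return 0;

err_free:
        vnic_rq_free(rq);
        return err;
}

static void example_rq_teardown(struct vnic_rq *rq)
{
        if (vnic_rq_disable(rq))
                return;         /* queue failed to stop; illustrative handling */
        vnic_rq_clean(rq, example_rq_buf_clean);        /* hypothetical */
        vnic_rq_free(rq);
}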