Linux Kernel 3.7.1
ehea_qmr.h
/*
 * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <[email protected]>
 *	Jan-Bernd Themann <[email protected]>
 *	Thomas Klein <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include <linux/prefetch.h>
#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT		12
#define EHEA_PAGESIZE		(1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE		(1UL << 24)
#define EHEA_PAGES_PER_SECTION	(EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT	34
#define EHEA_HUGEPAGE_SIZE	(1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK	((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
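
/*
 * Worked example (added for clarity, not in the original source): with
 * the values above, EHEA_PAGESIZE is 4 KiB, EHEA_SECTSIZE is 16 MiB,
 * and EHEA_PAGES_PER_SECTION is (1UL << 24) >> 12 = 4096 queue pages
 * per memory section. hw_qeit_calc() below splits a queue offset with
 * the same constants:
 *
 *	page_index     = q_offset >> EHEA_PAGESHIFT;
 *	in_page_offset = q_offset & (EHEA_PAGESIZE - 1);
 */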

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT	EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE		EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE		0x1
#define EHEA_SWQE3_TYPE		0x2
#define EHEA_RWQE2_TYPE		0x3
#define EHEA_RWQE3_TYPE		0x4
#define EHEA_WR_ID_INDEX	EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL	EHEA_BMASK_IBM(48, 63)
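
/*
 * Sketch (assumed usage, not from the original source): wr_id is a
 * packed 64-bit cookie. With the EHEA_BMASK_SET()/EHEA_BMASK_GET()
 * helpers from "ehea.h" it can be built and decoded along these lines;
 * "index" and "count" are hypothetical local variables:
 *
 *	u64 wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *	...
 *	int type  = EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id);
 *	int index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
 */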

struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES	252
#define SWQE2_MAX_IMM		(0xD0 - 0x30)
#define SWQE3_MAX_IMM		224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC			0x8000
#define EHEA_SWQE_IP_CHECKSUM		0x4000
#define EHEA_SWQE_TCP_CHECKSUM		0x2000
#define EHEA_SWQE_TSO			0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION	0x0800
#define EHEA_SWQE_VLAN_INSERT		0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT	0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT	0x0100
#define EHEA_SWQE_WRAP_CTL_REC		0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE	0x0040
#define EHEA_SWQE_BIND			0x0020
#define EHEA_SWQE_PURGE			0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE		32

struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 tcp_end;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/* Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/* Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 imm_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __packed;

		/* Send WQE Format 3 */
		struct {
			u8 imm_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};
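
/*
 * Sketch (assumed usage, not from the original source): a driver ORs
 * the tx control flags above into an swqe before posting it, e.g. for
 * a checksummed Format 2 send that requests a completion:
 *
 *	swqe->tx_control |= EHEA_SWQE_CRC
 *			 |  EHEA_SWQE_IP_CHECKSUM
 *			 |  EHEA_SWQE_TCP_CHECKSUM
 *			 |  EHEA_SWQE_IMM_DATA_PRESENT
 *			 |  EHEA_SWQE_SIGNALLED_COMPLETION;
 */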

struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT	0x0400

#define EHEA_CQE_TYPE_RQ		0x60
#define EHEA_CQE_STAT_ERR_MASK		0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK	0xF
#define EHEA_CQE_BLIND_CKSUM		0x8000
#define EHEA_CQE_STAT_ERR_TCP		0x4000
#define EHEA_CQE_STAT_ERR_IP		0x2000
#define EHEA_CQE_STAT_ERR_CRC		0x1000

/* Defines which bad send cqe statuses lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK	0x0002

struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};
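
/*
 * Sketch (assumed usage, not from the original source): a receive path
 * typically validates a polled CQE against the status masks above
 * before touching the data:
 *
 *	if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *		if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 *			...	// drop the frame, count a CRC error
 *		if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK)
 *			...	// fatal error: consider a port reset
 *	}
 */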

#define EHEA_EQE_VALID		EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE		EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER	EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER	EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY		EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER	EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID		EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER	EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)

#define EHEA_AER_RESTYPE_QP	0x8
#define EHEA_AER_RESTYPE_CQ	0x4
#define EHEA_AER_RESTYPE_EQ	0x3

/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK	0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK	0xFFFFFFFFFFFFFFFFULL

struct ehea_eqe {
	u64 entry;
};
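
/*
 * Sketch (assumed usage, not from the original source): event queue
 * entries are decoded with the same bit-mask helpers, e.g. in an EQ
 * interrupt handler:
 *
 *	struct ehea_eqe *eqe = ehea_poll_eq(eq);
 *
 *	while (eqe) {
 *		u32 token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
 *		...	// look up the QP by token, handle the event
 *		eqe = ehea_poll_eq(eq);
 *	}
 */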

#define ERROR_DATA_LENGTH	EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE		EHEA_BMASK_IBM(0, 7)

static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	hw_qeit_inc(queue);
	return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}
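
/*
 * Note (added for clarity, not in the original source): hardware
 * writes each entry's valid bit (bit 7 of the first byte) with its
 * current generation and flips that generation every time it wraps
 * the queue. The consumer mirrors this in queue->toggle_state, which
 * hw_qeit_inc() flips on wrap-around, so an entry is new exactly when
 *
 *	(entry->valid >> 7) == (queue->toggle_state & 1)
 *
 * This is why producer and consumer never exchange an explicit index.
 */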

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;
	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;
	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	iosync();
	ehea_update_sqa(my_qp, 1);
}
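
/*
 * Sketch (assumed usage, not from the original source): the transmit
 * path pairs the two helpers above, roughly:
 *
 *	int index;
 *	struct ehea_swqe *swqe = ehea_get_swqe(qp, &index);
 *
 *	...	// fill wr_id, tx_control, sg entries / immediate data
 *	ehea_post_swqe(qp, swqe);	// iosync(), then notify hw via sqa
 */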

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
	hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_valid(&my_cq->hw_queue);
}
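
/*
 * Sketch (assumed usage, not from the original source): a NAPI-style
 * completion loop built from the helpers above; "send_cq" is a
 * hypothetical struct ehea_cq pointer:
 *
 *	struct ehea_cqe *cqe = ehea_poll_cq(send_cq);
 *
 *	while (cqe) {
 *		ehea_inc_cq(send_cq);	// consume the entry
 *		...			// process cqe->wr_id, cqe->status
 *		cqe = ehea_poll_cq(send_cq);
 *	}
 */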

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
	EHEA_EQ = 0,		/* event queue */
	EHEA_NEQ		/* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

int ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr);

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif	/* __EHEA_QMR_H__ */