Linux Kernel 3.7.1
ehea_phyp.c
1 /*
2  * linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
3  *
4  * eHEA ethernet device driver for IBM eServer System p
5  *
6  * (C) Copyright IBM Corp. 2006
7  *
8  * Authors:
9  * Christoph Raisch <[email protected]>
10  * Jan-Bernd Themann <[email protected]>
11  * Thomas Klein <[email protected]>
12  *
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2, or (at your option)
17  * any later version.
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with this program; if not, write to the Free Software
26  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include "ehea_phyp.h"
32 
33 
34 static inline u16 get_order_of_qentries(u16 queue_entries)
35 {
36  u8 ld = 1; /* logarithmus dualis (binary logarithm) */
37  while (((1U << ld) - 1) < queue_entries)
38  ld++;
39  return ld - 1;
40 }
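A quick illustrative check of what get_order_of_qentries() computes (a standalone sketch, not part of the kernel file): for a nonzero queue_entries it returns the largest n with 2^n - 1 below queue_entries, i.e. the log2-style size encoding that ehea_h_alloc_resource_qp() below packs into R10.

/* Userspace re-implementation for illustration; mirrors the loop above. */
#include <assert.h>

static unsigned int order_of_qentries(unsigned int queue_entries)
{
	unsigned int ld = 1;	/* binary logarithm */

	while (((1U << ld) - 1) < queue_entries)
		ld++;
	return ld - 1;
}

int main(void)
{
	assert(order_of_qentries(1) == 0);
	assert(order_of_qentries(100) == 6);	/* (1 << 7) - 1 = 127 >= 100 */
	assert(order_of_qentries(128) == 7);	/* (1 << 8) - 1 = 255 >= 128 */
	return 0;
}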
41 
42 /* Defines for H_CALL H_ALLOC_RESOURCE */
43 #define H_ALL_RES_TYPE_QP 1
44 #define H_ALL_RES_TYPE_CQ 2
45 #define H_ALL_RES_TYPE_EQ 3
46 #define H_ALL_RES_TYPE_MR 5
47 #define H_ALL_RES_TYPE_MW 6
48 
49 static long ehea_plpar_hcall_norets(unsigned long opcode,
50  unsigned long arg1,
51  unsigned long arg2,
52  unsigned long arg3,
53  unsigned long arg4,
54  unsigned long arg5,
55  unsigned long arg6,
56  unsigned long arg7)
57 {
58  long ret;
59  int i, sleep_msecs;
60 
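	/* Retry the hcall up to five times while the hypervisor reports a
	 * long-busy condition, sleeping for the interval it suggests via
	 * get_longbusy_msecs(); if it is still busy after that, give up
	 * and return H_BUSY. */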
61  for (i = 0; i < 5; i++) {
62  ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
63  arg5, arg6, arg7);
64 
65  if (H_IS_LONG_BUSY(ret)) {
66  sleep_msecs = get_longbusy_msecs(ret);
67  msleep_interruptible(sleep_msecs);
68  continue;
69  }
70 
71  if (ret < H_SUCCESS)
72  pr_err("opcode=%lx ret=%lx"
73  " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
74  " arg5=%lx arg6=%lx arg7=%lx\n",
75  opcode, ret,
76  arg1, arg2, arg3, arg4, arg5, arg6, arg7);
77 
78  return ret;
79  }
80 
81  return H_BUSY;
82 }
83 
84 static long ehea_plpar_hcall9(unsigned long opcode,
85  unsigned long *outs, /* array of 9 outputs */
86  unsigned long arg1,
87  unsigned long arg2,
88  unsigned long arg3,
89  unsigned long arg4,
90  unsigned long arg5,
91  unsigned long arg6,
92  unsigned long arg7,
93  unsigned long arg8,
94  unsigned long arg9)
95 {
96  long ret;
97  int i, sleep_msecs;
98  u8 cb_cat;
99 
100  for (i = 0; i < 5; i++) {
101  ret = plpar_hcall9(opcode, outs,
102  arg1, arg2, arg3, arg4, arg5,
103  arg6, arg7, arg8, arg9);
104 
105  if (H_IS_LONG_BUSY(ret)) {
106  sleep_msecs = get_longbusy_msecs(ret);
107  msleep_interruptible(sleep_msecs);
108  continue;
109  }
110 
111  cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);
112 
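	/* Log failures, but stay quiet when H_MODIFY_HEA_PORT returns
	 * H_AUTHORITY for the CB4 jumbo-frame/port-speed or the CB7 DUCQPN
	 * attributes: the partition may simply not be authorized to change
	 * them. */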
113  if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY)
114  && (opcode == H_MODIFY_HEA_PORT))
115  && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
116  || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
117  && (arg3 == H_PORT_CB7_DUCQPN)))))
118  pr_err("opcode=%lx ret=%lx"
119  " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
120  " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
121  " arg9=%lx"
122  " out1=%lx out2=%lx out3=%lx out4=%lx"
123  " out5=%lx out6=%lx out7=%lx out8=%lx"
124  " out9=%lx\n",
125  opcode, ret,
126  arg1, arg2, arg3, arg4, arg5,
127  arg6, arg7, arg8, arg9,
128  outs[0], outs[1], outs[2], outs[3], outs[4],
129  outs[5], outs[6], outs[7], outs[8]);
130  return ret;
131  }
132 
133  return H_BUSY;
134 }
135 
136 u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
137  const u64 qp_handle, const u64 sel_mask, void *cb_addr)
138 {
139  return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
140  adapter_handle, /* R4 */
141  qp_category, /* R5 */
142  qp_handle, /* R6 */
143  sel_mask, /* R7 */
144  __pa(cb_addr), /* R8 */
145  0, 0);
146 }
147 
148 /* input param R5 */
149 #define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
150 #define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
151 #define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
152 #define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
153 #define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
154 #define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
155 #define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
156 #define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
157 #define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
158 #define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
159 
160 /* input param R9 */
161 #define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
162 #define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63)
163 
164 /* input param R10 */
165 #define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
166 #define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
167 #define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
168 #define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
169 /* Max Send Scatter Gather Elements */
170 #define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
171 #define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
172 /* Max Receive SG Elements RQ1 */
173 #define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
174 #define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
175 
176 /* input param R11 */
177 #define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
178 /* max swqe immediate data length */
179 #define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
180 
181 /* input param R12 */
182 #define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
183 /* Threshold RQ2 */
184 #define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
185 /* Threshold RQ3 */
186 
187 /* output param R6 */
188 #define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
189 #define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
190 #define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
191 #define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
192 
193 /* output param, R7 */
194 #define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
195 #define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
196 #define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
197 #define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
198 #define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
199 
200 /* output param R8,R9 */
201 #define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
202 #define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
203 #define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
204 #define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
205 
206 /* output param R11,R12 */
207 #define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
208 #define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
209 #define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
210 #define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
211 
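The EHEA_BMASK_IBM(from, to) ranges above use IBM bit numbering, with bit 0 as the most significant bit of the 64-bit hcall register and both ends inclusive. As an illustration (plain C, not part of the kernel file), and under that reading, packing R9 from H_ALL_RES_QP_TOKEN (bits 0-31) and H_ALL_RES_QP_PD (bits 32-63) with EHEA_BMASK_SET is equivalent to:

#include <assert.h>
#include <stdint.h>

/* Hand-rolled equivalent of
 * EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, qp_token) |
 * EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd),
 * assuming IBM bit numbering as described above. */
static uint64_t pack_r9(uint32_t qp_token, uint32_t pd)
{
	/* token occupies bits 0-31 (high word), pd bits 32-63 (low word) */
	return ((uint64_t)qp_token << 32) | pd;
}

int main(void)
{
	assert(pack_r9(0x1234, 0xabcd) == 0x000012340000abcdULL);
	return 0;
}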
212 u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
213  struct ehea_qp_init_attr *init_attr, const u32 pd,
214  u64 *qp_handle, struct h_epas *h_epas)
215 {
216  u64 hret;
217  unsigned long outs[PLPAR_HCALL9_BUFSIZE];
218 
219  u64 allocate_controls =
220      EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
221      | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
222      | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6) /* rq1 & rq2 & rq3 */
223      | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0) /* EQE gen. disabled */
224      | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
225      | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
226      | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
227      | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
228      | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);
229 
230  u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
231      | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);
232 
233  u64 max_r10_reg =
234      EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
235          get_order_of_qentries(init_attr->max_nr_send_wqes))
236      | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
237          get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
238      | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
239          get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
240      | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
241          get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
242      | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
243      | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
244          init_attr->wqe_size_enc_rq1)
245      | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
246          init_attr->wqe_size_enc_rq2)
247      | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
248          init_attr->wqe_size_enc_rq3);
249 
250  u64 r11_in =
251      EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
252      | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
253  u64 threshold =
254      EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
255      | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
256 
257  hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
258  outs,
259  adapter_handle, /* R4 */
260  allocate_controls, /* R5 */
261  init_attr->send_cq_handle, /* R6 */
262  init_attr->recv_cq_handle, /* R7 */
263  init_attr->aff_eq_handle, /* R8 */
264  r9_reg, /* R9 */
265  max_r10_reg, /* R10 */
266  r11_in, /* R11 */
267  threshold); /* R12 */
268 
269  *qp_handle = outs[0];
270  init_attr->qp_nr = (u32)outs[1];
271 
272  init_attr->act_nr_send_wqes =
273      (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
274  init_attr->act_nr_rwqes_rq1 =
275      (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
276  init_attr->act_nr_rwqes_rq2 =
277      (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
278  init_attr->act_nr_rwqes_rq3 =
279      (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);
280 
281  init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
282  init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
283  init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
284  init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
285 
286  init_attr->nr_sq_pages =
287      (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
288  init_attr->nr_rq1_pages =
289      (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
290  init_attr->nr_rq2_pages =
291      (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
292  init_attr->nr_rq3_pages =
293      (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);
294 
295  init_attr->liobn_sq =
296      (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
297  init_attr->liobn_rq1 =
298      (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
299  init_attr->liobn_rq2 =
300      (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
301  init_attr->liobn_rq3 =
302      (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);
303 
304  if (!hret)
305  hcp_epas_ctor(h_epas, outs[6], outs[6]);
306 
307  return hret;
308 }
309 
310 u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
311  struct ehea_cq_attr *cq_attr,
312  u64 *cq_handle, struct h_epas *epas)
313 {
314  u64 hret;
315  unsigned long outs[PLPAR_HCALL9_BUFSIZE];
316 
317  hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
318  outs,
319  adapter_handle, /* R4 */
320  H_ALL_RES_TYPE_CQ, /* R5 */
321  cq_attr->eq_handle, /* R6 */
322  cq_attr->cq_token, /* R7 */
323  cq_attr->max_nr_of_cqes, /* R8 */
324  0, 0, 0, 0); /* R9-R12 */
325 
326  *cq_handle = outs[0];
327  cq_attr->act_nr_of_cqes = outs[3];
328  cq_attr->nr_pages = outs[4];
329 
330  if (!hret)
331  hcp_epas_ctor(epas, outs[5], outs[6]);
332 
333  return hret;
334 }
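A hypothetical caller sketch (not from this file) showing how the CQ allocation above is typically driven; the adapter and EQ handles, the token value, and the requested depth are illustrative assumptions, and H_SUCCESS is the usual zero return code:

static int example_create_cq(u64 adapter_handle, u64 eq_handle)
{
	struct ehea_cq_attr cq_attr = {
		.eq_handle = eq_handle,
		.cq_token = 42,			/* arbitrary demo token */
		.max_nr_of_cqes = 1024,		/* requested CQ depth */
	};
	struct h_epas epas;
	u64 cq_handle;
	u64 hret;

	hret = ehea_h_alloc_resource_cq(adapter_handle, &cq_attr,
					&cq_handle, &epas);
	if (hret != H_SUCCESS)
		return -EIO;

	/* The hypervisor reports the depth it actually granted and the
	 * number of backing pages to register next. */
	pr_info("cq: act_nr_of_cqes=%u nr_pages=%u\n",
		cq_attr.act_nr_of_cqes, cq_attr.nr_pages);
	return 0;
}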
335 
336 /* Defines for H_CALL H_ALLOC_RESOURCE */
337 #define H_ALL_RES_TYPE_QP 1
338 #define H_ALL_RES_TYPE_CQ 2
339 #define H_ALL_RES_TYPE_EQ 3
340 #define H_ALL_RES_TYPE_MR 5
341 #define H_ALL_RES_TYPE_MW 6
342 
343 /* input param R5 */
344 #define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
345 #define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
346 #define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
347 #define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
348 /* input param R6 */
349 #define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
350 
351 /* output param R6 */
352 #define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
353 
354 /* output param R7 */
355 #define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
356 
357 /* output param R8 */
358 #define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
359 
360 /* output param R9 */
361 #define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
362 #define H_ALL_RES_EQ_ACT_EQ_IST_1 EHEA_BMASK_IBM(40, 63)
363 
364 /* output param R10 */
365 #define H_ALL_RES_EQ_ACT_EQ_IST_2 EHEA_BMASK_IBM(40, 63)
366 
367 /* output param R11 */
368 #define H_ALL_RES_EQ_ACT_EQ_IST_3 EHEA_BMASK_IBM(40, 63)
369 
370 /* output param R12 */
371 #define H_ALL_RES_EQ_ACT_EQ_IST_4 EHEA_BMASK_IBM(40, 63)
372 
373 u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
374  struct ehea_eq_attr *eq_attr, u64 *eq_handle)
375 {
376  u64 hret, allocate_controls;
377  unsigned long outs[PLPAR_HCALL9_BUFSIZE];
378 
379  /* resource type */
380  allocate_controls =
381      EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
382      | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
383      | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
384      | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
385 
386  hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
387  outs,
388  adapter_handle, /* R4 */
389  allocate_controls, /* R5 */
390  eq_attr->max_nr_of_eqes, /* R6 */
391  0, 0, 0, 0, 0, 0); /* R7-R12 */
392 
393  *eq_handle = outs[0];
394  eq_attr->act_nr_of_eqes = outs[3];
395  eq_attr->nr_pages = outs[4];
396  eq_attr->ist1 = outs[5];
397  eq_attr->ist2 = outs[6];
398  eq_attr->ist3 = outs[7];
399  eq_attr->ist4 = outs[8];
400 
401  return hret;
402 }
403 
404 u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
405  const u64 qp_handle, const u64 sel_mask,
406  void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
407  u16 *out_swr, u16 *out_rwr)
408 {
409  u64 hret;
410  unsigned long outs[PLPAR_HCALL9_BUFSIZE];
411 
412  hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
413  outs,
414  adapter_handle, /* R4 */
415  (u64) cat, /* R5 */
416  qp_handle, /* R6 */
417  sel_mask, /* R7 */
418  __pa(cb_addr), /* R8 */
419  0, 0, 0, 0); /* R9-R12 */
420 
421  *inv_attr_id = outs[0];
422  *out_swr = outs[3];
423  *out_rwr = outs[4];
424  *proc_mask = outs[5];
425 
426  return hret;
427 }
428 
429 u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
430  const u8 queue_type, const u64 resource_handle,
431  const u64 log_pageaddr, u64 count)
432 {
433  u64 reg_control;
434 
435  reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
436  | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
437 
438  return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
439  adapter_handle, /* R4 */
440  reg_control, /* R5 */
441  resource_handle, /* R6 */
442  log_pageaddr, /* R7 */
443  count, /* R8 */
444  0, 0); /* R9-R10 */
445 }
446 
447 u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
448  const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
449  struct ehea_mr *mr)
450 {
451  u64 hret;
452  unsigned long outs[PLPAR_HCALL9_BUFSIZE];
453 
454  hret = ehea_plpar_hcall9(H_REGISTER_SMR,
455  outs,
456  adapter_handle , /* R4 */
457  orig_mr_handle, /* R5 */
458  vaddr_in, /* R6 */
459  (((u64)access_ctrl) << 32ULL), /* R7 */
460  pd, /* R8 */
461  0, 0, 0, 0); /* R9-R12 */
462 
463  mr->handle = outs[0];
464  mr->lkey = (u32)outs[2];
465 
466  return hret;
467 }
468 
469 u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
470 {
471  unsigned long outs[PLPAR_HCALL9_BUFSIZE];
472 
473  return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
474  outs,
475  adapter_handle, /* R4 */
476  H_DISABLE_GET_EHEA_WQE_P, /* R5 */
477  qp_handle, /* R6 */
478  0, 0, 0, 0, 0, 0); /* R7-R12 */
479 }
480 
481 u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
482  u64 force_bit)
483 {
484  return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
485  adapter_handle, /* R4 */
486  res_handle, /* R5 */
487  force_bit, /* R6 */
488  0, 0, 0, 0); /* R7-R10 */
489 }
490 
491 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
492  const u64 length, const u32 access_ctrl,
493  const u32 pd, u64 *mr_handle, u32 *lkey)
494 {
495  u64 hret;
496  unsigned long outs[PLPAR_HCALL9_BUFSIZE];
497 
498  hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
499  outs,
500  adapter_handle, /* R4 */
501  H_ALL_RES_TYPE_MR, /* R5 */
502  vaddr, /* R6 */
503  length, /* R7 */
504  (((u64) access_ctrl) << 32ULL), /* R8 */
505  pd, /* R9 */
506  0, 0, 0); /* R10-R12 */
507 
508  *mr_handle = outs[0];
509  *lkey = (u32)outs[2];
510  return hret;
511 }
512 
513 u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
514  const u8 pagesize, const u8 queue_type,
515  const u64 log_pageaddr, const u64 count)
516 {
517  if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
518  pr_err("not on pageboundary\n");
519  return H_PARAMETER;
520  }
521 
522  return ehea_h_register_rpage(adapter_handle, pagesize,
523  queue_type, mr_handle,
524  log_pageaddr, count);
525 }
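/* Example, assuming 4 KiB pages (so ~PAGE_MASK == 0xfff): a multi-page
 * registration starting at log_pageaddr == 0x10000200 is rejected with
 * H_PARAMETER because 0x10000200 & 0xfff == 0x200 is nonzero, while a
 * page-aligned start such as 0x10000000 is passed straight through to
 * ehea_h_register_rpage(). */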
526 
527 u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
528 {
529  u64 hret, cb_logaddr;
530 
531  cb_logaddr = __pa(cb_addr);
532 
533  hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
534  adapter_handle, /* R4 */
535  cb_logaddr, /* R5 */
536  0, 0, 0, 0, 0); /* R6-R10 */
537 #ifdef DEBUG
538  ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
539 #endif
540  return hret;
541 }
542 
543 u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
544  const u8 cb_cat, const u64 select_mask,
545  void *cb_addr)
546 {
547  u64 port_info;
548  u64 cb_logaddr = __pa(cb_addr);
549  u64 arr_index = 0;
550 
551  port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
552  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
553 
554  return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
555  adapter_handle, /* R4 */
556  port_info, /* R5 */
557  select_mask, /* R6 */
558  arr_index, /* R7 */
559  cb_logaddr, /* R8 */
560  0, 0); /* R9-R10 */
561 }
562 
563 u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
564  const u8 cb_cat, const u64 select_mask,
565  void *cb_addr)
566 {
567  unsigned long outs[PLPAR_HCALL9_BUFSIZE];
568  u64 port_info;
569  u64 arr_index = 0;
570  u64 cb_logaddr = __pa(cb_addr);
571 
572  port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
573  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
574 #ifdef DEBUG
575  ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
576 #endif
577  return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
578  outs,
579  adapter_handle, /* R4 */
580  port_info, /* R5 */
581  select_mask, /* R6 */
582  arr_index, /* R7 */
583  cb_logaddr, /* R8 */
584  0, 0, 0, 0); /* R9-R12 */
585 }
586 
587 u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
588  const u8 reg_type, const u64 mc_mac_addr,
589  const u16 vlan_id, const u32 hcall_id)
590 {
591  u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
592  u64 mac_addr = mc_mac_addr >> 16;
593 
594  r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
595  r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
596  r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
597  r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
598 
599  return ehea_plpar_hcall_norets(hcall_id,
600  adapter_handle, /* R4 */
601  r5_port_num, /* R5 */
602  r6_reg_type, /* R6 */
603  r7_mc_mac_addr, /* R7 */
604  r8_vlan_id, /* R8 */
605  0, 0); /* R9-R10 */
606 }
607 
608 u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
609  const u64 event_mask)
610 {
611  return ehea_plpar_hcall_norets(H_RESET_EVENTS,
612  adapter_handle, /* R4 */
613  neq_handle, /* R5 */
614  event_mask, /* R6 */
615  0, 0, 0, 0); /* R7-R10 */
616 }
617 
618 u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
619  void *rblock)
620 {
621  return ehea_plpar_hcall_norets(H_ERROR_DATA,
622  adapter_handle, /* R4 */
623  ressource_handle, /* R5 */
624  __pa(rblock), /* R6 */
625  0, 0, 0, 0); /* R7-R10 */
626 }