/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <[email protected]>
 *       Jan-Bernd Themann <[email protected]>
 *       Thomas Klein <[email protected]>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

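/*
 * Return the current queue page and advance the iterator by one page.
 * Returns NULL when the end of the queue has been reached or when the
 * entry is not aligned to EHEA_PAGESIZE, which would indicate a
 * corrupted queue layout.
 */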
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);

        queue->current_q_offset += queue->pagesize;
        if (queue->current_q_offset > queue->queue_length) {
                queue->current_q_offset -= queue->pagesize;
                retvalue = NULL;
        } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
                pr_err("not on pageboundary\n");
                retvalue = NULL;
        }
        return retvalue;
}

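/*
 * Allocate the backing store for a hardware queue: nr_of_pages queue
 * pages of pagesize bytes, carved out of zeroed kernel pages.  Since
 * pagesize <= PAGE_SIZE, each kernel page holds PAGE_SIZE / pagesize
 * queue pages, so every pages_per_kpage-th entry of queue_pages[] is
 * also the start of a kernel page - the destructor relies on this.
 */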
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
                         const u32 pagesize, const u32 qe_size)
{
        int pages_per_kpage = PAGE_SIZE / pagesize;
        int i, k;

        if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
                pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
                       (int)PAGE_SIZE, (int)pagesize);
                return -EINVAL;
        }

        queue->queue_length = nr_of_pages * pagesize;
        /* zeroed allocation, so the error path below can spot unset entries */
        queue->queue_pages = kcalloc(nr_of_pages, sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages) {
                pr_err("no mem for queue_pages\n");
                return -ENOMEM;
        }

        /*
         * allocate pages for queue:
         * outer loop allocates whole kernel pages (page aligned) and
         * inner loop divides a kernel page into smaller hea queue pages
         */
        i = 0;
        while (i < nr_of_pages) {
                u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (!kpage)
                        goto out_nomem;
                for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
                        (queue->queue_pages)[i] = (struct ehea_page *)kpage;
                        kpage += pagesize;
                        i++;
                }
        }

        queue->current_q_offset = 0;
        queue->qe_size = qe_size;
        queue->pagesize = pagesize;
        queue->toggle_state = 1;

        return 0;
out_nomem:
        for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
                if (!(queue->queue_pages)[i])
                        break;
                free_page((unsigned long)(queue->queue_pages)[i]);
        }
        return -ENOMEM;
}

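/*
 * Free a queue built by hw_queue_ctor().  Only every
 * pages_per_kpage-th entry of queue_pages[] starts a kernel page, so
 * only those are handed back to free_page().
 */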
static void hw_queue_dtor(struct hw_queue *queue)
{
        int pages_per_kpage;
        int i, nr_pages;

        if (!queue || !queue->queue_pages)
                return;

        pages_per_kpage = PAGE_SIZE / queue->pagesize;
        nr_pages = queue->queue_length / queue->pagesize;

        for (i = 0; i < nr_pages; i += pages_per_kpage)
                free_page((unsigned long)(queue->queue_pages)[i]);

        kfree(queue->queue_pages);
}

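/*
 * Create a completion queue: allocate the firmware CQ resource, build
 * the page-backed hw_queue and register every queue page with the
 * hypervisor.  Intermediate pages are expected to complete with
 * H_PAGE_REGISTERED and the last one with H_SUCCESS; the extra
 * hw_qpageit_get_inc() after the last page checks that the iterator
 * is really exhausted.
 */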
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
                               int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
        struct ehea_cq *cq;
        struct h_epa epa;
        u64 *cq_handle_ref, hret, rpage;
        u32 act_nr_of_entries, act_pages, counter;
        int ret;
        void *vpage;

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                pr_err("no mem for cq\n");
                goto out_nomem;
        }

        cq->attr.max_nr_of_cqes = nr_of_cqe;
        cq->attr.cq_token = cq_token;
        cq->attr.eq_handle = eq_handle;

        cq->adapter = adapter;

        cq_handle_ref = &cq->fw_handle;
        act_nr_of_entries = 0;
        act_pages = 0;

        hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
                                        &cq->fw_handle, &cq->epas);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_cq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_cqe));
        if (ret)
                goto out_freeres;

        for (counter = 0; counter < cq->attr.nr_pages; counter++) {
                vpage = hw_qpageit_get_inc(&cq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }

                rpage = __pa(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, EHEA_CQ_REGISTER_ORIG,
                                             cq->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
                               cq, hret, counter, cq->attr.nr_pages);
                        goto out_kill_hwq;
                }

                if (counter == (cq->attr.nr_pages - 1)) {
                        vpage = hw_qpageit_get_inc(&cq->hw_queue);

                        if ((hret != H_SUCCESS) || (vpage)) {
                                pr_err("registration of pages not complete hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                } else {
                        if (hret != H_PAGE_REGISTERED) {
                                pr_err("CQ: registration of page failed hret=%llx\n",
                                       hret);
                                goto out_kill_hwq;
                        }
                }
        }

        hw_qeit_reset(&cq->hw_queue);
        epa = cq->epas.kernel;
        ehea_reset_cq_ep(cq);
        ehea_reset_cq_n1(cq);

        return cq;

out_kill_hwq:
        hw_queue_dtor(&cq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(cq);

out_nomem:
        return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;

        /* deregister all previously registered pages */
        hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&cq->hw_queue);
        kfree(cq);

        return hret;
}

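/*
 * Destroying a resource is first attempted with NORMAL_FREE; if the
 * hypervisor answers H_R_STATE, the error data is fetched for
 * diagnostics and the free is retried with FORCE_FREE.  EQs and QPs
 * below follow the same pattern.
 */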
int ehea_destroy_cq(struct ehea_cq *cq)
{
        u64 hret, aer, aerr;
        if (!cq)
                return 0;

        hcp_epas_dtor(&cq->epas);
        hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_cq_res(cq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy CQ failed\n");
                return -EIO;
        }

        return 0;
}

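/*
 * Event queue creation mirrors ehea_create_cq(): allocate the
 * firmware EQ resource, build the page-backed hw_queue and register
 * every page with the hypervisor before resetting the queue iterator.
 */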
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               const enum ehea_eq_type type,
                               const u32 max_nr_of_eqes, const u8 eqe_gen)
{
        int ret, i;
        u64 hret, rpage;
        void *vpage;
        struct ehea_eq *eq;

        eq = kzalloc(sizeof(*eq), GFP_KERNEL);
        if (!eq) {
                pr_err("no mem for eq\n");
                return NULL;
        }

        eq->adapter = adapter;
        eq->attr.type = type;
        eq->attr.max_nr_of_eqes = max_nr_of_eqes;
        eq->attr.eqe_gen = eqe_gen;
        spin_lock_init(&eq->spinlock);

        hret = ehea_h_alloc_resource_eq(adapter->handle,
                                        &eq->attr, &eq->fw_handle);
        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_eq failed\n");
                goto out_freemem;
        }

        ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
                            EHEA_PAGESIZE, sizeof(struct ehea_eqe));
        if (ret) {
                pr_err("can't allocate eq pages\n");
                goto out_freeres;
        }

        for (i = 0; i < eq->attr.nr_pages; i++) {
                vpage = hw_qpageit_get_inc(&eq->hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        hret = H_RESOURCE;
                        goto out_kill_hwq;
                }

                rpage = __pa(vpage);

                hret = ehea_h_register_rpage(adapter->handle, 0,
                                             EHEA_EQ_REGISTER_ORIG,
                                             eq->fw_handle, rpage, 1);

                if (i == (eq->attr.nr_pages - 1)) {
                        /* last page */
                        vpage = hw_qpageit_get_inc(&eq->hw_queue);
                        if ((hret != H_SUCCESS) || (vpage))
                                goto out_kill_hwq;
                } else {
                        if (hret != H_PAGE_REGISTERED)
                                goto out_kill_hwq;
                }
        }

        hw_qeit_reset(&eq->hw_queue);
        return eq;

out_kill_hwq:
        hw_queue_dtor(&eq->hw_queue);

out_freeres:
        ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
        kfree(eq);
        return NULL;
}

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
        struct ehea_eqe *eqe;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);
        eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
        u64 hret;
        unsigned long flags;

        spin_lock_irqsave(&eq->spinlock, flags);

        hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
        spin_unlock_irqrestore(&eq->spinlock, flags);

        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&eq->hw_queue);
        kfree(eq);

        return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
        u64 hret, aer, aerr;
        if (!eq)
                return 0;

        hcp_epas_dtor(&eq->epas);

        hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
                hret = ehea_destroy_eq_res(eq, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy EQ failed\n");
                return -EIO;
        }

        return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
                                  int nr_pages, int wqe_size, int act_nr_sges,
                                  struct ehea_adapter *adapter, int h_call_q_selector)
{
        u64 hret, rpage;
        int ret, cnt;
        void *vpage;

        ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
        if (ret)
                return ret;

        for (cnt = 0; cnt < nr_pages; cnt++) {
                vpage = hw_qpageit_get_inc(hw_queue);
                if (!vpage) {
                        pr_err("hw_qpageit_get_inc failed\n");
                        goto out_kill_hwq;
                }
                rpage = __pa(vpage);
                hret = ehea_h_register_rpage(adapter->handle,
                                             0, h_call_q_selector,
                                             qp->fw_handle, rpage, 1);
                if (hret < H_SUCCESS) {
                        pr_err("register_rpage_qp failed\n");
                        goto out_kill_hwq;
                }
        }
        hw_qeit_reset(hw_queue);
        return 0;

out_kill_hwq:
        hw_queue_dtor(hw_queue);
        return -EIO;
}

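/*
 * WQE sizes are passed around as an encoded exponent: the size in
 * bytes is 128 << wqe_enc_size, so e.g. an encoding of 2 means
 * 512-byte WQEs.
 */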
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
        return 128 << wqe_enc_size;
}

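/*
 * Create a queue pair: one send queue and, depending on
 * init_attr->rq_count, up to three receive queues.  Each queue is
 * built and registered through ehea_qp_alloc_register(); the selector
 * argument (0 for the SQ, 1..3 for RQ1..RQ3) tells the hypervisor
 * which of the QP's queues the pages belong to.
 */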
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
                               u32 pd, struct ehea_qp_init_attr *init_attr)
{
        int ret;
        u64 hret;
        struct ehea_qp *qp;
        u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
        u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                pr_err("no mem for qp\n");
                return NULL;
        }

        qp->adapter = adapter;

        hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
                                        &qp->fw_handle, &qp->epas);
        if (hret != H_SUCCESS) {
                pr_err("ehea_h_alloc_resource_qp failed\n");
                goto out_freemem;
        }

        wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
        wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
        wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
        wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

        ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
                                     wqe_size_in_bytes_sq,
                                     init_attr->act_wqe_size_enc_sq, adapter,
                                     0);
        if (ret) {
                pr_err("can't register for sq ret=%x\n", ret);
                goto out_freeres;
        }

        ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
                                     init_attr->nr_rq1_pages,
                                     wqe_size_in_bytes_rq1,
                                     init_attr->act_wqe_size_enc_rq1,
                                     adapter, 1);
        if (ret) {
                pr_err("can't register for rq1 ret=%x\n", ret);
                goto out_kill_hwsq;
        }

        if (init_attr->rq_count > 1) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
                                             init_attr->nr_rq2_pages,
                                             wqe_size_in_bytes_rq2,
                                             init_attr->act_wqe_size_enc_rq2,
                                             adapter, 2);
                if (ret) {
                        pr_err("can't register for rq2 ret=%x\n", ret);
                        goto out_kill_hwr1q;
                }
        }

        if (init_attr->rq_count > 2) {
                ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
                                             init_attr->nr_rq3_pages,
                                             wqe_size_in_bytes_rq3,
                                             init_attr->act_wqe_size_enc_rq3,
                                             adapter, 3);
                if (ret) {
                        pr_err("can't register for rq3 ret=%x\n", ret);
                        goto out_kill_hwr2q;
                }
        }

        qp->init_attr = *init_attr;

        return qp;

out_kill_hwr2q:
        hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
        hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
        hw_queue_dtor(&qp->hw_squeue);

out_freeres:
        ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
        ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
        kfree(qp);
        return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
        u64 hret;
        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

        ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
        hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
        if (hret != H_SUCCESS)
                return hret;

        hw_queue_dtor(&qp->hw_squeue);
        hw_queue_dtor(&qp->hw_rqueue1);

        if (qp_attr->rq_count > 1)
                hw_queue_dtor(&qp->hw_rqueue2);
        if (qp_attr->rq_count > 2)
                hw_queue_dtor(&qp->hw_rqueue3);
        kfree(qp);

        return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
        u64 hret, aer, aerr;
        if (!qp)
                return 0;

        hcp_epas_dtor(&qp->epas);

        hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
        if (hret == H_R_STATE) {
                ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
                hret = ehea_destroy_qp_res(qp, FORCE_FREE);
        }

        if (hret != H_SUCCESS) {
                pr_err("destroy QP failed\n");
                return -EIO;
        }

        return 0;
}

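/*
 * The busmap translates kernel addresses into the flat bus address
 * space the HEA expects.  It is a three-level radix tree over memory
 * section numbers: top and dir levels are allocated on demand, and
 * each leaf entry (ent[]) holds the bus address assigned to one
 * EHEA_SECTSIZE sized section.  Every level is indexed by an
 * EHEA_INDEX_MASK wide slice of the section number.
 */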
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
        return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
                                     int dir)
{
        if (!ehea_top_bmap->dir[dir]) {
                ehea_top_bmap->dir[dir] =
                        kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
                if (!ehea_top_bmap->dir[dir])
                        return -ENOMEM;
        }
        return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
        if (!ehea_bmap->top[top]) {
                ehea_bmap->top[top] =
                        kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
                if (!ehea_bmap->top[top])
                        return -ENOMEM;
        }
        return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

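/*
 * Walk the whole busmap and assign ascending bus addresses, starting
 * at EHEA_BUSMAP_START, to all valid sections.  Directories and top
 * entries without any remaining valid section are freed, keeping the
 * map compact after sections have been removed.
 */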
static void ehea_rebuild_busmap(void)
{
        u64 vaddr = EHEA_BUSMAP_START;
        int top, dir, idx;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                struct ehea_top_bmap *ehea_top;
                int valid_dir_entries = 0;

                if (!ehea_bmap->top[top])
                        continue;
                ehea_top = ehea_bmap->top[top];
                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        struct ehea_dir_bmap *ehea_dir;
                        int valid_entries = 0;

                        if (!ehea_top->dir[dir])
                                continue;
                        valid_dir_entries++;
                        ehea_dir = ehea_top->dir[dir];
                        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                                if (!ehea_dir->ent[idx])
                                        continue;
                                valid_entries++;
                                ehea_dir->ent[idx] = vaddr;
                                vaddr += EHEA_SECTSIZE;
                        }
                        if (!valid_entries) {
                                ehea_top->dir[dir] = NULL;
                                kfree(ehea_dir);
                        }
                }
                if (!valid_dir_entries) {
                        ehea_bmap->top[top] = NULL;
                        kfree(ehea_top);
                }
        }
}

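/*
 * Mark the sections covering pfn..pfn+nr_pages as valid (add) or
 * invalid (remove), then rebuild the bus address assignment.
 * ehea_mr_len tracks the total mapped length and later becomes the
 * size of the kernel memory region.
 */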
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
        unsigned long i, start_section, end_section;

        if (!nr_pages)
                return 0;

        if (!ehea_bmap) {
                ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
                if (!ehea_bmap)
                        return -ENOMEM;
        }

        start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
        end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
        /* Mark entries as valid or invalid only; address is assigned later */
        for (i = start_section; i < end_section; i++) {
                u64 flag;
                int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
                int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
                int idx = i & EHEA_INDEX_MASK;

                if (add) {
                        int ret = ehea_init_bmap(ehea_bmap, top, dir);
                        if (ret)
                                return ret;
                        flag = 1; /* valid */
                        ehea_mr_len += EHEA_SECTSIZE;
                } else {
                        if (!ehea_bmap->top[top])
                                continue;
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;
                        flag = 0; /* invalid */
                        ehea_mr_len -= EHEA_SECTSIZE;
                }

                ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
        }
        ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
        return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

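/*
 * A pfn is treated as the start of a 16GB hugepage if it is aligned
 * to the hugepage size and its compound page really has hugepage
 * order.
 */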
static int ehea_is_hugepage(unsigned long pfn)
{
        int page_order;

        if (pfn & EHEA_HUGEPAGE_PFN_MASK)
                return 0;

        page_order = compound_order(pfn_to_page(pfn));
        if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
                return 0;

        return 1;
}

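/*
 * Callback for walk_system_ram_range(): chunks smaller than a 16GB
 * hugepage are added to the busmap as-is; larger chunks are scanned
 * section by section so that 16GB hugepages can be skipped.
 */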
static int ehea_create_busmap_callback(unsigned long initial_pfn,
                                       unsigned long total_nr_pages, void *arg)
{
        int ret;
        unsigned long pfn, start_pfn, end_pfn, nr_pages;

        if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
                return ehea_update_busmap(initial_pfn, total_nr_pages,
                                          EHEA_BUSMAP_ADD_SECT);

        /* Given chunk is >= 16GB -> check for hugepages */
        start_pfn = initial_pfn;
        end_pfn = initial_pfn + total_nr_pages;
        pfn = start_pfn;

        while (pfn < end_pfn) {
                if (ehea_is_hugepage(pfn)) {
                        /* Add mem found in front of the hugepage */
                        nr_pages = pfn - start_pfn;
                        ret = ehea_update_busmap(start_pfn, nr_pages,
                                                 EHEA_BUSMAP_ADD_SECT);
                        if (ret)
                                return ret;

                        /* Skip the hugepage */
                        pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
                        start_pfn = pfn;
                } else
                        pfn += (EHEA_SECTSIZE / PAGE_SIZE);
        }

        /* Add mem found behind the hugepage(s) */
        nr_pages = pfn - start_pfn;
        return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
        int ret;

        mutex_lock(&ehea_busmap_mutex);
        ehea_mr_len = 0;
        ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
                                    ehea_create_busmap_callback);
        mutex_unlock(&ehea_busmap_mutex);
        return ret;
}

void ehea_destroy_busmap(void)
{
        int top, dir;
        mutex_lock(&ehea_busmap_mutex);
        if (!ehea_bmap)
                goto out_destroy;

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                        if (!ehea_bmap->top[top]->dir[dir])
                                continue;

                        kfree(ehea_bmap->top[top]->dir[dir]);
                }

                kfree(ehea_bmap->top[top]);
        }

        kfree(ehea_bmap);
        ehea_bmap = NULL;
out_destroy:
        mutex_unlock(&ehea_busmap_mutex);
}

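/*
 * Translate a kernel address into the corresponding bus address: look
 * up the section in the busmap via top/dir/idx and add the offset
 * within the section.  Returns EHEA_INVAL_ADDR if the address is not
 * covered by the map.
 */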
u64 ehea_map_vaddr(void *caddr)
{
        int top, dir, idx;
        unsigned long index, offset;

        if (!ehea_bmap)
                return EHEA_INVAL_ADDR;

        index = __pa(caddr) >> SECTION_SIZE_BITS;
        top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top])
                return EHEA_INVAL_ADDR;

        dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir])
                return EHEA_INVAL_ADDR;

        idx = index & EHEA_INDEX_MASK;
        if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                return EHEA_INVAL_ADDR;

        offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
        return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

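/*
 * Inverse of the lookup in ehea_map_vaddr(): recombine top/dir/idx
 * into a section number and return the kernel virtual address of the
 * section's first byte.
 */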
static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
        unsigned long ret = idx;
        ret |= dir << EHEA_DIR_INDEX_SHIFT;
        ret |= top << EHEA_TOP_INDEX_SHIFT;
        return __va(ret << SECTION_SIZE_BITS);
}

static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
                               struct ehea_adapter *adapter,
                               struct ehea_mr *mr)
{
        void *pg;
        u64 j, m, hret;
        unsigned long k = 0;
        u64 pt_abs = __pa(pt);

        void *sectbase = ehea_calc_sectbase(top, dir, idx);

        for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {

                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
                        pg = sectbase + ((k++) * EHEA_PAGESIZE);
                        pt[m] = __pa(pg);
                }
                hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
                                                0, pt_abs, EHEA_MAX_RPAGE);

                if ((hret != H_SUCCESS) &&
                    (hret != H_PAGE_REGISTERED)) {
                        ehea_h_free_resource(adapter->handle, mr->handle,
                                             FORCE_FREE);
                        pr_err("register_rpage_mr failed\n");
                        return hret;
                }
        }
        return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
                                struct ehea_adapter *adapter,
                                struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int idx;

        for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
                if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
                        continue;

                hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
                                    struct ehea_adapter *adapter,
                                    struct ehea_mr *mr)
{
        u64 hret = H_SUCCESS;
        int dir;

        for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
                if (!ehea_bmap->top[top]->dir[dir])
                        continue;

                hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

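/*
 * Register a single memory region spanning the whole busmap
 * (EHEA_BUSMAP_START, length ehea_mr_len) with the firmware.  The
 * physical pages of every valid section are registered in batches of
 * EHEA_MAX_RPAGE addresses, using the scratch page pt as the page
 * list handed to the hypervisor.
 */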
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
        int ret;
        u64 *pt;
        u64 hret;
        u32 acc_ctrl = EHEA_MR_ACC_CTRL;

        unsigned long top;

        pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!pt) {
                pr_err("no mem\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
                                        ehea_mr_len, acc_ctrl, adapter->pd,
                                        &mr->handle, &mr->lkey);

        if (hret != H_SUCCESS) {
                pr_err("alloc_resource_mr failed\n");
                ret = -EIO;
                goto out;
        }

        if (!ehea_bmap) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("no busmap available\n");
                ret = -EIO;
                goto out;
        }

        for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
                if (!ehea_bmap->top[top])
                        continue;

                hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
                if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
                        break;
        }

        if (hret != H_SUCCESS) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
                pr_err("registering mr failed\n");
                ret = -EIO;
                goto out;
        }

        mr->vaddr = EHEA_BUSMAP_START;
        mr->adapter = adapter;
        ret = 0;
out:
        free_page((unsigned long)pt);
        return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
        u64 hret;

        if (!mr || !mr->adapter)
                return -EINVAL;

        hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
                                    FORCE_FREE);
        if (hret != H_SUCCESS) {
                pr_err("destroy MR failed\n");
                return -EIO;
        }

        return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr)
{
        u64 hret;

        hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
                                   old_mr->vaddr, EHEA_MR_ACC_CTRL,
                                   adapter->pd, shared_mr);
        if (hret != H_SUCCESS)
                return -EIO;

        shared_mr->adapter = adapter;

        return 0;
}

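/*
 * Decode an error data block fetched from the hypervisor: word 0
 * carries the length, word 1 the resource handle and word 2 the
 * resource type.  AER/AERR and the port number sit at fixed offsets
 * for the known resource types; finally the raw block is dumped.
 */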
static void print_error_data(u64 *data)
{
        int length;
        u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
        u64 resource = data[1];

        length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

        if (length > EHEA_PAGESIZE)
                length = EHEA_PAGESIZE;

        if (type == EHEA_AER_RESTYPE_QP)
                pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
                       resource, data[6], data[12], data[22]);
        else if (type == EHEA_AER_RESTYPE_CQ)
                pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);
        else if (type == EHEA_AER_RESTYPE_EQ)
                pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
                       resource, data[6]);

        ehea_dump(data, length, "error data");
}

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
                    u64 *aer, u64 *aerr)
{
        unsigned long ret;
        u64 *rblock;
        u64 type = 0;

        rblock = (void *)get_zeroed_page(GFP_KERNEL);
        if (!rblock) {
                pr_err("Cannot allocate rblock memory\n");
                goto out;
        }

        ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

        if (ret == H_SUCCESS) {
                type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
                *aer = rblock[6];
                *aerr = rblock[12];
                print_error_data(rblock);
        } else if (ret == H_R_STATE) {
                pr_err("No error data available: %llX\n", res_handle);
        } else
                pr_err("Error data could not be fetched: %llX\n", res_handle);

        free_page((unsigned long)rblock);
out:
        return type;
}