#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
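/*
 * hw_qpageit_get_inc() - return the current queue page and advance the
 * iterator by one hardware page; returns NULL at the end of the queue
 * or when the entry is not aligned to the EHEA page size.
 */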
	void *retvalue = hw_qeit_get(queue);

		pr_err("not on pageboundary\n");
			 const u32 pagesize, const u32 qe_size)
	int pages_per_kpage = PAGE_SIZE / pagesize;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);

		pr_err("no mem for queue_pages\n");

	while (i < nr_of_pages) {
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {

	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
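/*
 * hw_queue_dtor() - free the backing store. Only every
 * pages_per_kpage-th entry of queue_pages is a distinct kernel page,
 * hence the loop stride below.
 */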
static void hw_queue_dtor(struct hw_queue *queue)

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);
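/*
 * ehea_create_cq() - allocate a completion queue resource from the
 * hypervisor, then register every queue page. On the last page the
 * queue must be exhausted: one further hw_qpageit_get_inc() is
 * expected to return NULL.
 */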
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
	u64 *cq_handle_ref, hret, rpage;

		pr_err("no mem for cq\n");

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	act_nr_of_entries = 0;

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
			pr_err("hw_qpageit_get_inc failed\n");

		if (hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);

			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);

	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);
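/*
 * CQ destroy path: a first free attempt may fail with H_R_STATE while
 * the resource still holds error state; the error data is then fetched
 * and the free is retried with the force flag set.
 */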
	if (hret != H_SUCCESS)
		return hret;

	hcp_epas_dtor(&cq->epas);

	if (hret == H_R_STATE) {

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
		pr_err("no mem for eq\n");

	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");

		pr_err("can't allocate eq pages\n");

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
			pr_err("hw_qpageit_get_inc failed\n");

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;

			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;
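/*
 * The EQ iterator is shared with the interrupt path, so an EQE is
 * consumed under eq->spinlock with interrupts disabled.
 */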
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);
static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)

	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hcp_epas_dtor(&eq->epas);

	if (hret == H_R_STATE) {

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
				  int nr_pages, int wqe_size, int act_nr_sges,
	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
			pr_err("hw_qpageit_get_inc failed\n");

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");

	hw_qeit_reset(hw_queue);

	hw_queue_dtor(hw_queue);
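/*
 * WQE sizes are transported as an encoded exponent; the decoded size
 * is 128 << enc bytes, e.g. enc=0 -> 128 B, enc=2 -> 512 B.
 */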
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}
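/*
 * ehea_create_qp() - allocate the QP resource, then size and register
 * the send queue and up to three receive queues; rq2 and rq3 are only
 * set up when more than one receive queue was requested.
 */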
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

		pr_err("no mem for qp\n");

	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);

		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);

		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)

	if (hret != H_SUCCESS)
		return hret;

	hcp_epas_dtor(&qp->epas);

	if (hret == H_R_STATE) {

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0
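/*
 * ehea_rebuild_busmap() - walk the whole table, hand out contiguous
 * bus addresses to all valid entries, and free directory and top
 * levels that no longer contain a valid entry.
 */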
static void ehea_rebuild_busmap(void)

		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];

			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			ehea_dir = ehea_top->dir[dir];

				if (!ehea_dir->ent[idx])
					continue;

			if (!valid_entries) {

		if (!valid_dir_entries) {
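/*
 * ehea_update_busmap() - mark the sections covered by nr_pages pages
 * starting at pfn as valid (add) or invalid, then rebuild the bus
 * address assignment.
 */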
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages,
			      int add)
	unsigned long i, start_section, end_section;

		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);

	for (i = start_section; i < end_section; i++) {

			int ret = ehea_init_bmap(ehea_bmap, top, dir);

			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
static int ehea_is_hugepage(unsigned long pfn)
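/*
 * Callback for the system RAM walk: chunks smaller than the 16 GB
 * hugepage size are added directly; larger chunks are scanned so that
 * hugepages are skipped rather than registered.
 */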
static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;

	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehea_create_busmap_callback);
		if (!ehea_bmap->top[top])
			continue;

			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;
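/*
 * ehea_calc_sectbase() - rebuild the kernel virtual base address of a
 * section from its top/dir/idx coordinates by reversing the index
 * split used for lookups.
 */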
static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
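/*
 * ehea_reg_mr_section() - register all pages of one memory section
 * with the HEA, passing EHEA_MAX_RPAGE physical page addresses per
 * hcall through the scratch page table pt.
 */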
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
	void *sectbase = ehea_calc_sectbase(top, dir, idx);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			pr_err("register_rpage_mr failed\n");
static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
	u64 hret = H_SUCCESS;

		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
	u64 hret = H_SUCCESS;

		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");

		pr_err("no busmap available\n");

		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;

	if (hret != H_SUCCESS) {
		pr_err("registering mr failed\n");
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;
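/*
 * print_error_data() - decode an error data block returned by the
 * hypervisor; for a QP resource data[6], data[12] and data[22] hold
 * AER, AERR and the port number.
 */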
static void print_error_data(u64 *data)

		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);

		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
		pr_err("Cannot allocate rblock memory\n");

	if (ret == H_SUCCESS) {
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else
		pr_err("Error data could not be fetched: %llX\n", res_handle);