#include <linux/slab.h>
#include <linux/smpboot.h>

#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)

#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)

#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
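
/*
 * EHCA_BMASK_IBM(from, to) names a field in IBM bit order: bit 0 is the
 * most significant bit of the 64-bit (N)EQE, so e.g. EQE_CQ_TOKEN covers
 * the low 32 bits.  Fields are read with EHCA_BMASK_GET(), as in:
 *
 *	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
 */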

static void queue_comp_task(struct ehca_cq *__cq);

static struct ehca_comp_pool *pool;

static inline void comp_event_callback(struct ehca_cq *cq)
{
	if (!cq->ib_cq.comp_handler)
		return;

	/* call the consumer's completion handler under cb_lock */
	spin_lock(&cq->cb_lock);
	cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
	spin_unlock(&cq->cb_lock);
}

static void print_error_data(struct ehca_shca *shca, void *data,
			     u64 *rblock, int length)
{
	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
	u64 resource = rblock[1];

	switch (type) {
	case 0x1: { /* Queue Pair */
		struct ehca_qp *qp = (struct ehca_qp *)data;

		ehca_err(&shca->ib_device,
			 "QP 0x%x (resource=%llx) has errors.",
			 qp->ib_qp.qp_num, resource);
		break;
	}
	case 0x4: /* Completion Queue */
		ehca_err(&shca->ib_device,
			 "CQ 0x%x (resource=%llx) has errors.",
			 ((struct ehca_cq *)data)->cq_number, resource);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown error type: %llx on %s.",
			 type, shca->ib_device.name);
		break;
	}

	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
		 "---------------------------------------------------");
	ehca_dmp(rblock, length, "resource=%llx", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data end "
		 "----------------------------------------------------");
}

	/* ehca_error_data(): evaluate the hipz_h_error_data() hcall result */
	if (ret == H_R_STATE)
		ehca_err(&shca->ib_device,
			 "No error data is available: %llx.", resource);
	else if (ret == H_SUCCESS) {
		int length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);

		if (length > EHCA_PAGESIZE)
			length = EHCA_PAGESIZE;
		print_error_data(shca, data, rblock, length);
	} else
		ehca_err(&shca->ib_device,
			 "Error data could not be fetched: %llx", resource);

	/* dispatch_qp_event(): route the event to whichever handler the
	 * consumer registered, SRQ or QP */
	if (qp->ext_type == EQPT_SRQ) {
		if (!qp->ib_srq.event_handler)
			return;

		event.element.srq = &qp->ib_srq;
		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
	} else {
		if (!qp->ib_qp.event_handler)
			return;

		event.element.qp = &qp->ib_qp;
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
	}
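
/*
 * These handlers are whatever the ULP registered at creation time; a
 * consumer-side sketch (my_qp_event_handler and my_ctx are hypothetical
 * names):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler = my_qp_event_handler,
 *		.qp_context    = my_ctx,
 *		...
 *	};
 */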

static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
			      enum ib_event_type event_type, int fatal)
{
	/* ... look up the QP by its EQE token, then dispatch_qp_event() ... */
}

static void cq_event_callback(struct ehca_shca *shca, u64 eqe)
{
	/* ... look up the CQ by its EQE token and report error data ... */
}

static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

	switch (identifier) {
	/* ... QP error/migration identifiers dispatch through
	 * qp_event_callback(); CQ errors end up here: ... */
		cq_event_callback(shca, eqe);
		break;
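
/*
 * Each asynchronous EQE carries an EE identifier (bits 2..7) naming the
 * event class, plus a QP/CQ number and token for the affected resource,
 * matching the EQE_* masks at the top of the file.
 */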

static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
{
	struct ehca_sma_attr  new_attr;
	struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;

	ehca_query_sma_attr(shca, port_num, &new_attr);

	if (new_attr.sm_sl  != old_attr->sm_sl ||
	    new_attr.sm_lid != old_attr->sm_lid)
		dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
				    "SM changed");

	if (new_attr.lid != old_attr->lid ||
	    new_attr.lmc != old_attr->lmc)
		dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
				    "LID changed");

	if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
	    memcmp(new_attr.pkeys, old_attr->pkeys,
		   sizeof(u16) * new_attr.pkey_tbl_len))
		dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
				    "P_Key changed");

	*old_attr = new_attr;
}
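
/*
 * saved_attr caches the last SMA attributes seen per port, so a
 * non-disruptive configuration change only generates events for the
 * attributes that actually differ.
 */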

/* replay modify_qp for the special QPs; returns nonzero if AQP1 was destroyed */
static int replay_modify_qp(struct ehca_sport *sport)
{
	int aqp1_destroyed;

	/* ... recover the SMI/GSI QPs under sport->mod_sqp_lock ... */
	return aqp1_destroyed;
}

	/* parse_ec(), case 0x30 (port availability change), port up: in
	 * autodetect mode, replay modify_qp on the special QPs first; if
	 * AQP1 is gone the port cannot become active */
			if (replay_modify_qp(sport))
				break;

	/* case 0x31 (port configuration change), disruptive variant */
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

	/* non-disruptive variant */
			notify_port_conf_change(shca, port);

	/* case 0x34 (util async event) */
		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
		if (spec_event == 0x80) /* client reregister required */
			dispatch_port_event(shca, port,
					    IB_EVENT_CLIENT_REREGISTER,
					    "client reregister req.");
		else
			ehca_warn(&shca->ib_device, "Unknown util async "
				  "event %x on port %x", spec_event, port);

static inline void reset_eq_pending(struct ehca_cq *cq)
{
	/* ... clear the CQ's event-pending (CQx_EP) register ... */
}

	/* ehca_interrupt_neq(), hard irq context: defer to the tasklet */
	tasklet_hi_schedule(&shca->neq.interrupt_task);

	/* ehca_tasklet_neq(): every non-completion NEQE is an event code
	 * for parse_ec() */
		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
			parse_ec(shca, eqe->entry);

	ret = hipz_h_reset_event(shca->ipz_hca_handle,
				 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);

	if (ret != H_SUCCESS)
		ehca_err(&shca->ib_device, "Can't clear notification events.");
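
/*
 * The NEQ (notification event queue) carries only asynchronous events
 * such as port state changes; completion events arrive on the main EQ
 * handled below.
 */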

	/* ehca_interrupt_eq(), hard irq context: defer to the tasklet */
	tasklet_hi_schedule(&shca->eq.interrupt_task);
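
/*
 * Both hard irq handlers above only schedule a tasklet; the actual EQ
 * draining happens in tasklet context.  For the main EQ, is_irq tells
 * the processing routine below whether it was entered from the tasklet
 * or from the poll ("deadman") path.
 */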

	/* process_eqe(): */
	eqe_value = eqe->entry;
	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, token);
		read_unlock(&ehca_cq_idr_lock);
		if (cq == NULL) {
			ehca_err(&shca->ib_device,
				 "Invalid eqe for non-existing cq token=%x",
				 token);
			return;
		}
		reset_eq_pending(cq);
		comp_event_callback(cq);
	} else
		parse_identifier(shca, eqe_value);
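
/*
 * The token in a completion EQE indexes the ehca_cq_idr map.  The full
 * path also takes a cq->nr_events reference under the idr lock and,
 * with the scaling code active, defers the callback to the per-CPU
 * completion pool via queue_comp_task().
 */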

	/* ehca_process_eq(), irq entry: */
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;

		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely(query_cnt == max_query_cnt))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}
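
/*
 * The busy-wait presumably ensures the interrupt source has been
 * de-asserted before eqes are consumed and the CQs re-armed, so no
 * event can be signalled and lost in between.
 */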

	/* read out all eqes into the per-EQ cache */
	eqe_cnt = 0;
	do {
		u32 token;

		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq) {
		ret = hipz_h_eoi(eq->ist);
		if (ret != H_SUCCESS)
			ehca_err(&shca->ib_device,
				 "bad return code EOI -rc = %lld\n", ret);
	}

	/* re-enable interrupt delivery for the cached CQs */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}
	/* check whether new eqes arrived meanwhile */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);
	/* dispatch the cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq)
			comp_event_callback(eq->eqe_cache[i].cq);
		else
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
	if (eq_empty)
		goto unlock_irq_spinlock;
	/* poll eqes that slipped in after the cache was filled */
	do {
		struct ehca_eqe *eqe = ehca_poll_eq(shca, &shca->eq);

		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);
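
/*
 * The sequence above first re-arms the cached CQs, then samples EQ
 * emptiness under eq->spinlock, and only afterwards runs the handlers;
 * anything that raced in between is picked up by the final poll loop.
 * When the scaling code is enabled, completions are handed to the
 * per-CPU completion pool via queue_comp_task() instead of being
 * called back inline.
 */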

	/* find_next_online_cpu(): round-robin over the online CPUs */
	if (ehca_debug_level >= 3)
		ehca_dmp(cpu_online_mask, cpumask_size(), "");

	spin_lock_irqsave(&pool->last_cpu_lock, flags);
	do {
		cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
		pool->last_cpu = cpu;
	} while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
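
/*
 * The loop skips CPUs whose completion task is inactive (e.g. parked
 * during CPU hotplug), wrapping around at the end of the online mask.
 */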

static void __queue_comp_task(struct ehca_cq *__cq,
			      struct ehca_cpu_comp_task *cct,
			      struct task_struct *thread)
{
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);
	/* ... link __cq into cct->cq_list and wake up the thread ... */
	spin_unlock_irqrestore(&cct->task_lock, flags);
}

static void queue_comp_task(struct ehca_cq *__cq)
{
	struct ehca_cpu_comp_task *cct;
	struct task_struct *thread;
	int cpu_id, cq_jobs;
	unsigned long flags;

	cpu_id = find_next_online_cpu(pool);
	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
	thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);

	spin_lock_irqsave(&cct->task_lock, flags);
	cq_jobs = cct->cq_jobs;
	spin_unlock_irqrestore(&cct->task_lock, flags);

	/* if the first choice is already loaded, move on to the next CPU */
	if (cq_jobs > 0) {
		cpu_id = find_next_online_cpu(pool);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
		thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
	}
	__queue_comp_task(__cq, cct, thread);
}
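
/*
 * The second pick can itself land on a busy CPU; the retry is a cheap
 * spreading heuristic rather than a strict load balancer.
 */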

	/* run_comp_task(): called and returns with cct->task_lock held */
	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irq(&cct->task_lock);

		comp_event_callback(cq);

		spin_lock_irq(&cct->task_lock);
		cq->nr_callbacks--;
		if (!cq->nr_callbacks) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
	}

static void comp_task_park(unsigned int cpu)
{
	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	struct ehca_cpu_comp_task *target;
	struct task_struct *thread;
	struct ehca_cq *cq, *tmp;
	LIST_HEAD(list);

	/* mark the parked CPU inactive and splice off its pending CQs */
	spin_lock_irq(&cct->task_lock);
	cct->cq_jobs = 0;
	cct->active = 0;
	list_splice_init(&cct->cq_list, &list);
	spin_unlock_irq(&cct->task_lock);

	/* hand them to the next online CPU */
	cpu = find_next_online_cpu(pool);
	target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
	list_for_each_entry_safe(cq, tmp, &list, entry) {
		list_del(&cq->entry);
		__queue_comp_task(cq, target, thread);
	}
}
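
/*
 * .park is invoked by the smpboot infrastructure when a CPU goes
 * offline: pending completion work must migrate, since the per-CPU
 * thread will no longer run.
 */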

static void comp_task_stop(unsigned int cpu, bool online)
{
	/* ... sanity-check that the stopped CPU's cq_list is empty ... */
}

static int comp_task_should_run(unsigned int cpu)
{
	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);

	return cct->cq_jobs;
}

static void comp_task(unsigned int cpu)
{
	struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
	int cql_empty;

	spin_lock_irq(&cct->task_lock);
	cql_empty = list_empty(&cct->cq_list);
	if (!cql_empty)
		run_comp_task(cct);
	spin_unlock_irq(&cct->task_lock);
}

static struct smp_hotplug_thread comp_pool_threads = {
	.thread_should_run	= comp_task_should_run,
	.thread_fn		= comp_task,
	.thread_comm		= "ehca_comp/%u",
	.cleanup		= comp_task_stop,
	.park			= comp_task_park,
};
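
/*
 * The comp pool rides on the kernel's smpboot per-CPU kthread
 * infrastructure: registration (elsewhere in this file) presumably
 * stores the per-CPU task pointers and registers the descriptor, e.g.
 *
 *	comp_pool_threads.store = pool->cpu_comp_threads;
 *	smpboot_register_percpu_thread(&comp_pool_threads);
 *
 * after which one "ehca_comp/%u" thread exists per online CPU.
 */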

	pr_info("eHCA scaling code enabled\n");