#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/debug.h>
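
/*
 * do_siga_sync - issue SIGA with the synchronize function code.
 * The function code, subchannel id and the output/input masks are bound to
 * general registers 0-3; the asm body below is sketched from the usual
 * siga/ipm/srl sequence used by these wrappers and hands the condition code
 * back to the caller.
 */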
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}
static int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	if (ccq == 0 || ccq == 32)
		return 0;
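
/*
 * qdio_do_eqbs - extract buffer states via the EQBS instruction (QEBSM).
 * Fetches the state of up to @count buffers starting at @start (reported
 * via @state) and returns the number of buffers whose state was extracted;
 * partial completion and error conditions are detected via qdio_check_ccq().
 */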
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;

	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	BUG_ON(tmp_count == count);

	return count - tmp_count;

	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;

	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	if (rc == 1 || rc == 2) {

	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;

	return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
			__state = q->slsb.val[bufnr];
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
		} else if (q->slsb.val[bufnr] != __state)
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

			       QDIO_MAX_BUFFERS_PER_Q);
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);

		schid = q->irq_ptr->sch_token;

	cc = do_siga_sync(schid, output, input, fc);

	return (cc) ? -EIO : 0;
static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned long laob = 0;

	if (q->u.out.use_cq && aob != 0) {

	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

			  "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);

	cc = do_siga_input(schid, q->mask, fc);

	return (cc) ? -EIO : 0;
}
#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
static inline void qdio_sync_queues(struct qdio_q *q)

			unsigned char *state)

	return get_buf_states(q, bufnr, state, 1, 0, 0);
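
/*
 * qdio_stop_polling - give the ACKed buffer(s) used for polling back to the
 * adapter by resetting them to NOT_INIT, so new inbound data raises an
 * interrupt again. For QEBSM the whole acknowledged range (ack_start,
 * ack_count) is cleared, otherwise only the single ack_start buffer.
 */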
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;

		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
static inline void account_sbals(struct qdio_q *q, int count)
static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;
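
/*
 * inbound_primed - handle buffers found in the PRIMED state: acknowledge
 * the newly arrived buffers, withdraw the previous ACK (resetting it to
 * NOT_INIT) and remember the new ack_start, so that further primed buffers
 * keep generating interrupts.
 */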
static inline void inbound_primed(struct qdio_q *q, int count)
{
		if (!q->u.in.polling) {

		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);

	if (q->u.in.polling) {

		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);

	q->u.in.ack_start = new;

	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
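
/*
 * get_inbound_buffer_frontier - scan the input queue starting at
 * first_to_check and decide, based on the SLSB state of the buffers found
 * (PRIMED, ERROR, NOT_INIT, ...), how far the adapter has progressed;
 * performance counters are updated along the way.
 */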
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	unsigned char state = 0;

	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);

		inbound_primed(q, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);

		process_buffer_error(q, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);

	case SLSB_P_INPUT_NOT_INIT:
		if (q->irq_ptr->perf_stat_enabled)
static int qdio_inbound_q_moved(struct qdio_q *q)
{
	bufnr = get_inbound_buffer_frontier(q);
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;
static inline int contains_aobs(struct qdio_q *q)
static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
				  int i, struct qaob *aob)
{
		      (unsigned long) aob->res0[0]);
		      (unsigned long) aob->res0[1]);
		      (unsigned long) aob->res0[2]);
		      (unsigned long) aob->res0[3]);
		      (unsigned long) aob->res0[4]);
		      (unsigned long) aob->res0[5]);

		      (unsigned long) aob->sba[tmp]);
		      (unsigned long) q->sbal[i]->element[tmp].addr);
		      q->sbal[i]->element[tmp].length);

	for (tmp = 0; tmp < 2; ++tmp) {
		      (unsigned long) aob->res4[tmp]);
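
/*
 * qdio_handle_aobs - for output queues using asynchronous completion, check
 * the state of the scanned buffers and flag those still PENDING in their
 * sbal_state, presumably so the upper layer knows not to reuse them yet.
 */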
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;

	if (!contains_aobs(q))

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);

			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
						int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->aobs[bufnr]) {

	if (q->aobs[bufnr]) {
static void qdio_kick_handler(struct qdio_q *q)
{
		qdio_handle_aobs(q, start, count);
static void __qdio_inbound_processing(struct qdio_q *q)
{
	if (!qdio_inbound_q_moved(q))

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {

	qdio_stop_polling(q);

	if (!qdio_inbound_q_done(q)) {

	__qdio_inbound_processing(q);
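
/*
 * get_outbound_buffer_frontier - outbound counterpart of the inbound scan:
 * look at the SLSB states starting at first_to_check to find out how many
 * buffers the adapter has finished with (EMPTY), handling ERROR and
 * NOT_INIT states and updating the performance counters.
 */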
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	unsigned char state = 0;

	    multicast_outbound(q)))

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);

			      "out empty:%1d %02x", q->nr, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);

		process_buffer_error(q, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);

		if (q->irq_ptr->perf_stat_enabled)

	case SLSB_P_OUTPUT_NOT_INIT:
static inline int qdio_outbound_q_done(struct qdio_q *q)

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	bufnr = get_outbound_buffer_frontier(q);
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	unsigned int busy_bit;

	cc = qdio_siga_output(q, &busy_bit, aob);
static void __qdio_outbound_processing(struct qdio_q *q)
{
	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (q->u.out.pci_out_enabled)

	if (qdio_outbound_q_done(q))

	if (!timer_pending(&q->u.out.timer))

	__qdio_outbound_processing(q);
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		tasklet_schedule(&q->tasklet);

	qdio_stop_polling(q);

	if (!qdio_inbound_q_done(q)) {
		tasklet_schedule(&q->tasklet);

	__tiqdio_inbound_processing(q);
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
		if (q->u.in.queue_start_poll) {
				     &q->u.in.queue_irq_state)) {

			tasklet_schedule(&q->tasklet);

		if (qdio_outbound_q_done(q))

			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
		DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	if (!intparm || !irq_ptr) {

	switch (PTR_ERR(irb)) {

	qdio_irq_check_sense(irq_ptr, irb);

	switch (irq_ptr->state) {
		qdio_establish_handle_irq(cdev, cstat, dstat);

		qdio_int_handler_pci(irq_ptr);

		qdio_handle_activate_check(cdev, intparm, cstat,
					   dstat);
static void qdio_shutdown_queues(struct ccw_device *cdev)

	unsigned long flags;

	qdio_shutdown_queues(cdev);
	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
	unsigned long saveflags;

	irq_ptr = cdev->private->qdio_data;

	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.count = irq_ptr->equeue.count;

	qdio_detect_hsicq(irq_ptr);
	qdio_init_buf_states(irq_ptr);
	unsigned long saveflags;

	irq_ptr = cdev->private->qdio_data;

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	switch (irq_ptr->state) {
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (bufnr >= start && bufnr < end)

	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
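
/*
 * handle_inbound - do_QDIO() path for input queues: withdraw or shrink a
 * previously given ACK if the newly returned buffers overlap it, hand the
 * buffers back to the adapter and issue SIGA-input when the queue needs it.
 */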
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	if (!q->u.in.polling)

	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		q->u.in.ack_count -= diff;
		if (q->u.in.ack_count <= 0) {
			q->u.in.polling = 0;
			q->u.in.ack_count = 0;

		q->u.in.polling = 0;

	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

		return qdio_siga_input(q);
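
/*
 * handle_outbound - do_QDIO() path for output queues: mark the buffers as
 * primed, kick the adapter with SIGA-output (passing an AOB when the queue
 * uses asynchronous completion) and let the tasklet/timer pick up the
 * completion scanning.
 */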
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;

	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)

		q->u.out.pci_out_enabled = 1;

		q->u.out.pci_out_enabled = 0;

		unsigned long phys_aob = 0;

			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);

		rc = qdio_siga_sync_q(q);

		get_buf_state(q, prev_buf(bufnr), &state, 0);

			rc = qdio_kick_outbound_q(q, 0);

	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);

		if (!timer_pending(&q->u.out.timer))
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)

	irq_ptr = cdev->private->qdio_data;

		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);

		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	qdio_stop_polling(q);

	if (!qdio_inbound_q_done(q))

			   &q->u.in.queue_irq_state))
		qdio_sync_queues(q);

	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))

			   &q->u.in.queue_irq_state))
static int __init init_QDIO(void)

static void __exit exit_QDIO(void)