/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
			 unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}
inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid;
	unsigned int fc = 0;
	u64 start_time = 0;
	int cc;

	if (q->u.out.use_enh_siga)
		fc = 3;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	else
		schid = *((u32 *)&q->irq_ptr->schid);

again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (*busy_bit) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_usecs();
			goto again;
		}
		if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}
/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}
inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
}
static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qdio_perf_stat_inc(&perf_stats.outbound_target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->last_move_ftc = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->last_move_ftc = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	/*
	 * last_move_ftc points to the ACK'ed buffer and not to the last turns
	 * first_to_check like for qebsm. Since it is only used to check if
	 * the queue front moved in qdio_inbound_q_done this is not a problem.
	 */
	q->last_move_ftc = new;
	count--;
	if (!count)
		return;

	/*
	 * Need to change all PRIMED buffers to NOT_INIT, otherwise
	 * we're losing initiative in the thinint code.
	 */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
		       count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * will sync the queues.
	 */

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		/*
		 * No siga-sync needed for non-qebsm here, as the inbound queue
		 * will be synced on the next siga-r, resp.
		 * tiqdio_is_inbound_q_done will do the siga-sync.
		 */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		goto check_next;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		if (!need_siga_sync(q) && !pci_out_supported(q))
			q->u.in.timestamp = get_usecs();

		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
		return 1;
	} else
		return 0;
}
static int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/*
	 * We need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance.
	 */
	qdio_siga_sync_q(q);

	get_buf_state(q, q->first_to_check, &state, 0);
	if (state == SLSB_P_INPUT_PRIMED)
		/* we got something to do */
		return 0;

	/* on VM, we don't poll, so the q is always done here */
	if (need_siga_sync(q) || pci_out_supported(q))
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
			      q->first_to_check);
		return 1;
	} else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
			      q->first_to_check);
		return 0;
	}
}
void qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end;

	qdio_perf_stat_inc(&perf_stats.inbound_handler);

	start = q->first_to_kick;
	end = q->first_to_check;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
		   start, count, q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = q->first_to_check;
	q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}

/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		/*
		 * We fetch all buffer states at once. get_buf_states may
		 * return count < stop. For QEBSM we do not loop.
		 */
		if (is_qebsm(q))
			break;
		goto check_next;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		q->last_move_ftc = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}
static void qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_out);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d",
				      q->nr);
			q->qdio_error = cc;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		q->qdio_error = cc;
		break;
	}
}
static void qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, count;

	start = q->first_to_kick;
	end = q->last_move_ftc;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time: */
	q->first_to_kick = q->last_move_ftc;
	q->qdio_error = 0;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	unsigned long flags;

	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
	spin_lock_irqsave(&q->lock, flags);

	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	spin_unlock_irqrestore(&q->lock, flags);

	if (queue_type(q) == QDIO_ZFCP_QFMT) {
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;
		return;
	}

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	tasklet_schedule(&q->tasklet);
}
/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
/* called from thinint inbound tasklet */
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
static void qdio_call_shutdown(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}
static void qdio_int_error(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
		DBF_ERROR("EQ:ck con");
		goto error;
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ:no dev");
		goto error;
	}

	if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ: bad io");
		goto error;
	}
	return 0;
error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
	return 1;
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
	if (!qdio_establish_check_errors(cdev, cstat, dstat))
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			return;
		case -ETIMEDOUT:
			DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
			qdio_int_error(cdev);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;

	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;

	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change so no need to wake up wait_q */
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
			break;
		}
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and qdio_free(). The qdio_free() return value is ignored since
 * !irq_ptr is already checked.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	rc = qdio_shutdown(cdev, how);
	if (rc == 0)
		rc = qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	int used, cc, diff;

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->last_move_ftc, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update last_move_ftc */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->last_move_ftc);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->last_move_ftc = add_buf(q->last_move_ftc, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return;

	if (need_siga_in(q)) {
		cc = qdio_siga_input(q);
		if (cc)
			q->qdio_error = cc;
	}
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static void handle_outbound(struct qdio_q *q, unsigned int callflags,
			    int bufnr, int count)
{
	unsigned char state;
	int used;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			qdio_kick_outbound_q(q);
		else if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			 (count > 1) &&
			 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
			/* exploit enhanced SIGA */
			q->u.out.use_enh_siga = 1;
			qdio_kick_outbound_q(q);
		} else {
			/*
			 * One siga-w per buffer required for unicast
			 * HiperSockets.
			 */
			q->u.out.use_enh_siga = 0;
			while (count--)
				qdio_kick_outbound_q(q);
		}

		/* report CC=2 conditions synchronously */
		if (q->qdio_error)
			__qdio_outbound_processing(q);
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		qdio_kick_outbound_q(q);
	else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
		qdio_perf_stat_inc(&perf_stats.fast_requeue);
	}
out:
	tasklet_schedule(&q->tasklet);
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, int bufnr, int count)
{
	struct qdio_irq *irq_ptr;

	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
	    (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if (!count)
		return 0;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
	else
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr,
			       count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr,
				count);
	else
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(do_QDIO);
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = qdio_setup_perf_stats();
	if (rc)
		goto out_debug;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_perf_stats;
	return 0;

out_perf_stats:
	qdio_remove_perf_stats();
out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);