/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>

#include "qdio_debug.h"
#include "qdio_perf.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDIO unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 u32 *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
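/*
 * Illustrative sketch (not part of the driver): how a caller typically
 * combines the condition code and the busy bit returned by
 * do_siga_output(). This mirrors qdio_siga_output() further below;
 * schid, mask and fc are assumed to be set up as in qdio_do_siga_output().
 *
 *	u32 busy_bit;
 *	int cc = do_siga_output(schid, mask, &busy_bit, fc);
 *
 *	if (cc == 2 && busy_bit)
 *		retry for QDIO_BUSY_BIT_PATIENCE usecs, then give up;
 *	else if (cc)
 *		report the error to the upper layer;
 */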
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
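/*
 * Illustrative sketch (not part of the driver): qdio_do_eqbs() and
 * qdio_do_sqbs() form an extract/modify pair. A hypothetical caller that
 * wants to consume "count" primed input buffers starting at "start" would
 * first read the common state and then flip it:
 *
 *	unsigned char state;
 *	int done = qdio_do_eqbs(q, &state, start, count);
 *
 *	if (done > 0 && state == SLSB_P_INPUT_PRIMED)
 *		qdio_do_sqbs(q, SLSB_P_INPUT_NOT_INIT, start, done);
 *
 * In this file the pattern is wrapped by get_buf_states()/set_buf_states()
 * below, which also handle the non-QEBSM case.
 */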
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
			 unsigned char *state)
{
	return get_buf_states(q, bufnr, state, 1);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}
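/*
 * Worked example (illustrative): the wrap-around safety comes from
 * next_buf() masking the index with QDIO_MAX_BUFFERS_MASK (127).
 * Setting 4 buffer states starting at buffer 126,
 *
 *	set_buf_states(q, 126, SLSB_P_INPUT_NOT_INIT, 4);
 *
 * touches buffers 126, 127, 0 and 1: the index advances
 * 126 -> 127 -> 0 -> 1 and never runs past the 128-entry SLSB array.
 */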
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:");
	DBF_DEV_HEX(DBF_INFO, q->irq_ptr, q, sizeof(void *));
	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}
inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}
static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}
static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid;
	unsigned int fc = 0;

	if (q->u.out.use_enh_siga) {
		fc = 3;
	}
	if (!is_qebsm(q))
		schid = *((u32 *)&q->irq_ptr->schid);
	else {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	return do_siga_output(schid, q->mask, busy_bit, fc);
}
static int qdio_siga_output(struct qdio_q *q)
{
	int cc;
	u32 busy_bit;
	u64 start_time = 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_out);
again:
	cc = qdio_do_siga_output(q, &busy_bit);
	if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w bb:%2d", q->nr);

		if (!start_time)
			start_time = get_usecs();
		else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}

	if (cc == 2 && busy_bit)
		cc |= QDIO_ERROR_SIGA_BUSY;
	if (cc)
		DBF_ERROR("%4x SIGA-W:%2d", SCH_NO(q), cc);
	return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}
/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}
inline void qdio_stop_polling(struct qdio_q *q)
{
	spin_lock_bh(&q->u.in.lock);
	if (!q->u.in.polling) {
		spin_unlock_bh(&q->u.in.lock);
		return;
	}
	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
	spin_unlock_bh(&q->u.in.lock);
}
static void announce_buffer_error(struct qdio_q *q)
{
	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d", q->first_to_check);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);

	q->qdio_error = QDIO_ERROR_SLSB_STATE;
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * If we still poll, don't update last_move_ftc; keep the
	 * previously ACKed buffer there.
	 */
	if (!q->u.in.polling)
		q->last_move_ftc = q->first_to_check;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/*
	 * No siga sync here; a PCI interrupt or the thinint handler
	 * will sync the queues after a thin interrupt.
	 */

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);

		/*
		 * Only ACK the first buffer. The ACK will be removed in
		 * qdio_stop_polling.
		 */
		if (q->u.in.polling)
			state = SLSB_P_INPUT_NOT_INIT;
		else {
			q->u.in.polling = 1;
			state = SLSB_P_INPUT_ACK;
		}
		set_buf_state(q, q->first_to_check, state);

		/*
		 * Need to change all PRIMED buffers to NOT_INIT, otherwise
		 * we're losing initiative in the thinint code.
		 */
		if (count > 1)
			set_buf_states(q, next_buf(q->first_to_check),
				       SLSB_P_INPUT_NOT_INIT, count - 1);

		/*
		 * No siga-sync needed for non-qebsm here, as the inbound queue
		 * will be synced on the next siga-r, resp.
		 * tiqdio_is_inbound_q_done will do the siga-sync.
		 */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		goto check_next;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		if (!need_siga_sync(q) && !pci_out_supported(q))
			q->u.in.timestamp = get_usecs();

		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
		return 1;
	} else
		return 0;
}
static int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/*
	 * We need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance.
	 */
	qdio_siga_sync_q(q);

	get_buf_state(q, q->first_to_check, &state);
	if (state == SLSB_P_INPUT_PRIMED)
		/* we got something to do */
		return 0;

	/* on VM, we don't poll, so the q is always done here */
	if (need_siga_sync(q) || pci_out_supported(q))
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
			      q->first_to_check);
		return 1;
	} else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
			      q->first_to_check);
		return 0;
	}
}
void qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end;

	qdio_perf_stat_inc(&perf_stats.inbound_handler);

	start = q->first_to_kick;
	end = q->first_to_check;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
		   start, count, q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = q->first_to_check;
	q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}

/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		/*
		 * We fetch all buffer states at once. get_buf_states may
		 * return count < stop. For QEBSM we do not loop.
		 */
		if (is_qebsm(q))
			break;
		goto check_next;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}
static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
		q->last_move_ftc = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}
/*
 * z/VM may present cc=2 with the busy bit set on SIGA-write during
 * reconfiguration of its Guest LAN (only in IQDIO mode; otherwise qdio
 * is asynchronous, and cc=2 with busy bit would take the queues down
 * immediately).
 *
 * Therefore qdio_siga_output() retries constantly for a short time if
 * such a condition occurs. If it persists, it increases the
 * busy_siga_counter, saves the timestamp and schedules the queue for
 * later processing. qdio_outbound_processing() checks the counter: if
 * non-zero, it calls qdio_kick_outbound_q() as often as the value of
 * the counter, attempting further SIGA instructions. For each
 * successful SIGA the counter is decreased; for failing SIGAs it
 * remains the same. After some time with no movement,
 * qdio_kick_outbound_q() finally fails and reflects the corresponding
 * error codes to the upper layer module so it can take the queues down.
 *
 * Note that this is a change from the original HiperSockets design
 * (which said cc=2 and busy bit means take the queues down), but in
 * those days Guest LAN didn't exist... Excessive cc=2 with busy bit
 * conditions will still take the queues down, but the threshold is
 * higher due to the Guest LAN environment.
 *
 * Called from outbound tasklet and do_QDIO handler.
 */
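/*
 * Timing sketch (illustrative, derived from the constants used below):
 *
 *	t = 0                       first cc=2 + busy bit, timestamp saved
 *	t < QDIO_BUSY_BIT_PATIENCE  inline retry loop in qdio_siga_output()
 *	t < QDIO_BUSY_BIT_GIVE_UP   tasklet is rescheduled and the
 *	                            busy_siga_counter replays the SIGAs
 *	t >= QDIO_BUSY_BIT_GIVE_UP  give up, report the error (incl.
 *	                            QDIO_ERROR_SIGA_BUSY) to the upper layer
 */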
static void qdio_kick_outbound_q(struct qdio_q *q)
{
	int rc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickoutq:%1d", q->nr);

	if (!need_siga_out(q))
		return;

	rc = qdio_siga_output(q);
	switch (rc) {
	case 0:
		/* TODO: improve error handling for CC=0 case */
		if (q->u.out.timestamp)
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "cc2 rslv:%4x",
				      atomic_read(&q->u.out.busy_siga_counter));
		/* went smooth this time, reset timestamp */
		q->u.out.timestamp = 0;
		break;
	/* cc=2 and busy bit */
	case (2 | QDIO_ERROR_SIGA_BUSY):
		atomic_inc(&q->u.out.busy_siga_counter);

		/* if the last siga was successful, save timestamp here */
		if (!q->u.out.timestamp)
			q->u.out.timestamp = get_usecs();

		/* if we're in time, don't touch qdio_error */
		if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
			tasklet_schedule(&q->tasklet);
			break;
		}
		DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
	default:
		/* for plain cc=1, 2 or 3 */
		q->qdio_error = rc;
	}
}
static void qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, count;

	start = q->first_to_kick;
	end = q->last_move_ftc;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time: */
	q->first_to_kick = q->last_move_ftc;
	q->qdio_error = 0;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);

	/* see comment in qdio_kick_outbound_q */
	siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
	while (siga_attempts--) {
		atomic_dec(&q->u.out.busy_siga_counter);
		qdio_kick_outbound_q(q);
	}

	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT) {
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			tasklet_schedule(&q->tasklet);
		return;
	}

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) {
		tasklet_schedule(&q->tasklet);
		return;
	}

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}
void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	tasklet_schedule(&q->tasklet);
}
/* called from thinint inbound tasklet */
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
static void qdio_call_shutdown(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}
static void qdio_int_error(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
		DBF_ERROR("EQ:ck con");
		goto error;
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ:no dev");
		goto error;
	}

	if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ: bad io");
		goto error;
	}
	return 0;
error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
	return 1;
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
	if (!qdio_establish_check_errors(cdev, cstat, dstat))
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			return;
		case -ETIMEDOUT:
			DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
			qdio_int_error(cdev);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change so no need to wake up wait_q */
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
			break;
		}
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
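/*
 * Usage sketch (illustrative, not from this file): an upper-layer driver
 * such as qeth or zfcp queries the subchannel description like this;
 * "my_cdev" is a hypothetical ccw device owned by that driver.
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	if (!qdio_get_ssqd_desc(my_cdev, &ssqd))
 *		inspect e.g. ssqd.mmwc to size multi-buffer SIGA writes;
 */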
/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and on success qdio_free() for @cdev.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	rc = qdio_shutdown(cdev, how);
	if (rc == 0)
		rc = qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_disable(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		tasklet_disable(&q->tasklet);
		del_timer(&q->u.out.timer);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);
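/*
 * Usage sketch (illustrative; "my_cdev", the handlers and the SBAL arrays
 * are hypothetical and belong to the calling driver). Only the fields
 * that qdio_allocate() below actually validates are shown:
 *
 *	struct qdio_initialize init_data = {
 *		.cdev			= my_cdev,
 *		.no_input_qs		= 1,
 *		.no_output_qs		= 1,
 *		.input_handler		= my_input_handler,
 *		.output_handler		= my_output_handler,
 *		.input_sbal_addr_array	= my_in_sbals,
 *		.output_sbal_addr_array	= my_out_sbals,
 *	};
 *
 *	if (qdio_initialize(&init_data))
 *		bail out; the queues were already freed again on error;
 */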
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
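/*
 * Worked example (illustrative): with start = 120 and count = 16,
 * add_buf() wraps and end = (120 + 16) & 127 = 8, so the wrap-around
 * branch reports buffers 120..127 and 0..7 as "in between":
 *
 *	buf_in_between(125, 120, 16) -> 1	(tail 120..127)
 *	buf_in_between(3, 120, 16)   -> 1	(wrapped head 0..7)
 *	buf_in_between(50, 120, 16)  -> 0
 */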
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned long flags;
	int used, rc;

	/*
	 * do_QDIO could run in parallel with the queue tasklet so the
	 * upper-layer program could empty the ACKed buffer here.
	 * If that happens we must clear the polling flag, otherwise
	 * qdio_stop_polling() could set the buffer to NOT_INIT after
	 * it was set to EMPTY which would kill us.
	 */
	spin_lock_irqsave(&q->u.in.lock, flags);
	if (q->u.in.polling)
		if (buf_in_between(q->last_move_ftc, bufnr, count))
			q->u.in.polling = 0;

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	spin_unlock_irqrestore(&q->u.in.lock, flags);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return;

	if (need_siga_in(q)) {
		rc = qdio_siga_input(q);
		if (rc)
			q->qdio_error = rc;
	}
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static void handle_outbound(struct qdio_q *q, unsigned int callflags,
			    int bufnr, int count)
{
	unsigned char state;
	int used;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--)
					qdio_kick_outbound_q(q);
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		qdio_kick_outbound_q(q);
	else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
		qdio_perf_stat_inc(&perf_stats.fast_requeue);
	}
out:
	/* Fixme: could wait forever if called from process context */
	tasklet_schedule(&q->tasklet);
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, int bufnr, int count)
{
	struct qdio_irq *irq_ptr;

	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
	    (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if (!count)
		return 0;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
	else
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr,
			       count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr,
				count);
	else
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(do_QDIO);
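/*
 * Usage sketch (illustrative; "my_cdev" is a hypothetical ccw device of
 * the calling driver): after its input handler has consumed "count"
 * inbound buffers starting at "bufnr", an upper layer hands them back to
 * the adapter, and later queues one filled outbound buffer on queue 0:
 *
 *	do_QDIO(my_cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
 *	do_QDIO(my_cdev, QDIO_FLAG_SYNC_OUTPUT, 0, out_bufnr, 1);
 */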
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = qdio_setup_perf_stats();
	if (rc)
		goto out_debug;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_perf_stats;
	return 0;

out_perf_stats:
	qdio_remove_perf_stats();
out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);