/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *             Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <asm/s390_ext.h>
#include <asm/processor.h>

#include "sclp.h"	/* sclp_cmdw_t, sccb_mask_t, struct sclp_req, ... */
#define SCLP_CORE_PRINT_HEADER "sclp low level driver: "
/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* spinlock to protect global variables of sclp_core */
static spinlock_t sclp_lock;

/* Mask of valid sclp events */
static sccb_mask_t sclp_receive_mask;
static sccb_mask_t sclp_send_mask;

/* List of registered event types */
static struct list_head sclp_reg_list;

/* List of queued requests */
static struct list_head sclp_req_queue;
/* sccb for unconditional read */
static struct sclp_req sclp_read_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
/* sccb for write mask sccb */
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
/* Timer for init mask retries. */
static struct timer_list retry_timer;

/* Timer for busy retries. */
static struct timer_list sclp_busy_timer;

static volatile unsigned long sclp_status = 0;
/* some status flags */
#define SCLP_INIT		0
#define SCLP_RUNNING		1
#define SCLP_READING		2
#define SCLP_SHUTDOWN		3
#define SCLP_INIT_POLL_INTERVAL	1
#define SCLP_BUSY_POLL_INTERVAL	1
#define SCLP_COMMAND_INITIATED	0
#define SCLP_BUSY		2
#define SCLP_NOT_OPERATIONAL	3
/*
 * Assembler instruction for Service Call
 */
static int
__service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	/*
	 *  Mnemonic:	SERVC	Rx, Ry	[RRE]
	 *
	 *  Rx: SCLP command word
	 *  Ry: address of SCCB
	 */
	__asm__ __volatile__(
		"   .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"   ipm	  %0\n"
		"   srl	  %0,28"
		: "=&d" (cc)
		: "d" (command), "a" (__pa(sccb))
		: "cc", "memory" );
	/*
	 * cc == 0: Service Call successfully initiated
	 * cc == 2: SCLP busy, new Service Call not initiated,
	 *          new SCCB unequal to old SCCB
	 * cc == 3: SCLP function not operational
	 */
	if (cc == SCLP_NOT_OPERATIONAL)
		return -EIO;
	if (cc == SCLP_BUSY)
		return -EBUSY;
	return 0;
}
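/*
 * Note on the inline assembly above: "ipm" inserts the current program
 * mask, which carries the condition code left behind by the service call,
 * into the top bits of the result register, and "srl ...,28" shifts the
 * two condition-code bits down so that cc arrives in the C variable as a
 * plain integer 0-3.
 */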
static void
sclp_start_request(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	/* quick exit if sclp is already in use */
	if (test_bit(SCLP_RUNNING, &sclp_status)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	/* Try to start requests from the request queue. */
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __service_call(req->command, req->sccb);
		if (rc == 0) {
			/* Successfully started request. */
			req->status = SCLP_REQ_RUNNING;
			/* Request active. Set running indication. */
			set_bit(SCLP_RUNNING, &sclp_status);
			break;
		}
		if (rc == -EBUSY) {
			/*
			 * SCLP is busy but no request is running.
			 * Set up a timer to retry later.
			 */
			if (!timer_pending(&sclp_busy_timer) ||
			    !mod_timer(&sclp_busy_timer,
				       jiffies + SCLP_BUSY_POLL_INTERVAL*HZ)) {
				sclp_busy_timer.function =
					(void *) sclp_start_request;
				sclp_busy_timer.expires =
					jiffies + SCLP_BUSY_POLL_INTERVAL*HZ;
				add_timer(&sclp_busy_timer);
			}
			break;
		}
		/* Request failed. */
		req->status = SCLP_REQ_FAILED;
		list_del(&req->list);
		if (req->callback != NULL) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
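/*
 * Note on the busy-retry idiom above: mod_timer() returns 0 when the
 * timer was not pending, so "!timer_pending() || !mod_timer()" reads as
 * "if no retry is scheduled yet, initialize and add the timer"; a retry
 * that is already pending is simply pushed back by mod_timer() without
 * being queued a second time.
 */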
static int
sclp_process_evbufs(struct sccb_header *sccb)
{
	int result;
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *t;

	spin_lock_irqsave(&sclp_lock, flags);
	evbuf = (struct evbuf_header *) (sccb + 1);
	result = 0;
	while ((addr_t) evbuf < (addr_t) sccb + sccb->length) {
		/* check registered event */
		t = NULL;
		list_for_each(l, &sclp_reg_list) {
			t = list_entry(l, struct sclp_register, list);
			if (t->receive_mask & (1 << (32 - evbuf->type))) {
				if (t->receiver_fn != NULL) {
					spin_unlock_irqrestore(&sclp_lock,
							       flags);
					t->receiver_fn(evbuf);
					spin_lock_irqsave(&sclp_lock, flags);
				}
				break;
			} else
				t = NULL;
		}
		/* Check for unrequested event buffer */
		if (t == NULL)
			result = -ENOSYS;
		evbuf = (struct evbuf_header *)
				((addr_t) evbuf + evbuf->length);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return result;
}
char *
sclp_error_message(u16 rc)
{
	static struct {
		u16 code; char *msg;
	} sclp_errors[] = {
		{ 0x0000, "No response code stored (machine malfunction)" },
		{ 0x0020, "Normal Completion" },
		{ 0x0040, "SCLP equipment check" },
		{ 0x0100, "SCCB boundary violation" },
		{ 0x01f0, "Invalid command" },
		{ 0x0220, "Normal Completion; suppressed buffers pending" },
		{ 0x0300, "Insufficient SCCB length" },
		{ 0x0340, "Contained SCLP equipment check" },
		{ 0x05f0, "Target resource in improper state" },
		{ 0x40f0, "Invalid function code/not installed" },
		{ 0x60f0, "No buffers stored" },
		{ 0x62f0, "No buffers stored; suppressed buffers pending" },
		{ 0x70f0, "Invalid selection mask" },
		{ 0x71f0, "Event buffer exceeds available space" },
		{ 0x72f0, "Inconsistent lengths" },
		{ 0x73f0, "Event buffer syntax error" }
	};
	int i;

	for (i = 0; i < sizeof(sclp_errors)/sizeof(sclp_errors[0]); i++)
		if (rc == sclp_errors[i].code)
			return sclp_errors[i].msg;
	return "Invalid response code";
}
/*
 * postprocessing of unconditional read service call
 */
static void
sclp_unconditional_read_cb(struct sclp_req *read_req, void *data)
{
	struct sccb_header *sccb;

	sccb = read_req->sccb;
	if (sccb->response_code == 0x0020 ||
	    sccb->response_code == 0x0220) {
		if (sclp_process_evbufs(sccb) != 0)
			printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
			       "unconditional read: "
			       "unrequested event buffer received.\n");
	}
	if (sccb->response_code != 0x0020)
		printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
		       "unconditional read: %s (response code=0x%x).\n",
		       sclp_error_message(sccb->response_code),
		       sccb->response_code);
	clear_bit(SCLP_READING, &sclp_status);
}
/*
 * Function to queue Read Event Data/Unconditional Read
 */
static void
__sclp_unconditional_read(void)
{
	struct sccb_header *sccb;
	struct sclp_req *read_req;

	/*
	 * Don't try to initiate Unconditional Read if we are not able to
	 * receive anything
	 */
	if (sclp_receive_mask == 0)
		return;
	/* Don't try reading if a read is already outstanding */
	if (test_and_set_bit(SCLP_READING, &sclp_status))
		return;
	/* Initialize read sccb */
	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;	/* unconditional read */
	sccb->control_mask[2] = 0x80;	/* variable length response */
	/* Initialize request structure */
	read_req = &sclp_read_req;
	read_req->command = SCLP_CMDW_READDATA;
	read_req->status = SCLP_REQ_QUEUED;
	read_req->callback = sclp_unconditional_read_cb;
	read_req->sccb = sccb;
	/* Add read request to the head of queue */
	list_add(&read_req->list, &sclp_req_queue);
}
/* Bit masks to interpret external interruption parameter contents. */
#define EXT_INT_SCCB_MASK		0xfffffff8
#define EXT_INT_STATECHANGE_PENDING	0x00000002
#define EXT_INT_EVBUF_PENDING		0x00000001
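/*
 * Example decoding (illustrative value): an external interruption
 * parameter of 0x12345679 splits into a finished SCCB address of
 * 0x12345678 (EXT_INT_SCCB_MASK keeps bits 0-28) and low bits 0x1,
 * meaning an event buffer is pending but no state change is.
 */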
/*
 * Handler for service-signal external interruptions
 */
static void
sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
{
	u32 ext_int_param, finished_sccb, evbuf_pending;
	struct list_head *l;
	struct sclp_req *req, *tmp;

	spin_lock(&sclp_lock);
	/*
	 * Only process interrupt if sclp is initialized.
	 * This avoids strange effects for a pending request
	 * from before the last re-ipl.
	 */
	if (!test_bit(SCLP_INIT, &sclp_status)) {
		/* Now clear the running bit */
		clear_bit(SCLP_RUNNING, &sclp_status);
		spin_unlock(&sclp_lock);
		return;
	}
	ext_int_param = S390_lowcore.ext_params;
	finished_sccb = ext_int_param & EXT_INT_SCCB_MASK;
	evbuf_pending = ext_int_param & (EXT_INT_EVBUF_PENDING |
					 EXT_INT_STATECHANGE_PENDING);
	req = NULL;
	if (finished_sccb != 0U) {
		list_for_each(l, &sclp_req_queue) {
			tmp = list_entry(l, struct sclp_req, list);
			if (finished_sccb == (u32)(addr_t) tmp->sccb) {
				list_del(&tmp->list);
				req = tmp;
				break;
			}
		}
	}
	spin_unlock(&sclp_lock);
	/* Perform callback */
	if (req != NULL) {
		req->status = SCLP_REQ_DONE;
		if (req->callback != NULL)
			req->callback(req, req->callback_data);
	}
	spin_lock(&sclp_lock);
	/* Head queue a read sccb if an event buffer is pending */
	if (evbuf_pending)
		__sclp_unconditional_read();
	/* Now clear the running bit if SCLP indicated a finished SCCB */
	if (finished_sccb != 0U)
		clear_bit(SCLP_RUNNING, &sclp_status);
	spin_unlock(&sclp_lock);
	/* and start next request on the queue */
	sclp_start_request();
}
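/*
 * Summary of the interrupt path above: match the finished SCCB address
 * against the request queue and complete that request via its callback,
 * head-queue an unconditional read if the interruption parameter flags
 * a pending event buffer or state change, clear SCLP_RUNNING for the
 * finished SCCB, and finally restart the queue via sclp_start_request().
 */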
/*
 * Wait synchronously for external interrupt of sclp. We may not receive
 * any other external interrupt, so we disable all other external interrupts
 * in control register 0.
 */
void
sclp_sync_wait(void)
{
	unsigned long psw_mask;
	unsigned long cr0, cr0_sync;

	/* Prevent BH from executing. */
	local_bh_disable();
	/*
	 * save cr0
	 * enable service signal external interruption (cr0.22)
	 * disable cr0.20-21, cr0.25, cr0.27, cr0.30-31
	 * don't touch any other bit in cr0
	 */
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);

	/* enable external interruptions (PSW-mask.7) */
	asm volatile ("STOSM 0(%1),0x01"
		      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");

	/* wait until ISR signals receipt of interrupt */
	while (test_bit(SCLP_RUNNING, &sclp_status)) {
		barrier();
		cpu_relax();
	}

	/* disable external interruptions */
	asm volatile ("SSM 0(%0)"
		      : : "a" (&psw_mask) : "memory");

	/* restore cr0 */
	__ctl_load(cr0, 0, 0);
	__local_bh_enable();
}
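/*
 * Note on the cr0 arithmetic above (bits counted from the MSB as in the
 * architecture): ORing 0x00000200 sets bit 22, the service-signal
 * subclass mask, while ANDing 0xFFFFF3AC clears bits 20-21, 25, 27 and
 * 30-31, i.e. all other external-interruption subclasses, so the wait
 * loop can only be interrupted by the SCLP service signal.
 */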
/*
 * Queue an SCLP request. Request will immediately be processed if queue is
 * empty.
 */
void
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;

	if (!test_bit(SCLP_INIT, &sclp_status)) {
		req->status = SCLP_REQ_FAILED;
		if (req->callback != NULL)
			req->callback(req, req->callback_data);
		return;
	}
	spin_lock_irqsave(&sclp_lock, flags);
	/* queue the request */
	req->status = SCLP_REQ_QUEUED;
	list_add_tail(&req->list, &sclp_req_queue);
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* try to start the first request on the queue */
	sclp_start_request();
}
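/*
 * Illustrative sketch (hypothetical caller, not part of this driver):
 * an event driver embeds a struct sclp_req in its own state, fills in
 * command, sccb and callback, and hands it to sclp_add_request();
 * completion is signalled asynchronously through the callback.
 *
 *	static void my_callback(struct sclp_req *req, void *data)
 *	{
 *		// check ((struct sccb_header *) req->sccb)->response_code
 *	}
 *
 *	req->command = SCLP_CMDW_WRITEDATA;	// assumed command word
 *	req->sccb = sccb;
 *	req->status = SCLP_REQ_QUEUED;
 *	req->callback = my_callback;
 *	req->callback_data = NULL;
 *	sclp_add_request(req);
 */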
/* state change notification */
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));
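/*
 * Reading a state change buffer: each validity_* bit announces that the
 * corresponding mask field below carries current data; sclp_state_change()
 * below consumes only the receive and send masks, and only when the
 * respective validity bit is set.
 */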
static void
__sclp_notify_state_change(void)
{
	struct list_head *l;
	struct sclp_register *t;
	sccb_mask_t receive_mask, send_mask;

	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		receive_mask = t->receive_mask & sclp_receive_mask;
		send_mask = t->send_mask & sclp_send_mask;
		if (t->sclp_receive_mask != receive_mask ||
		    t->sclp_send_mask != send_mask) {
			t->sclp_receive_mask = receive_mask;
			t->sclp_send_mask = send_mask;
			if (t->state_change_fn != NULL)
				t->state_change_fn(t);
		}
	}
}
static void
sclp_state_change(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	spin_lock_irqsave(&sclp_lock, flags);
	scbuf = (struct sclp_statechangebuf *) evbuf;

	if (scbuf->validity_sclp_receive_mask) {
		if (scbuf->mask_length != sizeof(sccb_mask_t))
			printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
			       "state change event with mask length %i\n",
			       scbuf->mask_length);
		else
			/* set new receive mask */
			sclp_receive_mask = scbuf->sclp_receive_mask;
	}
	if (scbuf->validity_sclp_send_mask) {
		if (scbuf->mask_length != sizeof(sccb_mask_t))
			printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
			       "state change event with mask length %i\n",
			       scbuf->mask_length);
		else
			/* set new send mask */
			sclp_send_mask = scbuf->sclp_send_mask;
	}
	__sclp_notify_state_change();
	spin_unlock_irqrestore(&sclp_lock, flags);
}
static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change
};

/*
 * SCLP quiesce event handler
 */
#ifdef CONFIG_SMP
static void
do_load_quiesce_psw(void * __unused)
{
	psw_t quiesce_psw;
	static atomic_t cpuid = ATOMIC_INIT(-1);
	__u32 status;
	int i;

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
		signal_processor(smp_processor_id(), sigp_stop);
	/* Wait for all other cpus to enter stopped state */
	i = 1;
	while (i < NR_CPUS) {
		if (!cpu_online(i)) {
			i++;
			continue;
		}
		switch (signal_processor_ps(&status, 0, i, sigp_sense)) {
		case sigp_order_code_accepted:
		case sigp_status_stored:
			/* Check for stopped and check stop state */
			if (status & 0x50)
				i++;
			break;
		case sigp_not_operational:
			i++;
			break;
		default:
			break;
		}
	}
	/* Quiesce the last cpu with the special psw */
	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
	quiesce_psw.addr = 0xfff;
	__load_psw(quiesce_psw);
}

static void
do_machine_quiesce(void)
{
	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
#else
static void
do_machine_quiesce(void)
{
	psw_t quiesce_psw;

	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
	quiesce_psw.addr = 0xfff;
	__load_psw(quiesce_psw);
}
#endif
extern void ctrl_alt_del(void);

static void
sclp_quiesce(struct evbuf_header *evbuf)
{
	/*
	 * We got a "shutdown" request.
	 * Add a call to an appropriate "shutdown" routine here. This
	 * routine should set all PSWs to 'disabled-wait', 'stopped'
	 * or 'check-stopped' - except 1 PSW which needs to carry a
	 * special bit pattern called 'quiesce PSW'.
	 */
	_machine_restart = (void *) do_machine_quiesce;
	_machine_halt = do_machine_quiesce;
	_machine_power_off = do_machine_quiesce;
	ctrl_alt_del();
}

static struct sclp_register sclp_quiesce_event = {
	.receive_mask = EvTyp_SigQuiesce_Mask,
	.receiver_fn = sclp_quiesce
};
/* initialisation of SCLP */
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));
static void sclp_init_mask_retry(unsigned long);

static int
sclp_init_mask(void)
{
	unsigned long flags;
	struct init_sccb *sccb;
	struct sclp_req *req;
	struct list_head *l;
	struct sclp_register *t;
	int rc;

	sccb = (struct init_sccb *) sclp_init_sccb;
	/* stick the request structure to the end of the init sccb page */
	req = (struct sclp_req *) ((addr_t) sccb + PAGE_SIZE) - 1;

	/* SCLP setup concerning receiving and sending Event Buffers */
	req->command = SCLP_CMDW_WRITEMASK;
	req->status = SCLP_REQ_QUEUED;
	req->callback = NULL;

	/* setup sccb for writemask command */
	memset(sccb, 0, sizeof(struct init_sccb));
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	/* copy in the sccb mask of the registered event types */
	spin_lock_irqsave(&sclp_lock, flags);
	if (!test_bit(SCLP_SHUTDOWN, &sclp_status)) {
		list_for_each(l, &sclp_reg_list) {
			t = list_entry(l, struct sclp_register, list);
			sccb->receive_mask |= t->receive_mask;
			sccb->send_mask |= t->send_mask;
		}
	}
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
	if (test_bit(SCLP_INIT, &sclp_status)) {
		/* add request to sclp queue */
		list_add_tail(&req->list, &sclp_req_queue);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* and start if SCLP is idle */
		sclp_start_request();
		/* now wait for completion */
		while (req->status != SCLP_REQ_DONE &&
		       req->status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
	} else {
		/*
		 * Special case for the very first write mask command.
		 * The interrupt handler is not removing request from
		 * the request queue and doesn't call callbacks yet
		 * because there might be a pending old interrupt
		 * after a Re-IPL. We have to receive and ignore it.
		 */
		do {
			rc = __service_call(req->command, req->sccb);
			if (rc == 0)
				set_bit(SCLP_RUNNING, &sclp_status);
			spin_unlock_irqrestore(&sclp_lock, flags);
			if (rc == -EIO)
				return -ENOSYS;
			sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
		} while (rc == -EBUSY);
	}
	if (sccb->header.response_code != 0x0020) {
		/* WRITEMASK failed - we cannot rely on receiving a state
		   change event, so initially, polling is the only alternative
		   for us to ever become operational. */
		if (!test_bit(SCLP_SHUTDOWN, &sclp_status) &&
		    (!timer_pending(&retry_timer) ||
		     !mod_timer(&retry_timer,
				jiffies + SCLP_INIT_POLL_INTERVAL*HZ))) {
			retry_timer.function = sclp_init_mask_retry;
			retry_timer.data = 0;
			retry_timer.expires = jiffies +
				SCLP_INIT_POLL_INTERVAL*HZ;
			add_timer(&retry_timer);
		}
	} else {
		sclp_receive_mask = sccb->sclp_receive_mask;
		sclp_send_mask = sccb->sclp_send_mask;
		__sclp_notify_state_change();
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return 0;
}

static void
sclp_init_mask_retry(unsigned long data)
{
	sclp_init_mask();
}
/* Reboot event handler - reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	unsigned long flags;

	/* Note: need spinlock to maintain atomicity when accessing global
	 * variables. */
	spin_lock_irqsave(&sclp_lock, flags);
	set_bit(SCLP_SHUTDOWN, &sclp_status);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};
/*
 * sclp setup function. Called early (no kmalloc!) from sclp_console_init().
 */
static int
sclp_init(void)
{
	int rc;

	if (test_bit(SCLP_INIT, &sclp_status))
		/* Already initialized. */
		return 0;

	spin_lock_init(&sclp_lock);
	INIT_LIST_HEAD(&sclp_req_queue);

	/* init event list */
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	list_add(&sclp_quiesce_event.list, &sclp_reg_list);

	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		return rc;

	/*
	 * request the 0x2401 external interrupt
	 * The sclp driver is initialized early (before kmalloc works). We
	 * need to use register_early_external_interrupt.
	 */
	if (register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					      &ext_int_info_hwc) != 0)
		return -EBUSY;

	/* enable service-signal external interruptions,
	 * Control Register 0 bit 22 := 1
	 * (besides PSW bit 7 must be set to 1 sometimes for external
	 * interruptions)
	 */
	ctl_set_bit(0, 9);

	init_timer(&retry_timer);
	init_timer(&sclp_busy_timer);
	/* do the initial write event mask */
	rc = sclp_init_mask();
	if (rc == 0) {
		/* Ok, now everything is setup right. */
		set_bit(SCLP_INIT, &sclp_status);
		return 0;
	}

	/* The sclp_init_mask failed. SCLP is broken, unregister and exit. */
	unregister_early_external_interrupt(0x2401, sclp_interrupt_handler,
					    &ext_int_info_hwc);
	unregister_reboot_notifier(&sclp_reboot_notifier);
	return rc;
}
/*
 * Register the SCLP event listener identified by REG. Return 0 on success.
 * Some error codes and their meaning:
 *
 *  -ENODEV = SCLP interface is not supported on this machine
 *  -EBUSY  = there is already a listener registered for the requested
 *            event type
 *  -EIO    = SCLP interface is currently not operational
 */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	struct list_head *l;
	struct sclp_register *t;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;

	if (!test_bit(SCLP_INIT, &sclp_status))
		sclp_init();
	spin_lock_irqsave(&sclp_lock, flags);
	/* check already registered event masks for collisions */
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		if (t->receive_mask & reg->receive_mask ||
		    t->send_mask & reg->send_mask) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			return -EBUSY;
		}
	}
	/*
	 * set present mask to 0 to trigger state change
	 * callback in sclp_init_mask
	 */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask();
	return 0;
}
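/*
 * Illustrative sketch (hypothetical listener, not part of this driver);
 * the event mask name is assumed to be one of the EvTyp_*_Mask values
 * from sclp.h:
 *
 *	static void my_receiver(struct evbuf_header *evbuf)
 *	{
 *		// consume the event buffer, then mark it processed
 *	}
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EvTyp_Msg_Mask,	// assumed event mask
 *		.receiver_fn = my_receiver
 *	};
 *
 *	if (sclp_register(&my_listener) != 0)
 *		; // no SCLP, mask collision, or interface not operational
 */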
/*
 * Unregister the SCLP event listener identified by REG.
 */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask();
}
#define SCLP_EVBUF_PROCESSED	0x80

/*
 * Traverse array of event buffers contained in SCCB and remove all buffers
 * with a set "processed" flag. Return the number of unprocessed buffers.
 */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & SCLP_EVBUF_PROCESSED) {
			sccb->length -= evbuf->length;
			memcpy((void *) evbuf,
			       (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}

	return unprocessed;
}
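/*
 * Worked example (illustrative lengths): for three event buffers of 16,
 * 24 and 16 bytes where only the middle one carries SCLP_EVBUF_PROCESSED,
 * the loop keeps the first buffer, copies the trailing 16 bytes over the
 * processed 24-byte buffer, shrinks sccb->length by 24 and returns 2
 * unprocessed buffers.
 */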
module_init(sclp_init);

EXPORT_SYMBOL(sclp_add_request);
EXPORT_SYMBOL(sclp_sync_wait);
EXPORT_SYMBOL(sclp_register);
EXPORT_SYMBOL(sclp_unregister);
EXPORT_SYMBOL(sclp_error_message);
);