/*
 * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
 * and the service processor on IBM pSeries servers. On these servers, there
 * are no serial ports under the OS's control, and sometimes there is no other
 * console available either. However, the service processor has two standard
 * serial ports, so this over-complicated protocol allows the OS to control
 * those ports by proxy.
 *
 * Besides data, the protocol supports the reading/writing of the serial
 * port's DTR line, and the reading of the CD line. This is to allow the OS to
 * control a modem attached to the service processor's serial port. Note that
 * the OS cannot change the speed of the port through this protocol.
 */
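/*
 * A note on framing, as implied by the code below: every HVSI packet starts
 * with a common header (struct hvsi_header) carrying a type, a length that
 * covers header plus payload, and a sequence number. The type byte tells
 * data (VS_DATA_PACKET_HEADER), control (VS_CONTROL_PACKET_HEADER), query
 * (VS_QUERY_PACKET_HEADER) and query response (VS_QUERY_RESPONSE_PACKET_HEADER)
 * packets apart, and incoming bytes are reassembled into whole packets before
 * being dispatched on that type.
 */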
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#define HVSI_MAJOR	229
#define HVSI_MINOR	128
#define MAX_NR_HVSI_CONSOLES	4

#define HVSI_TIMEOUT	(5*HZ)
#define HVSI_VERSION	1
#define HVSI_MAX_PACKET	256
#define HVSI_MAX_READ	16
#define HVSI_MAX_OUTGOING_DATA	12
/*
 * we pass data via two 8-byte registers, so we would like our char arrays
 * properly aligned for those loads.
 */
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
	struct delayed_work writer;
	struct work_struct handshaker;
	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
	wait_queue_head_t stateq; /* woken when HVSI state changes */
	struct tty_struct *tty;
	uint8_t throttle_buf[128];
	uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
	/* inbuf is for packet reassembly. leave a little room for leftovers. */
	uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ];
	atomic_t seqno; /* HVSI packet sequence number */
	uint8_t state;  /* HVSI protocol state */
#ifdef CONFIG_MAGIC_SYSRQ
#endif /* CONFIG_MAGIC_SYSRQ */
static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES];

static struct tty_driver *hvsi_driver;
static int hvsi_count;
static int (*hvsi_wait)(struct hvsi_struct *hp, int state);
enum HVSI_PROTOCOL_STATE {
	HVSI_CLOSED,
	HVSI_WAIT_FOR_VER_RESPONSE,
	HVSI_WAIT_FOR_VER_QUERY,
	HVSI_OPEN,
	HVSI_WAIT_FOR_MCTRL_RESPONSE,
	HVSI_FSP_DIED,
};
#define HVSI_CONSOLE 0x1

#define VS_DATA_PACKET_HEADER           0xff
#define VS_CONTROL_PACKET_HEADER        0xfe
#define VS_QUERY_PACKET_HEADER          0xfd
#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc

#define VSV_SET_MODEM_CTL    1 /* to service processor only */
#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
#define VSV_CLOSE_PROTOCOL   3

#define VSV_SEND_VERSION_NUMBER   1
#define VSV_SEND_MODEM_CTL_STATUS 2

/* yes, these masks are not consecutive. */
#define HVSI_TSDTR 0x01
#define HVSI_TSCD  0x20
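/*
 * How the verbs and masks above are combined, judging from the senders and
 * receivers below: hvsi_set_mctrl() sends a control packet with verb
 * VSV_SET_MODEM_CTL, mask HVSI_TSDTR and (optionally) word HVSI_TSDTR to
 * raise or drop DTR; a query with verb VSV_SEND_MODEM_CTL_STATUS is answered
 * by a response whose mctrl_word carries HVSI_TSDTR and HVSI_TSCD, which
 * hvsi_recv_response() translates into TIOCM_DTR/TIOCM_CD.
 */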
} __attribute__((packed));

	uint8_t data[HVSI_MAX_OUTGOING_DATA];
} __attribute__((packed));

struct hvsi_control {
	/* optional depending on verb: */
} __attribute__((packed));

} __attribute__((packed));

struct hvsi_query_response {
	uint16_t query_seqno;
} __attribute__((packed));
static inline int is_console(struct hvsi_struct *hp)
{
	return hp->flags & HVSI_CONSOLE;
}
static inline int is_open(struct hvsi_struct *hp)
{
	/* if we're waiting for an mctrl then we're already open */
	return (hp->state == HVSI_OPEN)
			|| (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
}
static inline void print_state(struct hvsi_struct *hp)
{
	static const char *state_names[] = {
		"HVSI_CLOSED",
		"HVSI_WAIT_FOR_VER_RESPONSE",
		"HVSI_WAIT_FOR_VER_QUERY",
		"HVSI_OPEN",
		"HVSI_WAIT_FOR_MCTRL_RESPONSE",
		"HVSI_FSP_DIED",
	};
	const char *name = "(unknown)";

	if (hp->state < ARRAY_SIZE(state_names))
		name = state_names[hp->state];

	pr_debug("hvsi%i: state = %s\n", hp->index, name);
}
static inline void __set_state(struct hvsi_struct *hp, int state)
{
	hp->state = state;
	print_state(hp);
	wake_up_all(&hp->stateq);
}
static inline void set_state(struct hvsi_struct *hp, int state)
{
	unsigned long flags;

	spin_lock_irqsave(&hp->lock, flags);
	__set_state(hp, state);
	spin_unlock_irqrestore(&hp->lock, flags);
}
static inline int len_packet(const uint8_t *packet)
{
	return (int)((struct hvsi_header *)packet)->len;
}
static inline int is_header(const uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;
	return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER;
}
static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
{
	if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
		return 0; /* don't even have the packet header */

	if (hp->inbuf_end < (packet + len_packet(packet)))
		return 0; /* don't have the rest of the packet */

	return 1;
}
/* shift remaining bytes in packetbuf down */
static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
{
	int remaining = (int)(hp->inbuf_end - read_to);

	pr_debug("%s: %i chars remain\n", __func__, remaining);

	if (read_to != hp->inbuf)
		memmove(hp->inbuf, read_to, remaining);

	hp->inbuf_end = hp->inbuf + remaining;
}
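/*
 * Reassembly scheme used by the functions above: hvsi_load_chunk() reads up
 * to HVSI_MAX_READ bytes at a time into hp->inbuf, got_packet()/len_packet()
 * decide when a complete packet has accumulated, and compact_inbuf() slides
 * any trailing partial packet back to the start of inbuf so the next read can
 * append to it. inbuf is sized HVSI_MAX_PACKET + HVSI_MAX_READ for exactly
 * that leftover room.
 */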
#ifdef DEBUG
#define dbg_dump_packet(packet) dump_packet(packet)
#define dbg_dump_hex(data, len) dump_hex(data, len)
#else
#define dbg_dump_packet(packet) do { } while (0)
#define dbg_dump_hex(data, len) do { } while (0)
#endif
static void dump_hex(const uint8_t *data, int len)
{
	int i;

	for (i=0; i < len; i++)
		printk("%.2x", data[i]);

	printk("\n");
	for (i=0; i < len; i++) {
		if (isprint(data[i]))
			printk("%c", data[i]);
		else
			printk(".");
	}
	printk("\n");
}
static void dump_packet(uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;

	printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len,
		header->seqno);

	dump_hex(packet, header->len);
}
static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
{
	int got;

	got = hvc_get_chars(hp->vtermno, buf, count);

	return got;
}
static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
	struct tty_struct **to_hangup, struct hvsi_struct **to_handshake)
{
	struct hvsi_control *header = (struct hvsi_control *)packet;

	switch (header->verb) {
		case VSV_MODEM_CTL_UPDATE:
			if ((header->word & HVSI_TSCD) == 0) {
				/* CD went away; no more connection */
				pr_debug("hvsi%i: CD dropped\n", hp->index);
				hp->mctrl &= ~TIOCM_CD;
				/* If userland hasn't done an open(2) yet, hp->tty is NULL. */
				if (hp->tty && !(hp->tty->flags & CLOCAL))
					*to_hangup = hp->tty;
			}
			break;
		case VSV_CLOSE_PROTOCOL:
			pr_debug("hvsi%i: service processor came back\n", hp->index);
			if (hp->state != HVSI_CLOSED) {
				*to_handshake = hp;
			}
			break;
		default:
			printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ",
				hp->index);
			dump_packet(packet);
			break;
	}
}
static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;

	switch (hp->state) {
		case HVSI_WAIT_FOR_VER_RESPONSE:
			__set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
			break;
		case HVSI_WAIT_FOR_MCTRL_RESPONSE:
			if (resp->u.mctrl_word & HVSI_TSDTR)
				hp->mctrl |= TIOCM_DTR;
			if (resp->u.mctrl_word & HVSI_TSCD)
				hp->mctrl |= TIOCM_CD;
			__set_state(hp, HVSI_OPEN);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
			dump_packet(packet);
			break;
	}
}
/* respond to service processor's version query */
static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
{
	struct hvsi_query_response packet __ALIGNED__;
	int wrote;

	packet.type = VS_QUERY_RESPONSE_PACKET_HEADER;
	packet.len = sizeof(struct hvsi_query_response);
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.verb = VSV_SEND_VERSION_NUMBER;
	packet.u.version = HVSI_VERSION;
	packet.query_seqno = query_seqno+1;

	pr_debug("%s: sending %i bytes\n", __func__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
			hp->index);
		return -EIO;
	}

	return 0;
}
static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query *query = (struct hvsi_query *)packet;

	switch (hp->state) {
		case HVSI_WAIT_FOR_VER_QUERY:
			hvsi_version_respond(hp, query->seqno);
			__set_state(hp, HVSI_OPEN);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
			dump_packet(packet);
			break;
	}
}
static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
	int i;

	for (i=0; i < len; i++) {
		char c = buf[i];
#ifdef CONFIG_MAGIC_SYSRQ
		if (c == '\0') {
			hp->sysrq = 1;
			continue;
		} else if (hp->sysrq) {
			handle_sysrq(c, hp->tty);
			hp->sysrq = 0;
			continue;
		}
#endif /* CONFIG_MAGIC_SYSRQ */
		tty_insert_flip_char(hp->tty, c, 0);
	}
}
/*
 * We could get 252 bytes of data at once here. But the tty layer only
 * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow
 * it. Accordingly we won't send more than 128 bytes at a time to the flip
 * buffer, which will give the tty buffer a chance to throttle us. Should the
 * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be
 * changed accordingly.
 */
#define TTY_THRESHOLD_THROTTLE 128
static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
		const uint8_t *packet)
{
	const struct hvsi_header *header = (const struct hvsi_header *)packet;
	const uint8_t *data = packet + sizeof(struct hvsi_header);
	int datalen = header->len - sizeof(struct hvsi_header);
	int overflow = datalen - TTY_THRESHOLD_THROTTLE;

	pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data);

	if (overflow > 0) {
		pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
		datalen = TTY_THRESHOLD_THROTTLE;
	}

	hvsi_insert_chars(hp, data, datalen);

	if (overflow > 0) {
		/*
		 * we still have more data to deliver, so we need to save off the
		 * overflow and send it later
		 */
		pr_debug("%s: deferring overflow\n", __func__);
		memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
		hp->n_throttle = overflow;
	}

	return hp->tty;
}
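/*
 * Overflow handling, as wired up elsewhere in this file: whatever part of a
 * data packet does not fit under TTY_THRESHOLD_THROTTLE is parked in
 * hp->throttle_buf with its length in hp->n_throttle, and is delivered later
 * by hvsi_send_overflow() -- either from hvsi_interrupt() once the tty is no
 * longer throttled, or from hvsi_unthrottle().
 */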
/*
 * Returns true/false indicating data successfully read from hypervisor.
 * Used both to get packets for tty connections and to advance the state
 * machine during console handshaking (in which case tty = NULL and we ignore
 * incoming data).
 */
static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
		struct tty_struct **hangup, struct hvsi_struct **handshake)
{
	uint8_t *packet = hp->inbuf;
	int chunklen;

	*flip = NULL;
	*hangup = NULL;
	*handshake = NULL;

	chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
	if (chunklen == 0) {
		pr_debug("%s: 0-length read\n", __func__);
		return 0;
	}

	pr_debug("%s: got %i bytes\n", __func__, chunklen);
	dbg_dump_hex(hp->inbuf_end, chunklen);

	hp->inbuf_end += chunklen;

	/* handle all completed packets */
	while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
		struct hvsi_header *header = (struct hvsi_header *)packet;

		if (!is_header(packet)) {
			printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
			/* skip bytes until we find a header or run out of data */
			while ((packet < hp->inbuf_end) && (!is_header(packet)))
				packet++;
			continue;
		}

		pr_debug("%s: handling %i-byte packet\n", __func__,
				len_packet(packet));
		dbg_dump_packet(packet);

		switch (header->type) {
			case VS_DATA_PACKET_HEADER:
				if (hp->tty == NULL)
					break; /* no tty buffer to put data in */
				*flip = hvsi_recv_data(hp, packet);
				break;
			case VS_CONTROL_PACKET_HEADER:
				hvsi_recv_control(hp, packet, hangup, handshake);
				break;
			case VS_QUERY_RESPONSE_PACKET_HEADER:
				hvsi_recv_response(hp, packet);
				break;
			case VS_QUERY_PACKET_HEADER:
				hvsi_recv_query(hp, packet);
				break;
			default:
				printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n",
						hp->index, header->type);
				break;
		}

		packet += len_packet(packet);

		if (*hangup || *handshake) {
			pr_debug("%s: hangup or handshake\n", __func__);
			/*
			 * we need to send the hangup now before receiving any more data.
			 * If we get "data, hangup, data", we can't deliver the second
			 * data before the hangup.
			 */
			break;
		}
	}

	compact_inbuf(hp, packet);

	return 1;
}
static void hvsi_send_overflow(struct hvsi_struct *hp)
{
	pr_debug("%s: delivering %i bytes overflow\n", __func__,
			hp->n_throttle);

	hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
	hp->n_throttle = 0;
}
/*
 * must get all pending data because we only get an irq on empty->non-empty
 * transition
 */
static irqreturn_t hvsi_interrupt(int irq, void *arg)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
	struct tty_struct *flip;
	struct tty_struct *hangup;
	struct hvsi_struct *handshake;
	unsigned long flags;
	int again = 1;

	pr_debug("%s\n", __func__);

	while (again) {
		spin_lock_irqsave(&hp->lock, flags);
		again = hvsi_load_chunk(hp, &flip, &hangup, &handshake);
		spin_unlock_irqrestore(&hp->lock, flags);

		/*
		 * we have to call tty_flip_buffer_push() and tty_hangup() outside our
		 * spinlock. But we also have to keep going until we've read all the
		 * pending data.
		 */

		if (flip) {
			/* there was data put in the tty flip buffer */
			tty_flip_buffer_push(flip);
			flip = NULL;
		}

		if (hangup)
			tty_hangup(hangup);

		if (handshake) {
			pr_debug("hvsi%i: attempting re-handshake\n", handshake->index);
			schedule_work(&handshake->handshaker);
		}
	}

	spin_lock_irqsave(&hp->lock, flags);
	if (hp->tty && hp->n_throttle
			&& (!test_bit(TTY_THROTTLED, &hp->tty->flags))) {
		/* we weren't hung up and we weren't throttled, so we can deliver the
		 * rest now */
		flip = hp->tty;
		hvsi_send_overflow(hp);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	if (flip)
		tty_flip_buffer_push(flip);

	return IRQ_HANDLED;
}
/* for boot console, before the irq handler is running */
static int __init poll_for_state(struct hvsi_struct *hp, int state)
{
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	for (;;) {
		hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */

		if (hp->state == state)
			return 0;

		if (time_after(jiffies, end_jiffies))
			return -EIO;
	}
}
/* wait for irq handler to change our state */
static int wait_for_state(struct hvsi_struct *hp, int state)
{
	int ret = 0;

	if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
		ret = -EIO;

	return ret;
}
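/*
 * hvsi_wait is a function pointer so the same handshake code works in both
 * worlds: hvsi_console_init() points it at poll_for_state() while there are
 * no interrupts yet, and hvsi_init() switches it to wait_for_state() once the
 * irq handler has been requested.
 */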
static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
{
	struct hvsi_query packet __ALIGNED__;
	int wrote;

	packet.type = VS_QUERY_PACKET_HEADER;
	packet.len = sizeof(struct hvsi_query);
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.verb = verb;

	pr_debug("%s: sending %i bytes\n", __func__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
			wrote);
		return -EIO;
	}

	return 0;
}
static int hvsi_get_mctrl(struct hvsi_struct *hp)
{
	int ret;

	set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
	hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
		set_state(hp, HVSI_OPEN);
		return ret;
	}

	pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);

	return 0;
}
/* note that we can only set DTR */
static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
{
	struct hvsi_control packet __ALIGNED__;
	int wrote;

	packet.type = VS_CONTROL_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.len = sizeof(struct hvsi_control);
	packet.verb = VSV_SET_MODEM_CTL;
	packet.mask = HVSI_TSDTR;

	if (mctrl & TIOCM_DTR)
		packet.word = HVSI_TSDTR;

	pr_debug("%s: sending %i bytes\n", __func__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
		return -EIO;
	}

	return 0;
}
static void hvsi_drain_input(struct hvsi_struct *hp)
{
	uint8_t buf[HVSI_MAX_READ] __ALIGNED__;
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	while (time_before(jiffies, end_jiffies))
		if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
			break;
}
static int hvsi_handshake(struct hvsi_struct *hp)
{
	int ret;

	/*
	 * We could have a CLOSE or other data waiting for us before we even try
	 * to open; try to throw it all away so we don't get confused. (CLOSE
	 * is the first message sent up the pipe when the FSP comes online. We
	 * need to distinguish between "it came up a while ago and we're the first
	 * user" and "it was just reset before it saw our handshake packet".)
	 */
	hvsi_drain_input(hp);

	set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
	ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
		return ret;
	}

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0)
		return ret;

	return 0;
}
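/*
 * Handshake sequence, as driven by the state machine above: drain any stale
 * input, send a VSV_SEND_VERSION_NUMBER query and enter
 * HVSI_WAIT_FOR_VER_RESPONSE; the response moves us to HVSI_WAIT_FOR_VER_QUERY
 * (hvsi_recv_response), the FSP's own version query is answered by
 * hvsi_version_respond() (hvsi_recv_query), and the port is then HVSI_OPEN.
 */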
static void hvsi_handshaker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, handshaker);

	if (hvsi_handshake(hp) >= 0)
		return;

	printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
	if (is_console(hp)) {
		/*
		 * ttys will re-attempt the handshake via hvsi_open, but
		 * the console will not.
		 */
		printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
	}
}
static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
{
	struct hvsi_data packet __ALIGNED__;
	int ret;

	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);

	packet.type = VS_DATA_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.len = count + sizeof(struct hvsi_header);
	memcpy(&packet.data, buf, count);

	ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (ret == packet.len) {
		/* return the number of chars written, not the packet length */
		return count;
	}
	return ret; /* return any errors */
}
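/*
 * Note on sizing: a single outgoing data packet carries at most
 * HVSI_MAX_OUTGOING_DATA (12) payload bytes -- hence the BUG_ON above and the
 * HVSI_MAX_OUTGOING_DATA-sized staging buffer in hvsi_console_print() below.
 */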
static void hvsi_close_protocol(struct hvsi_struct *hp)
{
	struct hvsi_control packet __ALIGNED__;

	packet.type = VS_CONTROL_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.verb = VSV_CLOSE_PROTOCOL;

	pr_debug("%s: sending %i bytes\n", __func__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);

	hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
}
static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp;
	unsigned long flags;
	int line = tty->index;
	int ret;

	pr_debug("%s\n", __func__);

	if (line < 0 || line >= hvsi_count)
		return -ENODEV;
	hp = &hvsi_ports[line];

	tty->driver_data = hp;

	if (hp->state == HVSI_FSP_DIED)
		return -EIO;

	spin_lock_irqsave(&hp->lock, flags);
	hp->tty = tty;
	hp->count++;
	atomic_set(&hp->seqno, 0);
	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
	spin_unlock_irqrestore(&hp->lock, flags);

	if (is_console(hp))
		return 0; /* this has already been handshaked as the console */

	ret = hvsi_handshake(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
		return ret;
	}

	ret = hvsi_get_mctrl(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
		return ret;
	}

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
		return ret;
	}

	return 0;
}
/* wait for hvsi_write_worker to empty hp->outbuf */
static void hvsi_flush_output(struct hvsi_struct *hp)
{
	wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);

	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
	cancel_delayed_work(&hp->writer);
	flush_scheduled_work();

	/*
	 * it's also possible that our timeout expired and hvsi_write_worker
	 * didn't manage to push outbuf. poof.
	 */
	hp->n_outbuf = 0;
}
static void hvsi_close(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	if (tty_hung_up_p(filp))
		return;

	spin_lock_irqsave(&hp->lock, flags);

	if (--hp->count == 0) {
		hp->tty = NULL;
		hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */

		/* only close down connection if it is not the console */
		if (!is_console(hp)) {
			h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
			__set_state(hp, HVSI_CLOSED);
			/*
			 * any data delivered to the tty layer after this will be
			 * discarded (except for XON/XOFF)
			 */

			spin_unlock_irqrestore(&hp->lock, flags);

			/* let any existing irq handlers finish. no more will start. */
			synchronize_irq(hp->virq);

			/* hvsi_write_worker will re-schedule until outbuf is empty. */
			hvsi_flush_output(hp);

			/* tell FSP to stop sending data */
			hvsi_close_protocol(hp);

			/*
			 * drain anything FSP is still in the middle of sending, and let
			 * hvsi_handshake drain the rest on the next open.
			 */
			hvsi_drain_input(hp);

			spin_lock_irqsave(&hp->lock, flags);
		}
	} else if (hp->count < 0)
		printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
			hp - hvsi_ports, hp->count);

	spin_unlock_irqrestore(&hp->lock, flags);
}
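/*
 * Teardown order in hvsi_close() above, for the non-console case: stop new
 * irqs (VIO_IRQ_DISABLE) and mark the port HVSI_CLOSED, let in-flight
 * handlers finish (synchronize_irq), flush anything still queued in outbuf,
 * tell the FSP to stop sending (VSV_CLOSE_PROTOCOL), then drain whatever the
 * FSP was already in the middle of transmitting.
 */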
static void hvsi_hangup(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	spin_lock_irqsave(&hp->lock, flags);
	hp->count = 0;
	hp->n_outbuf = 0;
	hp->tty = NULL;
	spin_unlock_irqrestore(&hp->lock, flags);
}
/* called with hp->lock held */
static void hvsi_push(struct hvsi_struct *hp)
{
	int n;

	if (hp->n_outbuf <= 0)
		return;

	n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
	if (n > 0) {
		/* success */
		pr_debug("%s: wrote %i chars\n", __func__, n);
		hp->n_outbuf = 0;
	} else if (n == -EIO) {
		__set_state(hp, HVSI_FSP_DIED);
		printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
	}
}
/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
static void hvsi_write_worker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, writer.work);
	unsigned long flags;
	static long start_j = 0;

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/*
		 * We could have a non-open connection if the service processor died
		 * while we were busily scheduling ourselves. In that case, it could
		 * be minutes before the service processor comes back, so only try
		 * again once a second.
		 */
		schedule_delayed_work(&hp->writer, HZ);
		goto out;
	}

	if (start_j == 0)
		start_j = jiffies;

	hvsi_push(hp);
	if (hp->n_outbuf > 0)
		schedule_delayed_work(&hp->writer, 10);
	else {
		pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
				jiffies - start_j);
		start_j = 0;
		wake_up_all(&hp->emptyq);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);
}
static int hvsi_write_room(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return N_OUTBUF - hp->n_outbuf;
}
static int hvsi_chars_in_buffer(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return hp->n_outbuf;
}
static int hvsi_write(struct tty_struct *tty,
		const unsigned char *buf, int count)
{
	struct hvsi_struct *hp = tty->driver_data;
	const char *source = buf;
	unsigned long flags;
	int total = 0;
	int origcount = count;

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/* we're either closing or not yet open; don't accept data */
		pr_debug("%s: not open\n", __func__);
		goto out;
	}

	/*
	 * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
	 * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls
	 * will see there is no room in outbuf and return.
	 */
	while ((count > 0) && (hvsi_write_room(hp->tty) > 0)) {
		int chunksize = min(count, hvsi_write_room(hp->tty));

		BUG_ON(hp->n_outbuf < 0);
		memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
		hp->n_outbuf += chunksize;

		total += chunksize;
		source += chunksize;
		count -= chunksize;
		hvsi_push(hp);
	}

	if (hp->n_outbuf > 0) {
		/*
		 * we weren't able to write it all to the hypervisor.
		 * schedule another push attempt.
		 */
		schedule_delayed_work(&hp->writer, 10);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);

	if (total != origcount)
		pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
			total);

	return total;
}
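/*
 * Write path summary: hvsi_write() copies as much as fits into hp->outbuf
 * under hp->lock and pushes it; anything left over is retried by
 * hvsi_write_worker(), which keeps rescheduling itself (every 10 jiffies, or
 * once a second while the connection is not open) and wakes emptyq when the
 * buffer finally drains -- which is what hvsi_flush_output() waits for.
 */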
/*
 * I have never seen throttle or unthrottle called, so this little throttle
 * buffering scheme may or may not work.
 */
static void hvsi_throttle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	pr_debug("%s\n", __func__);

	h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
}
static void hvsi_unthrottle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	spin_lock_irqsave(&hp->lock, flags);
	if (hp->n_throttle) {
		hvsi_send_overflow(hp);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	tty_flip_buffer_push(hp->tty);

	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}
static int hvsi_tiocmget(struct tty_struct *tty, struct file *file)
{
	struct hvsi_struct *hp = tty->driver_data;

	return hp->mctrl;
}
static int hvsi_tiocmset(struct tty_struct *tty, struct file *file,
		unsigned int set, unsigned int clear)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;
	uint16_t new_mctrl;

	/* we can only alter DTR */
	clear &= TIOCM_DTR;
	set &= TIOCM_DTR;

	spin_lock_irqsave(&hp->lock, flags);

	new_mctrl = (hp->mctrl & ~clear) | set;

	if (hp->mctrl != new_mctrl) {
		hvsi_set_mctrl(hp, new_mctrl);
		hp->mctrl = new_mctrl;
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	return 0;
}
static const struct tty_operations hvsi_ops = {
	.open = hvsi_open,
	.close = hvsi_close,
	.write = hvsi_write,
	.hangup = hvsi_hangup,
	.write_room = hvsi_write_room,
	.chars_in_buffer = hvsi_chars_in_buffer,
	.throttle = hvsi_throttle,
	.unthrottle = hvsi_unthrottle,
	.tiocmget = hvsi_tiocmget,
	.tiocmset = hvsi_tiocmset,
};
static int __init hvsi_init(void)
{
	int i;

	hvsi_driver = alloc_tty_driver(hvsi_count);
	if (!hvsi_driver)
		return -ENOMEM;

	hvsi_driver->owner = THIS_MODULE;
	hvsi_driver->driver_name = "hvsi";
	hvsi_driver->name = "hvsi";
	hvsi_driver->major = HVSI_MAJOR;
	hvsi_driver->minor_start = HVSI_MINOR;
	hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM;
	hvsi_driver->init_termios = tty_std_termios;
	hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
	hvsi_driver->init_termios.c_ispeed = 9600;
	hvsi_driver->init_termios.c_ospeed = 9600;
	hvsi_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hvsi_driver, &hvsi_ops);

	for (i=0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];
		int ret;

		ret = request_irq(hp->virq, hvsi_interrupt, IRQF_DISABLED, "hvsi", hp);
		if (ret)
			printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
				hp->virq, ret);
	}
	hvsi_wait = wait_for_state; /* irqs active now */

	if (tty_register_driver(hvsi_driver))
		panic("Couldn't register hvsi console driver\n");

	printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);

	return 0;
}
device_initcall(hvsi_init);
/***** console (not tty) code: *****/
static void hvsi_console_print(struct console *console, const char *buf,
		unsigned int count)
{
	struct hvsi_struct *hp = &hvsi_ports[console->index];
	char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__;
	unsigned int i = 0, n = 0;
	int ret, donecr = 0;

	/*
	 * ugh, we have to translate LF -> CRLF ourselves, in place.
	 * copied from hvc_console.c:
	 */
	while (count > 0 || i > 0) {
		if (count > 0 && i < sizeof(c)) {
			if (buf[n] == '\n' && !donecr) {
				c[i++] = '\r';
				donecr = 1;
			} else {
				c[i++] = buf[n++];
				donecr = 0;
				--count;
			}
		} else {
			/* c[] is full or we are out of input: flush it */
			ret = hvsi_put_chars(hp, c, i);
			if (ret <= 0)
				break; /* give up on error */
			i = 0;
		}
	}
}
static struct tty_driver *hvsi_console_device(struct console *console,
	int *index)
{
	*index = console->index;
	return hvsi_driver;
}
static int __init hvsi_console_setup(struct console *console, char *options)
{
	struct hvsi_struct *hp = &hvsi_ports[console->index];
	int ret;

	if (console->index < 0 || console->index >= hvsi_count)
		return -1;

	/* give the FSP a chance to change the baud rate when we re-open */
	hvsi_close_protocol(hp);

	ret = hvsi_handshake(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_get_mctrl(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0)
		return ret;

	hp->flags |= HVSI_CONSOLE;

	return 0;
}
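/*
 * Console vs. tty bring-up: the console path above handshakes at
 * console_initcall time using the polling hvsi_wait, and marks the port with
 * HVSI_CONSOLE; hvsi_open() sees that flag and skips its own handshake, since
 * the port has already been handshaked as the console.
 */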
static struct console hvsi_con_driver = {
	.name	= "hvsi",
	.write	= hvsi_console_print,
	.device	= hvsi_console_device,
	.setup	= hvsi_console_setup,
	.flags	= CON_PRINTBUFFER,
};
static int __init hvsi_console_init(void)
{
	struct device_node *vty;

	hvsi_wait = poll_for_state; /* no irqs yet; must poll */

	/* search device tree for vty nodes */
	for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol");
			vty != NULL;
			vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
		struct hvsi_struct *hp;
		const uint32_t *vtermno, *irq;

		vtermno = of_get_property(vty, "reg", NULL);
		irq = of_get_property(vty, "interrupts", NULL);
		if (!vtermno || !irq)
			continue;

		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
			of_node_put(vty);
			break;
		}

		hp = &hvsi_ports[hvsi_count];
		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
		INIT_WORK(&hp->handshaker, hvsi_handshaker);
		init_waitqueue_head(&hp->emptyq);
		init_waitqueue_head(&hp->stateq);
		spin_lock_init(&hp->lock);
		hp->index = hvsi_count;
		hp->inbuf_end = hp->inbuf;
		hp->state = HVSI_CLOSED;
		hp->vtermno = *vtermno;
		hp->virq = irq_create_mapping(NULL, irq[0]);
		if (hp->virq == NO_IRQ) {
			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
				__func__, irq[0]);
			continue;
		}

		hvsi_count++;
	}

	register_console(&hvsi_con_driver);

	return 0;
}
console_initcall(hvsi_console_init);