/*
 * Copyright 1999 Digi International (www.digi.com)
 *     James Puzzo <jamesp at digi dot com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE.  See the GNU General Public License for more details.
 *
 * Handle the file operations required for the "network" devices.
 * Includes those functions required to register the "net" devices
 * in "/proc".
 */
36 #include <linux/module.h>
37 #include <linux/proc_fs.h>
38 #include <linux/types.h>
39 #include <linux/string.h>
40 #include <linux/tty.h>
41 #include <linux/tty_flip.h>
42 #include <linux/spinlock.h>
43 #include <linux/poll.h>
44 #include <linux/sched.h>
45 #include <linux/ratelimit.h>
46 #include <asm/unaligned.h>
48 #define MYFLIPLEN TBUF_MAX
50 #include "dgrp_common.h"
52 #define TTY_FLIPBUF_SIZE 512
53 #define DEVICE_NAME_SIZE 50
56 * Generic helper function declarations
58 static void parity_scan(struct ch_struct
*ch
, unsigned char *cbuf
,
59 unsigned char *fbuf
, int *len
);
62 * File operation declarations
64 static int dgrp_net_open(struct inode
*, struct file
*);
65 static int dgrp_net_release(struct inode
*, struct file
*);
66 static ssize_t
dgrp_net_read(struct file
*, char __user
*, size_t, loff_t
*);
67 static ssize_t
dgrp_net_write(struct file
*, const char __user
*, size_t,
69 static long dgrp_net_ioctl(struct file
*file
, unsigned int cmd
,
71 static unsigned int dgrp_net_select(struct file
*file
,
72 struct poll_table_struct
*table
);
74 static const struct file_operations net_ops
= {
76 .read
= dgrp_net_read
,
77 .write
= dgrp_net_write
,
78 .poll
= dgrp_net_select
,
79 .unlocked_ioctl
= dgrp_net_ioctl
,
80 .open
= dgrp_net_open
,
81 .release
= dgrp_net_release
,
84 static struct inode_operations net_inode_ops
= {
85 .permission
= dgrp_inode_permission
88 void dgrp_register_net_hook(struct proc_dir_entry
*de
)
90 struct nd_struct
*node
= de
->data
;
92 de
->proc_iops
= &net_inode_ops
;
93 de
->proc_fops
= &net_ops
;
95 sema_init(&node
->nd_net_semaphore
, 1);
96 node
->nd_state
= NS_CLOSED
;
97 dgrp_create_node_class_sysfs_files(node
);
102 * dgrp_dump() -- prints memory for debugging purposes.
103 * @mem: Memory location which should be printed to the console
104 * @len: Number of bytes to be dumped
106 static void dgrp_dump(u8
*mem
, int len
)
110 pr_debug("dgrp dump length = %d, data = ", len
);
111 for (i
= 0; i
< len
; ++i
)
112 pr_debug("%.2x ", mem
[i
]);
117 * dgrp_read_data_block() -- Read a data block
118 * @ch: struct ch_struct *
120 * @flipbuf_size: size of flipbuf
122 static void dgrp_read_data_block(struct ch_struct
*ch
, u8
*flipbuf
,
128 if (flipbuf_size
<= 0)
131 t
= RBUF_MAX
- ch
->ch_rout
;
135 memcpy(flipbuf
, ch
->ch_rbuf
+ ch
->ch_rout
, t
);
141 memcpy(flipbuf
, ch
->ch_rbuf
+ ch
->ch_rout
, n
);
148 * dgrp_input() -- send data to the line disipline
149 * @ch: pointer to channel struct
151 * Copys the rbuf to the flipbuf and sends to line discipline.
152 * Sends input buffer data to the line discipline.
155 static void dgrp_input(struct ch_struct
*ch
)
157 struct nd_struct
*nd
;
158 struct tty_struct
*tty
;
174 spin_lock_irqsave(&nd
->nd_lock
, lock_flags
);
176 myflipbuf
= nd
->nd_inputbuf
;
177 myflipflagbuf
= nd
->nd_inputflagbuf
;
179 if (!ch
->ch_open_count
) {
180 ch
->ch_rout
= ch
->ch_rin
;
184 if (ch
->ch_tun
.un_flag
& UN_CLOSING
) {
185 ch
->ch_rout
= ch
->ch_rin
;
189 tty
= (ch
->ch_tun
).un_tty
;
192 if (!tty
|| tty
->magic
!= TTY_MAGIC
) {
193 ch
->ch_rout
= ch
->ch_rin
;
197 tty_count
= tty
->count
;
199 ch
->ch_rout
= ch
->ch_rin
;
203 if (tty
->closing
|| test_bit(TTY_CLOSING
, &tty
->flags
)) {
204 ch
->ch_rout
= ch
->ch_rin
;
208 spin_unlock_irqrestore(&nd
->nd_lock
, lock_flags
);
210 /* data_len should be the number of chars that we read in */
211 data_len
= (ch
->ch_rin
- ch
->ch_rout
) & RBUF_MASK
;
213 /* len is the amount of data we are going to transfer here */
214 len
= tty_buffer_request_room(tty
, data_len
);
216 /* Check DPA flow control */
217 if ((nd
->nd_dpa_debug
) &&
218 (nd
->nd_dpa_flag
& DPA_WAIT_SPACE
) &&
219 (nd
->nd_dpa_port
== MINOR(tty_devnum(ch
->ch_tun
.un_tty
))))
222 if ((len
) && !(ch
->ch_flag
& CH_RXSTOP
)) {
224 dgrp_read_data_block(ch
, myflipbuf
, len
);
226 if (I_PARMRK(tty
) || I_BRKINT(tty
) || I_INPCK(tty
))
227 parity_scan(ch
, myflipbuf
, myflipflagbuf
, &len
);
229 memset(myflipflagbuf
, TTY_NORMAL
, len
);
231 if ((nd
->nd_dpa_debug
) &&
232 (nd
->nd_dpa_port
== PORT_NUM(MINOR(tty_devnum(tty
)))))
233 dgrp_dpa_data(nd
, 1, myflipbuf
, len
);
235 tty_insert_flip_string_flags(tty
, myflipbuf
,
237 tty_flip_buffer_push(tty
);
239 ch
->ch_rxcount
+= len
;
243 * Wake up any sleepers (maybe dgrp close) that might be waiting
244 * for a channel flag state change.
246 wake_up_interruptible(&ch
->ch_flag_wait
);
250 spin_unlock_irqrestore(&nd
->nd_lock
, lock_flags
);
257 * Loop to inspect each single character or 0xFF escape.
259 * if PARMRK & ~DOSMODE:
260 * 0xFF 0xFF Normal 0xFF character, escaped
261 * to eliminate confusion.
262 * 0xFF 0x00 0x00 Break
263 * 0xFF 0x00 CC Error character CC.
264 * CC Normal character CC.
266 * if PARMRK & DOSMODE:
267 * 0xFF 0x18 0x00 Break
268 * 0xFF 0x08 0x00 Framing Error
269 * 0xFF 0x04 0x00 Parity error
270 * 0xFF 0x0C 0x00 Both Framing and Parity error
272 * TODO: do we need to do the XMODEM, XOFF, XON, XANY processing??
275 static void parity_scan(struct ch_struct
*ch
, unsigned char *cbuf
,
276 unsigned char *fbuf
, int *len
)
280 int DOS
= ((ch
->ch_iflag
& IF_DOSMODE
) == 0 ? 0 : 1);
281 unsigned char *cout
; /* character buffer */
282 unsigned char *fout
; /* flag buffer */
294 switch (ch
->ch_pscan_state
) {
296 /* reset to sanity and fall through */
297 ch
->ch_pscan_state
= 0 ;
301 if (c
== 0xff) /* delete this character from stream */
302 ch
->ch_pscan_state
= 1;
305 *fout
++ = TTY_NORMAL
;
313 /* doubled ff, transform to single ff */
315 *fout
++ = TTY_NORMAL
;
317 ch
->ch_pscan_state
= 0;
319 /* save value examination in next state */
320 ch
->ch_pscan_savechar
= c
;
321 ch
->ch_pscan_state
= 2;
326 /* third character of ff sequence */
329 if (ch
->ch_pscan_savechar
& 0x10)
331 else if (ch
->ch_pscan_savechar
& 0x08)
335 * either marked as a parity error,
336 * indeterminate, or not in DOSMODE
337 * call it a parity error
339 *fout
++ = TTY_PARITY
;
341 /* case FF XX ?? where XX is not 00 */
342 if (ch
->ch_pscan_savechar
& 0xff) {
343 /* this should not happen */
344 pr_info("%s: parity_scan: error unexpected byte\n",
346 *fout
++ = TTY_PARITY
;
348 /* case FF 00 XX where XX is not 00 */
350 *fout
++ = TTY_PARITY
;
357 ch
->ch_pscan_state
= 0;
365 * dgrp_net_idle() -- Idle the network connection
366 * @nd: pointer to node structure to idle
368 static void dgrp_net_idle(struct nd_struct
*nd
)
370 struct ch_struct
*ch
;
375 nd
->nd_state
= NS_IDLE
;
378 for (i
= nd
->nd_seq_out
; ; i
= (i
+ 1) & SEQ_MASK
) {
379 if (!nd
->nd_seq_wait
[i
]) {
380 nd
->nd_seq_wait
[i
] = 0;
381 wake_up_interruptible(&nd
->nd_seq_wque
[i
]);
384 if (i
== nd
->nd_seq_in
)
388 nd
->nd_seq_out
= nd
->nd_seq_in
;
393 nd
->nd_tx_module
= 0x10;
394 nd
->nd_rx_module
= 0x00;
396 for (i
= 0, ch
= nd
->nd_chan
; i
< CHAN_MAX
; i
++, ch
++) {
397 ch
->ch_state
= CS_IDLE
;
400 ch
->ch_otype_waiting
= 0;
405 * Increase the number of channels, waking up any
406 * threads that might be waiting for the channels
409 static void increase_channel_count(struct nd_struct
*nd
, int n
)
411 struct ch_struct
*ch
;
412 struct device
*classp
;
413 char name
[DEVICE_NAME_SIZE
];
418 for (i
= nd
->nd_chan_count
; i
< n
; ++i
) {
419 ch
= nd
->nd_chan
+ i
;
421 /* FIXME: return a useful error instead! */
422 buf
= kmalloc(TBUF_MAX
, GFP_KERNEL
);
427 pr_info_ratelimited("%s - ch_tbuf was not NULL\n",
432 buf
= kmalloc(RBUF_MAX
, GFP_KERNEL
);
437 pr_info("%s - ch_rbuf was not NULL\n",
441 classp
= tty_port_register_device(&ch
->port
,
442 nd
->nd_serial_ttdriver
, i
,
445 ch
->ch_tun
.un_sysfs
= classp
;
446 snprintf(name
, DEVICE_NAME_SIZE
, "tty_%d", i
);
448 dgrp_create_tty_sysfs(&ch
->ch_tun
, classp
);
449 ret
= sysfs_create_link(&nd
->nd_class_dev
->kobj
,
450 &classp
->kobj
, name
);
452 /* NOTE: We don't support "cu" devices anymore,
453 * so you will notice we don't register them
455 if (dgrp_register_prdevices
) {
456 classp
= tty_register_device(nd
->nd_xprint_ttdriver
,
458 ch
->ch_pun
.un_sysfs
= classp
;
459 snprintf(name
, DEVICE_NAME_SIZE
, "pr_%d", i
);
461 dgrp_create_tty_sysfs(&ch
->ch_pun
, classp
);
462 ret
= sysfs_create_link(&nd
->nd_class_dev
->kobj
,
463 &classp
->kobj
, name
);
466 nd
->nd_chan_count
= i
+ 1;
467 wake_up_interruptible(&ch
->ch_flag_wait
);
472 * Decrease the number of channels, and wake up any threads that might
473 * be waiting on the channels that vanished.
475 static void decrease_channel_count(struct nd_struct
*nd
, int n
)
477 struct ch_struct
*ch
;
478 char name
[DEVICE_NAME_SIZE
];
481 for (i
= nd
->nd_chan_count
- 1; i
>= n
; --i
) {
482 ch
= nd
->nd_chan
+ i
;
485 * Make any open ports inoperative.
487 ch
->ch_state
= CS_IDLE
;
490 ch
->ch_otype_waiting
= 0;
493 * Only "HANGUP" if we care about carrier
494 * transitions and we are already open.
496 if (ch
->ch_open_count
!= 0) {
497 ch
->ch_flag
|= CH_HANGUP
;
502 * Unlike the CH_HANGUP flag above, use another
503 * flag to indicate to the RealPort state machine
504 * that this port has disappeared.
506 if (ch
->ch_open_count
!= 0)
507 ch
->ch_flag
|= CH_PORT_GONE
;
509 wake_up_interruptible(&ch
->ch_flag_wait
);
511 nd
->nd_chan_count
= i
;
519 nd
->nd_chan_count
= i
;
521 dgrp_remove_tty_sysfs(ch
->ch_tun
.un_sysfs
);
522 snprintf(name
, DEVICE_NAME_SIZE
, "tty_%d", i
);
523 sysfs_remove_link(&nd
->nd_class_dev
->kobj
, name
);
524 tty_unregister_device(nd
->nd_serial_ttdriver
, i
);
527 * NOTE: We don't support "cu" devices anymore, so don't
528 * unregister them here anymore.
531 if (dgrp_register_prdevices
) {
532 dgrp_remove_tty_sysfs(ch
->ch_pun
.un_sysfs
);
533 snprintf(name
, DEVICE_NAME_SIZE
, "pr_%d", i
);
534 sysfs_remove_link(&nd
->nd_class_dev
->kobj
, name
);
535 tty_unregister_device(nd
->nd_xprint_ttdriver
, i
);
541 * dgrp_chan_count() -- Adjust the node channel count.
542 * @nd: pointer to a node structure
543 * @n: new value for channel count
545 * Adjusts the node channel count. If new ports have appeared, it tries
546 * to signal those processes that might have been waiting for ports to
547 * appear. If ports have disappeared it tries to signal those processes
548 * that might be hung waiting for a response for the now non-existant port.
550 static void dgrp_chan_count(struct nd_struct
*nd
, int n
)
552 if (n
== nd
->nd_chan_count
)
555 if (n
> nd
->nd_chan_count
)
556 increase_channel_count(nd
, n
);
558 if (n
< nd
->nd_chan_count
)
559 decrease_channel_count(nd
, n
);
563 * dgrp_monitor() -- send data to the device monitor queue
564 * @nd: pointer to a node structure
565 * @buf: data to copy to the monitoring buffer
566 * @len: number of bytes to transfer to the buffer
568 * Called by the net device routines to send data to the device
569 * monitor queue. If the device monitor buffer is too full to
570 * accept the data, it waits until the buffer is ready.
572 static void dgrp_monitor(struct nd_struct
*nd
, u8
*buf
, int len
)
581 down(&nd
->nd_mon_semaphore
);
584 * Loop while data remains.
586 while ((len
> 0) && (nd
->nd_mon_buf
)) {
588 * Determine the amount of available space left in the
589 * buffer. If there's none, wait until some appears.
592 n
= (nd
->nd_mon_out
- nd
->nd_mon_in
- 1) & MON_MASK
;
595 nd
->nd_mon_flag
|= MON_WAIT_SPACE
;
597 up(&nd
->nd_mon_semaphore
);
600 * Go to sleep waiting until the condition becomes true.
602 rtn
= wait_event_interruptible(nd
->nd_mon_wqueue
,
603 ((nd
->nd_mon_flag
& MON_WAIT_SPACE
) == 0));
605 /* FIXME: really ignore rtn? */
608 * We can't exit here if we receive a signal, since
609 * to do so would trash the debug stream.
612 down(&nd
->nd_mon_semaphore
);
618 * Copy as much data as will fit.
624 r
= MON_MAX
- nd
->nd_mon_in
;
627 memcpy(nd
->nd_mon_buf
+ nd
->nd_mon_in
, buf
, r
);
637 memcpy(nd
->nd_mon_buf
+ nd
->nd_mon_in
, buf
, n
);
644 if (nd
->nd_mon_in
>= MON_MAX
)
645 pr_info_ratelimited("%s - nd_mon_in (%i) >= MON_MAX\n",
646 __func__
, nd
->nd_mon_in
);
649 * Wakeup any thread waiting for data
652 if (nd
->nd_mon_flag
& MON_WAIT_DATA
) {
653 nd
->nd_mon_flag
&= ~MON_WAIT_DATA
;
654 wake_up_interruptible(&nd
->nd_mon_wqueue
);
659 * Release the monitor lock.
661 up(&nd
->nd_mon_semaphore
);
665 * dgrp_encode_time() -- Encodes rpdump time into a 4-byte quantity.
666 * @nd: pointer to a node structure
667 * @buf: destination buffer
669 * Encodes "rpdump" time into a 4-byte quantity. Time is measured since
672 static void dgrp_encode_time(struct nd_struct
*nd
, u8
*buf
)
677 * Convert time in HZ since open to time in milliseconds
680 t
= jiffies
- nd
->nd_mon_lbolt
;
681 t
= 1000 * (t
/ HZ
) + 1000 * (t
% HZ
) / HZ
;
683 put_unaligned_be32((uint
)(t
& 0xffffffff), buf
);
689 * dgrp_monitor_message() -- Builds a rpdump style message.
690 * @nd: pointer to a node structure
691 * @message: destination buffer
693 static void dgrp_monitor_message(struct nd_struct
*nd
, char *message
)
698 header
[0] = RPDUMP_MESSAGE
;
700 dgrp_encode_time(nd
, header
+ 1);
704 put_unaligned_be16(n
, header
+ 5);
706 dgrp_monitor(nd
, header
, sizeof(header
));
707 dgrp_monitor(nd
, (u8
*) message
, n
);
713 * dgrp_monitor_reset() -- Note a reset in the monitoring buffer.
714 * @nd: pointer to a node structure
716 static void dgrp_monitor_reset(struct nd_struct
*nd
)
720 header
[0] = RPDUMP_RESET
;
722 dgrp_encode_time(nd
, header
+ 1);
724 dgrp_monitor(nd
, header
, sizeof(header
));
728 * dgrp_monitor_data() -- builds a monitor data packet
729 * @nd: pointer to a node structure
730 * @type: type of message to be logged
731 * @buf: data to be logged
732 * @size: number of bytes in the buffer
734 static void dgrp_monitor_data(struct nd_struct
*nd
, u8 type
, u8
*buf
, int size
)
740 dgrp_encode_time(nd
, header
+ 1);
742 put_unaligned_be16(size
, header
+ 5);
744 dgrp_monitor(nd
, header
, sizeof(header
));
745 dgrp_monitor(nd
, buf
, size
);
748 static int alloc_nd_buffers(struct nd_struct
*nd
)
752 nd
->nd_writebuf
= NULL
;
753 nd
->nd_inputbuf
= NULL
;
754 nd
->nd_inputflagbuf
= NULL
;
757 * Allocate the network read/write buffer.
759 nd
->nd_iobuf
= kzalloc(UIO_MAX
+ 10, GFP_KERNEL
);
764 * Allocate a buffer for doing the copy from user space to
765 * kernel space in the write routines.
767 nd
->nd_writebuf
= kzalloc(WRITEBUFLEN
, GFP_KERNEL
);
768 if (!nd
->nd_writebuf
)
772 * Allocate a buffer for doing the copy from kernel space to
773 * tty buffer space in the read routines.
775 nd
->nd_inputbuf
= kzalloc(MYFLIPLEN
, GFP_KERNEL
);
776 if (!nd
->nd_inputbuf
)
780 * Allocate a buffer for doing the copy from kernel space to
781 * tty buffer space in the read routines.
783 nd
->nd_inputflagbuf
= kzalloc(MYFLIPLEN
, GFP_KERNEL
);
784 if (!nd
->nd_inputflagbuf
)
791 kfree(nd
->nd_writebuf
);
792 kfree(nd
->nd_inputbuf
);
793 kfree(nd
->nd_inputflagbuf
);
798 * dgrp_net_open() -- Open the NET device for a particular PortServer
800 static int dgrp_net_open(struct inode
*inode
, struct file
*file
)
802 struct nd_struct
*nd
;
803 struct proc_dir_entry
*de
;
807 rtn
= try_module_get(THIS_MODULE
);
811 if (!capable(CAP_SYS_ADMIN
)) {
817 * Make sure that the "private_data" field hasn't already been used.
819 if (file
->private_data
) {
825 * Get the node pointer, and fail if it doesn't exist.
833 nd
= (struct nd_struct
*) de
->data
;
839 file
->private_data
= (void *) nd
;
844 down(&nd
->nd_net_semaphore
);
846 if (nd
->nd_state
!= NS_CLOSED
) {
852 * Initialize the link speed parameters.
855 nd
->nd_link
.lk_fast_rate
= UIO_MAX
;
856 nd
->nd_link
.lk_slow_rate
= UIO_MAX
;
858 nd
->nd_link
.lk_fast_delay
= 1000;
859 nd
->nd_link
.lk_slow_delay
= 1000;
861 nd
->nd_link
.lk_header_size
= 46;
864 rtn
= alloc_nd_buffers(nd
);
869 * The port is now open, so move it to the IDLE state
873 nd
->nd_tx_time
= jiffies
;
876 * If the polling routing is not running, start it running here
878 spin_lock_irqsave(&dgrp_poll_data
.poll_lock
, lock_flags
);
880 if (!dgrp_poll_data
.node_active_count
) {
881 dgrp_poll_data
.node_active_count
= 2;
882 dgrp_poll_data
.timer
.expires
= jiffies
+
883 dgrp_poll_tick
* HZ
/ 1000;
884 add_timer(&dgrp_poll_data
.timer
);
887 spin_unlock_irqrestore(&dgrp_poll_data
.poll_lock
, lock_flags
);
889 dgrp_monitor_message(nd
, "Net Open");
893 * Release the NET lock.
895 up(&nd
->nd_net_semaphore
);
899 module_put(THIS_MODULE
);
904 /* dgrp_net_release() -- close the NET device for a particular PortServer */
905 static int dgrp_net_release(struct inode
*inode
, struct file
*file
)
907 struct nd_struct
*nd
;
910 nd
= (struct nd_struct
*)(file
->private_data
);
914 /* TODO : historical locking placeholder */
916 * In the HPUX version of the RealPort driver (which served as a basis
917 * for this driver) this locking code was used. Saved if ever we need
918 * to review the locking under Linux.
920 /* spinlock(&nd->nd_lock); */
926 down(&nd
->nd_net_semaphore
);
929 * Before "closing" the internal connection, make sure all
934 nd
->nd_state
= NS_CLOSED
;
938 * TODO ... must the wait queue be reset on close?
939 * should any pending waiters be reset?
940 * Let's decide to assert that the waitq is empty... and see
943 if (waitqueue_active(&nd
->nd_tx_waitq
))
944 pr_info("%s - expected waitqueue_active to be false\n",
952 /* TODO : historical locking placeholder */
954 * In the HPUX version of the RealPort driver (which served as a basis
955 * for this driver) this locking code was used. Saved if ever we need
956 * to review the locking under Linux.
958 /* spinunlock( &nd->nd_lock ); */
961 kfree(nd
->nd_writebuf
);
962 nd
->nd_writebuf
= NULL
;
964 kfree(nd
->nd_inputbuf
);
965 nd
->nd_inputbuf
= NULL
;
967 kfree(nd
->nd_inputflagbuf
);
968 nd
->nd_inputflagbuf
= NULL
;
970 /* TODO : historical locking placeholder */
972 * In the HPUX version of the RealPort driver (which served as a basis
973 * for this driver) this locking code was used. Saved if ever we need
974 * to review the locking under Linux.
976 /* spinlock(&nd->nd_lock); */
979 * Set the active port count to zero.
981 dgrp_chan_count(nd
, 0);
983 /* TODO : historical locking placeholder */
985 * In the HPUX version of the RealPort driver (which served as a basis
986 * for this driver) this locking code was used. Saved if ever we need
987 * to review the locking under Linux.
989 /* spinunlock(&nd->nd_lock); */
992 * Release the NET lock.
994 up(&nd
->nd_net_semaphore
);
997 * Cause the poller to stop scheduling itself if this is
998 * the last active node.
1000 spin_lock_irqsave(&dgrp_poll_data
.poll_lock
, lock_flags
);
1002 if (dgrp_poll_data
.node_active_count
== 2) {
1003 del_timer(&dgrp_poll_data
.timer
);
1004 dgrp_poll_data
.node_active_count
= 0;
1007 spin_unlock_irqrestore(&dgrp_poll_data
.poll_lock
, lock_flags
);
1009 down(&nd
->nd_net_semaphore
);
1011 dgrp_monitor_message(nd
, "Net Close");
1013 up(&nd
->nd_net_semaphore
);
1016 module_put(THIS_MODULE
);
1017 file
->private_data
= NULL
;
1021 /* used in dgrp_send to setup command header */
1022 static inline u8
*set_cmd_header(u8
*b
, u8 port
, u8 cmd
)
1024 *b
++ = 0xb0 + (port
& 0x0f);
1030 * dgrp_send() -- build a packet for transmission to the server
1031 * @nd: pointer to a node structure
1032 * @tmax: maximum bytes to transmit
1034 * returns number of bytes sent
1036 static int dgrp_send(struct nd_struct
*nd
, long tmax
)
1038 struct ch_struct
*ch
= nd
->nd_chan
;
1057 long wanted_sync_port
= -1;
1058 ushort tdata
[CHAN_MAX
];
1061 mbuf
= nd
->nd_iobuf
+ UIO_BASE
;
1064 send_sync
= nd
->nd_link
.lk_slow_rate
< UIO_MAX
;
1069 memset(tdata
, 0, sizeof(tdata
));
1073 * If there are any outstanding requests to be serviced,
1074 * service them here.
1076 if (nd
->nd_send
& NR_PASSWORD
) {
1079 * Send Password response.
1084 put_unaligned_be16(strlen(nd
->password
), b
+ 2);
1086 b
+= strlen(nd
->password
);
1087 nd
->nd_send
&= ~(NR_PASSWORD
);
1092 * Loop over all modules to generate commands, and determine
1093 * the amount of data queued for transmit.
1096 for (mod
= 0, port
= 0; port
< nd
->nd_chan_count
; mod
++) {
1098 * If this is not the current module, enter a module select
1099 * code in the buffer.
1102 if (mod
!= nd
->nd_tx_module
)
1106 * Loop to process one module.
1109 maxport
= port
+ 16;
1111 if (maxport
> nd
->nd_chan_count
)
1112 maxport
= nd
->nd_chan_count
;
1114 for (; port
< maxport
; port
++, ch
++) {
1116 * Switch based on channel state.
1119 switch (ch
->ch_state
) {
1121 * Send requests when the port is closed, and there
1122 * are no Open, Close or Cancel requests expected.
1127 * Wait until any open error code
1128 * has been delivered to all
1132 if (ch
->ch_open_error
) {
1133 if (ch
->ch_wait_count
[ch
->ch_otype
]) {
1138 ch
->ch_open_error
= 0;
1142 * Wait until the channel HANGUP flag is reset
1143 * before sending the first open. We can only
1144 * get to this state after a server disconnect.
1147 if ((ch
->ch_flag
& CH_HANGUP
) != 0)
1151 * If recovering from a TCP disconnect, or if
1152 * there is an immediate open pending, send an
1153 * Immediate Open request.
1155 if ((ch
->ch_flag
& CH_PORT_GONE
) ||
1156 ch
->ch_wait_count
[OTYPE_IMMEDIATE
] != 0) {
1157 b
= set_cmd_header(b
, port
, 10);
1160 ch
->ch_state
= CS_WAIT_OPEN
;
1161 ch
->ch_otype
= OTYPE_IMMEDIATE
;
1166 * If there is no Persistent or Incoming Open on the wait
1167 * list in the server, and a thread is waiting for a
1168 * Persistent or Incoming Open, send a Persistent or Incoming
1171 if (ch
->ch_otype_waiting
== 0) {
1172 if (ch
->ch_wait_count
[OTYPE_PERSISTENT
] != 0) {
1173 b
= set_cmd_header(b
, port
, 10);
1176 ch
->ch_state
= CS_WAIT_OPEN
;
1177 ch
->ch_otype
= OTYPE_PERSISTENT
;
1178 } else if (ch
->ch_wait_count
[OTYPE_INCOMING
] != 0) {
1179 b
= set_cmd_header(b
, port
, 10);
1182 ch
->ch_state
= CS_WAIT_OPEN
;
1183 ch
->ch_otype
= OTYPE_INCOMING
;
1189 * If a Persistent or Incoming Open is pending in
1190 * the server, but there is no longer an open
1191 * thread waiting for it, cancel the request.
1194 if (ch
->ch_wait_count
[ch
->ch_otype_waiting
] == 0) {
1195 b
= set_cmd_header(b
, port
, 10);
1198 ch
->ch_state
= CS_WAIT_CANCEL
;
1199 ch
->ch_otype
= ch
->ch_otype_waiting
;
1204 * Send port parameter queries.
1208 * Clear out all FEP state that might remain
1209 * from the last connection.
1212 ch
->ch_flag
|= CH_PARAM
;
1214 ch
->ch_flag
&= ~CH_RX_FLUSH
;
1250 /* Send Sequence Request */
1251 b
= set_cmd_header(b
, port
, 14);
1253 /* Configure Event Conditions Packet */
1254 b
= set_cmd_header(b
, port
, 42);
1255 put_unaligned_be16(0x02c0, b
);
1257 *b
++ = (DM_DTR
| DM_RTS
| DM_CTS
|
1258 DM_DSR
| DM_RI
| DM_CD
);
1260 /* Send Status Request */
1261 b
= set_cmd_header(b
, port
, 16);
1263 /* Send Buffer Request */
1264 b
= set_cmd_header(b
, port
, 20);
1266 /* Send Port Capability Request */
1267 b
= set_cmd_header(b
, port
, 22);
1269 ch
->ch_expect
= (RR_SEQUENCE
|
1274 ch
->ch_state
= CS_WAIT_QUERY
;
1276 /* Raise modem signals */
1277 b
= set_cmd_header(b
, port
, 44);
1279 if (ch
->ch_flag
& CH_PORT_GONE
)
1280 ch
->ch_s_mout
= ch
->ch_mout
;
1282 ch
->ch_s_mout
= ch
->ch_mout
= DM_DTR
| DM_RTS
;
1285 *b
++ = ch
->ch_s_mflow
= 0;
1286 *b
++ = ch
->ch_s_mctrl
= ch
->ch_mctrl
= 0;
1288 if (ch
->ch_flag
& CH_PORT_GONE
)
1289 ch
->ch_flag
&= ~CH_PORT_GONE
;
1294 * Handle normal open and ready mode.
1300 * If the port is not open, and there are no
1301 * no longer any ports requesting an open,
1302 * then close the port.
1305 if (ch
->ch_open_count
== 0 &&
1306 ch
->ch_wait_count
[ch
->ch_otype
] == 0) {
1311 * Process waiting input.
1313 * If there is no one to read it, discard the data.
1315 * Otherwise if we are not in fastcook mode, or if there is a
1316 * fastcook thread waiting for data, send the data to the
1319 if (ch
->ch_rin
!= ch
->ch_rout
) {
1320 if (ch
->ch_tun
.un_open_count
== 0 ||
1321 (ch
->ch_tun
.un_flag
& UN_CLOSING
) ||
1322 (ch
->ch_cflag
& CF_CREAD
) == 0) {
1323 ch
->ch_rout
= ch
->ch_rin
;
1324 } else if ((ch
->ch_flag
& CH_FAST_READ
) == 0 ||
1325 ch
->ch_inwait
!= 0) {
1328 if (ch
->ch_rin
!= ch
->ch_rout
)
1334 * Handle receive flush, and changes to
1335 * server port parameters.
1338 if (ch
->ch_flag
& (CH_RX_FLUSH
| CH_PARAM
)) {
1340 * If we are in receive flush mode,
1341 * and enough data has gone by, reset
1342 * receive flush mode.
1344 if (ch
->ch_flag
& CH_RX_FLUSH
) {
1345 if (((ch
->ch_flush_seq
- nd
->nd_seq_out
) & SEQ_MASK
) >
1346 ((nd
->nd_seq_in
- nd
->nd_seq_out
) & SEQ_MASK
))
1347 ch
->ch_flag
&= ~CH_RX_FLUSH
;
1356 if (ch
->ch_s_tmax
!= ch
->ch_tmax
||
1357 ch
->ch_s_ttime
!= ch
->ch_ttime
) {
1358 b
= set_cmd_header(b
, port
, 48);
1360 ch
->ch_s_tmax
= ch
->ch_tmax
;
1361 ch
->ch_s_ttime
= ch
->ch_ttime
;
1363 put_unaligned_be16(ch
->ch_s_tmax
,
1367 put_unaligned_be16(ch
->ch_s_ttime
,
1376 if (ch
->ch_s_rlow
!= ch
->ch_rlow
||
1377 ch
->ch_s_rhigh
!= ch
->ch_rhigh
) {
1378 b
= set_cmd_header(b
, port
, 45);
1380 ch
->ch_s_rlow
= ch
->ch_rlow
;
1381 ch
->ch_s_rhigh
= ch
->ch_rhigh
;
1383 put_unaligned_be16(ch
->ch_s_rlow
,
1387 put_unaligned_be16(ch
->ch_s_rhigh
,
1393 * Send BRATE, CFLAG, IFLAG,
1397 if (ch
->ch_s_brate
!= ch
->ch_brate
||
1398 ch
->ch_s_cflag
!= ch
->ch_cflag
||
1399 ch
->ch_s_iflag
!= ch
->ch_iflag
||
1400 ch
->ch_s_oflag
!= ch
->ch_oflag
||
1401 ch
->ch_s_xflag
!= ch
->ch_xflag
) {
1402 b
= set_cmd_header(b
, port
, 40);
1404 ch
->ch_s_brate
= ch
->ch_brate
;
1405 ch
->ch_s_cflag
= ch
->ch_cflag
;
1406 ch
->ch_s_iflag
= ch
->ch_iflag
;
1407 ch
->ch_s_oflag
= ch
->ch_oflag
;
1408 ch
->ch_s_xflag
= ch
->ch_xflag
;
1410 put_unaligned_be16(ch
->ch_s_brate
,
1414 put_unaligned_be16(ch
->ch_s_cflag
,
1418 put_unaligned_be16(ch
->ch_s_iflag
,
1422 put_unaligned_be16(ch
->ch_s_oflag
,
1426 put_unaligned_be16(ch
->ch_s_xflag
,
1432 * Send MOUT, MFLOW, MCTRL.
1435 if (ch
->ch_s_mout
!= ch
->ch_mout
||
1436 ch
->ch_s_mflow
!= ch
->ch_mflow
||
1437 ch
->ch_s_mctrl
!= ch
->ch_mctrl
) {
1438 b
= set_cmd_header(b
, port
, 44);
1440 *b
++ = ch
->ch_s_mout
= ch
->ch_mout
;
1441 *b
++ = ch
->ch_s_mflow
= ch
->ch_mflow
;
1442 *b
++ = ch
->ch_s_mctrl
= ch
->ch_mctrl
;
1446 * Send Flow control characters.
1449 if (ch
->ch_s_xon
!= ch
->ch_xon
||
1450 ch
->ch_s_xoff
!= ch
->ch_xoff
||
1451 ch
->ch_s_lnext
!= ch
->ch_lnext
||
1452 ch
->ch_s_xxon
!= ch
->ch_xxon
||
1453 ch
->ch_s_xxoff
!= ch
->ch_xxoff
) {
1454 b
= set_cmd_header(b
, port
, 46);
1456 *b
++ = ch
->ch_s_xon
= ch
->ch_xon
;
1457 *b
++ = ch
->ch_s_xoff
= ch
->ch_xoff
;
1458 *b
++ = ch
->ch_s_lnext
= ch
->ch_lnext
;
1459 *b
++ = ch
->ch_s_xxon
= ch
->ch_xxon
;
1460 *b
++ = ch
->ch_s_xxoff
= ch
->ch_xxoff
;
1467 if (ch
->ch_s_rmax
!= ch
->ch_rmax
||
1468 ch
->ch_s_rtime
!= ch
->ch_rtime
) {
1469 b
= set_cmd_header(b
, port
, 47);
1471 ch
->ch_s_rmax
= ch
->ch_rmax
;
1472 ch
->ch_s_rtime
= ch
->ch_rtime
;
1474 put_unaligned_be16(ch
->ch_s_rmax
,
1478 put_unaligned_be16(ch
->ch_s_rtime
,
1483 ch
->ch_flag
&= ~CH_PARAM
;
1484 wake_up_interruptible(&ch
->ch_flag_wait
);
1489 * Handle action commands.
1492 if (ch
->ch_send
!= 0) {
1493 /* int send = ch->ch_send & ~ch->ch_expect; */
1494 send
= ch
->ch_send
& ~ch
->ch_expect
;
1496 /* Send character immediate */
1497 if ((send
& RR_TX_ICHAR
) != 0) {
1498 b
= set_cmd_header(b
, port
, 60);
1501 ch
->ch_expect
|= RR_TX_ICHAR
;
1505 if ((send
& RR_TX_BREAK
) != 0) {
1506 if (ch
->ch_break_time
!= 0) {
1507 b
= set_cmd_header(b
, port
, 61);
1508 put_unaligned_be16(ch
->ch_break_time
,
1512 ch
->ch_expect
|= RR_TX_BREAK
;
1513 ch
->ch_break_time
= 0;
1515 ch
->ch_send
&= ~RR_TX_BREAK
;
1516 ch
->ch_flag
&= ~CH_TX_BREAK
;
1517 wake_up_interruptible(&ch
->ch_flag_wait
);
1522 * Flush input/output buffers.
1525 if ((send
& (RR_RX_FLUSH
| RR_TX_FLUSH
)) != 0) {
1526 b
= set_cmd_header(b
, port
, 62);
1528 *b
++ = ((send
& RR_TX_FLUSH
) == 0 ? 1 :
1529 (send
& RR_RX_FLUSH
) == 0 ? 2 : 3);
1531 if (send
& RR_RX_FLUSH
) {
1532 ch
->ch_flush_seq
= nd
->nd_seq_in
;
1533 ch
->ch_flag
|= CH_RX_FLUSH
;
1536 wanted_sync_port
= port
;
1539 ch
->ch_send
&= ~(RR_RX_FLUSH
| RR_TX_FLUSH
);
1542 /* Pause input/output */
1543 if ((send
& (RR_RX_STOP
| RR_TX_STOP
)) != 0) {
1544 b
= set_cmd_header(b
, port
, 63);
1547 if ((send
& RR_TX_STOP
) != 0)
1550 if ((send
& RR_RX_STOP
) != 0)
1555 ch
->ch_send
&= ~(RR_RX_STOP
| RR_TX_STOP
);
1558 /* Start input/output */
1559 if ((send
& (RR_RX_START
| RR_TX_START
)) != 0) {
1560 b
= set_cmd_header(b
, port
, 64);
1563 if ((send
& RR_TX_START
) != 0)
1564 *b
|= EV_OPU
| EV_OPS
| EV_OPX
;
1566 if ((send
& RR_RX_START
) != 0)
1567 *b
|= EV_IPU
| EV_IPS
;
1571 ch
->ch_send
&= ~(RR_RX_START
| RR_TX_START
);
1577 * Send a window sequence to acknowledge received data.
1580 rwin
= (ch
->ch_s_rin
+
1581 ((ch
->ch_rout
- ch
->ch_rin
- 1) & RBUF_MASK
));
1583 n
= (rwin
- ch
->ch_s_rwin
) & 0xffff;
1585 if (n
>= RBUF_MAX
/ 4) {
1586 b
[0] = 0xa0 + (port
& 0xf);
1587 ch
->ch_s_rwin
= rwin
;
1588 put_unaligned_be16(rwin
, b
+ 1);
1593 * If the terminal is waiting on LOW
1594 * water or EMPTY, and the condition
1595 * is now satisfied, call the line
1596 * discipline to put more data in the
1600 n
= (ch
->ch_tin
- ch
->ch_tout
) & TBUF_MASK
;
1602 if ((ch
->ch_tun
.un_flag
& (UN_EMPTY
|UN_LOW
)) != 0) {
1603 if ((ch
->ch_tun
.un_flag
& UN_LOW
) != 0 ?
1605 (n
== 0 && ch
->ch_s_tpos
== ch
->ch_s_tin
)) {
1606 ch
->ch_tun
.un_flag
&= ~(UN_EMPTY
|UN_LOW
);
1608 if (waitqueue_active(&((ch
->ch_tun
.un_tty
)->write_wait
)))
1609 wake_up_interruptible(&((ch
->ch_tun
.un_tty
)->write_wait
));
1610 tty_wakeup(ch
->ch_tun
.un_tty
);
1611 n
= (ch
->ch_tin
- ch
->ch_tout
) & TBUF_MASK
;
1616 * If the printer is waiting on LOW
1617 * water, TIME, EMPTY or PWAIT, and is
1618 * now ready to put more data in the
1619 * buffer, call the line discipline to
1623 /* FIXME: jiffies - ch->ch_waketime can never
1624 be < 0. Someone needs to work out what is
1625 actually intended here */
1626 if (ch
->ch_pun
.un_open_count
&&
1627 (ch
->ch_pun
.un_flag
&
1628 (UN_EMPTY
|UN_TIME
|UN_LOW
|UN_PWAIT
)) != 0) {
1630 if ((ch
->ch_pun
.un_flag
& UN_LOW
) != 0 ?
1632 (ch
->ch_pun
.un_flag
& UN_TIME
) != 0 ?
1633 ((jiffies
- ch
->ch_waketime
) >= 0) :
1634 (n
== 0 && ch
->ch_s_tpos
== ch
->ch_s_tin
) &&
1635 ((ch
->ch_pun
.un_flag
& UN_EMPTY
) != 0 ||
1636 ((ch
->ch_tun
.un_open_count
&&
1637 ch
->ch_tun
.un_tty
->ops
->chars_in_buffer
) ?
1638 (ch
->ch_tun
.un_tty
->ops
->chars_in_buffer
)(ch
->ch_tun
.un_tty
) == 0
1642 ch
->ch_pun
.un_flag
&= ~(UN_EMPTY
| UN_TIME
| UN_LOW
| UN_PWAIT
);
1644 if (waitqueue_active(&((ch
->ch_pun
.un_tty
)->write_wait
)))
1645 wake_up_interruptible(&((ch
->ch_pun
.un_tty
)->write_wait
));
1646 tty_wakeup(ch
->ch_pun
.un_tty
);
1647 n
= (ch
->ch_tin
- ch
->ch_tout
) & TBUF_MASK
;
1649 } else if ((ch
->ch_pun
.un_flag
& UN_TIME
) != 0) {
1656 * Determine the max number of bytes
1657 * this port can send, including
1658 * packet header overhead.
1661 t
= ((ch
->ch_s_tsize
+ ch
->ch_s_tpos
- ch
->ch_s_tin
) & 0xffff);
1667 n
+= (n
<= 8 ? 1 : n
<= 255 ? 2 : 3);
1680 b
= set_cmd_header(b
, port
, 10);
1681 if (ch
->ch_otype
== OTYPE_IMMEDIATE
)
1686 ch
->ch_state
= CS_WAIT_CLOSE
;
1690 * Wait for a previous server request.
1694 case CS_WAIT_CANCEL
:
1701 pr_info("%s - unexpected channel state (%i)\n",
1702 __func__
, ch
->ch_state
);
1707 * If a module select code is needed, drop one in. If space
1708 * was reserved for one, but none is needed, recover the space.
1711 if (mod
!= nd
->nd_tx_module
) {
1713 mbuf
[-1] = 0xf0 | mod
;
1714 nd
->nd_tx_module
= mod
;
1722 * Adjust "tmax" so that under worst case conditions we do
1723 * not overflow either the daemon buffer or the internal
1724 * buffer in the loop that follows. Leave a safe area
1725 * of 64 bytes so we start getting asserts before we start
1726 * losing data or clobbering memory.
1729 n
= UIO_MAX
- UIO_BASE
;
1739 * Allocate space for 5 Module Selects, 1 Sequence Request,
1740 * and 1 Set TREQ for each active channel.
1743 tmax
-= 5 + 3 + 4 * nd
->nd_chan_count
;
1746 * Further reduce "tmax" to the available transmit credit.
1747 * Note that this is a soft constraint; The transmit credit
1748 * can go negative for a time and then recover.
1751 n
= nd
->nd_tx_deposit
- nd
->nd_tx_charge
- nd
->nd_link
.lk_header_size
;
1757 * Finally reduce tmax by the number of bytes already in
1764 * Suspend data transmit unless every ready channel can send
1765 * at least 1 character.
1767 if (tmax
< 2 * nd
->nd_chan_count
) {
1770 } else if (tchan
> 1 && ttotal
> tmax
) {
1773 * If transmit is limited by the credit budget, find the
1774 * largest number of characters we can send without driving
1775 * the credit negative.
1784 for (try = 0; try < 3; try++) {
1788 for (i
= 0; i
< tc
; i
++) {
1789 if (tsend
< tdata
[i
])
1790 tdata
[c
++] = tdata
[i
];
1806 tsend
= tm
/ nd
->nd_chan_count
;
1813 * If no budgetary constraints, or only one channel ready
1814 * to send, set the character limit to the remaining
1821 tsend
-= (tsend
<= 9) ? 1 : (tsend
<= 257) ? 2 : 3;
1824 * Loop over all channels, sending queued data.
1831 for (mod
= 0; port
< nd
->nd_chan_count
; mod
++) {
1833 * If this is not the current module, enter a module select
1834 * code in the buffer.
1837 if (mod
!= nd
->nd_tx_module
)
1841 * Loop to process one module.
1844 maxport
= port
+ 16;
1846 if (maxport
> nd
->nd_chan_count
)
1847 maxport
= nd
->nd_chan_count
;
1849 for (; port
< maxport
; port
++, ch
++) {
1850 if (ch
->ch_state
!= CS_READY
)
1855 n
= (ch
->ch_tin
- ch
->ch_tout
) & TBUF_MASK
;
1858 * If there is data that can be sent, send it.
1861 if (n
!= 0 && used_buffer
> 0) {
1862 t
= (ch
->ch_s_tsize
+ ch
->ch_s_tpos
- ch
->ch_s_tin
) & 0xffff;
1872 if (n
> used_buffer
) {
1881 * Create the correct size transmit header,
1882 * depending on the amount of data to transmit.
1887 b
[0] = ((n
- 1) << 4) + (port
& 0xf);
1890 } else if (n
<= 255) {
1892 b
[0] = 0x80 + (port
& 0xf);
1898 b
[0] = 0x90 + (port
& 0xf);
1899 put_unaligned_be16(n
, b
+ 1);
1903 ch
->ch_s_tin
= (ch
->ch_s_tin
+ n
) & 0xffff;
1906 * Copy transmit data to the packet.
1909 t
= TBUF_MAX
- ch
->ch_tout
;
1912 memcpy(b
, ch
->ch_tbuf
+ ch
->ch_tout
, t
);
1919 memcpy(b
, ch
->ch_tbuf
+ ch
->ch_tout
, n
);
1923 n
= (ch
->ch_tin
- ch
->ch_tout
) & TBUF_MASK
;
1927 * Wake any terminal unit process waiting in the
1928 * dgrp_write routine for low water.
1934 if ((ch
->ch_flag
& CH_LOW
) != 0) {
1935 ch
->ch_flag
&= ~CH_LOW
;
1936 wake_up_interruptible(&ch
->ch_flag_wait
);
1939 /* selwakeup tty_sel */
1940 if (ch
->ch_tun
.un_open_count
) {
1941 struct tty_struct
*tty
= (ch
->ch_tun
.un_tty
);
1943 if (waitqueue_active(&tty
->write_wait
))
1944 wake_up_interruptible(&tty
->write_wait
);
1949 if (ch
->ch_pun
.un_open_count
) {
1950 struct tty_struct
*tty
= (ch
->ch_pun
.un_tty
);
1952 if (waitqueue_active(&tty
->write_wait
))
1953 wake_up_interruptible(&tty
->write_wait
);
1959 * Do EMPTY processing.
1965 if ((ch
->ch_flag
& (CH_EMPTY
| CH_DRAIN
)) != 0 ||
1966 (ch
->ch_pun
.un_flag
& UN_EMPTY
) != 0) {
1968 * If there is still data in the server, ask the server
1969 * to notify us when its all gone.
1972 if (ch
->ch_s_treq
!= ch
->ch_s_tin
) {
1973 b
= set_cmd_header(b
, port
, 43);
1975 ch
->ch_s_treq
= ch
->ch_s_tin
;
1976 put_unaligned_be16(ch
->ch_s_treq
,
1982 * If there is a thread waiting for buffer empty,
1983 * and we are truly empty, wake the thread.
1986 else if ((ch
->ch_flag
& CH_EMPTY
) != 0 &&
1987 (ch
->ch_send
& RR_TX_BREAK
) == 0) {
1988 ch
->ch_flag
&= ~CH_EMPTY
;
1990 wake_up_interruptible(&ch
->ch_flag_wait
);
1996 * If a module select code is needed, drop one in. If space
1997 * was reserved for one, but none is needed, recover the space.
2000 if (mod
!= nd
->nd_tx_module
) {
2002 mbuf
[-1] = 0xf0 | mod
;
2003 nd
->nd_tx_module
= mod
;
2011 * Send a synchronization sequence associated with the last open
2012 * channel that sent data, and remember the time when the data was
2018 if ((send_sync
|| nd
->nd_seq_wait
[in
] != 0) && lastport
>= 0) {
2022 * Attempt the use the port that really wanted the sync.
2023 * This gets around a race condition where the "lastport" is in
2024 * the middle of the close() routine, and by the time we
2025 * send this command, it will have already acked the close, and
2026 * thus not send the sync response.
2028 if (wanted_sync_port
>= 0)
2029 lastport
= wanted_sync_port
;
2031 * Set a flag just in case the port is in the middle of a close,
2032 * it will not be permitted to actually close until we get an
2033 * sync response, and clear the flag there.
2035 ch
= nd
->nd_chan
+ lastport
;
2036 ch
->ch_flag
|= CH_WAITING_SYNC
;
2038 mod
= lastport
>> 4;
2040 if (mod
!= nd
->nd_tx_module
) {
2044 nd
->nd_tx_module
= mod
;
2047 bb
= set_cmd_header(bb
, lastport
, 12);
2050 nd
->nd_seq_size
[in
] = bb
- buf
;
2051 nd
->nd_seq_time
[in
] = jiffies
;
2053 if (++in
>= SEQ_MAX
)
2056 if (in
!= nd
->nd_seq_out
) {
2059 nd
->nd_unack
+= b
- buf
;
2064 * If there are no open ports, a sync cannot be sent.
2065 * There is nothing left to wait for anyway, so wake any
2066 * thread waiting for an acknowledgement.
2069 else if (nd
->nd_seq_wait
[in
] != 0) {
2070 nd
->nd_seq_wait
[in
] = 0;
2072 wake_up_interruptible(&nd
->nd_seq_wque
[in
]);
2076 * If there is no traffic for an interval of IDLE_MAX, then
2077 * send a single byte packet.
2081 nd
->nd_tx_time
= jiffies
;
2082 } else if ((ulong
)(jiffies
- nd
->nd_tx_time
) >= IDLE_MAX
) {
2083 *b
++ = 0xf0 | nd
->nd_tx_module
;
2084 nd
->nd_tx_time
= jiffies
;
2090 pr_info("%s - n(%i) >= tsafe(%i)\n",
2091 __func__
, n
, tsafe
);
2096 nd
->nd_tx_work
= work
;
2103 * Data to be sent TO the PortServer from the "async." half of the driver.
2105 static ssize_t
dgrp_net_read(struct file
*file
, char __user
*buf
, size_t count
,
2108 struct nd_struct
*nd
;
2115 * Get the node pointer, and quit if it doesn't exist.
2117 nd
= (struct nd_struct
*)(file
->private_data
);
2121 if (count
< UIO_MIN
)
2125 * Only one read/write operation may be in progress at
2130 * Grab the NET lock.
2132 down(&nd
->nd_net_semaphore
);
2134 nd
->nd_read_count
++;
2136 nd
->nd_tx_ready
= 0;
2139 * Determine the effective size of the buffer.
2142 if (nd
->nd_remain
> UIO_BASE
)
2143 pr_info_ratelimited("%s - nd_remain(%i) > UIO_BASE\n",
2144 __func__
, nd
->nd_remain
);
2146 b
= local_buf
= nd
->nd_iobuf
+ UIO_BASE
;
2149 * Generate data according to the node state.
2152 switch (nd
->nd_state
) {
2154 * Initialize the connection.
2159 dgrp_monitor_reset(nd
);
2162 * Request a Product ID Packet.
2169 nd
->nd_expect
|= NR_IDENT
;
2172 * Request a Server Capability ID Response.
2179 nd
->nd_expect
|= NR_CAPABILITY
;
2182 * Request a Server VPD Response.
2189 nd
->nd_expect
|= NR_VPD
;
2191 nd
->nd_state
= NS_WAIT_QUERY
;
2195 * We do serious communication with the server only in
2200 b
= dgrp_send(nd
, count
) + local_buf
;
2204 * Send off an error after receiving a bogus message
2209 n
= strlen(nd
->nd_error
);
2213 memcpy(b
+ 2, nd
->nd_error
, n
);
2218 * Set the active port count to zero.
2220 dgrp_chan_count(nd
, 0);
2230 nd
->nd_send_count
++;
2232 nd
->nd_tx_byte
+= n
+ nd
->nd_link
.lk_header_size
;
2233 nd
->nd_tx_charge
+= n
+ nd
->nd_link
.lk_header_size
;
2236 rtn
= copy_to_user((void __user
*)buf
, local_buf
, n
);
2247 dgrp_monitor_data(nd
, RPDUMP_CLIENT
, local_buf
, n
);
2250 * Release the NET lock.
2253 up(&nd
->nd_net_semaphore
);
2259 * dgrp_receive() -- decode data packets received from the remote PortServer.
2260 * @nd: pointer to a node structure
2262 static void dgrp_receive(struct nd_struct
*nd
)
2264 struct ch_struct
*ch
;
2281 nd
->nd_tx_time
= jiffies
;
2283 ID_TO_CHAR(nd
->nd_ID
, ID
);
2285 b
= buf
= nd
->nd_iobuf
;
2286 remain
= nd
->nd_remain
;
2289 * Loop to process Realport protocol packets.
2292 while (remain
> 0) {
2294 int n1
= b
[0] & 0x0f;
2297 port
= (nd
->nd_rx_module
<< 4) + n1
;
2299 if (port
>= nd
->nd_chan_count
) {
2300 error
= "Improper Port Number";
2304 ch
= nd
->nd_chan
+ port
;
2311 * Process by major packet type.
2317 * Process 1-byte header data packet.
2335 * Process 2-byte header data packet.
2349 * Process 3-byte header data packet.
2356 dlen
= get_unaligned_be16(b
+ 1);
2362 * Common packet handling code.
2369 * Otherwise data should appear only when we are
2370 * in the CS_READY state.
2373 if (ch
->ch_state
< CS_READY
) {
2374 error
= "Data received before RWIN established";
2379 * Assure that the data received is within the
2383 n
= (ch
->ch_s_rwin
- ch
->ch_s_rin
) & 0xffff;
2386 error
= "Receive data overrun";
2391 * If we received 3 or less characters,
2392 * assume it is a human typing, and set RTIME
2393 * to 10 milliseconds.
2395 * If we receive 10 or more characters,
2396 * assume its not a human typing, and set RTIME
2397 * to 100 milliseconds.
2400 if (ch
->ch_edelay
!= DGRP_RTIME
) {
2401 if (ch
->ch_rtime
!= ch
->ch_edelay
) {
2402 ch
->ch_rtime
= ch
->ch_edelay
;
2403 ch
->ch_flag
|= CH_PARAM
;
2405 } else if (dlen
<= 3) {
2406 if (ch
->ch_rtime
!= 10) {
2408 ch
->ch_flag
|= CH_PARAM
;
2411 if (ch
->ch_rtime
!= DGRP_RTIME
) {
2412 ch
->ch_rtime
= DGRP_RTIME
;
2413 ch
->ch_flag
|= CH_PARAM
;
2418 * If a portion of the packet is outside the
2419 * buffer, shorten the effective length of the
2420 * data packet to be the amount of data received.
2424 dlen
-= plen
- remain
;
2427 * Detect if receive flush is now complete.
2430 if ((ch
->ch_flag
& CH_RX_FLUSH
) != 0 &&
2431 ((ch
->ch_flush_seq
- nd
->nd_seq_out
) & SEQ_MASK
) >=
2432 ((nd
->nd_seq_in
- nd
->nd_seq_out
) & SEQ_MASK
)) {
2433 ch
->ch_flag
&= ~CH_RX_FLUSH
;
2437 * If we are ready to receive, move the data into
2438 * the receive buffer.
2441 ch
->ch_s_rin
= (ch
->ch_s_rin
+ dlen
) & 0xffff;
2443 if (ch
->ch_state
== CS_READY
&&
2444 (ch
->ch_tun
.un_open_count
!= 0) &&
2445 (ch
->ch_tun
.un_flag
& UN_CLOSING
) == 0 &&
2446 (ch
->ch_cflag
& CF_CREAD
) != 0 &&
2447 (ch
->ch_flag
& (CH_BAUD0
| CH_RX_FLUSH
)) == 0 &&
2448 (ch
->ch_send
& RR_RX_FLUSH
) == 0) {
2450 if (ch
->ch_rin
+ dlen
>= RBUF_MAX
) {
2451 n
= RBUF_MAX
- ch
->ch_rin
;
2453 memcpy(ch
->ch_rbuf
+ ch
->ch_rin
, dbuf
, n
);
2460 memcpy(ch
->ch_rbuf
+ ch
->ch_rin
, dbuf
, dlen
);
2466 * If we are not in fastcook mode, or
2467 * if there is a fastcook thread
2468 * waiting for data, send the data to
2469 * the line discipline.
2472 if ((ch
->ch_flag
& CH_FAST_READ
) == 0 ||
2473 ch
->ch_inwait
!= 0) {
2478 * If there is a read thread waiting
2479 * in select, and we are in fastcook
2480 * mode, wake him up.
2483 if (waitqueue_active(&ch
->ch_tun
.un_tty
->read_wait
) &&
2484 (ch
->ch_flag
& CH_FAST_READ
) != 0)
2485 wake_up_interruptible(&ch
->ch_tun
.un_tty
->read_wait
);
2488 * Wake any thread waiting in the
2492 if ((ch
->ch_flag
& CH_INPUT
) != 0) {
2493 ch
->ch_flag
&= ~CH_INPUT
;
2495 wake_up_interruptible(&ch
->ch_flag_wait
);
2500 * Fabricate and insert a data packet header to
2501 * preced the remaining data when it comes in.
2504 if (remain
< plen
) {
2505 dlen
= plen
- remain
;
2509 put_unaligned_be16(dlen
, b
+ 1);
2517 * Handle Window Sequence packets.
2528 ushort tpos
= get_unaligned_be16(b
+ 1);
2530 ushort ack
= (tpos
- ch
->ch_s_tpos
) & 0xffff;
2531 ushort unack
= (ch
->ch_s_tin
- ch
->ch_s_tpos
) & 0xffff;
2532 ushort notify
= (ch
->ch_s_treq
- ch
->ch_s_tpos
) & 0xffff;
2534 if (ch
->ch_state
< CS_READY
|| ack
> unack
) {
2535 error
= "Improper Window Sequence";
2539 ch
->ch_s_tpos
= tpos
;
2542 ch
->ch_s_treq
= tpos
;
2547 * Handle Command response packets.
2553 * RealPort engine fix - 03/11/2004
2555 * This check did not used to be here.
2557 * We were using b[1] without verifying that the data
2558 * is actually there and valid. On a split packet, it
2561 * NOTE: I have never actually seen the failure happen
2562 * under Linux, but since I have seen it occur
2563 * under both Solaris and HP-UX, the assumption
2564 * is that it *could* happen here as well...
2573 * Handle Open Response.
2586 port
= get_unaligned_be16(b
+ 4);
2588 if (port
>= nd
->nd_chan_count
) {
2589 error
= "Open channel number out of range";
2593 ch
= nd
->nd_chan
+ port
;
2596 * How we handle an open response depends primarily
2597 * on our current channel state.
2600 switch (ch
->ch_state
) {
2604 * Handle a delayed open.
2607 if (ch
->ch_otype_waiting
!= 0 &&
2608 req
== ch
->ch_otype_waiting
&&
2611 ch
->ch_otype_waiting
= 0;
2612 ch
->ch_state
= CS_SEND_QUERY
;
2620 * Handle the open response.
2623 if (req
== ch
->ch_otype
) {
2627 * On successful response, open the
2628 * port and proceed normally.
2632 ch
->ch_state
= CS_SEND_QUERY
;
2636 * On a busy response to a persistent open,
2637 * remember that the open is pending.
2642 if (req
!= OTYPE_IMMEDIATE
) {
2643 ch
->ch_otype_waiting
= req
;
2644 ch
->ch_state
= CS_IDLE
;
2649 * Otherwise the server open failed. If
2650 * the Unix port is open, hang it up.
2654 if (ch
->ch_open_count
!= 0) {
2655 ch
->ch_flag
|= CH_HANGUP
;
2657 ch
->ch_state
= CS_IDLE
;
2661 ch
->ch_open_error
= resp
;
2662 ch
->ch_state
= CS_IDLE
;
2664 wake_up_interruptible(&ch
->ch_flag_wait
);
2670 * Handle delayed response arrival preceding
2671 * the open response we are waiting for.
2674 if (ch
->ch_otype_waiting
!= 0 &&
2675 req
== ch
->ch_otype_waiting
&&
2677 ch
->ch_otype
= ch
->ch_otype_waiting
;
2678 ch
->ch_otype_waiting
= 0;
2679 ch
->ch_state
= CS_WAIT_FAIL
;
2688 * Handle response to immediate open arriving
2689 * after a delayed open success.
2692 if (req
== OTYPE_IMMEDIATE
) {
2693 ch
->ch_state
= CS_SEND_QUERY
;
2699 case CS_WAIT_CANCEL
:
2701 * Handle delayed open response arriving before
2702 * the cancel response.
2705 if (req
== ch
->ch_otype_waiting
&&
2707 ch
->ch_otype_waiting
= 0;
2712 * Handle cancel response.
2715 if (req
== 4 && resp
== 0) {
2716 ch
->ch_otype_waiting
= 0;
2717 ch
->ch_state
= CS_IDLE
;
2725 * Handle a successful response to a port
2730 ch
->ch_state
= CS_IDLE
;
2738 error
= "Improper Open Response";
2746 * Handle Synchronize Response.
2758 * If channel was waiting for this sync response,
2759 * unset the flag, and wake up anyone waiting
2762 if (ch
->ch_flag
& CH_WAITING_SYNC
) {
2763 ch
->ch_flag
&= ~(CH_WAITING_SYNC
);
2764 wake_up_interruptible(&ch
->ch_flag_wait
);
2767 if (((seq
- nd
->nd_seq_out
) & SEQ_MASK
) >=
2768 ((nd
->nd_seq_in
- nd
->nd_seq_out
) & SEQ_MASK
)) {
2772 for (s
= nd
->nd_seq_out
;; s
= (s
+ 1) & SEQ_MASK
) {
2773 if (nd
->nd_seq_wait
[s
] != 0) {
2774 nd
->nd_seq_wait
[s
] = 0;
2776 wake_up_interruptible(&nd
->nd_seq_wque
[s
]);
2779 nd
->nd_unack
-= nd
->nd_seq_size
[s
];
2785 nd
->nd_seq_out
= (seq
+ 1) & SEQ_MASK
;
2790 * Handle Sequence Response.
2799 /* Record that we have received the Sequence
2800 * Response, but we aren't interested in the
2801 * sequence numbers. We were using RIN like it
2802 * was ROUT and that was causing problems,
2803 * fixed 7-13-2001 David Fries. See comment in
2804 * drp.h for ch_s_rin variable.
2805 int rin = get_unaligned_be16(b + 2);
2806 int tpos = get_unaligned_be16(b + 4);
2809 ch
->ch_send
&= ~RR_SEQUENCE
;
2810 ch
->ch_expect
&= ~RR_SEQUENCE
;
2815 * Handle Status Response.
2824 ch
->ch_s_elast
= get_unaligned_be16(b
+ 2);
2825 ch
->ch_s_mlast
= b
[4];
2827 ch
->ch_expect
&= ~RR_STATUS
;
2828 ch
->ch_send
&= ~RR_STATUS
;
2831 * CH_PHYS_CD is cleared because something _could_ be
2832 * waiting for the initial sense of carrier... and if
2833 * carrier is high immediately, we want to be sure to
2834 * wake them as soon as possible.
2836 ch
->ch_flag
&= ~CH_PHYS_CD
;
2843 * Handle Line Error Response.
2854 * Handle Buffer Response.
2863 ch
->ch_s_rsize
= get_unaligned_be16(b
+ 2);
2864 ch
->ch_s_tsize
= get_unaligned_be16(b
+ 4);
2866 ch
->ch_send
&= ~RR_BUFFER
;
2867 ch
->ch_expect
&= ~RR_BUFFER
;
2872 * Handle Port Capability Response.
2881 ch
->ch_send
&= ~RR_CAPABILITY
;
2882 ch
->ch_expect
&= ~RR_CAPABILITY
;
2886 * When all queries are complete, set those parameters
2887 * derived from the query results, then transition
2888 * to the READY state.
2892 if (ch
->ch_state
== CS_WAIT_QUERY
&&
2893 (ch
->ch_expect
& (RR_SEQUENCE
|
2896 RR_CAPABILITY
)) == 0) {
2897 ch
->ch_tmax
= ch
->ch_s_tsize
/ 4;
2899 if (ch
->ch_edelay
== DGRP_TTIME
)
2900 ch
->ch_ttime
= DGRP_TTIME
;
2902 ch
->ch_ttime
= ch
->ch_edelay
;
2904 ch
->ch_rmax
= ch
->ch_s_rsize
/ 4;
2906 if (ch
->ch_edelay
== DGRP_RTIME
)
2907 ch
->ch_rtime
= DGRP_RTIME
;
2909 ch
->ch_rtime
= ch
->ch_edelay
;
2911 ch
->ch_rlow
= 2 * ch
->ch_s_rsize
/ 8;
2912 ch
->ch_rhigh
= 6 * ch
->ch_s_rsize
/ 8;
2914 ch
->ch_state
= CS_READY
;
2917 wake_up_interruptible(&ch
->ch_flag_wait
);
2936 mlast
= ch
->ch_s_mlast
;
2937 elast
= ch
->ch_s_elast
;
2939 mstat
= ch
->ch_s_mlast
= b
[1];
2940 estat
= ch
->ch_s_elast
= get_unaligned_be16(b
+ 2);
2943 * Handle modem changes.
2946 if (((mstat
^ mlast
) & DM_CD
) != 0)
2951 * Handle received break.
2954 if ((estat
& ~elast
& EV_RXB
) != 0 &&
2955 (ch
->ch_tun
.un_open_count
!= 0) &&
2956 I_BRKINT(ch
->ch_tun
.un_tty
) &&
2957 !(I_IGNBRK(ch
->ch_tun
.un_tty
))) {
2959 tty_buffer_request_room(ch
->ch_tun
.un_tty
, 1);
2960 tty_insert_flip_char(ch
->ch_tun
.un_tty
, 0, TTY_BREAK
);
2961 tty_flip_buffer_push(ch
->ch_tun
.un_tty
);
2966 * On transmit break complete, if more break traffic
2967 * is waiting then send it. Otherwise wake any threads
2968 * waiting for transmitter empty.
2971 if ((~estat
& elast
& EV_TXB
) != 0 &&
2972 (ch
->ch_expect
& RR_TX_BREAK
) != 0) {
2976 ch
->ch_expect
&= ~RR_TX_BREAK
;
2978 if (ch
->ch_break_time
!= 0) {
2979 ch
->ch_send
|= RR_TX_BREAK
;
2981 ch
->ch_send
&= ~RR_TX_BREAK
;
2982 ch
->ch_flag
&= ~CH_TX_BREAK
;
2983 wake_up_interruptible(&ch
->ch_flag_wait
);
2990 error
= "Unrecognized command";
2994 * Decode Special Codes.
3000 * One byte module select.
3012 nd
->nd_rx_module
= n1
;
3016 * Two byte module select.
3024 nd
->nd_rx_module
= b
[1];
3028 * ID Request packet.
3035 plen
= get_unaligned_be16(b
+ 2);
3037 if (plen
< 12 || plen
> 1000) {
3038 error
= "Response Packet length error";
3050 nd
->nd_send
|= NR_ECHO
;
3054 * ID Response packet.
3058 nd
->nd_send
|= NR_IDENT
;
3062 * ID Response packet.
3066 nd
->nd_send
|= NR_PASSWORD
;
3073 * Various node-level response packets.
3080 plen
= get_unaligned_be16(b
+ 2);
3082 if (plen
< 4 || plen
> 1000) {
3083 error
= "Response Packet length error";
3095 nd
->nd_expect
&= ~NR_ECHO
;
3099 * Product Response Packet.
3106 nd
->nd_hw_ver
= (b
[8] << 8) | b
[9];
3107 nd
->nd_sw_ver
= (b
[10] << 8) | b
[11];
3108 nd
->nd_hw_id
= b
[6];
3109 desclen
= ((plen
- 12) > MAX_DESC_LEN
) ? MAX_DESC_LEN
:
3113 error
= "Response Packet desclen error";
3117 strncpy(nd
->nd_ps_desc
, b
+ 12, desclen
);
3118 nd
->nd_ps_desc
[desclen
] = 0;
3121 nd
->nd_expect
&= ~NR_IDENT
;
3125 * Capability Response Packet.
3130 int nn
= get_unaligned_be16(b
+ 4);
3135 dgrp_chan_count(nd
, nn
);
3138 nd
->nd_expect
&= ~NR_CAPABILITY
;
3142 * VPD Response Packet.
3147 * NOTE: case 15 is here ONLY because the EtherLite
3148 * is broken, and sends a response to 24 back as 15.
3149 * To resolve this, the EtherLite firmware is now
3150 * fixed to send back 24 correctly, but, for backwards
3151 * compatibility, we now have reserved 15 for the
3152 * bad EtherLite response to 24 as well.
3160 * If the product doesn't support VPD,
3161 * it will send back a null IDRESP,
3162 * which is a length of 4 bytes.
3165 memcpy(nd
->nd_vpd
, b
+ 4, min(plen
- 4, (long) VPDSIZE
));
3166 nd
->nd_vpd_len
= min(plen
- 4, (long) VPDSIZE
);
3169 nd
->nd_expect
&= ~NR_VPD
;
3176 if (nd
->nd_expect
== 0 &&
3177 nd
->nd_state
== NS_WAIT_QUERY
) {
3178 nd
->nd_state
= NS_READY
;
3190 plen
= get_unaligned_be16(b
+ 2) + 4;
3193 error
= "Debug Packet too large";
3202 * Handle reset packet.
3221 error
= "Client Reset Acknowledge";
3238 * When the buffer is exhausted, copy any data left at the
3239 * top of the buffer back down to the bottom for the next
3244 if (remain
> 0 && b
!= buf
)
3245 memcpy(buf
, b
, remain
);
3247 nd
->nd_remain
= remain
;
3251 * Handle a decode error.
3255 error
= "Protocol decode error";
3258 * Handle a general protocol error.
3263 nd
->nd_state
= NS_SEND_ERROR
;
3264 nd
->nd_error
= error
;
3268 * dgrp_net_write() -- write data to the network device.
3270 * A zero byte write indicates that the connection to the RealPort
3271 * device has been broken.
3273 * A non-zero write indicates data from the RealPort device.
3275 static ssize_t
dgrp_net_write(struct file
*file
, const char __user
*buf
,
3276 size_t count
, loff_t
*ppos
)
3278 struct nd_struct
*nd
;
3284 * Get the node pointer, and quit if it doesn't exist.
3286 nd
= (struct nd_struct
*)(file
->private_data
);
3291 * Grab the NET lock.
3293 down(&nd
->nd_net_semaphore
);
3295 nd
->nd_write_count
++;
3298 * Handle disconnect.
3304 * Set the active port count to zero.
3306 dgrp_chan_count(nd
, 0);
3311 * Loop to process entire receive packet.
3315 n
= UIO_MAX
- nd
->nd_remain
;
3320 nd
->nd_rx_byte
+= n
+ nd
->nd_link
.lk_header_size
;
3322 rtn
= copy_from_user(nd
->nd_iobuf
+ nd
->nd_remain
,
3323 (void __user
*) buf
+ total
, n
);
3336 dgrp_monitor_data(nd
, RPDUMP_SERVER
,
3337 nd
->nd_iobuf
+ nd
->nd_remain
, n
);
3348 * Release the NET lock.
3350 up(&nd
->nd_net_semaphore
);
3358 * Determine whether a device is ready to be read or written to, and
3361 static unsigned int dgrp_net_select(struct file
*file
,
3362 struct poll_table_struct
*table
)
3364 unsigned int retval
= 0;
3365 struct nd_struct
*nd
= file
->private_data
;
3367 poll_wait(file
, &nd
->nd_tx_waitq
, table
);
3369 if (nd
->nd_tx_ready
)
3370 retval
|= POLLIN
| POLLRDNORM
; /* Conditionally readable */
3372 retval
|= POLLOUT
| POLLWRNORM
; /* Always writeable */
3380 * Implement those functions which allow the network daemon to control
3381 * the network parameters in the driver. The ioctls include ones to
3382 * get and set the link speed parameters for the PortServer.
3384 static long dgrp_net_ioctl(struct file
*file
, unsigned int cmd
,
3387 struct nd_struct
*nd
;
3389 long size
= _IOC_SIZE(cmd
);
3390 struct link_struct link
;
3392 nd
= file
->private_data
;
3394 if (_IOC_DIR(cmd
) & _IOC_READ
)
3395 rtn
= access_ok(VERIFY_WRITE
, (void __user
*) arg
, size
);
3396 else if (_IOC_DIR(cmd
) & _IOC_WRITE
)
3397 rtn
= access_ok(VERIFY_READ
, (void __user
*) arg
, size
);
3404 if (size
!= sizeof(struct link_struct
))
3407 if (copy_from_user((void *)(&link
), (void __user
*) arg
, size
))
3410 if (link
.lk_fast_rate
< 9600)
3411 link
.lk_fast_rate
= 9600;
3413 if (link
.lk_slow_rate
< 2400)
3414 link
.lk_slow_rate
= 2400;
3416 if (link
.lk_fast_rate
> 10000000)
3417 link
.lk_fast_rate
= 10000000;
3419 if (link
.lk_slow_rate
> link
.lk_fast_rate
)
3420 link
.lk_slow_rate
= link
.lk_fast_rate
;
3422 if (link
.lk_fast_delay
> 2000)
3423 link
.lk_fast_delay
= 2000;
3425 if (link
.lk_slow_delay
> 10000)
3426 link
.lk_slow_delay
= 10000;
3428 if (link
.lk_fast_delay
< 60)
3429 link
.lk_fast_delay
= 60;
3431 if (link
.lk_slow_delay
< link
.lk_fast_delay
)
3432 link
.lk_slow_delay
= link
.lk_fast_delay
;
3434 if (link
.lk_header_size
< 2)
3435 link
.lk_header_size
= 2;
3437 if (link
.lk_header_size
> 128)
3438 link
.lk_header_size
= 128;
3440 link
.lk_fast_rate
/= 8 * 1000 / dgrp_poll_tick
;
3441 link
.lk_slow_rate
/= 8 * 1000 / dgrp_poll_tick
;
3443 link
.lk_fast_delay
/= dgrp_poll_tick
;
3444 link
.lk_slow_delay
/= dgrp_poll_tick
;
3451 if (size
!= sizeof(struct link_struct
))
3454 if (copy_to_user((void __user
*)arg
, (void *)(&nd
->nd_link
),
3469 * dgrp_poll_handler() -- handler for poll timer
3471 * As each timer expires, it determines (a) whether the "transmit"
3472 * waiter needs to be woken up, and (b) whether the poller needs to
3475 void dgrp_poll_handler(unsigned long arg
)
3477 struct dgrp_poll_data
*poll_data
;
3478 struct nd_struct
*nd
;
3479 struct link_struct
*lk
;
3485 poll_data
= (struct dgrp_poll_data
*) arg
;
3486 freq
= 1000 / poll_data
->poll_tick
;
3487 poll_data
->poll_round
+= 17;
3489 if (poll_data
->poll_round
>= freq
)
3490 poll_data
->poll_round
-= freq
;
3493 * Loop to process all open nodes.
3495 * For each node, determine the rate at which it should
3496 * be transmitting data. Then if the node should wake up
3497 * and transmit data now, enable the net receive select
3498 * to get the transmit going.
3501 list_for_each_entry(nd
, &nd_struct_list
, list
) {
3506 * Decrement statistics. These are only for use with
3507 * KME, so don't worry that the operations are done
3508 * unlocked, and so the results are occasionally wrong.
3511 nd
->nd_read_count
-= (nd
->nd_read_count
+
3512 poll_data
->poll_round
) / freq
;
3513 nd
->nd_write_count
-= (nd
->nd_write_count
+
3514 poll_data
->poll_round
) / freq
;
3515 nd
->nd_send_count
-= (nd
->nd_send_count
+
3516 poll_data
->poll_round
) / freq
;
3517 nd
->nd_tx_byte
-= (nd
->nd_tx_byte
+
3518 poll_data
->poll_round
) / freq
;
3519 nd
->nd_rx_byte
-= (nd
->nd_rx_byte
+
3520 poll_data
->poll_round
) / freq
;
3523 * Wake the daemon to transmit data only when there is
3524 * enough byte credit to send data.
3526 * The results are approximate because the operations
3527 * are performed unlocked, and we are inspecting
3528 * data asynchronously updated elsewhere. The whole
3529 * thing is just approximation anyway, so that should
3533 if (lk
->lk_slow_rate
>= UIO_MAX
) {
3536 nd
->nd_rate
= UIO_MAX
;
3538 nd
->nd_tx_deposit
= nd
->nd_tx_charge
+ 3 * UIO_MAX
;
3539 nd
->nd_tx_credit
= 3 * UIO_MAX
;
3550 long seq_in
= nd
->nd_seq_in
;
3551 long seq_out
= nd
->nd_seq_out
;
3554 * If there are no outstanding packets, run at the
3558 if (seq_in
== seq_out
) {
3560 rate
= lk
->lk_fast_rate
;
3564 * Otherwise compute the transmit rate based on the
3565 * delay since the oldest packet.
3570 * The actual delay is computed as the
3571 * time since the oldest unacknowledged
3572 * packet was sent, minus the time it
3573 * took to send that packet to the server.
3576 delay
= ((jiffies
- nd
->nd_seq_time
[seq_out
])
3577 - (nd
->nd_seq_size
[seq_out
] /
3581 * If the delay is less than the "fast"
3582 * delay, transmit full speed. If greater
3583 * than the "slow" delay, transmit at the
3584 * "slow" speed. In between, interpolate
3585 * between the fast and slow speeds.
3589 (delay
<= lk
->lk_fast_delay
?
3591 delay
>= lk
->lk_slow_delay
?
3594 (lk
->lk_slow_delay
- delay
) *
3595 (lk
->lk_fast_rate
- lk
->lk_slow_rate
) /
3596 (lk
->lk_slow_delay
- lk
->lk_fast_delay
)
3601 nd
->nd_delay
= delay
;
3605 * Increase the transmit credit by depositing the
3606 * current transmit rate.
3609 deposit
= nd
->nd_tx_deposit
;
3610 charge
= nd
->nd_tx_charge
;
3615 * If the available transmit credit becomes too large,
3616 * reduce the deposit to correct the value.
3618 * Too large is the max of:
3619 * 6 times the header size
3620 * 3 times the current transmit rate.
3623 size
= 2 * nd
->nd_link
.lk_header_size
;
3630 excess
= deposit
- charge
- size
;
3635 nd
->nd_tx_deposit
= deposit
;
3636 nd
->nd_tx_credit
= deposit
- charge
;
3639 * Wake the transmit task only if the transmit credit
3640 * is at least 3 times the transmit header size.
3643 size
= 3 * lk
->lk_header_size
;
3645 if (nd
->nd_tx_credit
< size
)
3651 * Enable the READ select to wake the daemon if there
3652 * is useful work for the drp_read routine to perform.
3655 if (waitqueue_active(&nd
->nd_tx_waitq
) &&
3656 (nd
->nd_tx_work
!= 0 ||
3657 (ulong
)(jiffies
- nd
->nd_tx_time
) >= IDLE_MAX
)) {
3658 nd
->nd_tx_ready
= 1;
3660 wake_up_interruptible(&nd
->nd_tx_waitq
);
3663 /* nd->nd_flag &= ~ND_SELECT; */
3669 * Schedule ourself back at the nominal wakeup interval.
3671 spin_lock_irqsave(&poll_data
->poll_lock
, lock_flags
);
3673 poll_data
->node_active_count
--;
3674 if (poll_data
->node_active_count
> 0) {
3675 poll_data
->node_active_count
++;
3676 poll_time
= poll_data
->timer
.expires
+
3677 poll_data
->poll_tick
* HZ
/ 1000;
3679 time
= poll_time
- jiffies
;
3681 if (time
>= 2 * poll_data
->poll_tick
)
3682 poll_time
= jiffies
+ dgrp_poll_tick
* HZ
/ 1000;
3684 poll_data
->timer
.expires
= poll_time
;
3685 add_timer(&poll_data
->timer
);
3688 spin_unlock_irqrestore(&poll_data
->poll_lock
, lock_flags
);