/*
 *  linux/fs/ncpfs/sock.c
 *
 *  Copyright (C) 1992, 1993  Rick Sladkey
 *
 *  Modified 1995, 1996 by Volker Lendecke to be usable for ncp
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <net/sock.h>		/* for sk->sk_user_data and sk->sk_family used below */
#include <linux/ipx.h>
#include <linux/poll.h>
#include <linux/file.h>

#include <linux/ncp_fs.h>

#include "ncpsign_kernel.h"
static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
{
	struct msghdr msg = {NULL, };
	struct kvec iov = {buf, size};
	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
}
static inline int do_send(struct socket *sock, struct kvec *vec, int count,
			  int len, unsigned flags)
{
	struct msghdr msg = { .msg_flags = flags };
	return kernel_sendmsg(sock, &msg, vec, count, len);
}
static int _send(struct socket *sock, const void *buff, int len)
{
	struct kvec vec;

	vec.iov_base = (void *) buff;
	vec.iov_len = len;
	return do_send(sock, &vec, 1, len, 0);
}
struct ncp_request_reply {
	struct list_head req;
	wait_queue_head_t wq;
	atomic_t refs;
	unsigned char* reply_buf;
	size_t datalen;
	int result;
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct kvec* tx_ciov;
	size_t tx_totallen;
	size_t tx_iovlen;
	struct kvec tx_iov[3];
	u_int16_t tx_type;
	u_int32_t sign[6];
};
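
/*
 * Request lifecycle, as used below: a request starts as RQ_IDLE after
 * ncp_alloc_req(), becomes RQ_QUEUED while parked on tx.requests,
 * RQ_INPROGRESS once its header is filled in and it is handed to the
 * socket, and RQ_DONE when ncp_finish_request() copies the reply and
 * wakes the waiter.  RQ_ABANDONED marks a request whose caller gave up
 * (e.g. on a signal) while the transfer was still in flight, so the
 * reply data is discarded instead of being copied into reply_buf.
 */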
static inline struct ncp_request_reply* ncp_alloc_req(void)
{
	struct ncp_request_reply *req;

	req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
	if (!req)
		return NULL;

	init_waitqueue_head(&req->wq);
	atomic_set(&req->refs, (1));
	req->status = RQ_IDLE;

	return req;
}
static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}

static void ncp_req_put(struct ncp_request_reply *req)
{
	if (atomic_dec_and_test(&req->refs))
		kfree(req);
}
void ncp_tcp_data_ready(struct sock *sk, int len)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk, len);
	schedule_work(&server->rcv.tq);
}
void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}
void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}
void ncpdgram_timeout_call(unsigned long v)
{
	struct ncp_server *server = (void*)v;

	schedule_work(&server->timeout_tq);
}
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}
static inline int get_conn_number(struct ncp_reply_header *rp)
{
	return rp->conn_low | (rp->conn_high << 8);
}
static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
		case RQ_IDLE:
		case RQ_DONE:
			break;
		case RQ_QUEUED:
			list_del_init(&req->req);
			ncp_finish_request(server, req, err);
			break;
		case RQ_INPROGRESS:
			req->status = RQ_ABANDONED;
			break;
		case RQ_ABANDONED:
			break;
	}
}
static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
{
	struct kvec vec[3];

	/* sock_sendmsg updates iov pointers for us :-( */
	memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
	return do_send(sock, vec, req->tx_iovlen,
		       req->tx_totallen, MSG_DONTWAIT);
}
static void __ncptcp_try_send(struct ncp_server *server)
{
	struct ncp_request_reply *rq;
	struct kvec *iov;
	struct kvec iovc[3];
	int result;

	rq = server->tx.creq;
	if (!rq)
		return;

	/* sock_sendmsg updates iov pointers for us :-( */
	memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
	result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
			 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);

	if (result == -EAGAIN)
		return;

	if (result < 0) {
		printk(KERN_ERR "ncpfs: tcp: Send failed: %d\n", result);
		__ncp_abort_request(server, rq, result);
		return;
	}
	if (result >= rq->tx_totallen) {
		server->rcv.creq = rq;
		server->tx.creq = NULL;
		return;
	}
	rq->tx_totallen -= result;
	iov = rq->tx_ciov;
	while (iov->iov_len <= result) {
		result -= iov->iov_len;
		iov++;
		rq->tx_iovlen--;
	}
	iov->iov_base += result;
	iov->iov_len -= result;
	rq->tx_ciov = iov;
}
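
/*
 * Note on partial sends: do_send() is always handed a scratch copy of the
 * request's kvec array, because sock_sendmsg() consumes the iovec it is
 * given.  When the stream socket accepts only part of the data,
 * __ncptcp_try_send() advances rq->tx_ciov/tx_iovlen past the bytes that
 * were written, so the next ncp_tcp_write_space() -> ncp_tcp_tx_proc()
 * invocation resumes from the first unsent byte.
 */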
static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
	h->sequence = ++server->sequence;
}
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header *h;

	req->tx_ciov = req->tx_iov + 1;

	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(req->tx_totallen), req->sign);
	if (signlen) {
		req->tx_ciov[1].iov_base = req->sign;
		req->tx_ciov[1].iov_len = signlen;
		req->tx_iovlen += 1;
		req->tx_totallen += signlen;
	}
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}
#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
#define NCP_TCP_XMIT_VERSION	(1)
#define NCP_TCP_RCVD_MAGIC	(0x744E6350)
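
/*
 * Over TCP every NCP request is prefixed with a small framing header built
 * in req->sign[] below: the NCP_TCP_XMIT_MAGIC marker, the total length of
 * the framed packet, NCP_TCP_XMIT_VERSION, and what appears to be the
 * maximum reply size the client is prepared to accept (req->datalen + 8),
 * all in network byte order.  Replies are expected to start with
 * NCP_TCP_RCVD_MAGIC, which __ncptcp_rcv_proc() checks before trusting the
 * rest of the record.
 */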
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header *h;

	req->tx_ciov = req->tx_iov;
	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(req->tx_totallen + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	req->tx_iovlen += 1;
	req->tx_totallen += signlen;

	server->tx.creq = req;
	__ncptcp_try_send(server);
}
static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	/* we copy the data so that we do not depend on the caller
	   buffer */
	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
	req->tx_iov[1].iov_base = server->txbuf;

	if (server->ncp_sock->type == SOCK_STREAM)
		ncptcp_start_request(server, req);
	else
		ncpdgram_start_request(server, req);
}
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		printk(KERN_ERR "ncpfs: tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	if (server->tx.creq || server->rcv.creq) {
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}
static void __ncp_next_request(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	server->rcv.creq = NULL;
	if (list_empty(&server->tx.requests)) {
		return;
	}
	req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
	list_del_init(&req->req);
	__ncp_start_request(server, req);
}
static void info_server(struct ncp_server *server, unsigned int id, const void *data, size_t len)
{
	if (server->info_sock) {
		struct kvec iov[2];
		__be32 hdr[2];

		hdr[0] = cpu_to_be32(len + 8);
		hdr[1] = cpu_to_be32(id);

		iov[0].iov_base = hdr;
		iov[0].iov_len = 8;
		iov[1].iov_base = (void *) data;
		iov[1].iov_len = len;

		do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
	}
}
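
/*
 * info_server() forwards packets that the filesystem cannot consume itself
 * to server->info_sock, prefixing them with an 8-byte big-endian header
 * (total length, then a message id).  That socket is assumed to be
 * registered by a user-space helper that wants to see such out-of-band NCP
 * traffic; when no info socket is set, the data is simply dropped.
 */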
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket *sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					DPRINTK("recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					DPRINTK("too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					DPRINTK("bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				/* answer the watchdog query in place */
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								printk(KERN_INFO "ncpfs: Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply *req;

		req = server->rcv.creq;
		if (req) {
			int timeout;

			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			timeout = server->timeout_last << 1;
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}
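
/*
 * Retransmit policy for the datagram transport: on a timeout the pending
 * request is simply sent again and the retransmit interval is doubled,
 * capped at NCP_MAX_RPC_TIMEOUT.  Only soft mounts (NCP_MOUNT_SOFT) ever
 * give up, failing the request with -ETIMEDOUT once timeout_retries is
 * exhausted; hard mounts keep retrying indefinitely.
 */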
void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
	int result;

	if (buffer) {
		result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
	} else {
		/* no destination buffer: read into a scratch area to skip data */
		static unsigned char dummy[1024];

		if (len > sizeof(dummy)) {
			len = sizeof(dummy);
		}
		result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
	}
	if (result < 0) {
		return result;
	}
	if (result > len) {
		printk(KERN_ERR "ncpfs: tcp: bug in recvmsg (%u > %Zu)\n", result, len);
		return -EIO;
	}
	return result;
}
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					printk(KERN_ERR "ncpfs: tcp: error in recvmsg: %d\n", result);
				} else {
					DPRINTK(KERN_ERR "ncpfs: tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
			case 0:
				if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
					__ncptcp_abort(server);
					return -EIO;
				}
				datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
				if (datalen < 10) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
				if (server->sign_active) {
					if (datalen < 18) {
						printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d\n", datalen);
						__ncptcp_abort(server);
						return -EIO;
					}
					server->rcv.buf.len = datalen - 8;
					server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
					server->rcv.len = 8;
					server->rcv.state = 4;
					break;
				}
#endif
				type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;
#endif
				if (type != NCP_REPLY) {
					if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
						*(__u16*)(server->unexpected_packet.data) = htons(type);
						server->unexpected_packet.len = datalen - 8;

						server->rcv.state = 5;
						server->rcv.ptr = server->unexpected_packet.data + 2;
						server->rcv.len = datalen - 10;
						break;
					}
					DPRINTK("ncpfs: tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
					server->rcv.state = 2;
skipdata:;
					server->rcv.ptr = NULL;
					server->rcv.len = datalen - 10;
					break;
				}
				req = server->rcv.creq;
				if (!req) {
					DPRINTK(KERN_ERR "ncpfs: Reply without appropriate request\n");
					goto skipdata2;
				}
				if (datalen > req->datalen + 8) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8);
					server->rcv.state = 3;
					goto skipdata;
				}
				req->datalen = datalen - 8;
				((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
				server->rcv.ptr = server->rxbuf + 2;
				server->rcv.len = datalen - 10;
				server->rcv.state = 1;
				break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			case 4:
				datalen = server->rcv.buf.len;
				type = ntohs(server->rcv.buf.type2);
				goto cont;
#endif
			case 1:
				req = server->rcv.creq;
				if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
					if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
						printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
					if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
						printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
				}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
				if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
					if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
						printk(KERN_ERR "ncpfs: tcp: Signature violation\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
				}
#endif
				ncp_finish_request(server, req, req->datalen);
			nextreq:;
				__ncp_next_request(server);
				/* fall through */
			case 2:
			next:;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf;
				server->rcv.len = 10;
				server->rcv.state = 0;
				break;
			case 3:
				ncp_finish_request(server, server->rcv.creq, -EIO);
				goto nextreq;
			case 5:
				info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
				goto next;
		}
	}
}
void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char *reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	req->tx_iovlen = 1;
	req->tx_totallen = size;
	req->tx_type = *(u_int16_t *)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}

	result = req->result;

out:
	ncp_req_put(req);

	return result;
}
/*
 * We need the server to be locked here, so check!
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void *reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		printk(KERN_ERR "ncpfs: Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		printk(KERN_ERR "ncpfs: Connection invalid!\n");
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	DDPRINTK("do_ncp_rpc_call returned %d\n", result);

	return result;
}
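
/*
 * Expected calling pattern (see ncp_lock_server()/ncp_unlock_server() at the
 * bottom of this file): callers are supposed to take the per-server mutex
 * with ncp_lock_server() before building a packet in server->packet and
 * issuing ncp_request2(), and to drop it with ncp_unlock_server() afterwards;
 * ncp_do_request() only sanity-checks that this happened via server->lock.
 */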
/* ncp_do_request assures that at least a complete reply header is
 * received. It assumes that server->current_size contains the ncp
 * request size
 */
int ncp_request2(struct ncp_server *server, int function,
		void *rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header *reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		DPRINTK("ncp_request_error: %d\n", result);
		goto out;
	}
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;

	if (result != 0)
		PPRINTK("ncp_request: completion code=%x\n", result);
out:
	return result;
}
int ncp_connect(struct ncp_server *server)
{
	struct ncp_request_header *h;
	int result;

	server->connection = 0xFFFF;
	server->sequence = 255;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_ALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
	if (result < 0)
		goto out;
	server->connection = h->conn_low + (h->conn_high * 256);
	result = 0;
out:
	return result;
}
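
/*
 * Connection setup in a nutshell: ncp_connect() sends an
 * NCP_ALLOC_SLOT_REQUEST with the connection number deliberately set to
 * 0xFFFF and the sequence counter reset; the server's reply carries the
 * connection number actually assigned (conn_low/conn_high), which is then
 * placed in every subsequent request header.  ncp_disconnect() below
 * releases the slot again with NCP_DEALLOC_SLOT_REQUEST.
 */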
int ncp_disconnect(struct ncp_server *server)
{
	struct ncp_request_header *h;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_DEALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
}
void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		printk(KERN_WARNING "ncp_lock_server: was locked!\n");
	server->lock = 1;
}
void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		printk(KERN_WARNING "ncp_unlock_server: was not locked!\n");
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}