/*
 *  linux/fs/ncpfs/sock.c
 *
 *  Copyright (C) 1992, 1993 Rick Sladkey
 *
 *  Modified 1995, 1996 by Volker Lendecke to be usable for ncp
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */

#include <linux/time.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <net/scm.h>
#include <net/sock.h>
#include <linux/ipx.h>
#include <linux/poll.h>
#include <linux/file.h>

#include <linux/ncp_fs.h>

#include "ncpsign_kernel.h"

static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
{
	struct msghdr msg = {NULL, };
	struct kvec iov = {buf, size};
	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
}

static inline int do_send(struct socket *sock, struct kvec *vec, int count,
			  int len, unsigned flags)
{
	struct msghdr msg = { .msg_flags = flags };
	return kernel_sendmsg(sock, &msg, vec, count, len);
}

static int _send(struct socket *sock, const void *buff, int len)
{
	struct kvec vec;

	vec.iov_base = (void *) buff;
	vec.iov_len = len;
	return do_send(sock, &vec, 1, len, 0);
}

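/*
 * Tracks one outstanding NCP request from submission to completion.
 * tx_iov[] describes the (optionally signed) request being transmitted,
 * reply_buf/datalen tell the receive path where to copy the reply, and
 * status moves from RQ_QUEUED/RQ_INPROGRESS to RQ_DONE (or RQ_ABANDONED
 * if the submitter gave up, e.g. on a signal).  The structure is
 * reference counted so the receive path can still complete a request
 * whose submitter has already dropped its reference.
 */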
struct ncp_request_reply {
	struct list_head req;
	wait_queue_head_t wq;
	atomic_t refs;
	unsigned char* reply_buf;
	size_t datalen;
	int result;
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct kvec* tx_ciov;
	size_t tx_totallen;
	size_t tx_iovlen;
	struct kvec tx_iov[3];
	u_int16_t tx_type;
	u_int32_t sign[6];
};

static inline struct ncp_request_reply* ncp_alloc_req(void)
{
	struct ncp_request_reply *req;

	req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
	if (!req)
		return NULL;

	init_waitqueue_head(&req->wq);
	atomic_set(&req->refs, (1));
	req->status = RQ_IDLE;

	return req;
}

static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}

static void ncp_req_put(struct ncp_request_reply *req)
{
	if (atomic_dec_and_test(&req->refs))
		kfree(req);
}

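/*
 * Socket callbacks.  They chain to the server's saved sk callbacks and
 * then just kick the receive/transmit work items; the actual socket I/O
 * is done from those work items in process context.
 */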
void ncp_tcp_data_ready(struct sock *sk, int len)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk, len);
	schedule_work(&server->rcv.tq);
}

void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}

void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}

void ncpdgram_timeout_call(unsigned long v)
{
	struct ncp_server *server = (void*)v;

	schedule_work(&server->timeout_tq);
}

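/*
 * Complete a request: copy the reply out of server->rxbuf unless the
 * submitter has abandoned it, mark it RQ_DONE, wake any waiter and drop
 * the reference held by the transmit/receive machinery.
 */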
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}

static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}

static inline int get_conn_number(struct ncp_reply_header *rp)
{
	return rp->conn_low | (rp->conn_high << 8);
}

static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
		case RQ_IDLE:
		case RQ_DONE:
			break;
		case RQ_QUEUED:
			list_del_init(&req->req);
			ncp_finish_request(server, req, err);
			break;
		case RQ_INPROGRESS:
			req->status = RQ_ABANDONED;
			break;
		case RQ_ABANDONED:
			break;
	}
}

static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}

static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}

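/*
 * Transmit helpers.  The iovec is copied before handing it to the
 * socket layer because sendmsg modifies the iovec it is given.  The TCP
 * variant also copes with short sends: it advances tx_ciov past what
 * was sent and is re-run from ncp_tcp_write_space() to send the rest.
 */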
static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
{
	struct kvec vec[3];
	/* sock_sendmsg updates iov pointers for us :-( */
	memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
	return do_send(sock, vec, req->tx_iovlen,
		       req->tx_totallen, MSG_DONTWAIT);
}

static void __ncptcp_try_send(struct ncp_server *server)
{
	struct ncp_request_reply *rq;
	struct kvec *iov;
	struct kvec iovc[3];
	int result;

	rq = server->tx.creq;
	if (!rq)
		return;

	/* sock_sendmsg updates iov pointers for us :-( */
	memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
	result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
			 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);

	if (result == -EAGAIN)
		return;

	if (result < 0) {
		printk(KERN_ERR "ncpfs: tcp: Send failed: %d\n", result);
		__ncp_abort_request(server, rq, result);
		return;
	}
	if (result >= rq->tx_totallen) {
		server->rcv.creq = rq;
		server->tx.creq = NULL;
		return;
	}
	rq->tx_totallen -= result;
	iov = rq->tx_ciov;
	while (iov->iov_len <= result) {
		result -= iov->iov_len;
		iov++;
		rq->tx_iovlen--;
	}
	iov->iov_base += result;
	iov->iov_len -= result;
	rq->tx_ciov = iov;
}

static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
	h->sequence = ++server->sequence;
}

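/*
 * Start a request on a datagram (IPX or UDP) socket: append the packet
 * signature as an extra iovec when signing is active, send the datagram
 * and arm the retransmit timer serviced by __ncpdgram_timeout_proc().
 */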
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header* h;

	req->tx_ciov = req->tx_iov + 1;

	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(req->tx_totallen), req->sign);
	if (signlen) {
		req->tx_ciov[1].iov_base = req->sign;
		req->tx_ciov[1].iov_len = signlen;
		req->tx_iovlen += 1;
		req->tx_totallen += signlen;
	}
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}

#define NCP_TCP_XMIT_MAGIC	(0x446D6454)
#define NCP_TCP_XMIT_VERSION	(1)
#define NCP_TCP_RCVD_MAGIC	(0x744E6350)

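/*
 * Start a request on a TCP socket: build the record header in
 * req->sign[] (magic, total length, version, maximum reply size),
 * prepend it as tx_iov[0] together with the optional packet signature,
 * then make it the current transmit request and try to push it out.
 */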
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen;
	struct ncp_request_header* h;

	req->tx_ciov = req->tx_iov;
	h = req->tx_iov[1].iov_base;
	ncp_init_header(server, req, h);
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16;

	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(req->tx_totallen + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	req->tx_iovlen += 1;
	req->tx_totallen += signlen;

	server->tx.creq = req;
	__ncptcp_try_send(server);
}

static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	/* we copy the data so that we do not depend on the caller
	   staying alive */
	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
	req->tx_iov[1].iov_base = server->txbuf;

	if (server->ncp_sock->type == SOCK_STREAM)
		ncptcp_start_request(server, req);
	else
		ncpdgram_start_request(server, req);
}

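/*
 * Only one request is on the wire at a time.  If the transmit or
 * receive slot is already busy, the new request is queued on
 * tx.requests and started later by __ncp_next_request().
 */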
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		printk(KERN_ERR "ncpfs: tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	if (server->tx.creq || server->rcv.creq) {
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}
	__ncp_start_request(server, req);
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}

static void __ncp_next_request(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	server->rcv.creq = NULL;
	if (list_empty(&server->tx.requests)) {
		return;
	}
	req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
	list_del_init(&req->req);
	__ncp_start_request(server, req);
}

static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
{
	if (server->info_sock) {
		struct kvec iov[2];
		__be32 hdr[2];

		hdr[0] = cpu_to_be32(len + 8);
		hdr[1] = cpu_to_be32(id);

		iov[0].iov_base = hdr;
		iov[0].iov_len = 8;
		iov[1].iov_base = (void *) data;
		iov[1].iov_len = len;

		do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
	}
}

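/*
 * Datagram receive worker.  Each packet header is peeked first; server
 * watchdog packets are answered in place, unexpected packet types are
 * forwarded to the info socket, a positive ACK refreshes the timeout,
 * and a reply for the current request is received into rxbuf (and, with
 * packet signing enabled, verified) before the request is completed.
 */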
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					DPRINTK("recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					DPRINTK("too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					DPRINTK("bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								printk(KERN_INFO "ncpfs: Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}

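/*
 * Datagram timeout handling: retransmit the current request with the
 * timeout doubled up to NCP_MAX_RPC_TIMEOUT; on a soft mount, give up
 * with -ETIMEDOUT once the retry count is exhausted.
 */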
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply* req;

		req = server->rcv.creq;
		if (req) {
			int timeout;

			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			timeout = server->timeout_last << 1;
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}

void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);
	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}

static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
	int result;

	if (buffer) {
		result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
	} else {
		static unsigned char dummy[1024];

		if (len > sizeof(dummy)) {
			len = sizeof(dummy);
		}
		result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
	}
	if (result < 0) {
		return result;
	}
	if (result > len) {
		printk(KERN_ERR "ncpfs: tcp: bug in recvmsg (%u > %Zu)\n", result, len);
		return -EIO;
	}
	return result;
}

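/*
 * TCP receive state machine.  rcv.state says what rcv.ptr/rcv.len are
 * collecting:
 *   0 - record header                4 - rest of a signed record header
 *   1 - reply body for current req   5 - packet for the info socket
 *   2 - data being discarded
 *   3 - oversized reply being discarded (the request fails with -EIO)
 */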
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					printk(KERN_ERR "ncpfs: tcp: error in recvmsg: %d\n", result);
				} else {
					DPRINTK(KERN_ERR "ncpfs: tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
			case 0:
				if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
					__ncptcp_abort(server);
					return -EIO;
				}
				datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
				if (datalen < 10) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
				if (server->sign_active) {
					if (datalen < 18) {
						printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d\n", datalen);
						__ncptcp_abort(server);
						return -EIO;
					}
					server->rcv.buf.len = datalen - 8;
					server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
					server->rcv.len = 8;
					server->rcv.state = 4;
					break;
				}
#endif
				type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;
#endif
				if (type != NCP_REPLY) {
					if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
						*(__u16*)(server->unexpected_packet.data) = htons(type);
						server->unexpected_packet.len = datalen - 8;

						server->rcv.state = 5;
						server->rcv.ptr = server->unexpected_packet.data + 2;
						server->rcv.len = datalen - 10;
						break;
					}
					DPRINTK("ncpfs: tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
					server->rcv.state = 2;
skipdata:;
					server->rcv.ptr = NULL;
					server->rcv.len = datalen - 10;
					break;
				}
				req = server->rcv.creq;
				if (!req) {
					DPRINTK(KERN_ERR "ncpfs: Reply without appropriate request\n");
					goto skipdata2;
				}
				if (datalen > req->datalen + 8) {
					printk(KERN_ERR "ncpfs: tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8);
					server->rcv.state = 3;
					goto skipdata;
				}
				req->datalen = datalen - 8;
				((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
				server->rcv.ptr = server->rxbuf + 2;
				server->rcv.len = datalen - 10;
				server->rcv.state = 1;
				break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			case 4:
				datalen = server->rcv.buf.len;
				type = ntohs(server->rcv.buf.type2);
				goto cont;
#endif
			case 1:
				req = server->rcv.creq;
				if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
					if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
						printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
					if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
						printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
				}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
				if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
					if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
						printk(KERN_ERR "ncpfs: tcp: Signature violation\n");
						__ncp_abort_request(server, req, -EIO);
						return -EIO;
					}
				}
#endif
				ncp_finish_request(server, req, req->datalen);
			nextreq:;
				__ncp_next_request(server);
				/* fall through: re-arm the header read */
			case 2:
			next:;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf;
				server->rcv.len = 10;
				server->rcv.state = 0;
				break;
			case 3:
				ncp_finish_request(server, server->rcv.creq, -EIO);
				goto nextreq;
			case 5:
				info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
				goto next;
		}
	}
}

void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}

void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}

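/*
 * Issue a single RPC and sleep interruptibly until the receive path
 * marks it RQ_DONE.  If a signal arrives first, the request is
 * abandoned rather than freed, so a late reply is still handled safely.
 */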
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	req->tx_iovlen = 1;
	req->tx_totallen = size;
	req->tx_type = *(u_int16_t*)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}
	result = req->result;

out:
	ncp_req_put(req);

	return result;
}

/*
 * We need the server to be locked here, so check!
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		printk(KERN_ERR "ncpfs: Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		printk(KERN_ERR "ncpfs: Connection invalid!\n");
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	DDPRINTK("do_ncp_rpc_call returned %d\n", result);

	return result;
}

/* ncp_do_request assures that at least a complete reply header is
 * received. It assumes that server->current_size contains the ncp
 * request size
 */
int ncp_request2(struct ncp_server *server, int function,
	void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		DPRINTK("ncp_request_error: %d\n", result);
		goto out;
	}
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;

	if (result != 0)
		PPRINTK("ncp_request: completion code=%x\n", result);
out:
	return result;
}

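/*
 * Connection slot management.  ncp_connect() sends an ALLOC_SLOT
 * request with connection number 0xFFFF and records the number the
 * server assigns; ncp_disconnect() releases it again.
 */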
int ncp_connect(struct ncp_server *server)
{
	struct ncp_request_header *h;
	int result;

	server->connection = 0xFFFF;
	server->sequence = 255;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_ALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
	if (result < 0)
		goto out;
	server->connection = h->conn_low + (h->conn_high * 256);
	result = 0;
out:
	return result;
}

int ncp_disconnect(struct ncp_server *server)
{
	struct ncp_request_header *h;

	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_DEALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
}

void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		printk(KERN_WARNING "ncp_lock_server: was locked!\n");
	server->lock = 1;
}

void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		printk(KERN_WARNING "ncp_unlock_server: was not locked!\n");
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}