server/sock.c
/*
 * Server-side socket management
 *
 * Copyright (C) 1999 Marcus Meissner, Ove Kåven
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * FIXME: we use read|write access in all cases. Shouldn't we depend that
 * on the access of the current handle?
 */

#include "config.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#ifdef HAVE_SYS_ERRNO_H
# include <sys/errno.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_POLL_H
# include <sys/poll.h>   /* for poll() in sock_try_event(); may already be pulled in indirectly */
#endif
#include <sys/ioctl.h>
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
#include <time.h>
#include <unistd.h>

#include "winerror.h"
#include "winbase.h"
#include "process.h"
#include "handle.h"
#include "thread.h"
#include "request.h"
#include "user.h"
#include "async.h"

/* To avoid conflicts with the Unix socket headers. Plus we only need a few
 * macros anyway.
 */
#define USE_WS_PREFIX
#include "winsock2.h"

struct sock
{
    struct object       obj;         /* object header */
    unsigned int        state;       /* status bits */
    unsigned int        mask;        /* event mask */
    unsigned int        hmask;       /* held (blocked) events */
    unsigned int        pmask;       /* pending events */
    unsigned int        flags;       /* socket flags */
    struct event       *event;       /* event object */
    user_handle_t       window;      /* window to send the message to */
    unsigned int        message;     /* message to send */
    unsigned int        wparam;      /* message wparam (socket handle) */
    int                 errors[FD_MAX_EVENTS]; /* event errors */
    struct sock        *deferred;    /* socket that waits for a deferred accept */
    struct async_queue  read_q;      /* queue for asynchronous reads */
    struct async_queue  write_q;     /* queue for asynchronous writes */
};

static void sock_dump( struct object *obj, int verbose );
static int sock_signaled( struct object *obj, struct thread *thread );
static int sock_get_poll_events( struct object *obj );
static void sock_poll_event( struct object *obj, int event );
static int sock_get_fd( struct object *obj );
static int sock_get_info( struct object *obj, struct get_file_info_reply *reply, int *flags );
static void sock_destroy( struct object *obj );
static int sock_get_error( int err );
static void sock_set_error(void);
static void sock_queue_async( struct object *obj, void *ptr, unsigned int status, int type, int count );

static const struct object_ops sock_ops =
{
    sizeof(struct sock),          /* size */
    sock_dump,                    /* dump */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    sock_signaled,                /* signaled */
    no_satisfied,                 /* satisfied */
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    sock_get_fd,                  /* get_fd */
    no_flush,                     /* flush */
    sock_get_info,                /* get_file_info */
    sock_queue_async,             /* queue_async */
    sock_destroy                  /* destroy */
};

/* Permutation of 0..FD_MAX_EVENTS - 1 representing the order in which
 * we post messages if there are multiple events. Used to send
 * messages. The problem is if there is both a FD_CONNECT event and,
 * say, an FD_READ event available on the same socket, we want to
 * notify the app of the connect event first. Otherwise it may
 * discard the read event because it thinks it hasn't connected yet.
 */
static const int event_bitorder[FD_MAX_EVENTS] =
{
    FD_CONNECT_BIT,
    FD_ACCEPT_BIT,
    FD_OOB_BIT,
    FD_WRITE_BIT,
    FD_READ_BIT,
    FD_CLOSE_BIT,
    6, 7, 8, 9  /* leftovers */
};

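/* re-evaluate the poll events for this socket and update the main select loop;
 * returns the new poll mask so callers can immediately check for events that
 * are already pending */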
static int sock_reselect( struct sock *sock )
{
    int ev = sock_get_poll_events( &sock->obj );

    if (debug_level)
        fprintf(stderr,"sock_reselect(%d): new mask %x\n", sock->obj.fd, ev);

    if (sock->obj.select == -1) {
        /* previously unconnected socket, is this reselect supposed to connect it? */
        if (!(sock->state & ~FD_WINE_NONBLOCKING)) return 0;
        /* ok, it is, attach it to the wineserver's main poll loop */
        add_select_user( &sock->obj );
    }
    /* update condition mask */
    set_select_events( &sock->obj, ev );
    return ev;
}

/* After POLLHUP is received, the socket will no longer be in the main select loop.
   This function is used to signal pending events nevertheless */
static void sock_try_event( struct sock *sock, int event )
{
    struct pollfd pfd;

    pfd.fd = sock->obj.fd;
    pfd.events = event;
    pfd.revents = 0;
    poll( &pfd, 1, 0 );

    if ( pfd.revents )
    {
        if ( debug_level ) fprintf( stderr, "sock_try_event: %x\n", pfd.revents );
        sock_poll_event( &sock->obj, pfd.revents );
    }
}

/* wake anybody waiting on the socket event or send the associated message */
static void sock_wake_up( struct sock *sock, int pollev )
{
    unsigned int events = sock->pmask & sock->mask;
    int i;
    int async_active = 0;

    if ( sock->flags & FD_FLAG_OVERLAPPED )
    {
        if ( pollev & (POLLIN|POLLPRI) && IS_READY( sock->read_q ) )
        {
            if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
            async_notify( sock->read_q.head, STATUS_ALERTED );
            async_active = 1;
        }
        if ( pollev & POLLOUT && IS_READY( sock->write_q ) )
        {
            if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
            async_notify( sock->write_q.head, STATUS_ALERTED );
            async_active = 1;
        }
    }

    /* Do not signal events if there are still pending asynchronous IO requests */
    /* We need this to delay FD_CLOSE events until all pending overlapped requests are processed */
    if ( !events || async_active ) return;

    if (sock->event)
    {
        if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
        set_event( sock->event );
    }
    if (sock->window)
    {
        if (debug_level) fprintf(stderr, "signalling events %x win %x\n", events, sock->window );
        for (i = 0; i < FD_MAX_EVENTS; i++)
        {
            int event = event_bitorder[i];
            if (sock->pmask & (1 << event))
            {
                unsigned int lparam = (1 << event) | (sock->errors[event] << 16);
                post_message( sock->window, sock->message, sock->wparam, lparam );
            }
        }
        sock->pmask = 0;
        sock_reselect( sock );
    }
}

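/* Note: the lparam posted by sock_wake_up() above carries the FD_* event bit in
 * its low word and the WSA error code in its high word, which is presumably what
 * a WSAAsyncSelect() client decodes with WSAGETSELECTEVENT(lParam) /
 * WSAGETSELECTERROR(lParam) in its window procedure. */

/* retrieve the pending error on a Unix socket fd, mapped to a WSA error code */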
inline static int sock_error( int s )
{
    unsigned int optval = 0, optlen;

    optlen = sizeof(optval);
    getsockopt( s, SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen );
    return optval ? sock_get_error(optval) : 0;
}

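/* callback from the main select loop: translate poll() events into the
 * corresponding FD_* network events and wake up whoever is interested */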
static void sock_poll_event( struct object *obj, int event )
{
    struct sock *sock = (struct sock *)obj;
    int empty_recv = 0;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %d select event: %x\n", sock->obj.fd, event);
    if (sock->state & FD_CONNECT)
    {
        /* connecting */
        if (event & POLLOUT)
        {
            /* we got connected */
            sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
            sock->state &= ~FD_CONNECT;
            sock->pmask |= FD_CONNECT;
            sock->errors[FD_CONNECT_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d connection success\n", sock->obj.fd);
        }
        else if (event & (POLLERR|POLLHUP))
        {
            /* we didn't get connected? */
            sock->state &= ~FD_CONNECT;
            sock->pmask |= FD_CONNECT;
            sock->errors[FD_CONNECT_BIT] = sock_error( sock->obj.fd );
            if (debug_level)
                fprintf(stderr, "socket %d connection failure\n", sock->obj.fd);
        }
    } else
    if (sock->state & FD_WINE_LISTENING)
    {
        /* listening */
        if (event & POLLIN)
        {
            /* incoming connection */
            sock->pmask |= FD_ACCEPT;
            sock->errors[FD_ACCEPT_BIT] = 0;
            sock->hmask |= FD_ACCEPT;
        }
        else if (event & (POLLERR|POLLHUP))
        {
            /* failed incoming connection? */
            sock->pmask |= FD_ACCEPT;
            sock->errors[FD_ACCEPT_BIT] = sock_error( sock->obj.fd );
            sock->hmask |= FD_ACCEPT;
        }
    } else
    {
        /* normal data flow */
        if (event & POLLIN)
        {
            char dummy;
            int nr;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            nr = recv( sock->obj.fd, &dummy, 1, MSG_PEEK );
            if ( nr > 0 )
            {
                /* incoming data */
                sock->pmask |= FD_READ;
                sock->hmask |= (FD_READ|FD_CLOSE);
                sock->errors[FD_READ_BIT] = 0;
                if (debug_level)
                    fprintf(stderr, "socket %d is readable\n", sock->obj.fd );
            }
            else if ( nr == 0 )
                empty_recv = 1;
            else
            {
                /* EAGAIN can happen if an async recv() falls between the server's poll()
                   call and the invocation of this routine */
                if ( errno == EAGAIN )
                    event &= ~POLLIN;
                else
                {
                    if ( debug_level )
                        fprintf( stderr, "recv error on socket %d: %d\n", sock->obj.fd, errno );
                    event = POLLERR;
                }
            }
        }
        else if (event & POLLHUP) empty_recv = 1;

        if (event & POLLOUT)
        {
            sock->pmask |= FD_WRITE;
            sock->hmask |= FD_WRITE;
            sock->errors[FD_WRITE_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d is writable\n", sock->obj.fd);
        }
        if (event & POLLPRI)
        {
            sock->pmask |= FD_OOB;
            sock->hmask |= FD_OOB;
            sock->errors[FD_OOB_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d got OOB data\n", sock->obj.fd);
        }
        /* According to WS2 specs, FD_CLOSE is only delivered when there is
           no more data to be read (i.e. empty_recv = 1) */
        else if ( empty_recv && (sock->state & (FD_READ|FD_WRITE) ))
        {
            sock->errors[FD_CLOSE_BIT] = sock_error( sock->obj.fd );
            if ( event & POLLERR )
                sock->state &= ~(FD_WINE_CONNECTED|FD_WRITE);
            sock->pmask |= FD_CLOSE;
            sock->hmask |= FD_CLOSE;
            if (debug_level)
                fprintf(stderr, "socket %d aborted by error %d, event: %x - removing from select loop\n",
                        sock->obj.fd, sock->errors[FD_CLOSE_BIT], event);
        }
    }

    if ( sock->pmask & FD_CLOSE || event & (POLLERR|POLLHUP) )
    {
        if ( debug_level )
            fprintf( stderr, "removing socket %d from select loop\n", sock->obj.fd );
        set_select_events( &sock->obj, -1 );
    }
    else
        sock_reselect( sock );

    /* wake up anyone waiting for whatever just happened */
    if ( sock->pmask & sock->mask || sock->flags & FD_FLAG_OVERLAPPED ) sock_wake_up( sock, event );

    /* if anyone is stupid enough to wait on the socket object itself,
     * maybe we should wake them up too, just in case? */
    wake_up( &sock->obj, 0 );
}

static void sock_dump( struct object *obj, int verbose )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
    printf( "Socket fd=%d, state=%x, mask=%x, pending=%x, held=%x\n",
            sock->obj.fd, sock->state,
            sock->mask, sock->pmask, sock->hmask );
}

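/* a socket object is signaled when poll() reports any of the events it is waiting for */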
static int sock_signaled( struct object *obj, struct thread *thread )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    return check_select_events( sock->obj.fd, sock_get_poll_events( &sock->obj ) );
}

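/* compute the poll() mask to wait for, from the application's event mask,
 * the socket state and the events currently held back in hmask */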
static int sock_get_poll_events( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    unsigned int mask = sock->mask & sock->state & ~sock->hmask;
    int ev = 0;

    assert( obj->ops == &sock_ops );

    if (sock->state & FD_CONNECT)
        /* connecting, wait for writable */
        return POLLOUT;
    if (sock->state & FD_WINE_LISTENING)
        /* listening, wait for readable */
        return (sock->hmask & FD_ACCEPT) ? 0 : POLLIN;

    if (mask & (FD_READ) || (sock->flags & WSA_FLAG_OVERLAPPED && IS_READY (sock->read_q)))
        ev |= POLLIN | POLLPRI;
    if (mask & FD_WRITE || (sock->flags & WSA_FLAG_OVERLAPPED && IS_READY (sock->write_q)))
        ev |= POLLOUT;
    /* We use POLLIN with 0 bytes recv() as FD_CLOSE indication. */
    if (sock->mask & ~sock->hmask & FD_CLOSE)
        ev |= POLLIN;

    return ev;
}

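/* return the Unix file descriptor of the socket */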
static int sock_get_fd( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
    return sock->obj.fd;
}

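/* fill in file information for the socket; sockets are reported as pipe-like objects */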
static int sock_get_info( struct object *obj, struct get_file_info_reply *reply, int *flags )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    if (reply)
    {
        reply->type        = FILE_TYPE_PIPE;
        reply->attr        = 0;
        reply->access_time = 0;
        reply->write_time  = 0;
        reply->size_high   = 0;
        reply->size_low    = 0;
        reply->links       = 0;
        reply->index_high  = 0;
        reply->index_low   = 0;
        reply->serial      = 0;
    }
    *flags = 0;
    if (sock->flags & WSA_FLAG_OVERLAPPED) *flags |= FD_FLAG_OVERLAPPED;
    if ( !(sock->state & FD_READ  ) ) *flags |= FD_FLAG_RECV_SHUTDOWN;
    if ( !(sock->state & FD_WRITE ) ) *flags |= FD_FLAG_SEND_SHUTDOWN;
    return FD_TYPE_SOCKET;
}

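/* queue (status == STATUS_PENDING) or terminate an asynchronous I/O request
 * on an overlapped socket */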
static void sock_queue_async( struct object *obj, void *ptr, unsigned int status, int type, int count )
{
    struct sock *sock = (struct sock *)obj;
    struct async_queue *q;
    struct async *async;
    int pollev;

    assert( obj->ops == &sock_ops );

    if ( !(sock->flags & WSA_FLAG_OVERLAPPED) )
    {
        set_error( STATUS_INVALID_HANDLE );
        return;
    }

    switch( type )
    {
    case ASYNC_TYPE_READ:
        q = &sock->read_q;
        sock->hmask &= ~FD_CLOSE;
        break;
    case ASYNC_TYPE_WRITE:
        q = &sock->write_q;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    async = find_async( q, current, ptr );

    if ( status == STATUS_PENDING )
    {
        if ( ( !( sock->state & FD_READ ) && type == ASYNC_TYPE_READ ) ||
             ( !( sock->state & FD_WRITE ) && type == ASYNC_TYPE_WRITE ) )
        {
            set_error( STATUS_PIPE_DISCONNECTED );
            if ( async ) destroy_async( async );
        }
        else
        {
            if ( !async )
                async = create_async( obj, current, ptr );
            if ( !async )
                return;

            async->status = STATUS_PENDING;
            if ( !async->q )
                async_insert( q, async );
        }
    }
    else if ( async ) destroy_async( async );
    else set_error( STATUS_INVALID_PARAMETER );

    pollev = sock_reselect( sock );
    if ( pollev ) sock_try_event( sock, pollev );
}

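/* destroy a socket object: release the deferred accept socket, the async queues
 * and the associated event, if any */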
static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    if ( sock->deferred )
        release_object( sock->deferred );

    if ( sock->flags & WSA_FLAG_OVERLAPPED )
    {
        destroy_async_queue( &sock->read_q );
        destroy_async_queue( &sock->write_q );
    }
    if (sock->event) release_object( sock->event );
}

/* create a new and unconnected socket */
static struct object *create_socket( int family, int type, int protocol, unsigned int flags )
{
    struct sock *sock;
    int sockfd;

    sockfd = socket( family, type, protocol );
    if (debug_level)
        fprintf(stderr,"socket(%d,%d,%d)=%d\n",family,type,protocol,sockfd);
    if (sockfd == -1) {
        sock_set_error();
        return NULL;
    }
    fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    if (!(sock = alloc_object( &sock_ops, -1 ))) return NULL;
    sock->obj.fd   = sockfd;
    sock->state    = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
    sock->mask     = 0;
    sock->hmask    = 0;
    sock->pmask    = 0;
    sock->flags    = flags;
    sock->event    = NULL;
    sock->window   = 0;
    sock->message  = 0;
    sock->wparam   = 0;
    sock->deferred = NULL;
    if (sock->flags & WSA_FLAG_OVERLAPPED)
    {
        init_async_queue (&sock->read_q);
        init_async_queue (&sock->write_q);
    }
    sock_reselect( sock );
    clear_error();
    return &sock->obj;
}

/* accept a socket (creates a new fd) */
static struct sock *accept_socket( obj_handle_t handle )
{
    struct sock *acceptsock;
    struct sock *sock;
    int acceptfd;
    struct sockaddr saddr;
    int slen;

    sock = (struct sock *)get_handle_obj( current->process, handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    if (!sock)
        return NULL;

    if ( sock->deferred ) {
        acceptsock = sock->deferred;
        sock->deferred = NULL;
    } else {

        /* Try to accept(2). We can't be sure that this is an already connected
         * socket or that accept() is allowed on it. In those cases we will get
         * -1/errno back.
         */
        slen = sizeof(saddr);
        acceptfd = accept( sock->obj.fd, &saddr, &slen );
        if (acceptfd == -1) {
            sock_set_error();
            release_object( sock );
            return NULL;
        }
        if (!(acceptsock = alloc_object( &sock_ops, -1 )))
        {
            release_object( sock );
            return NULL;
        }

        /* newly created socket gets the same properties as the listening socket */
        fcntl(acceptfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
        acceptsock->obj.fd  = acceptfd;
        acceptsock->state   = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
        if (sock->state & FD_WINE_NONBLOCKING)
            acceptsock->state |= FD_WINE_NONBLOCKING;
        acceptsock->mask    = sock->mask;
        acceptsock->hmask   = 0;
        acceptsock->pmask   = 0;
        acceptsock->event   = NULL;
        acceptsock->window  = sock->window;
        acceptsock->message = sock->message;
        acceptsock->wparam  = 0;
        if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event );
        acceptsock->flags    = sock->flags;
        acceptsock->deferred = 0;
        if ( acceptsock->flags & WSA_FLAG_OVERLAPPED )
        {
            init_async_queue( &acceptsock->read_q );
            init_async_queue( &acceptsock->write_q );
        }
    }
    clear_error();
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    release_object( sock );
    return acceptsock;
}

/* map a Unix errno value to the corresponding WSA error code */
static int sock_get_error( int err )
{
    switch (err)
    {
        case EINTR:             return WSAEINTR; break;
        case EBADF:             return WSAEBADF; break;
        case EPERM:
        case EACCES:            return WSAEACCES; break;
        case EFAULT:            return WSAEFAULT; break;
        case EINVAL:            return WSAEINVAL; break;
        case EMFILE:            return WSAEMFILE; break;
        case EWOULDBLOCK:       return WSAEWOULDBLOCK; break;
        case EINPROGRESS:       return WSAEINPROGRESS; break;
        case EALREADY:          return WSAEALREADY; break;
        case ENOTSOCK:          return WSAENOTSOCK; break;
        case EDESTADDRREQ:      return WSAEDESTADDRREQ; break;
        case EMSGSIZE:          return WSAEMSGSIZE; break;
        case EPROTOTYPE:        return WSAEPROTOTYPE; break;
        case ENOPROTOOPT:       return WSAENOPROTOOPT; break;
        case EPROTONOSUPPORT:   return WSAEPROTONOSUPPORT; break;
        case ESOCKTNOSUPPORT:   return WSAESOCKTNOSUPPORT; break;
        case EOPNOTSUPP:        return WSAEOPNOTSUPP; break;
        case EPFNOSUPPORT:      return WSAEPFNOSUPPORT; break;
        case EAFNOSUPPORT:      return WSAEAFNOSUPPORT; break;
        case EADDRINUSE:        return WSAEADDRINUSE; break;
        case EADDRNOTAVAIL:     return WSAEADDRNOTAVAIL; break;
        case ENETDOWN:          return WSAENETDOWN; break;
        case ENETUNREACH:       return WSAENETUNREACH; break;
        case ENETRESET:         return WSAENETRESET; break;
        case ECONNABORTED:      return WSAECONNABORTED; break;
        case EPIPE:
        case ECONNRESET:        return WSAECONNRESET; break;
        case ENOBUFS:           return WSAENOBUFS; break;
        case EISCONN:           return WSAEISCONN; break;
        case ENOTCONN:          return WSAENOTCONN; break;
        case ESHUTDOWN:         return WSAESHUTDOWN; break;
        case ETOOMANYREFS:      return WSAETOOMANYREFS; break;
        case ETIMEDOUT:         return WSAETIMEDOUT; break;
        case ECONNREFUSED:      return WSAECONNREFUSED; break;
        case ELOOP:             return WSAELOOP; break;
        case ENAMETOOLONG:      return WSAENAMETOOLONG; break;
        case EHOSTDOWN:         return WSAEHOSTDOWN; break;
        case EHOSTUNREACH:      return WSAEHOSTUNREACH; break;
        case ENOTEMPTY:         return WSAENOTEMPTY; break;
#ifdef EPROCLIM
        case EPROCLIM:          return WSAEPROCLIM; break;
#endif
#ifdef EUSERS
        case EUSERS:            return WSAEUSERS; break;
#endif
#ifdef EDQUOT
        case EDQUOT:            return WSAEDQUOT; break;
#endif
#ifdef ESTALE
        case ESTALE:            return WSAESTALE; break;
#endif
#ifdef EREMOTE
        case EREMOTE:           return WSAEREMOTE; break;
#endif
        default: errno = err; perror("sock_set_error"); return ERROR_UNKNOWN; break;
    }
}

/* set the last error depending on errno */
static void sock_set_error(void)
{
    set_error( sock_get_error( errno ) );
}

/* create a socket */
DECL_HANDLER(create_socket)
{
    struct object *obj;

    reply->handle = 0;
    if ((obj = create_socket( req->family, req->type, req->protocol, req->flags )) != NULL)
    {
        reply->handle = alloc_handle( current->process, obj, req->access, req->inherit );
        release_object( obj );
    }
}

/* accept a socket */
DECL_HANDLER(accept_socket)
{
    struct sock *sock;

    reply->handle = 0;
    if ((sock = accept_socket( req->lhandle )) != NULL)
    {
        reply->handle = alloc_handle( current->process, &sock->obj, req->access, req->inherit );
        sock->wparam = reply->handle;  /* wparam for message is the socket handle */
        sock_reselect( sock );
        release_object( &sock->obj );
    }
}

/* set socket event parameters */
DECL_HANDLER(set_socket_event)
{
    struct sock *sock;
    struct event *old_event;
    int pollev;

    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops )))
        return;
    old_event = sock->event;
    sock->mask    = req->mask;
    sock->event   = NULL;
    sock->window  = req->window;
    sock->message = req->msg;
    sock->wparam  = req->handle;  /* wparam is the socket handle */
    if (req->event) sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );

    if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);

    pollev = sock_reselect( sock );
    if ( pollev ) sock_try_event( sock, pollev );

    if (sock->mask)
        sock->state |= FD_WINE_NONBLOCKING;

    /* if a network event is pending, signal the event object:
       FD_CONNECT or FD_ACCEPT network events may already have happened
       before a WSAEventSelect() was done on the socket
       (when dealing with asynchronous sockets) */
    if (sock->pmask & sock->mask) sock_wake_up( sock, pollev );

    if (old_event) release_object( old_event ); /* we're through with it */
    release_object( &sock->obj );
}

/* get socket event parameters */
DECL_HANDLER(get_socket_event)
{
    struct sock *sock;

    sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    if (!sock)
    {
        reply->mask  = 0;
        reply->pmask = 0;
        reply->state = 0;
        set_error( WSAENOTSOCK );
        return;
    }
    reply->mask  = sock->mask;
    reply->pmask = sock->pmask;
    reply->state = sock->state;
    set_reply_data( sock->errors, min( get_reply_max_size(), sizeof(sock->errors) ));

    if (req->service)
    {
        if (req->c_event)
        {
            struct event *cevent = get_event_obj( current->process, req->c_event,
                                                  EVENT_MODIFY_STATE );
            if (cevent)
            {
                reset_event( cevent );
                release_object( cevent );
            }
        }
        sock->pmask = 0;
        sock_reselect( sock );
    }
    release_object( &sock->obj );
}

/* re-enable pending socket events */
DECL_HANDLER(enable_socket_event)
{
    struct sock *sock;
    int pollev;

    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops )))
        return;

    sock->pmask &= ~req->mask; /* is this safe? */
    sock->hmask &= ~req->mask;
    if ( req->mask & FD_READ )
        sock->hmask &= ~FD_CLOSE;
    sock->state |= req->sstate;
    sock->state &= ~req->cstate;

    pollev = sock_reselect( sock );
    if ( pollev ) sock_try_event( sock, pollev );

    release_object( &sock->obj );
}

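/* store a socket to be returned by a later, deferred accept on this listening socket */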
DECL_HANDLER(set_socket_deferred)
{
    struct sock *sock, *acceptsock;

    sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    if ( !sock )
    {
        set_error( WSAENOTSOCK );
        return;
    }
    acceptsock = (struct sock *)get_handle_obj( current->process, req->deferred,
                                                GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    if ( !acceptsock )
    {
        release_object( sock );
        set_error( WSAENOTSOCK );
        return;
    }
    sock->deferred = acceptsock;
    release_object( sock );
}