/*
 * Server-side socket management
 *
 * Copyright (C) 1999 Marcus Meissner, Ove Kåven
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * FIXME: we use read|write access in all cases. Shouldn't that depend
 * on the access rights of the current handle?
 */

#include "config.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#ifdef HAVE_SYS_ERRNO_H
# include <sys/errno.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#include <sys/ioctl.h>
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
#include <time.h>
#include <unistd.h>

#include "winerror.h"
#include "winbase.h"
#include "process.h"
#include "handle.h"
#include "thread.h"
#include "request.h"
#include "user.h"
#include "async.h"

/* To avoid conflicts with the Unix socket headers. Plus we only need a few
 * macros anyway.
 */
#define USE_WS_PREFIX
#include "winsock2.h"

struct sock
{
    struct object       obj;         /* object header */
    unsigned int        state;       /* status bits */
    unsigned int        mask;        /* event mask */
    unsigned int        hmask;       /* held (blocked) events */
    unsigned int        pmask;       /* pending events */
    unsigned int        flags;       /* socket flags */
    struct event       *event;       /* event object */
    user_handle_t       window;      /* window to send the message to */
    unsigned int        message;     /* message to send */
    unsigned int        wparam;      /* message wparam (socket handle) */
    int                 errors[FD_MAX_EVENTS]; /* event errors */
    struct sock        *deferred;    /* socket that waits for a deferred accept */
    struct async_queue  read_q;      /* queue for asynchronous reads */
    struct async_queue  write_q;     /* queue for asynchronous writes */
};

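/* A rough guide to the masks above (inferred from how they are used below):
 * 'mask' is the set of FD_* events the client asked to be notified about,
 * 'pmask' the events that have occurred and are waiting to be reported, and
 * 'hmask' the events that are currently "held": already signalled and not yet
 * re-enabled by the client, so they are excluded from the poll mask until an
 * enable_socket_event request clears them. 'state' tracks what the socket can
 * currently do (FD_READ/FD_WRITE plus the FD_WINE_* status bits). */
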
static void sock_dump( struct object *obj, int verbose );
static int sock_signaled( struct object *obj, struct thread *thread );
static int sock_get_poll_events( struct object *obj );
static void sock_poll_event( struct object *obj, int event );
static int sock_get_fd( struct object *obj );
static int sock_get_info( struct object *obj, struct get_file_info_reply *reply, int *flags );
static void sock_destroy( struct object *obj );
static int sock_get_error( int err );
static void sock_set_error(void);
static void sock_queue_async( struct object *obj, void *ptr, unsigned int status, int type, int count );

static const struct object_ops sock_ops =
{
    sizeof(struct sock),          /* size */
    sock_dump,                    /* dump */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    sock_signaled,                /* signaled */
    no_satisfied,                 /* satisfied */
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    sock_get_fd,                  /* get_fd */
    no_flush,                     /* flush */
    sock_get_info,                /* get_file_info */
    sock_queue_async,             /* queue_async */
    sock_destroy                  /* destroy */
};

/* Permutation of 0..FD_MAX_EVENTS - 1 representing the order in which
 * we post messages if there are multiple events.  The problem is that if
 * both an FD_CONNECT event and, say, an FD_READ event are available on the
 * same socket, we want to notify the app of the connect event first;
 * otherwise it may discard the read event because it thinks it hasn't
 * connected yet.
 */
static const int event_bitorder[FD_MAX_EVENTS] =
{
    FD_CONNECT_BIT,
    FD_ACCEPT_BIT,
    FD_OOB_BIT,
    FD_WRITE_BIT,
    FD_READ_BIT,
    FD_CLOSE_BIT,
    6, 7, 8, 9  /* leftovers */
};

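/* Note: sock_wake_up() below walks this table when posting window messages;
 * the lparam it builds, (1 << event) | (error << 16), follows the standard
 * Winsock WSAMAKESELECTREPLY() layout (event bit in the low word, error code
 * in the high word). */
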
static int sock_reselect( struct sock *sock )
{
    int ev = sock_get_poll_events( &sock->obj );

    if (debug_level)
        fprintf(stderr,"sock_reselect(%d): new mask %x\n", sock->obj.fd, ev);

    if (sock->obj.select == -1) {
        /* previously unconnected socket, is this reselect supposed to connect it? */
        if (!(sock->state & ~FD_WINE_NONBLOCKING)) return 0;
        /* ok, it is, attach it to the wineserver's main poll loop */
        add_select_user( &sock->obj );
    }
    /* update condition mask */
    set_select_events( &sock->obj, ev );
    return ev;
}

/* After POLLHUP is received, the socket will no longer be in the main select
   loop.  This function is used to signal pending events on it anyway. */
static void sock_try_event( struct sock *sock, int event )
{
    struct pollfd pfd;

    pfd.fd = sock->obj.fd;
    pfd.events = event;
    pfd.revents = 0;
    poll( &pfd, 1, 0 );

    if ( pfd.revents )
    {
        if ( debug_level ) fprintf( stderr, "sock_try_event: %x\n", pfd.revents );
        sock_poll_event( &sock->obj, pfd.revents );
    }
}

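/* Callers below pair sock_reselect() with sock_try_event(): reselect returns
 * the new poll mask, and if it is non-zero we poll the fd once immediately so
 * that events which are already pending (for instance on a socket that was
 * dropped from the select loop after POLLHUP) are not lost. */
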
/* wake anybody waiting on the socket event or send the associated message */
static void sock_wake_up( struct sock *sock, int pollev )
{
    unsigned int events = sock->pmask & sock->mask;
    int i;
    int async_active = 0;

    if ( sock->flags & FD_FLAG_OVERLAPPED )
    {
        if ( pollev & (POLLIN|POLLPRI) && IS_READY( sock->read_q ) )
        {
            if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
            async_notify( sock->read_q.head, STATUS_ALERTED );
            async_active = 1;
        }
        if ( pollev & POLLOUT && IS_READY( sock->write_q ) )
        {
            if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
            async_notify( sock->write_q.head, STATUS_ALERTED );
            async_active = 1;
        }
    }

    /* Do not signal events if there are still pending asynchronous IO requests.
     * We need this to delay FD_CLOSE events until all pending overlapped
     * requests are processed. */
    if ( !events || async_active ) return;

    if (sock->event)
    {
        if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
        set_event( sock->event );
    }
    if (sock->window)
    {
        if (debug_level) fprintf(stderr, "signalling events %x win %x\n", events, sock->window );
        for (i = 0; i < FD_MAX_EVENTS; i++)
        {
            int event = event_bitorder[i];
            if (sock->pmask & (1 << event))
            {
                unsigned int lparam = (1 << event) | (sock->errors[event] << 16);
                post_message( sock->window, sock->message, sock->wparam, lparam );
            }
        }
        sock->pmask = 0;
        sock_reselect( sock );
    }
}

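/* retrieve the pending error on a socket via SO_ERROR, mapped to a WSA error
 * code (0 if there is none) */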
inline static int sock_error( int s )
{
    unsigned int optval = 0, optlen;

    optlen = sizeof(optval);
    getsockopt( s, SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen );
    return optval ? sock_get_error(optval) : 0;
}

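/* callback from the server's poll loop (also reached via sock_try_event()):
 * translate the poll() revents bits into FD_* network events, record them in
 * pmask/hmask, and wake up whoever is waiting on the socket */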
static void sock_poll_event( struct object *obj, int event )
{
    struct sock *sock = (struct sock *)obj;
    int empty_recv = 0;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %d select event: %x\n", sock->obj.fd, event);
    if (sock->state & FD_CONNECT)
    {
        /* connecting */
        if (event & POLLOUT)
        {
            /* we got connected */
            sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
            sock->state &= ~FD_CONNECT;
            sock->pmask |= FD_CONNECT;
            sock->errors[FD_CONNECT_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d connection success\n", sock->obj.fd);
        }
        else if (event & (POLLERR|POLLHUP))
        {
            /* we didn't get connected? */
            sock->state &= ~FD_CONNECT;
            sock->pmask |= FD_CONNECT;
            sock->errors[FD_CONNECT_BIT] = sock_error( sock->obj.fd );
            if (debug_level)
                fprintf(stderr, "socket %d connection failure\n", sock->obj.fd);
        }
    }
    else if (sock->state & FD_WINE_LISTENING)
    {
        /* listening */
        if (event & POLLIN)
        {
            /* incoming connection */
            sock->pmask |= FD_ACCEPT;
            sock->errors[FD_ACCEPT_BIT] = 0;
            sock->hmask |= FD_ACCEPT;
        }
        else if (event & (POLLERR|POLLHUP))
        {
            /* failed incoming connection? */
            sock->pmask |= FD_ACCEPT;
            sock->errors[FD_ACCEPT_BIT] = sock_error( sock->obj.fd );
            sock->hmask |= FD_ACCEPT;
        }
    }
    else
    {
        /* normal data flow */
        if (event & POLLIN)
        {
            char dummy;
            int  nr;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            nr = recv( sock->obj.fd, &dummy, 1, MSG_PEEK );
            if ( nr > 0 )
            {
                /* incoming data */
                sock->pmask |= FD_READ;
                sock->hmask |= (FD_READ|FD_CLOSE);
                sock->errors[FD_READ_BIT] = 0;
                if (debug_level)
                    fprintf(stderr, "socket %d is readable\n", sock->obj.fd );
            }
            else if ( nr == 0 )
                empty_recv = 1;
            else
            {
                /* EAGAIN can happen if an async recv() falls between the server's
                 * poll() call and the invocation of this routine */
                if ( errno == EAGAIN )
                    event &= ~POLLIN;
                else
                {
                    if ( debug_level )
                        fprintf( stderr, "recv error on socket %d: %d\n", sock->obj.fd, errno );
                    event = POLLERR;
                }
            }
        }

        if (event & POLLOUT)
        {
            sock->pmask |= FD_WRITE;
            sock->hmask |= FD_WRITE;
            sock->errors[FD_WRITE_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d is writable\n", sock->obj.fd);
        }
        if (event & POLLPRI)
        {
            sock->pmask |= FD_OOB;
            sock->hmask |= FD_OOB;
            sock->errors[FD_OOB_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d got OOB data\n", sock->obj.fd);
        }
        /* According to WS2 specs, FD_CLOSE is only delivered when there is
           no more data to be read (i.e. empty_recv = 1) */
        else if ( empty_recv && (sock->state & (FD_READ|FD_WRITE)) )
        {
            sock->errors[FD_CLOSE_BIT] = sock_error( sock->obj.fd );
            if ( event & (POLLERR|POLLHUP) )
                sock->state &= ~(FD_WINE_CONNECTED|FD_WRITE);
            sock->pmask |= FD_CLOSE;
            sock->hmask |= FD_CLOSE;
            if (debug_level)
                fprintf(stderr, "socket %d aborted by error %d, event: %x - removing from select loop\n",
                        sock->obj.fd, sock->errors[FD_CLOSE_BIT], event);
        }
    }

    if ( sock->pmask & FD_CLOSE || event & (POLLERR|POLLHUP) )
    {
        if ( debug_level )
            fprintf( stderr, "removing socket %d from select loop\n", sock->obj.fd );
        set_select_events( &sock->obj, -1 );
    }
    else
        sock_reselect( sock );

    /* wake up anyone waiting for whatever just happened */
    if ( sock->pmask & sock->mask || sock->flags & FD_FLAG_OVERLAPPED ) sock_wake_up( sock, event );

    /* if anyone is stupid enough to wait on the socket object itself,
     * maybe we should wake them up too, just in case? */
    wake_up( &sock->obj, 0 );
}

static void sock_dump( struct object *obj, int verbose )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
    printf( "Socket fd=%d, state=%x, mask=%x, pending=%x, held=%x\n",
            sock->obj.fd, sock->state,
            sock->mask, sock->pmask, sock->hmask );
}

static int sock_signaled( struct object *obj, struct thread *thread )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    return check_select_events( sock->obj.fd, sock_get_poll_events( &sock->obj ) );
}

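/* compute the poll() events to wait for: the events the client requested that
 * are still possible in the current state and are not currently held in hmask */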
static int sock_get_poll_events( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    unsigned int mask = sock->mask & sock->state & ~sock->hmask;
    int ev = 0;

    assert( obj->ops == &sock_ops );

    if (sock->state & FD_CONNECT)
        /* connecting, wait for writable */
        return POLLOUT;
    if (sock->state & FD_WINE_LISTENING)
        /* listening, wait for readable */
        return (sock->hmask & FD_ACCEPT) ? 0 : POLLIN;

    if (mask & FD_READ  || (sock->flags & WSA_FLAG_OVERLAPPED && IS_READY( sock->read_q )))
        ev |= POLLIN | POLLPRI;
    if (mask & FD_WRITE || (sock->flags & WSA_FLAG_OVERLAPPED && IS_READY( sock->write_q )))
        ev |= POLLOUT;
    /* We use POLLIN with 0 bytes recv() as FD_CLOSE indication. */
    if (sock->mask & ~sock->hmask & FD_CLOSE)
        ev |= POLLIN;

    return ev;
}

static int sock_get_fd( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
    return sock->obj.fd;
}

static int sock_get_info( struct object *obj, struct get_file_info_reply *reply, int *flags )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    if (reply)
    {
        reply->type        = FILE_TYPE_PIPE;
        reply->attr        = 0;
        reply->access_time = 0;
        reply->write_time  = 0;
        reply->size_high   = 0;
        reply->size_low    = 0;
        reply->links       = 0;
        reply->index_high  = 0;
        reply->index_low   = 0;
        reply->serial      = 0;
    }
    *flags = 0;
    if (sock->flags & WSA_FLAG_OVERLAPPED) *flags |= FD_FLAG_OVERLAPPED;
    if ( !(sock->state & FD_READ) )  *flags |= FD_FLAG_RECV_SHUTDOWN;
    if ( !(sock->state & FD_WRITE) ) *flags |= FD_FLAG_SEND_SHUTDOWN;
    return FD_TYPE_SOCKET;
}

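/* queue an asynchronous read or write request on the socket; this is only
 * valid for sockets created with WSA_FLAG_OVERLAPPED */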
static void sock_queue_async( struct object *obj, void *ptr, unsigned int status, int type, int count )
{
    struct sock *sock = (struct sock *)obj;
    struct async_queue *q;
    struct async *async;
    int pollev;

    assert( obj->ops == &sock_ops );

    if ( !(sock->flags & WSA_FLAG_OVERLAPPED) )
    {
        set_error( STATUS_INVALID_HANDLE );
        return;
    }

    switch( type )
    {
    case ASYNC_TYPE_READ:
        q = &sock->read_q;
        sock->hmask &= ~FD_CLOSE;
        break;
    case ASYNC_TYPE_WRITE:
        q = &sock->write_q;
        break;
    default:
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    async = find_async( q, current, ptr );

    if ( status == STATUS_PENDING )
    {
        if ( ( !( sock->state & FD_READ )  && type == ASYNC_TYPE_READ  ) ||
             ( !( sock->state & FD_WRITE ) && type == ASYNC_TYPE_WRITE ) )
        {
            set_error( STATUS_PIPE_DISCONNECTED );
            if ( async ) destroy_async( async );
        }
        else
        {
            if ( !async )
                async = create_async( obj, current, ptr );
            if ( !async )
                return;

            async->status = STATUS_PENDING;
            if ( !async->q )
                async_insert( q, async );
        }
    }
    else if ( async ) destroy_async( async );
    else set_error( STATUS_INVALID_PARAMETER );

    pollev = sock_reselect( sock );
    if ( pollev ) sock_try_event( sock, pollev );
}

static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    if ( sock->deferred )
        release_object( sock->deferred );

    if ( sock->flags & WSA_FLAG_OVERLAPPED )
    {
        destroy_async_queue( &sock->read_q );
        destroy_async_queue( &sock->write_q );
    }
    if (sock->event) release_object( sock->event );
}

/* create a new and unconnected socket */
static struct object *create_socket( int family, int type, int protocol, unsigned int flags )
{
    struct sock *sock;
    int sockfd;

    sockfd = socket( family, type, protocol );
    if (debug_level)
        fprintf(stderr,"socket(%d,%d,%d)=%d\n",family,type,protocol,sockfd);
    if (sockfd == -1) {
        sock_set_error();
        return NULL;
    }
    fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    if (!(sock = alloc_object( &sock_ops, -1 )))
    {
        close( sockfd );  /* don't leak the fd if the object allocation failed */
        return NULL;
    }
    sock->obj.fd   = sockfd;
    sock->state    = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
    sock->mask     = 0;
    sock->hmask    = 0;
    sock->pmask    = 0;
    sock->flags    = flags;
    sock->event    = NULL;
    sock->window   = 0;
    sock->message  = 0;
    sock->wparam   = 0;
    sock->deferred = NULL;
    if (sock->flags & WSA_FLAG_OVERLAPPED)
    {
        init_async_queue( &sock->read_q );
        init_async_queue( &sock->write_q );
    }
    sock_reselect( sock );
    clear_error();
    return &sock->obj;
}

/* accept a socket (creates a new fd) */
static struct sock *accept_socket( obj_handle_t handle )
{
    struct sock *acceptsock;
    struct sock *sock;
    int acceptfd;
    struct sockaddr saddr;
    int slen;

    sock = (struct sock *)get_handle_obj( current->process, handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    if (!sock)
        return NULL;

    if ( sock->deferred ) {
        acceptsock = sock->deferred;
        sock->deferred = NULL;
    } else {

        /* Try to accept(2). We can't be sure that this is an already connected
         * socket or that accept() is allowed on it; in those cases we will get
         * -1/errno back.
         */
        slen = sizeof(saddr);
        acceptfd = accept( sock->obj.fd, &saddr, &slen );
        if (acceptfd == -1) {
            sock_set_error();
            release_object( sock );
            return NULL;
        }
        if (!(acceptsock = alloc_object( &sock_ops, -1 )))
        {
            close( acceptfd );  /* don't leak the fd if the object allocation failed */
            release_object( sock );
            return NULL;
        }

        /* newly created socket gets the same properties as the listening socket */
        fcntl(acceptfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
        acceptsock->obj.fd  = acceptfd;
        acceptsock->state   = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
        if (sock->state & FD_WINE_NONBLOCKING)
            acceptsock->state |= FD_WINE_NONBLOCKING;
        acceptsock->mask    = sock->mask;
        acceptsock->hmask   = 0;
        acceptsock->pmask   = 0;
        acceptsock->event   = NULL;
        acceptsock->window  = sock->window;
        acceptsock->message = sock->message;
        acceptsock->wparam  = 0;
        if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event );
        acceptsock->flags = sock->flags;
        acceptsock->deferred = NULL;
        if ( acceptsock->flags & WSA_FLAG_OVERLAPPED )
        {
            init_async_queue( &acceptsock->read_q );
            init_async_queue( &acceptsock->write_q );
        }
    }
    clear_error();
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    release_object( sock );
    return acceptsock;
}

/* map a Unix errno value onto the corresponding WSA error code */
static int sock_get_error( int err )
{
    switch (err)
    {
        case EINTR:             return WSAEINTR; break;
        case EBADF:             return WSAEBADF; break;
        case EPERM:
        case EACCES:            return WSAEACCES; break;
        case EFAULT:            return WSAEFAULT; break;
        case EINVAL:            return WSAEINVAL; break;
        case EMFILE:            return WSAEMFILE; break;
        case EWOULDBLOCK:       return WSAEWOULDBLOCK; break;
        case EINPROGRESS:       return WSAEINPROGRESS; break;
        case EALREADY:          return WSAEALREADY; break;
        case ENOTSOCK:          return WSAENOTSOCK; break;
        case EDESTADDRREQ:      return WSAEDESTADDRREQ; break;
        case EMSGSIZE:          return WSAEMSGSIZE; break;
        case EPROTOTYPE:        return WSAEPROTOTYPE; break;
        case ENOPROTOOPT:       return WSAENOPROTOOPT; break;
        case EPROTONOSUPPORT:   return WSAEPROTONOSUPPORT; break;
        case ESOCKTNOSUPPORT:   return WSAESOCKTNOSUPPORT; break;
        case EOPNOTSUPP:        return WSAEOPNOTSUPP; break;
        case EPFNOSUPPORT:      return WSAEPFNOSUPPORT; break;
        case EAFNOSUPPORT:      return WSAEAFNOSUPPORT; break;
        case EADDRINUSE:        return WSAEADDRINUSE; break;
        case EADDRNOTAVAIL:     return WSAEADDRNOTAVAIL; break;
        case ENETDOWN:          return WSAENETDOWN; break;
        case ENETUNREACH:       return WSAENETUNREACH; break;
        case ENETRESET:         return WSAENETRESET; break;
        case ECONNABORTED:      return WSAECONNABORTED; break;
        case EPIPE:
        case ECONNRESET:        return WSAECONNRESET; break;
        case ENOBUFS:           return WSAENOBUFS; break;
        case EISCONN:           return WSAEISCONN; break;
        case ENOTCONN:          return WSAENOTCONN; break;
        case ESHUTDOWN:         return WSAESHUTDOWN; break;
        case ETOOMANYREFS:      return WSAETOOMANYREFS; break;
        case ETIMEDOUT:         return WSAETIMEDOUT; break;
        case ECONNREFUSED:      return WSAECONNREFUSED; break;
        case ELOOP:             return WSAELOOP; break;
        case ENAMETOOLONG:      return WSAENAMETOOLONG; break;
        case EHOSTDOWN:         return WSAEHOSTDOWN; break;
        case EHOSTUNREACH:      return WSAEHOSTUNREACH; break;
        case ENOTEMPTY:         return WSAENOTEMPTY; break;
#ifdef EPROCLIM
        case EPROCLIM:          return WSAEPROCLIM; break;
#endif
#ifdef EUSERS
        case EUSERS:            return WSAEUSERS; break;
#endif
#ifdef EDQUOT
        case EDQUOT:            return WSAEDQUOT; break;
#endif
#ifdef ESTALE
        case ESTALE:            return WSAESTALE; break;
#endif
#ifdef EREMOTE
        case EREMOTE:           return WSAEREMOTE; break;
#endif
        default: errno = err; perror("sock_set_error"); return ERROR_UNKNOWN; break;
    }
}

/* set the last error depending on errno */
static void sock_set_error(void)
{
    set_error( sock_get_error( errno ) );
}

/* create a socket */
DECL_HANDLER(create_socket)
{
    struct object *obj;

    reply->handle = 0;
    if ((obj = create_socket( req->family, req->type, req->protocol, req->flags )) != NULL)
    {
        reply->handle = alloc_handle( current->process, obj, req->access, req->inherit );
        release_object( obj );
    }
}

/* accept a socket */
DECL_HANDLER(accept_socket)
{
    struct sock *sock;

    reply->handle = 0;
    if ((sock = accept_socket( req->lhandle )) != NULL)
    {
        reply->handle = alloc_handle( current->process, &sock->obj, req->access, req->inherit );
        sock->wparam = reply->handle;  /* wparam for message is the socket handle */
        sock_reselect( sock );
        release_object( &sock->obj );
    }
}

/* set socket event parameters */
DECL_HANDLER(set_socket_event)
{
    struct sock *sock;
    struct event *old_event;
    int pollev;

    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops )))
        return;
    old_event = sock->event;
    sock->mask    = req->mask;
    sock->event   = NULL;
    sock->window  = req->window;
    sock->message = req->msg;
    sock->wparam  = req->handle;  /* wparam is the socket handle */
    if (req->event) sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );

    if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);

    pollev = sock_reselect( sock );
    if ( pollev ) sock_try_event( sock, pollev );

    if (sock->mask)
        sock->state |= FD_WINE_NONBLOCKING;

    /* If a network event is pending, signal the event object: it is possible
       that an FD_CONNECT or FD_ACCEPT event happened before WSAEventSelect()
       was called on this socket (when dealing with asynchronous sockets). */
    if (sock->pmask & sock->mask) sock_wake_up( sock, pollev );

    if (old_event) release_object( old_event ); /* we're through with it */
    release_object( &sock->obj );
}

/* get socket event parameters */
DECL_HANDLER(get_socket_event)
{
    struct sock *sock;

    sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    if (!sock)
    {
        reply->mask  = 0;
        reply->pmask = 0;
        reply->state = 0;
        set_error( WSAENOTSOCK );
        return;
    }
    reply->mask  = sock->mask;
    reply->pmask = sock->pmask;
    reply->state = sock->state;
    set_reply_data( sock->errors, min( get_reply_max_size(), sizeof(sock->errors) ));

    if (req->service)
    {
        if (req->c_event)
        {
            struct event *cevent = get_event_obj( current->process, req->c_event,
                                                  EVENT_MODIFY_STATE );
            if (cevent)
            {
                reset_event( cevent );
                release_object( cevent );
            }
        }
        sock->pmask = 0;
        sock_reselect( sock );
    }
    release_object( &sock->obj );
}

/* re-enable pending socket events */
DECL_HANDLER(enable_socket_event)
{
    struct sock *sock;
    int pollev;

    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops )))
        return;

    sock->pmask &= ~req->mask; /* is this safe? */
    sock->hmask &= ~req->mask;
    if ( req->mask & FD_READ )
        sock->hmask &= ~FD_CLOSE;
    sock->state |= req->sstate;
    sock->state &= ~req->cstate;

    pollev = sock_reselect( sock );
    if ( pollev ) sock_try_event( sock, pollev );

    release_object( &sock->obj );
}

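/* attach a socket to be returned by the next accept request on this socket;
 * accept_socket() above picks it up instead of calling accept(2) again
 * (presumably this backs deferred accepts, e.g. a WSAAccept() condition
 * function returning CF_DEFER) */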
DECL_HANDLER(set_socket_deferred)
{
    struct sock *sock, *acceptsock;

    sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    if ( !sock )
    {
        set_error( WSAENOTSOCK );
        return;
    }
    acceptsock = (struct sock *)get_handle_obj( current->process, req->deferred,
                                                GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    if ( !acceptsock )
    {
        release_object( sock );
        set_error( WSAENOTSOCK );
        return;
    }
    sock->deferred = acceptsock;
    release_object( sock );
}