/*
 * RPCSS named pipe server
 *
 * Copyright (C) 2002 Greg Turner
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include <assert.h>

#include "rpcss.h"       /* RPCSS_NP_MESSAGE, RPCSS_NP_REPLY, NAME_RPCSS_NAMED_PIPE, helper prototypes */
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(ole);
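
/*
 * The statics below are the whole of the pipe server's state:
 *   np_server_end        - the currently listening instance of the rpcss named pipe
 *   np_server_work_event - auto-reset event, signalled each time a client connects
 *   np_server_cs         - serializes processing of incoming messages
 *   srv_thread_count     - number of live handler threads (one per accepted connection)
 *   server_live          - cleared by RPCSS_UnBecomePipeServer to stop the accept loop
 */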
static HANDLE np_server_end;
static HANDLE np_server_work_event;
static CRITICAL_SECTION np_server_cs;
static LONG srv_thread_count;
static BOOL server_live;
LONG RPCSS_SrvThreadCount(void)
{
  return srv_thread_count;
}
BOOL RPCSS_UnBecomePipeServer(void)
{
  BOOL rslt = TRUE;
  DWORD wait_result;
  HANDLE master_mutex = RPCSS_GetMasterMutex();

  wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

  switch (wait_result) {
    case WAIT_ABANDONED: /* ? */
    case WAIT_OBJECT_0:
      /* we have ownership */
      break;
    case WAIT_FAILED:
    case WAIT_TIMEOUT:
    default:
      WINE_ERR("This should never happen: couldn't enter mutex.\n");
      /* this is totally unacceptable.  no graceful out exists */
      assert(FALSE);
  }

  /* now that we have the master mutex, we can safely stop
     listening on the pipe.  Before we proceed, we do a final
     check that it's OK to shut down, to ensure atomicity */

  if (!RPCSS_ReadyToDie())
    rslt = FALSE;
  else {
    WINE_TRACE("shutting down pipe.\n");
    server_live = FALSE;   /* stop the accept loop in NPMainWorkThread */
    if (!CloseHandle(np_server_end))
      WINE_WARN("Failed to close named pipe.\n");
    if (!CloseHandle(np_server_work_event))
      WINE_WARN("Failed to close the event handle.\n");
    DeleteCriticalSection(&np_server_cs);
  }

  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Unable to leave master mutex!??\n");

  return rslt;
}
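
/*
 * Locking note: every routine here that touches the shared pipe state first
 * acquires the master mutex (RPCSS_GetMasterMutex(), presumably shared across
 * rpcss instances) with a bounded MASTER_MUTEX_TIMEOUT wait, so pipe creation,
 * hand-off and shutdown cannot interleave between competing servers.
 */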
static void RPCSS_ServerProcessRANMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply)
{
  /* we do absolutely nothing, but on the server end,
     the lazy timeout is reset as a result of our connection. */
  RPCSS_SetMaxLazyTimeout(pMsg->message.ranmsg.timeout);
  RPCSS_SetLazyTimeRemaining(RPCSS_GetMaxLazyTimeout());
}
static void RPCSS_ServerProcessREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply,
  char *vardata)
{
  RPCSS_RegisterRpcEndpoints(
    pMsg->message.registerepmsg.iface,
    pMsg->message.registerepmsg.object_count,
    pMsg->message.registerepmsg.binding_count,
    pMsg->message.registerepmsg.no_replace,
    vardata,
    pMsg->vardata_payload_size
  );

  /* no reply data is generated for this message type */
}
static void RPCSS_ServerProcessUNREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg,
  PRPCSS_NP_REPLY pReply, char *vardata)
{
  RPCSS_UnregisterRpcEndpoints(
    pMsg->message.unregisterepmsg.iface,
    pMsg->message.unregisterepmsg.object_count,
    pMsg->message.unregisterepmsg.binding_count,
    vardata,
    pMsg->vardata_payload_size
  );

  /* no reply data is generated for this message type */
}
static void RPCSS_ServerProcessRESOLVEEPMessage(PRPCSS_NP_MESSAGE pMsg,
  PRPCSS_NP_REPLY pReply, char *vardata)
{
  /* for now, reply is placed into *pReply.as_string, on success, by RPCSS_ResolveRpcEndpoints */
  ZeroMemory(pReply->as_string, MAX_RPCSS_NP_REPLY_STRING_LEN);
  RPCSS_ResolveRpcEndpoints(
    pMsg->message.resolveepmsg.iface,
    pMsg->message.resolveepmsg.object,
    vardata,
    pReply->as_string
  );
}
static void RPCSS_ServerProcessMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply, char *vardata)
{
  switch (pMsg->message_type) {
    case RPCSS_NP_MESSAGE_TYPEID_RANMSG:
      RPCSS_ServerProcessRANMessage(pMsg, pReply);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_REGISTEREPMSG:
      RPCSS_ServerProcessREGISTEREPMessage(pMsg, pReply, vardata);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_UNREGISTEREPMSG:
      RPCSS_ServerProcessUNREGISTEREPMessage(pMsg, pReply, vardata);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_RESOLVEEPMSG:
      RPCSS_ServerProcessRESOLVEEPMessage(pMsg, pReply, vardata);
      break;
    default:
      WINE_ERR("Message type unknown!!  No action taken.\n");
  }
}
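
/*
 * Wire format recap, as consumed by HandlerThread below: a request is one
 * fixed-size RPCSS_NP_MESSAGE, optionally followed by enough
 * RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG messages to carry
 * vardata_payload_size bytes in VARDATA_PAYLOAD_BYTES chunks; the reply is a
 * single fixed-size RPCSS_NP_REPLY.
 */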
/* each message gets its own thread.  this is it. */
static VOID HandlerThread(LPVOID lpvPipeHandle)
{
  RPCSS_NP_MESSAGE msg, vardata_payload_msg;
  char *c, *vardata = NULL;
  RPCSS_NP_REPLY reply;
  DWORD bytesread, written;
  BOOL success, had_payload = FALSE;
  HANDLE mypipe;

  mypipe = (HANDLE) lpvPipeHandle;

  WINE_TRACE("mypipe: %p\n", mypipe);

  success = ReadFile(
    mypipe,                   /* pipe handle */
    (char *) &msg,            /* message buffer */
    sizeof(RPCSS_NP_MESSAGE), /* message buffer size */
    &bytesread,               /* receives number of bytes read */
    NULL                      /* not overlapped */
  );

  if (msg.vardata_payload_size) {
    had_payload = TRUE;
    /* this fudge space allows us not to worry about exceeding the buffer space
       on the last read */
    vardata = LocalAlloc(LPTR, (msg.vardata_payload_size) + VARDATA_PAYLOAD_BYTES);
    if (!vardata) {
      WINE_ERR("vardata memory allocation failure.\n");
      success = FALSE;
    } else {
      for ( c = vardata; (c - vardata) < msg.vardata_payload_size;
            c += VARDATA_PAYLOAD_BYTES ) {
        success = ReadFile(
          mypipe,
          (char *) &vardata_payload_msg,
          sizeof(RPCSS_NP_MESSAGE),
          &bytesread,
          NULL
        );
        if ( (!success) || (bytesread != sizeof(RPCSS_NP_MESSAGE)) ||
             (vardata_payload_msg.message_type != RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG) ) {
          WINE_ERR("vardata payload read failure! (s=%s, br=%ld, exp_br=%u, mt=%u, mt_exp=%u)\n",
            success ? "TRUE" : "FALSE", bytesread, (unsigned int)sizeof(RPCSS_NP_MESSAGE),
            vardata_payload_msg.message_type, RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG);
          success = FALSE;
          break;
        }
        CopyMemory(c, vardata_payload_msg.message.vardatapayloadmsg.payload, VARDATA_PAYLOAD_BYTES);
        WINE_TRACE("payload read.\n");
      }
    }
  }

  if (success && (bytesread == sizeof(RPCSS_NP_MESSAGE))) {
    WINE_TRACE("read success.\n");
    /* process the message and send a reply, serializing requests. */
    EnterCriticalSection(&np_server_cs);
    WINE_TRACE("processing message.\n");
    RPCSS_ServerProcessMessage(&msg, &reply, vardata);
    LeaveCriticalSection(&np_server_cs);

    if (had_payload) LocalFree(vardata);

    WINE_TRACE("message processed, sending reply....\n");

    success = WriteFile(
      mypipe,                 /* pipe handle */
      (char *) &reply,        /* reply buffer */
      sizeof(RPCSS_NP_REPLY), /* reply buffer size */
      &written,               /* receives number of bytes written */
      NULL                    /* not overlapped */
    );

    if ( (!success) || (written != sizeof(RPCSS_NP_REPLY)) )
      WINE_WARN("Message reply failed. (success=%s, br=%ld, exp_br=%u)\n",
        success ? "TRUE" : "FALSE", written, (unsigned int)sizeof(RPCSS_NP_REPLY));
    else
      WINE_TRACE("Reply sent successfully.\n");
  } else
    WINE_WARN("Message receipt failed.\n");

  FlushFileBuffers(mypipe);
  DisconnectNamedPipe(mypipe);
  CloseHandle(mypipe);   /* this instance was handed off to us; the accept loop listens on a fresh one */
  InterlockedDecrement(&srv_thread_count);
}
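
/*
 * The accept loop below hands each connected pipe instance off to a
 * HandlerThread and then, while holding the master mutex, creates a fresh
 * listening instance, so the pipe name stays available to new clients while
 * earlier requests are still being served.
 */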
static VOID NPMainWorkThread(LPVOID ignored)
{
  BOOL connected;
  HANDLE hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  while (server_live) {
    connected = ConnectNamedPipe(np_server_end, NULL) ?
      TRUE : (GetLastError() == ERROR_PIPE_CONNECTED);

    if (connected) {
      /* is "work" the act of connecting pipes, or the act of serving
         requests successfully?  for now I will make it the former. */
      if (!SetEvent(np_server_work_event))
        WINE_WARN("failed to signal np_server_work_event.\n");

      /* Create a thread for this client. */
      InterlockedIncrement(&srv_thread_count);
      hthread = CreateThread(
        NULL,                                  /* no security attribute */
        0,                                     /* default stack size */
        (LPTHREAD_START_ROUTINE) HandlerThread,
        (LPVOID) np_server_end,                /* thread parameter */
        0,                                     /* not suspended */
        &threadid                              /* returns thread ID (not used) */
      );

      if (hthread) {
        WINE_TRACE("Spawned handler thread: %p\n", hthread);
        CloseHandle(hthread);

        /* for safety's sake, hold the mutex while we switch the pipe */
        wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

        switch (wait_result) {
          case WAIT_ABANDONED: /* ? */
          case WAIT_OBJECT_0:
            /* we have ownership */
            break;
          case WAIT_FAILED:
          case WAIT_TIMEOUT:
          default:
            wait_result = WAIT_FAILED;
        }

        if (wait_result == WAIT_FAILED) {
          WINE_ERR("Couldn't enter master mutex.  Expect problems.\n");
        } else {
          /* now create a new named pipe instance to listen on */
          np_server_end = CreateNamedPipe(
            NAME_RPCSS_NAMED_PIPE,                                 /* pipe name */
            PIPE_ACCESS_DUPLEX,                                    /* pipe open mode */
            PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, /* pipe-specific modes */
            PIPE_UNLIMITED_INSTANCES,                              /* maximum instances */
            sizeof(RPCSS_NP_REPLY),                                /* output buffer size */
            sizeof(RPCSS_NP_MESSAGE),                              /* input buffer size */
            2000,                                                  /* time-out interval */
            NULL                                                   /* default security attributes */
          );

          if (np_server_end == INVALID_HANDLE_VALUE) {
            WINE_ERR("Failed to recreate named pipe!\n");
            /* not sure what to do? */
            assert(FALSE);
          }

          if (!ReleaseMutex(master_mutex))
            WINE_ERR("Uh oh.  Couldn't leave master mutex.  Expect deadlock.\n");
        }
      } else {
        WINE_ERR("Failed to spawn handler thread!\n");
        DisconnectNamedPipe(np_server_end);
        InterlockedDecrement(&srv_thread_count);
      }
    }
  }
  WINE_TRACE("Server thread shutdown.\n");
}
static HANDLE RPCSS_NPConnect(void)
{
  HANDLE the_pipe = NULL;
  DWORD dwmode, wait_result;
  HANDLE master_mutex = RPCSS_GetMasterMutex();

  while (TRUE) {

    wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);
    switch (wait_result) {
      case WAIT_ABANDONED:
      case WAIT_OBJECT_0:
        /* we have ownership */
        break;
      case WAIT_FAILED:
      case WAIT_TIMEOUT:
      default:
        WINE_ERR("This should never happen: couldn't enter mutex.\n");
        return NULL;
    }

    /* try to open the client side of the named pipe. */
    the_pipe = CreateFileA(
      NAME_RPCSS_NAMED_PIPE,          /* pipe name */
      GENERIC_READ | GENERIC_WRITE,   /* r/w access */
      0,                              /* no sharing */
      NULL,                           /* no security attributes */
      OPEN_EXISTING,                  /* open an existing pipe */
      0,                              /* default attributes */
      NULL                            /* no template file */
    );

    if (the_pipe != INVALID_HANDLE_VALUE)
      break;

    if (GetLastError() != ERROR_PIPE_BUSY) {
      WINE_WARN("Unable to open named pipe %s (assuming unavailable).\n",
        wine_dbgstr_a(NAME_RPCSS_NAMED_PIPE));
      the_pipe = NULL;
      break;
    }

    WINE_WARN("Named pipe busy (will wait)\n");

    if (!ReleaseMutex(master_mutex))
      WINE_ERR("Failed to release master mutex.  Expect deadlock.\n");

    /* wait for the named pipe.  We are only willing to wait
       5 seconds; it should be available /very/ soon. */
    if (! WaitNamedPipeA(NAME_RPCSS_NAMED_PIPE, MASTER_MUTEX_WAITNAMEDPIPE_TIMEOUT))
    {
      WINE_ERR("Named pipe unavailable after waiting.  Something is probably wrong.\n");
      return NULL;
    }
  }

  if (the_pipe) {
    dwmode = PIPE_READMODE_MESSAGE;
    /* SetNamedPipeHandleState not implemented ATM, but still seems to work somehow. */
    if (! SetNamedPipeHandleState(the_pipe, &dwmode, NULL, NULL))
      WINE_WARN("Failed to set pipe handle state\n");
  }

  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Uh oh, failed to leave the RPC Master Mutex!\n");

  return the_pipe;
}
static BOOL RPCSS_SendReceiveNPMsg(HANDLE np, PRPCSS_NP_MESSAGE msg, PRPCSS_NP_REPLY reply)
{
  DWORD count;

  WINE_TRACE("(np == %p, msg == %p, reply == %p)\n", np, msg, reply);

  if (! WriteFile(np, msg, sizeof(RPCSS_NP_MESSAGE), &count, NULL)) {
    WINE_ERR("write failed.\n");
    return FALSE;
  }

  if (count != sizeof(RPCSS_NP_MESSAGE)) {
    WINE_ERR("write count mismatch.\n");
    return FALSE;
  }

  if (! ReadFile(np, reply, sizeof(RPCSS_NP_REPLY), &count, NULL)) {
    WINE_ERR("read failed.\n");
    return FALSE;
  }

  if (count != sizeof(RPCSS_NP_REPLY)) {
    WINE_ERR("read count mismatch. got %ld, expected %u.\n", count, (unsigned int)sizeof(RPCSS_NP_REPLY));
    return FALSE;
  }

  /* message execution was successful */
  return TRUE;
}
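
/*
 * Client-side call sequence (used by RPCSS_BecomePipeServer below when
 * another rpcss already owns the pipe): RPCSS_NPConnect() opens the client
 * end, the caller fills in an RPCSS_NP_MESSAGE, and RPCSS_SendReceiveNPMsg()
 * performs the blocking request/reply exchange.
 */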
BOOL RPCSS_BecomePipeServer(void)
{
  RPCSS_NP_MESSAGE msg;
  RPCSS_NP_REPLY reply;
  BOOL rslt = TRUE;
  HANDLE client_handle, hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

  switch (wait_result) {
    case WAIT_ABANDONED: /* ? */
    case WAIT_OBJECT_0:
      /* we have ownership */
      break;
    case WAIT_FAILED:
    case WAIT_TIMEOUT:
    default:
      WINE_ERR("Couldn't enter master mutex.\n");
      return FALSE;
  }

  /* now we have the master mutex.  during this time we will
   *
   * o check if an rpcss already listens on the pipe.  If so,
   *   we will tell it we were invoked, which will cause the
   *   other end to update its timeouts.  After, we just return
   *   FALSE.
   *
   * o otherwise, we establish the pipe for ourselves and get
   *   ready to listen on it
   */

  if ((client_handle = RPCSS_NPConnect()) != NULL) {
    msg.message_type = RPCSS_NP_MESSAGE_TYPEID_RANMSG;
    msg.message.ranmsg.timeout = RPCSS_GetMaxLazyTimeout();
    msg.vardata_payload_size = 0;
    if (!RPCSS_SendReceiveNPMsg(client_handle, &msg, &reply))
      WINE_ERR("Something is amiss: RPC_SendReceive failed.\n");
    CloseHandle(client_handle);   /* done with the client end of the pipe */
    rslt = FALSE;
  }

  if (rslt) {
    np_server_work_event = CreateEventA(NULL, FALSE, FALSE, "RpcNpServerWorkEvent");
    if (np_server_work_event == NULL) {
      /* dunno what we can do then */
      WINE_ERR("Unable to create the np_server_work_event\n");
      assert(FALSE);
    }
    InitializeCriticalSection(&np_server_cs);

    np_server_end = CreateNamedPipe(
      NAME_RPCSS_NAMED_PIPE,                                 /* pipe name */
      PIPE_ACCESS_DUPLEX,                                    /* pipe open mode */
      PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, /* pipe-specific modes */
      PIPE_UNLIMITED_INSTANCES,                              /* maximum number of instances */
      sizeof(RPCSS_NP_REPLY),                                /* output buffer size */
      sizeof(RPCSS_NP_MESSAGE),                              /* input buffer size */
      2000,                                                  /* time-out interval */
      NULL                                                   /* default security attributes */
    );

    if (np_server_end == INVALID_HANDLE_VALUE) {
      WINE_ERR("Failed to create named pipe!\n");
      DeleteCriticalSection(&np_server_cs);
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      np_server_work_event = NULL;
      np_server_end = NULL;
      rslt = FALSE;
    }
  }

  server_live = rslt;

  if (rslt) {
    /* OK, now spawn the (single) server thread */
    hthread = CreateThread(
      NULL,                                      /* no security attribute */
      0,                                         /* default stack size */
      (LPTHREAD_START_ROUTINE) NPMainWorkThread,
      NULL,                                      /* thread parameter */
      0,                                         /* not suspended */
      &threadid                                  /* returns thread ID (not used) */
    );
    if (hthread) {
      WINE_TRACE("Created server thread.\n");
      CloseHandle(hthread);
    } else {
      WINE_ERR("Serious error: unable to create server thread!\n");
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      if (!CloseHandle(np_server_end)) /* we will leak the handle... */
        WINE_WARN("Unable to close named pipe handle!\n");
      DeleteCriticalSection(&np_server_cs);
      np_server_end = NULL;
      np_server_work_event = NULL;
      rslt = FALSE;
      server_live = FALSE;
    }
  }

  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Unable to leave master mutex!??\n");

  return rslt;
}
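
/*
 * RPCSS_NPDoWork reports whether a client connected recently: it waits up to
 * one second on np_server_work_event, which the accept loop signals on every
 * connection.  Presumably the caller's lazy-timeout loop uses this to decide
 * whether rpcss may exit yet.
 */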
BOOL RPCSS_NPDoWork(void)
{
  DWORD waitresult = WaitForSingleObject(np_server_work_event, 1000);

  if (waitresult == WAIT_TIMEOUT)
    return FALSE;
  if (waitresult == WAIT_OBJECT_0)
    return TRUE;

  return FALSE;
}