2 * Copyright (C) 2013-2022 Red Hat Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * * Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of Red Hat nor the names of its contributors may be
16 * used to endorse or promote products derived from this software without
17 * specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
22 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
26 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 #ifdef HAVE_SYS_SOCKET_H
46 #include <sys/socket.h>
52 /* Default number of parallel requests. */
53 #define DEFAULT_PARALLEL_REQUESTS 16
/* Forward declarations.
 * new_connection/free_connection manage the lifetime of a struct connection;
 * the raw_* functions are the low-level I/O implementations that get
 * installed as conn->recv, conn->send and conn->close.
 * NOTE(review): this extract is fragmentary -- the new_connection prototype
 * below is cut off mid-parameter-list (the trailing nworkers parameter and
 * closing paren are missing from view).
 */
55 static struct connection
*new_connection (int sockin
, int sockout
,
57 static void free_connection (struct connection
*conn
);
59 /* Don't call these raw socket functions directly. Use conn->recv etc. */
60 static int raw_recv ( void *buf
, size_t len
);
61 static int raw_send_socket (const void *buf
, size_t len
, int flags
);
63 static int raw_send_other (const void *buf
, size_t len
, int flags
);
65 static void raw_close (int how
);
/* Read the current conn->status.
 * The visible code brackets the read with conn->status_lock lock/unlock;
 * NOTE(review): several original lines are missing here (return type,
 * body, the actual read of conn->status, and the conditions guarding the
 * lock calls -- presumably only taken when worker threads exist, as in
 * connection_set_status below). Confirm against the unabridged source.
 */
68 connection_get_status (void)
74 pthread_mutex_lock (&conn
->status_lock
))
78 pthread_mutex_unlock (&conn
->status_lock
))
83 /* Update the status if the new value is lower than the existing value. */
/* Transitions are one-way: the status only ever decreases (toward
 * STATUS_DEAD / shutdown).  Callers pass the desired new state; a request
 * to "raise" the status back up is ignored by the value < conn->status
 * guard below.
 * NOTE(review): fragmentary extract -- the return type, the opening brace,
 * some statements (e.g. the declaration of the byte `c` written to the
 * pipe) and the closing brace are missing from view.
 */
85 connection_set_status (conn_status value
)
/* Lock is visibly taken around the update; the condition on the matching
 * unlock below suggests it is conditional on conn->nworkers -- TODO confirm. */
90 pthread_mutex_lock (&conn
->status_lock
))
92 if (value
< conn
->status
) {
/* If worker threads exist and we are dropping from an "active" state to
 * CLIENT_DONE or below, wake any worker blocked in poll/read via the
 * pipe-to-self. */
93 if (conn
->nworkers
&& conn
->status
> STATUS_CLIENT_DONE
&&
94 value
<= STATUS_CLIENT_DONE
) {
97 assert (conn
->status_pipe
[1] >= 0);
/* Best-effort wakeup: the pipe is non-blocking, so EAGAIN (pipe full,
 * wakeup already pending) is not an error worth reporting. */
98 if (write (conn
->status_pipe
[1], &c
, 1) != 1 && errno
!= EAGAIN
)
99 debug ("failed to notify pipe-to-self: %m");
/* Crossing below CLIENT_DONE: shut down the write side of the socket so
 * the client sees EOF on replies. */
101 if (conn
->status
>= STATUS_CLIENT_DONE
&& value
< STATUS_CLIENT_DONE
)
102 conn
->close (SHUT_WR
);
103 conn
->status
= value
;
105 if (conn
->nworkers
&&
106 pthread_mutex_unlock (&conn
->status_lock
))
/* Tail of struct worker_data (its opening lines, including the `name`
 * field used below, are missing from this extract). */
111 struct connection
*conn
;
/* Worker thread entry point (passed to pthread_create by
 * handle_single_connection).  Each worker binds itself to the connection
 * via thread-local state, then loops serving NBD requests until the
 * connection winds down or the server is quitting.
 * NOTE(review): fragmentary -- the return type (presumably void *), the
 * freeing of `worker`/`name`, and the final return are not visible here.
 */
116 connection_worker (void *data
)
118 struct worker_data
*worker
= data
;
119 struct connection
*conn
= worker
->conn
;
120 char *name
= worker
->name
;
122 debug ("starting worker thread %s", name
);
/* Register this thread and make conn/name reachable via thread-locals so
 * the protocol code (which takes no conn argument) can find them. */
123 threadlocal_new_server_thread ();
124 threadlocal_set_name (name
);
125 threadlocal_set_conn (conn
);
/* Main service loop: one request/reply cycle per iteration. */
128 while (!quit
&& connection_get_status () > STATUS_CLIENT_DONE
)
129 protocol_recv_request_send_reply ();
130 debug ("exiting worker thread %s", threadlocal_get_name ());
/* Serve one client connection on sockin/sockout from start to finish:
 * create the connection object, run preconnect + NBD handshake, process
 * requests (serially or via a pool of worker threads), then finalize the
 * backend and free the connection.
 * NOTE(review): fragmentary extract -- the return type, opening brace,
 * several error-path statements, the serial-vs-threaded if/else framing,
 * worker thread argument setup, and the closing brace are missing from
 * view.  Comments below describe only the visible code.
 */
136 handle_single_connection (int sockin
, int sockout
)
138 const char *plugin_name
;
140 struct connection
*conn
;
/* `threads` is the user's -t option; 0 means use the default pool size. */
141 int nworkers
= threads
? threads
: DEFAULT_PARALLEL_REQUESTS
;
142 pthread_t
*workers
= NULL
;
146 /* Because of asynchronous exit it is plausible that a new
147 * connection is started at the same time as the backend is being
148 * shut down. top may therefore be NULL, and if this happens return
152 unlock_connection ();
/* Non-parallel thread models (or an explicit single thread) force a
 * single worker, which selects the serial request loop below. */
156 if (thread_model
< NBDKIT_THREAD_MODEL_PARALLEL
|| nworkers
== 1)
158 conn
= new_connection (sockin
, sockout
, nworkers
);
162 plugin_name
= top
->plugin_name (top
);
163 threadlocal_set_name (plugin_name
);
/* Give the plugin a chance to reject the connection early. */
165 if (top
->preconnect (top
, read_only
) == -1)
170 * Note that this calls the backend .open callback when it is safe
171 * to do so (eg. after TLS authentication).
173 if (protocol_handshake () == -1)
175 conn
->handshake_complete
= true;
178 /* No need for a separate thread. */
179 debug ("handshake complete, processing requests serially");
180 while (!quit
&& connection_get_status () > STATUS_CLIENT_DONE
)
181 protocol_recv_request_send_reply ();
184 /* Create thread pool to process requests. */
185 debug ("handshake complete, processing requests with %d threads",
187 workers
= calloc (nworkers
, sizeof *workers
);
188 if (unlikely (!workers
)) {
/* Spawn conn->nworkers workers; nworkers is reused as the loop counter
 * so that on failure it records how many threads were actually created
 * (see the pthread_join below). */
193 for (nworkers
= 0; nworkers
< conn
->nworkers
; nworkers
++) {
194 struct worker_data
*worker
= malloc (sizeof *worker
);
197 if (unlikely (!worker
)) {
199 connection_set_status (STATUS_DEAD
);
/* Per-worker thread name "<plugin>.<index>", used in debug output. */
202 if (unlikely (asprintf (&worker
->name
, "%s.%d", plugin_name
, nworkers
)
205 connection_set_status (STATUS_DEAD
);
210 err
= pthread_create (&workers
[nworkers
], NULL
, connection_worker
,
212 if (unlikely (err
)) {
214 perror ("pthread_create");
215 connection_set_status (STATUS_DEAD
);
/* Wait for every started worker to finish before tearing down. */
223 pthread_join (workers
[--nworkers
], NULL
);
227 /* Finalize (for filters), called just before close. */
229 r
= backend_finalize (conn
->top_context
);
235 free_connection (conn
);
236 unlock_connection ();
/* Allocate and initialize a struct connection for a client on
 * sockin/sockout with the given number of worker threads.
 * Returns the new connection, or (presumably) NULL on error via the
 * cleanup code at the end -- the intervening error-path lines are missing
 * from this extract.
 * NOTE(review): fragmentary -- opening brace, declaration of `opt`,
 * NULL-check after calloc, several error gotos/returns, the #ifdef
 * HAVE_PIPE2 framing around the pipe setup, and the final return are not
 * visible.  Comments describe only what is shown.
 */
239 static struct connection
*
240 new_connection (int sockin
, int sockout
, int nworkers
)
242 struct connection
*conn
;
245 socklen_t optlen
= sizeof opt
;
/* calloc zeroes the structure, so all pointer/flag fields start NULL/0. */
248 conn
= calloc (1, sizeof *conn
);
/* Mark the self-pipe fds invalid until they are actually created below. */
253 conn
->status_pipe
[0] = conn
->status_pipe
[1] = -1;
255 pthread_mutex_init (&conn
->request_lock
, NULL
);
256 pthread_mutex_init (&conn
->read_lock
, NULL
);
257 pthread_mutex_init (&conn
->write_lock
, NULL
);
258 pthread_mutex_init (&conn
->status_lock
, NULL
);
/* One default-exportname slot per backend in the filter/plugin chain
 * (top->i is the depth of the top backend). */
260 conn
->default_exportname
= calloc (top
->i
+ 1,
261 sizeof *conn
->default_exportname
);
262 if (conn
->default_exportname
== NULL
) {
267 conn
->status
= STATUS_ACTIVE
;
268 conn
->nworkers
= nworkers
;
/* Preferred path: pipe2 creates the pipe-to-self atomically non-blocking
 * and close-on-exec. */
271 if (pipe2 (conn
->status_pipe
, O_NONBLOCK
| O_CLOEXEC
)) {
277 /* If we were fully parallel, then this function could be
278 * accepting connections in one thread while another thread could
279 * be in a plugin trying to fork. But plugins.c forced
280 * thread_model to serialize_all_requests when it detects a lack
281 * of atomic CLOEXEC, at which point, we can use a mutex to ensure
282 * we aren't accepting until the plugin is not running, making
283 * non-atomicity okay.
285 assert (thread_model
<= NBDKIT_THREAD_MODEL_SERIALIZE_ALL_REQUESTS
);
/* Fallback: plain pipe(), then set the flags non-atomically. */
287 if (pipe (conn
->status_pipe
)) {
292 if (set_nonblock (set_cloexec (conn
->status_pipe
[0])) == -1) {
294 close (conn
->status_pipe
[1]);
298 if (set_nonblock (set_cloexec (conn
->status_pipe
[1])) == -1) {
300 close (conn
->status_pipe
[0]);
305 #else /* !HAVE_PIPE2 && !HAVE_PIPE */
306 /* Windows has neither pipe2 nor pipe. XXX */
311 conn
->sockin
= sockin
;
312 conn
->sockout
= sockout
;
313 conn
->recv
= raw_recv
;
/* Pick the send implementation: send(2) (which supports MSG_MORE) when
 * sockout is really a socket, otherwise plain write(2) -- e.g. when
 * serving on stdin/stdout.  The unconditional assignment at "320" is
 * presumably the non-getsockopt build's branch -- the surrounding #ifdef
 * lines are missing from this extract. */
315 if (getsockopt (sockout
, SOL_SOCKET
, SO_TYPE
, &opt
, &optlen
) == 0)
316 conn
->send
= raw_send_socket
;
318 conn
->send
= raw_send_other
;
320 conn
->send
= raw_send_socket
;
322 conn
->close
= raw_close
;
324 threadlocal_set_conn (conn
);
/* Error cleanup: undo everything initialized above, in reverse order. */
328 #if defined(HAVE_PIPE2) || defined(HAVE_PIPE)
331 if (conn
->status_pipe
[0] >= 0)
332 close (conn
->status_pipe
[0]);
333 if (conn
->status_pipe
[1] >= 0)
334 close (conn
->status_pipe
[1]);
335 free (conn
->default_exportname
);
338 pthread_mutex_destroy (&conn
->request_lock
);
339 pthread_mutex_destroy (&conn
->read_lock
);
340 pthread_mutex_destroy (&conn
->write_lock
);
341 pthread_mutex_destroy (&conn
->status_lock
);
/* Tear down a connection created by new_connection: close the sockets,
 * close the backend, destroy the self-pipe and mutexes, and free all
 * per-connection allocations.
 * NOTE(review): fragmentary -- the return type (presumably static void),
 * opening brace, the `quit` guard referenced by the comment at "356",
 * the loop over backends freeing default_exportname entries (only its
 * body at "383" is visible, using a loop variable `b`), and the closing
 * brace are missing from view.
 */
347 free_connection (struct connection
*conn
)
354 conn
->close (SHUT_RDWR
);
356 /* Don't call the plugin again if quit has been set because the main
357 * thread will be in the process of unloading it. The plugin.unload
358 * callback should always be called.
362 if (conn
->top_context
) {
363 backend_close (conn
->top_context
);
/* NULL the pointer so a later access cannot use the closed context. */
364 conn
->top_context
= NULL
;
/* Both pipe fds are created together, so checking [0] covers both. */
369 if (conn
->status_pipe
[0] >= 0) {
370 close (conn
->status_pipe
[0]);
371 close (conn
->status_pipe
[1]);
374 pthread_mutex_destroy (&conn
->request_lock
);
375 pthread_mutex_destroy (&conn
->read_lock
);
376 pthread_mutex_destroy (&conn
->write_lock
);
377 pthread_mutex_destroy (&conn
->status_lock
);
379 free (conn
->exportname_from_set_meta_context
);
383 free (conn
->default_exportname
[b
->i
]);
384 free (conn
->default_exportname
);
/* Clear the thread-local so stale conn pointers are not reachable. */
387 threadlocal_set_conn (NULL
);
390 /* Write buffer to conn->sockout with send() and either succeed completely
391 * (returns 0) or fail (returns -1). flags may include SEND_MORE as a hint
392 * that this send will be followed by related data.
/* NOTE(review): fragmentary -- the return type line, opening brace, the
 * declaration of `r` and `f` (f presumably gets MSG_MORE or similar when
 * SEND_MORE is set), the retry/advance loop around send(), and the
 * returns are missing from this extract.
 */
395 raw_send_socket (const void *vbuf
, size_t len
, int flags
)
398 int sock
= conn
->sockout
;
/* Char pointer so the (missing) loop can advance through the buffer. */
399 const char *buf
= vbuf
;
405 if (flags
& SEND_MORE
)
409 r
= send (sock
, buf
, len
, f
);
/* EINTR/EAGAIN are transient: presumably retried by the enclosing loop. */
411 if (errno
== EINTR
|| errno
== EAGAIN
)
423 /* Write buffer to conn->sockout with write() and either succeed completely
424 * (returns 0) or fail (returns -1). flags is ignored.
/* Used when sockout is not a socket (see the getsockopt SO_TYPE check in
 * new_connection), e.g. a plain pipe or stdout.
 * NOTE(review): fragmentary -- return type, opening brace, declaration of
 * `r`, the write loop, and the returns are missing from this extract.
 */
427 raw_send_other (const void *vbuf
, size_t len
, int flags
)
430 int sock
= conn
->sockout
;
431 const char *buf
= vbuf
;
436 r
= write (sock
, buf
, len
);
/* Transient errors; presumably retried by the enclosing (missing) loop. */
438 if (errno
== EINTR
|| errno
== EAGAIN
)
450 /* Read buffer from conn->sockin and either succeed completely
451 * (returns > 0), read an EOF (returns 0), or fail (returns -1).
/* NOTE(review): fragmentary -- return type, opening brace, declarations
 * of `r` and `buf`, the read loop, the EOF/partial-record handling logic
 * hinted at by first_read and the comment at "481", and the returns are
 * missing from this extract.
 */
454 raw_recv (void *vbuf
, size_t len
)
457 int sock
= conn
->sockin
;
/* Distinguishes clean EOF (nothing read yet) from EOF mid-record. */
460 bool first_read
= true;
463 /* On Unix we want to use read(2) here because that allows us to
464 * read from non-sockets (think: nbdkit -s). In particular this
465 * makes fuzzing possible. However this is not possible on
466 * Windows where we must use recv.
469 r
= read (sock
, buf
, len
);
471 r
= recv (sock
, buf
, len
, 0);
/* Transient errors; presumably retried by the enclosing (missing) loop. */
474 if (errno
== EINTR
|| errno
== EAGAIN
)
481 /* Partial record read. This is an error. */
493 /* There's no place in the NBD protocol to send back errors from
494 * close, so this function ignores errors.
/* conn->close implementation.  SHUT_WR only shuts down (or closes) the
 * write side; SHUT_RDWR (the code at "509"-"512") closes both fds,
 * avoiding a double close when sockin and sockout are the same fd.
 * NOTE(review): fragmentary -- the function signature line
 * (raw_close (int how), per the forward declaration), opening brace,
 * the else/branch structure joining the two halves, and the closing
 * brace are missing from this extract.
 */
501 if (conn
->sockout
>= 0 && how
== SHUT_WR
) {
/* Same fd for both directions: shutdown() the write half only, keeping
 * the read half usable. */
502 if (conn
->sockin
== conn
->sockout
)
503 shutdown (conn
->sockout
, how
);
/* Distinct fds: the write fd can simply be closed. */
505 closesocket (conn
->sockout
);
509 if (conn
->sockin
>= 0)
510 closesocket (conn
->sockin
);
511 if (conn
->sockout
>= 0 && conn
->sockin
!= conn
->sockout
)
512 closesocket (conn
->sockout
);