2 * Copyright (C) 2013-2019 Red Hat Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * * Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of Red Hat nor the names of its contributors may be
16 * used to endorse or promote products derived from this software without
17 * specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
22 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
26 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 #include <sys/socket.h>
48 /* Default number of parallel requests. */
49 #define DEFAULT_PARALLEL_REQUESTS 16
51 static struct connection
*new_connection (int sockin
, int sockout
,
53 static void free_connection (struct connection
*conn
);
55 /* Don't call these raw socket functions directly. Use conn->recv etc. */
56 static int raw_recv (struct connection
*, void *buf
, size_t len
);
57 static int raw_send_socket (struct connection
*, const void *buf
, size_t len
,
59 static int raw_send_other (struct connection
*, const void *buf
, size_t len
,
61 static void raw_close (struct connection
*);
64 connection_get_handle (struct connection
*conn
, size_t i
)
66 assert (i
< conn
->nr_handles
);
67 return conn
->handles
[i
].handle
;
/* Read the connection's status field under conn->status_lock.
 * NOTE(review): this extraction is fragmentary — the return type, the
 * actual read of conn->status, the handling of lock failure (the
 * trailing "))" suggests each mutex call is wrapped in a condition),
 * and the return statement are all missing.  Comments only; the
 * surviving code fragments are left byte-identical.
 */
71 connection_get_status (struct connection
*conn
)
/* Acquire the lock protecting the shared status field. */
76 pthread_mutex_lock (&conn
->status_lock
))
/* Release the lock after the (missing) read of conn->status. */
80 pthread_mutex_unlock (&conn
->status_lock
))
85 /* Update the status if the new value is lower than the existing value.
86 * For convenience, return the incoming value.
89 connection_set_status (struct connection
*conn
, int value
)
92 pthread_mutex_lock (&conn
->status_lock
))
94 if (value
< conn
->status
) {
95 if (conn
->nworkers
&& conn
->status
> 0) {
98 assert (conn
->status_pipe
[1] >= 0);
99 if (write (conn
->status_pipe
[1], &c
, 1) != 1 && errno
!= EAGAIN
)
100 debug ("failed to notify pipe-to-self: %m");
102 conn
->status
= value
;
104 if (conn
->nworkers
&&
105 pthread_mutex_unlock (&conn
->status_lock
))
/* Data handed to each worker thread; see connection_worker.
 * NOTE(review): the struct's header and closing lines are missing from
 * this extraction; the 'name' member is inferred from connection_worker
 * reading worker->name — confirm member order against upstream.
 */
struct worker_data {
  struct connection *conn;      /* The connection this worker serves. */
  char *name;                   /* Heap-allocated thread name. */
};
116 connection_worker (void *data
)
118 struct worker_data
*worker
= data
;
119 struct connection
*conn
= worker
->conn
;
120 char *name
= worker
->name
;
122 debug ("starting worker thread %s", name
);
123 threadlocal_new_server_thread ();
124 threadlocal_set_name (name
);
125 threadlocal_set_conn (conn
);
128 while (!quit
&& connection_get_status (conn
) > 0)
129 protocol_recv_request_send_reply (conn
);
130 debug ("exiting worker thread %s", threadlocal_get_name ());
/* Serve one client connection on sockin/sockout until it finishes.
 * NOTE(review): this extraction has many missing lines (connection
 * locking, goto error paths, the thread-count condition's second half,
 * backend_finalize guards, the return value) — comments only; the
 * surviving code fragments are left byte-identical.
 */
136 handle_single_connection (int sockin
, int sockout
)
138 const char *plugin_name
;
140 struct connection
*conn
;
/* Worker pool size: the configured thread count, or 16 by default. */
141 int nworkers
= threads
? threads
: DEFAULT_PARALLEL_REQUESTS
;
142 pthread_t
*workers
= NULL
;
/* Backends that cannot run fully parallel get no worker pool. */
146 if (backend
->thread_model (backend
) < NBDKIT_THREAD_MODEL_PARALLEL
||
149 conn
= new_connection (sockin
, sockout
, nworkers
);
153 /* NB: because of an asynchronous exit backend can be set to NULL at
154 * just about any time.
157 plugin_name
= backend
->plugin_name (backend
);
159 plugin_name
= "(unknown)";
160 threadlocal_set_name (plugin_name
);
/* Give the backend a chance to reject the client before the handshake. */
162 if (backend
&& backend
->preconnect (backend
, conn
, read_only
) == -1)
167 * Note that this calls the backend .open callback when it is safe
168 * to do so (eg. after TLS authentication).
170 if (protocol_handshake (conn
) == -1)
/* Serial path: process all requests on this thread. */
174 /* No need for a separate thread. */
175 debug ("handshake complete, processing requests serially");
176 while (!quit
&& connection_get_status (conn
) > 0)
177 protocol_recv_request_send_reply (conn
);
180 /* Create thread pool to process requests. */
181 debug ("handshake complete, processing requests with %d threads",
183 workers
= calloc (nworkers
, sizeof *workers
);
184 if (unlikely (!workers
)) {
/* NOTE(review): nworkers is reused as the loop counter from here on. */
189 for (nworkers
= 0; nworkers
< conn
->nworkers
; nworkers
++) {
190 struct worker_data
*worker
= malloc (sizeof *worker
);
193 if (unlikely (!worker
)) {
/* Any worker setup failure marks the whole connection failed (-1). */
195 connection_set_status (conn
, -1);
/* Per-thread name "<plugin>.<n>" used for debug output. */
198 if (unlikely (asprintf (&worker
->name
, "%s.%d", plugin_name
, nworkers
)
201 connection_set_status (conn
, -1);
206 err
= pthread_create (&workers
[nworkers
], NULL
, connection_worker
,
208 if (unlikely (err
)) {
210 perror ("pthread_create");
211 connection_set_status (conn
, -1);
/* Join every worker that was started before tearing down. */
219 pthread_join (workers
[--nworkers
], NULL
);
223 /* Finalize (for filters), called just before close. */
225 r
= backend_finalize (backend
, conn
);
226 unlock_request (conn
);
231 free_connection (conn
);
232 unlock_connection ();
/* Allocate and initialize a struct connection for a client on
 * sockin/sockout, with nworkers worker threads (0 = serial mode).
 * NOTE(review): this extraction is missing many lines (the 'opt'
 * declaration, NULL-check/error returns after calloc, the loop header
 * around reset_b_conn_handle, the error label and final returns) —
 * comments only; the surviving code fragments are left byte-identical.
 */
235 static struct connection
*
236 new_connection (int sockin
, int sockout
, int nworkers
)
238 struct connection
*conn
;
240 socklen_t optlen
= sizeof opt
;
243 conn
= calloc (1, sizeof *conn
);
/* Mark the self-pipe fds as not-yet-open so cleanup can test >= 0. */
249 conn
->status_pipe
[0] = conn
->status_pipe
[1] = -1;
/* One handle slot per backend layer (filters + plugin). */
251 conn
->handles
= calloc (backend
->i
+ 1, sizeof *conn
->handles
);
252 if (conn
->handles
== NULL
) {
256 conn
->nr_handles
= backend
->i
+ 1;
258 reset_b_conn_handle (&conn
->handles
[b
->i
]);
261 conn
->nworkers
= nworkers
;
/* Prefer pipe2 so the self-pipe is atomically O_NONBLOCK|O_CLOEXEC. */
264 if (pipe2 (conn
->status_pipe
, O_NONBLOCK
| O_CLOEXEC
)) {
269 /* If we were fully parallel, then this function could be
270 * accepting connections in one thread while another thread could
271 * be in a plugin trying to fork. But plugins.c forced
272 * thread_model to serialize_all_requests when it detects a lack
273 * of atomic CLOEXEC, at which point, we can use a mutex to ensure
274 * we aren't accepting until the plugin is not running, making
275 * non-atomicity okay.
277 assert (backend
->thread_model (backend
) <=
278 NBDKIT_THREAD_MODEL_SERIALIZE_ALL_REQUESTS
);
/* Fallback for platforms without pipe2: plain pipe, flags set below. */
280 if (pipe (conn
->status_pipe
)) {
282 unlock_request (NULL
);
285 if (set_nonblock (set_cloexec (conn
->status_pipe
[0])) == -1) {
287 close (conn
->status_pipe
[1]);
288 unlock_request (NULL
);
291 if (set_nonblock (set_cloexec (conn
->status_pipe
[1])) == -1) {
293 close (conn
->status_pipe
[0]);
294 unlock_request (NULL
);
297 unlock_request (NULL
);
300 conn
->sockin
= sockin
;
301 conn
->sockout
= sockout
;
302 pthread_mutex_init (&conn
->request_lock
, NULL
);
303 pthread_mutex_init (&conn
->read_lock
, NULL
);
304 pthread_mutex_init (&conn
->write_lock
, NULL
);
305 pthread_mutex_init (&conn
->status_lock
, NULL
);
/* Pick the send implementation: send() when sockout really is a
 * socket (getsockopt succeeds), plain write() otherwise. */
307 conn
->recv
= raw_recv
;
308 if (getsockopt (sockout
, SOL_SOCKET
, SO_TYPE
, &opt
, &optlen
) == 0)
309 conn
->send
= raw_send_socket
;
311 conn
->send
= raw_send_other
;
312 conn
->close
= raw_close
;
314 threadlocal_set_conn (conn
);
/* Error-path cleanup: close whichever self-pipe fds were opened. */
319 if (conn
->status_pipe
[0] >= 0)
320 close (conn
->status_pipe
[0]);
321 if (conn
->status_pipe
[1] >= 0)
322 close (conn
->status_pipe
[1]);
323 free (conn
->handles
);
/* Tear down a connection: close its sockets, release backend handles,
 * destroy its locks and free its memory.
 * NOTE(review): missing lines include the NULL-check on conn, the
 * conn->close call, the dup2 of the /dev/null fds onto stdin/stdout,
 * the quit/backend guards, and the final free (conn) — comments only;
 * the surviving code fragments are left byte-identical.
 */
329 free_connection (struct connection
*conn
)
334 threadlocal_set_conn (NULL
);
339 /* Restore something to stdin/out so the rest of our code can
340 * continue to assume that all new fds will be above stderr.
341 * Swap directions to get EBADF on improper use of stdin/out.
343 fd
= open ("/dev/null", O_WRONLY
| O_CLOEXEC
);
345 fd
= open ("/dev/null", O_RDONLY
| O_CLOEXEC
);
349 /* Don't call the plugin again if quit has been set because the main
350 * thread will be in the process of unloading it. The plugin.unload
351 * callback should always be called.
355 backend_close (backend
, conn
);
356 unlock_request (conn
);
/* Close both ends of the pipe-to-self, if it was created. */
359 if (conn
->status_pipe
[0] >= 0) {
360 close (conn
->status_pipe
[0]);
361 close (conn
->status_pipe
[1]);
364 pthread_mutex_destroy (&conn
->request_lock
);
365 pthread_mutex_destroy (&conn
->read_lock
);
366 pthread_mutex_destroy (&conn
->write_lock
);
367 pthread_mutex_destroy (&conn
->status_lock
);
369 free (conn
->handles
);
373 /* Write buffer to conn->sockout with send() and either succeed completely
374 * (returns 0) or fail (returns -1). flags may include SEND_MORE as a hint
375 * that this send will be followed by related data.
378 raw_send_socket (struct connection
*conn
, const void *vbuf
, size_t len
,
381 int sock
= conn
->sockout
;
382 const char *buf
= vbuf
;
387 if (flags
& SEND_MORE
)
391 r
= send (sock
, buf
, len
, f
);
393 if (errno
== EINTR
|| errno
== EAGAIN
)
404 /* Write buffer to conn->sockout with write() and either succeed completely
405 * (returns 0) or fail (returns -1). flags is ignored.
408 raw_send_other (struct connection
*conn
, const void *vbuf
, size_t len
,
411 int sock
= conn
->sockout
;
412 const char *buf
= vbuf
;
416 r
= write (sock
, buf
, len
);
418 if (errno
== EINTR
|| errno
== EAGAIN
)
429 /* Read buffer from conn->sockin and either succeed completely
430 * (returns > 0), read an EOF (returns 0), or fail (returns -1).
433 raw_recv (struct connection
*conn
, void *vbuf
, size_t len
)
435 int sock
= conn
->sockin
;
438 bool first_read
= true;
441 r
= read (sock
, buf
, len
);
443 if (errno
== EINTR
|| errno
== EAGAIN
)
450 /* Partial record read. This is an error. */
462 /* There's no place in the NBD protocol to send back errors from
463 * close, so this function ignores errors.
466 raw_close (struct connection
*conn
)
468 if (conn
->sockin
>= 0)
469 close (conn
->sockin
);
470 if (conn
->sockout
>= 0 && conn
->sockin
!= conn
->sockout
)
471 close (conn
->sockout
);