/*
 * Copyright (C) 2013-2019 Red Hat Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of Red Hat nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/socket.h>
48 /* Default number of parallel requests. */
49 #define DEFAULT_PARALLEL_REQUESTS 16
51 static struct connection
*new_connection (int sockin
, int sockout
,
53 static void free_connection (struct connection
*conn
);
55 /* Don't call these raw socket functions directly. Use conn->recv etc. */
56 static int raw_recv (struct connection
*, void *buf
, size_t len
);
57 static int raw_send_socket (struct connection
*, const void *buf
, size_t len
,
59 static int raw_send_other (struct connection
*, const void *buf
, size_t len
,
61 static void raw_close (struct connection
*);
64 connection_get_handle (struct connection
*conn
, size_t i
)
66 assert (i
< conn
->nr_handles
);
67 return conn
->handles
[i
].handle
;
71 connection_get_status (struct connection
*conn
)
76 pthread_mutex_lock (&conn
->status_lock
))
80 pthread_mutex_unlock (&conn
->status_lock
))
85 /* Update the status if the new value is lower than the existing value.
86 * For convenience, return the incoming value.
89 connection_set_status (struct connection
*conn
, int value
)
92 pthread_mutex_lock (&conn
->status_lock
))
94 if (value
< conn
->status
) {
95 if (conn
->nworkers
&& conn
->status
> 0) {
98 assert (conn
->status_pipe
[1] >= 0);
99 if (write (conn
->status_pipe
[1], &c
, 1) != 1 && errno
!= EAGAIN
)
100 nbdkit_debug ("failed to notify pipe-to-self: %m");
102 conn
->status
= value
;
104 if (conn
->nworkers
&&
105 pthread_mutex_unlock (&conn
->status_lock
))
111 struct connection
*conn
;
116 connection_worker (void *data
)
118 struct worker_data
*worker
= data
;
119 struct connection
*conn
= worker
->conn
;
120 char *name
= worker
->name
;
122 debug ("starting worker thread %s", name
);
123 threadlocal_new_server_thread ();
124 threadlocal_set_name (name
);
125 threadlocal_set_conn (conn
);
128 while (!quit
&& connection_get_status (conn
) > 0)
129 protocol_recv_request_send_reply (conn
);
130 debug ("exiting worker thread %s", threadlocal_get_name ());
136 _handle_single_connection (int sockin
, int sockout
)
138 const char *plugin_name
;
140 struct connection
*conn
;
141 int nworkers
= threads
? threads
: DEFAULT_PARALLEL_REQUESTS
;
142 pthread_t
*workers
= NULL
;
144 if (backend
->thread_model (backend
) < NBDKIT_THREAD_MODEL_PARALLEL
||
147 conn
= new_connection (sockin
, sockout
, nworkers
);
152 r
= backend_open (backend
, conn
, readonly
);
153 unlock_request (conn
);
157 /* NB: because of an asynchronous exit backend can be set to NULL at
158 * just about any time.
161 plugin_name
= backend
->plugin_name (backend
);
163 plugin_name
= "(unknown)";
164 threadlocal_set_name (plugin_name
);
166 /* Prepare (for filters), called just after open. */
169 r
= backend
->prepare (backend
, conn
);
172 unlock_request (conn
);
177 if (protocol_handshake (conn
) == -1)
181 /* No need for a separate thread. */
182 debug ("handshake complete, processing requests serially");
183 while (!quit
&& connection_get_status (conn
) > 0)
184 protocol_recv_request_send_reply (conn
);
187 /* Create thread pool to process requests. */
188 debug ("handshake complete, processing requests with %d threads",
190 workers
= calloc (nworkers
, sizeof *workers
);
196 for (nworkers
= 0; nworkers
< conn
->nworkers
; nworkers
++) {
197 struct worker_data
*worker
= malloc (sizeof *worker
);
202 connection_set_status (conn
, -1);
205 if (asprintf (&worker
->name
, "%s.%d", plugin_name
, nworkers
) < 0) {
207 connection_set_status (conn
, -1);
212 err
= pthread_create (&workers
[nworkers
], NULL
, connection_worker
,
216 perror ("pthread_create");
217 connection_set_status (conn
, -1);
225 pthread_join (workers
[--nworkers
], NULL
);
229 /* Finalize (for filters), called just before close. */
232 r
= backend
->finalize (backend
, conn
);
235 unlock_request (conn
);
239 ret
= connection_get_status (conn
);
241 free_connection (conn
);
/* Public entry point: serve one connection while holding the global
 * connection lock.  NOTE(review): the matching lock_connection () call
 * was lost in extraction and is reconstructed here to pair with the
 * visible unlock_connection () — confirm.
 */
int
handle_single_connection (int sockin, int sockout)
{
  int r;

  lock_connection ();
  r = _handle_single_connection (sockin, sockout);
  unlock_connection ();
  return r;
}
257 static struct connection
*
258 new_connection (int sockin
, int sockout
, int nworkers
)
260 struct connection
*conn
;
262 socklen_t optlen
= sizeof opt
;
265 conn
= calloc (1, sizeof *conn
);
270 conn
->handles
= calloc (backend
->i
+ 1, sizeof *conn
->handles
);
271 if (conn
->handles
== NULL
) {
276 conn
->nr_handles
= backend
->i
+ 1;
277 memset (conn
->handles
, -1, conn
->nr_handles
* sizeof *conn
->handles
);
279 conn
->handles
[b
->i
].handle
= NULL
;
282 conn
->nworkers
= nworkers
;
285 if (pipe2 (conn
->status_pipe
, O_NONBLOCK
| O_CLOEXEC
)) {
291 /* If we were fully parallel, then this function could be
292 * accepting connections in one thread while another thread could
293 * be in a plugin trying to fork. But plugins.c forced
294 * thread_model to serialize_all_requests when it detects a lack
295 * of atomic CLOEXEC, at which point, we can use a mutex to ensure
296 * we aren't accepting until the plugin is not running, making
297 * non-atomicity okay.
299 assert (backend
->thread_model (backend
) <=
300 NBDKIT_THREAD_MODEL_SERIALIZE_ALL_REQUESTS
);
302 if (pipe (conn
->status_pipe
)) {
305 unlock_request (NULL
);
308 if (set_nonblock (set_cloexec (conn
->status_pipe
[0])) == -1) {
310 close (conn
->status_pipe
[1]);
312 unlock_request (NULL
);
315 if (set_nonblock (set_cloexec (conn
->status_pipe
[1])) == -1) {
317 close (conn
->status_pipe
[0]);
319 unlock_request (NULL
);
322 unlock_request (NULL
);
326 conn
->status_pipe
[0] = conn
->status_pipe
[1] = -1;
327 conn
->sockin
= sockin
;
328 conn
->sockout
= sockout
;
329 pthread_mutex_init (&conn
->request_lock
, NULL
);
330 pthread_mutex_init (&conn
->read_lock
, NULL
);
331 pthread_mutex_init (&conn
->write_lock
, NULL
);
332 pthread_mutex_init (&conn
->status_lock
, NULL
);
334 conn
->recv
= raw_recv
;
335 if (getsockopt (sockout
, SOL_SOCKET
, SO_TYPE
, &opt
, &optlen
) == 0)
336 conn
->send
= raw_send_socket
;
338 conn
->send
= raw_send_other
;
339 conn
->close
= raw_close
;
341 threadlocal_set_conn (conn
);
347 free_connection (struct connection
*conn
)
352 threadlocal_set_conn (NULL
);
357 /* Restore something to stdin/out so the rest of our code can
358 * continue to assume that all new fds will be above stderr.
359 * Swap directions to get EBADF on improper use of stdin/out.
361 fd
= open ("/dev/null", O_WRONLY
| O_CLOEXEC
);
363 fd
= open ("/dev/null", O_RDONLY
| O_CLOEXEC
);
367 /* Don't call the plugin again if quit has been set because the main
368 * thread will be in the process of unloading it. The plugin.unload
369 * callback should always be called.
371 if (!quit
&& connection_get_handle (conn
, 0)) {
373 backend
->close (backend
, conn
);
374 unlock_request (conn
);
377 if (conn
->status_pipe
[0] >= 0) {
378 close (conn
->status_pipe
[0]);
379 close (conn
->status_pipe
[1]);
382 pthread_mutex_destroy (&conn
->request_lock
);
383 pthread_mutex_destroy (&conn
->read_lock
);
384 pthread_mutex_destroy (&conn
->write_lock
);
385 pthread_mutex_destroy (&conn
->status_lock
);
387 free (conn
->handles
);
391 /* Write buffer to conn->sockout with send() and either succeed completely
392 * (returns 0) or fail (returns -1). flags may include SEND_MORE as a hint
393 * that this send will be followed by related data.
396 raw_send_socket (struct connection
*conn
, const void *vbuf
, size_t len
,
399 int sock
= conn
->sockout
;
400 const char *buf
= vbuf
;
405 if (flags
& SEND_MORE
)
409 r
= send (sock
, buf
, len
, f
);
411 if (errno
== EINTR
|| errno
== EAGAIN
)
422 /* Write buffer to conn->sockout with write() and either succeed completely
423 * (returns 0) or fail (returns -1). flags is ignored.
426 raw_send_other (struct connection
*conn
, const void *vbuf
, size_t len
,
429 int sock
= conn
->sockout
;
430 const char *buf
= vbuf
;
434 r
= write (sock
, buf
, len
);
436 if (errno
== EINTR
|| errno
== EAGAIN
)
447 /* Read buffer from conn->sockin and either succeed completely
448 * (returns > 0), read an EOF (returns 0), or fail (returns -1).
451 raw_recv (struct connection
*conn
, void *vbuf
, size_t len
)
453 int sock
= conn
->sockin
;
456 bool first_read
= true;
459 r
= read (sock
, buf
, len
);
461 if (errno
== EINTR
|| errno
== EAGAIN
)
468 /* Partial record read. This is an error. */
480 /* There's no place in the NBD protocol to send back errors from
481 * close, so this function ignores errors.
484 raw_close (struct connection
*conn
)
486 if (conn
->sockin
>= 0)
487 close (conn
->sockin
);
488 if (conn
->sockout
>= 0 && conn
->sockin
!= conn
->sockout
)
489 close (conn
->sockout
);