/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tsocket
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <errno.h>

#include "system/filesys.h"
#include "tsocket_internal.h"
/*
 * State for a queued datagram send: the caller-owned parameters are
 * remembered until the queue entry triggers, and the sendto result is
 * stashed in ret for the _recv function.
 */
struct tdgram_sendto_queue_state {
	/* this structs are owned by the caller */
	struct {
		struct tevent_context *ev;
		struct tdgram_context *dgram;
		const uint8_t *buf;
		size_t len;
		const struct tsocket_address *dst;
	} caller;
	/* number of bytes sent, returned by tdgram_sendto_queue_recv() */
	ssize_t ret;
};

static void tdgram_sendto_queue_trigger(struct tevent_req *req,
					void *private_data);
static void tdgram_sendto_queue_done(struct tevent_req *subreq);
45 struct tevent_req
*tdgram_sendto_queue_send(TALLOC_CTX
*mem_ctx
,
46 struct tevent_context
*ev
,
47 struct tdgram_context
*dgram
,
48 struct tevent_queue
*queue
,
51 struct tsocket_address
*dst
)
53 struct tevent_req
*req
;
54 struct tdgram_sendto_queue_state
*state
;
55 struct tevent_queue_entry
*e
;
57 req
= tevent_req_create(mem_ctx
, &state
,
58 struct tdgram_sendto_queue_state
);
63 state
->caller
.ev
= ev
;
64 state
->caller
.dgram
= dgram
;
65 state
->caller
.buf
= buf
;
66 state
->caller
.len
= len
;
67 state
->caller
.dst
= dst
;
71 * we use tevent_queue_add_optimize_empty() with allow_direct
72 * in order to optimize for the empty queue case.
74 e
= tevent_queue_add_optimize_empty(
78 tdgram_sendto_queue_trigger
,
80 if (tevent_req_nomem(e
, req
)) {
81 return tevent_req_post(req
, ev
);
83 if (!tevent_req_is_in_progress(req
)) {
84 return tevent_req_post(req
, ev
);
90 static void tdgram_sendto_queue_trigger(struct tevent_req
*req
,
93 struct tdgram_sendto_queue_state
*state
= tevent_req_data(req
,
94 struct tdgram_sendto_queue_state
);
95 struct tevent_req
*subreq
;
97 subreq
= tdgram_sendto_send(state
,
103 if (tevent_req_nomem(subreq
, req
)) {
106 tevent_req_set_callback(subreq
, tdgram_sendto_queue_done
, req
);
109 static void tdgram_sendto_queue_done(struct tevent_req
*subreq
)
111 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
113 struct tdgram_sendto_queue_state
*state
= tevent_req_data(req
,
114 struct tdgram_sendto_queue_state
);
118 ret
= tdgram_sendto_recv(subreq
, &sys_errno
);
121 tevent_req_error(req
, sys_errno
);
126 tevent_req_done(req
);
129 ssize_t
tdgram_sendto_queue_recv(struct tevent_req
*req
, int *perrno
)
131 struct tdgram_sendto_queue_state
*state
= tevent_req_data(req
,
132 struct tdgram_sendto_queue_state
);
135 ret
= tsocket_simple_int_recv(req
, perrno
);
140 tevent_req_received(req
);
144 struct tstream_readv_pdu_state
{
145 /* this structs are owned by the caller */
147 struct tevent_context
*ev
;
148 struct tstream_context
*stream
;
149 tstream_readv_pdu_next_vector_t next_vector_fn
;
150 void *next_vector_private
;
154 * Each call to the callback resets iov and count
155 * the callback allocated the iov as child of our state,
156 * that means we are allowed to modify and free it.
158 * we should call the callback every time we filled the given
159 * vector and ask for a new vector. We return if the callback
162 struct iovec
*vector
;
166 * the total number of bytes we read,
167 * the return value of the _recv function
172 static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req
*req
);
173 static void tstream_readv_pdu_readv_done(struct tevent_req
*subreq
);
175 struct tevent_req
*tstream_readv_pdu_send(TALLOC_CTX
*mem_ctx
,
176 struct tevent_context
*ev
,
177 struct tstream_context
*stream
,
178 tstream_readv_pdu_next_vector_t next_vector_fn
,
179 void *next_vector_private
)
181 struct tevent_req
*req
;
182 struct tstream_readv_pdu_state
*state
;
184 req
= tevent_req_create(mem_ctx
, &state
,
185 struct tstream_readv_pdu_state
);
190 state
->caller
.ev
= ev
;
191 state
->caller
.stream
= stream
;
192 state
->caller
.next_vector_fn
= next_vector_fn
;
193 state
->caller
.next_vector_private
= next_vector_private
;
195 state
->vector
= NULL
;
197 state
->total_read
= 0;
199 tstream_readv_pdu_ask_for_next_vector(req
);
200 if (!tevent_req_is_in_progress(req
)) {
207 return tevent_req_post(req
, ev
);
210 static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req
*req
)
212 struct tstream_readv_pdu_state
*state
= tevent_req_data(req
,
213 struct tstream_readv_pdu_state
);
217 struct tevent_req
*subreq
;
218 bool optimize
= false;
219 bool save_optimize
= false;
221 if (state
->count
> 0) {
223 * This is not the first time we asked for a vector,
224 * which means parts of the pdu already arrived.
226 * In this case it make sense to enable
227 * a syscall/performance optimization if the
228 * low level tstream implementation supports it.
233 TALLOC_FREE(state
->vector
);
236 ret
= state
->caller
.next_vector_fn(state
->caller
.stream
,
237 state
->caller
.next_vector_private
,
238 state
, &state
->vector
, &state
->count
);
240 tevent_req_error(req
, errno
);
244 if (state
->count
== 0) {
245 tevent_req_done(req
);
249 for (i
=0; i
< state
->count
; i
++) {
250 size_t tmp
= to_read
;
251 tmp
+= state
->vector
[i
].iov_len
;
254 tevent_req_error(req
, EMSGSIZE
);
262 * this is invalid the next vector function should have
263 * reported count == 0.
266 tevent_req_error(req
, EINVAL
);
270 if (state
->total_read
+ to_read
< state
->total_read
) {
271 tevent_req_error(req
, EMSGSIZE
);
277 * If the low level stream is a bsd socket
278 * we will get syscall optimization.
280 * If it is not a bsd socket
281 * tstream_bsd_optimize_readv() just returns.
283 save_optimize
= tstream_bsd_optimize_readv(state
->caller
.stream
,
286 subreq
= tstream_readv_send(state
,
288 state
->caller
.stream
,
292 tstream_bsd_optimize_readv(state
->caller
.stream
,
295 if (tevent_req_nomem(subreq
, req
)) {
298 tevent_req_set_callback(subreq
, tstream_readv_pdu_readv_done
, req
);
301 static void tstream_readv_pdu_readv_done(struct tevent_req
*subreq
)
303 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
305 struct tstream_readv_pdu_state
*state
= tevent_req_data(req
,
306 struct tstream_readv_pdu_state
);
310 ret
= tstream_readv_recv(subreq
, &sys_errno
);
312 tevent_req_error(req
, sys_errno
);
316 state
->total_read
+= ret
;
318 /* ask the callback for a new vector we should fill */
319 tstream_readv_pdu_ask_for_next_vector(req
);
322 int tstream_readv_pdu_recv(struct tevent_req
*req
, int *perrno
)
324 struct tstream_readv_pdu_state
*state
= tevent_req_data(req
,
325 struct tstream_readv_pdu_state
);
328 ret
= tsocket_simple_int_recv(req
, perrno
);
330 ret
= state
->total_read
;
333 tevent_req_received(req
);
337 struct tstream_readv_pdu_queue_state
{
338 /* this structs are owned by the caller */
340 struct tevent_context
*ev
;
341 struct tstream_context
*stream
;
342 tstream_readv_pdu_next_vector_t next_vector_fn
;
343 void *next_vector_private
;
348 static void tstream_readv_pdu_queue_trigger(struct tevent_req
*req
,
350 static void tstream_readv_pdu_queue_done(struct tevent_req
*subreq
);
352 struct tevent_req
*tstream_readv_pdu_queue_send(TALLOC_CTX
*mem_ctx
,
353 struct tevent_context
*ev
,
354 struct tstream_context
*stream
,
355 struct tevent_queue
*queue
,
356 tstream_readv_pdu_next_vector_t next_vector_fn
,
357 void *next_vector_private
)
359 struct tevent_req
*req
;
360 struct tstream_readv_pdu_queue_state
*state
;
361 struct tevent_queue_entry
*e
;
363 req
= tevent_req_create(mem_ctx
, &state
,
364 struct tstream_readv_pdu_queue_state
);
369 state
->caller
.ev
= ev
;
370 state
->caller
.stream
= stream
;
371 state
->caller
.next_vector_fn
= next_vector_fn
;
372 state
->caller
.next_vector_private
= next_vector_private
;
376 * we use tevent_queue_add_optimize_empty() with allow_direct
377 * in order to optimize for the empty queue case.
379 e
= tevent_queue_add_optimize_empty(
383 tstream_readv_pdu_queue_trigger
,
385 if (tevent_req_nomem(e
, req
)) {
386 return tevent_req_post(req
, ev
);
388 if (!tevent_req_is_in_progress(req
)) {
389 return tevent_req_post(req
, ev
);
395 static void tstream_readv_pdu_queue_trigger(struct tevent_req
*req
,
398 struct tstream_readv_pdu_queue_state
*state
= tevent_req_data(req
,
399 struct tstream_readv_pdu_queue_state
);
400 struct tevent_req
*subreq
;
402 subreq
= tstream_readv_pdu_send(state
,
404 state
->caller
.stream
,
405 state
->caller
.next_vector_fn
,
406 state
->caller
.next_vector_private
);
407 if (tevent_req_nomem(subreq
, req
)) {
410 tevent_req_set_callback(subreq
, tstream_readv_pdu_queue_done
,req
);
413 static void tstream_readv_pdu_queue_done(struct tevent_req
*subreq
)
415 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
417 struct tstream_readv_pdu_queue_state
*state
= tevent_req_data(req
,
418 struct tstream_readv_pdu_queue_state
);
422 ret
= tstream_readv_pdu_recv(subreq
, &sys_errno
);
425 tevent_req_error(req
, sys_errno
);
430 tevent_req_done(req
);
433 int tstream_readv_pdu_queue_recv(struct tevent_req
*req
, int *perrno
)
435 struct tstream_readv_pdu_queue_state
*state
= tevent_req_data(req
,
436 struct tstream_readv_pdu_queue_state
);
439 ret
= tsocket_simple_int_recv(req
, perrno
);
444 tevent_req_received(req
);
/*
 * State for a tstream_writev request serialized behind a tevent_queue;
 * stores the caller parameters until the queue entry triggers, and the
 * resulting byte count in ret.
 */
struct tstream_writev_queue_state {
	/* this structs are owned by the caller */
	struct {
		struct tevent_context *ev;
		struct tstream_context *stream;
		const struct iovec *vector;
		size_t count;
	} caller;
	/* bytes written, returned by tstream_writev_queue_recv() */
	int ret;
};

static void tstream_writev_queue_trigger(struct tevent_req *req,
					 void *private_data);
static void tstream_writev_queue_done(struct tevent_req *subreq);
463 struct tevent_req
*tstream_writev_queue_send(TALLOC_CTX
*mem_ctx
,
464 struct tevent_context
*ev
,
465 struct tstream_context
*stream
,
466 struct tevent_queue
*queue
,
467 const struct iovec
*vector
,
470 struct tevent_req
*req
;
471 struct tstream_writev_queue_state
*state
;
472 struct tevent_queue_entry
*e
;
474 req
= tevent_req_create(mem_ctx
, &state
,
475 struct tstream_writev_queue_state
);
480 state
->caller
.ev
= ev
;
481 state
->caller
.stream
= stream
;
482 state
->caller
.vector
= vector
;
483 state
->caller
.count
= count
;
487 * we use tevent_queue_add_optimize_empty() with allow_direct
488 * in order to optimize for the empty queue case.
490 e
= tevent_queue_add_optimize_empty(
494 tstream_writev_queue_trigger
,
496 if (tevent_req_nomem(e
, req
)) {
497 return tevent_req_post(req
, ev
);
499 if (!tevent_req_is_in_progress(req
)) {
500 return tevent_req_post(req
, ev
);
506 static void tstream_writev_queue_trigger(struct tevent_req
*req
,
509 struct tstream_writev_queue_state
*state
= tevent_req_data(req
,
510 struct tstream_writev_queue_state
);
511 struct tevent_req
*subreq
;
513 subreq
= tstream_writev_send(state
,
515 state
->caller
.stream
,
516 state
->caller
.vector
,
517 state
->caller
.count
);
518 if (tevent_req_nomem(subreq
, req
)) {
521 tevent_req_set_callback(subreq
, tstream_writev_queue_done
,req
);
524 static void tstream_writev_queue_done(struct tevent_req
*subreq
)
526 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
528 struct tstream_writev_queue_state
*state
= tevent_req_data(req
,
529 struct tstream_writev_queue_state
);
533 ret
= tstream_writev_recv(subreq
, &sys_errno
);
536 tevent_req_error(req
, sys_errno
);
541 tevent_req_done(req
);
544 int tstream_writev_queue_recv(struct tevent_req
*req
, int *perrno
)
546 struct tstream_writev_queue_state
*state
= tevent_req_data(req
,
547 struct tstream_writev_queue_state
);
550 ret
= tsocket_simple_int_recv(req
, perrno
);
555 tevent_req_received(req
);