/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2009

   ** NOTE! The following LGPL license applies to the tsocket
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
25 #include "system/filesys.h"
27 #include "tsocket_internal.h"
/*
 * State for tdgram_sendto_queue_send/recv: one datagram send that is
 * serialized behind a tevent_queue.
 */
struct tdgram_sendto_queue_state {
	/* this structs are owned by the caller */
	struct {
		struct tevent_context *ev;
		struct tdgram_context *dgram;
		const uint8_t *buf;	/* datagram payload (borrowed) */
		size_t len;		/* payload length */
		const struct tsocket_address *dst;
	} caller;
	/* bytes sent by the lower tdgram_sendto, reported by _recv() */
	ssize_t ret;
};

static void tdgram_sendto_queue_trigger(struct tevent_req *req,
					void *private_data);
static void tdgram_sendto_queue_done(struct tevent_req *subreq);
45 struct tevent_req
*tdgram_sendto_queue_send(TALLOC_CTX
*mem_ctx
,
46 struct tevent_context
*ev
,
47 struct tdgram_context
*dgram
,
48 struct tevent_queue
*queue
,
51 struct tsocket_address
*dst
)
53 struct tevent_req
*req
;
54 struct tdgram_sendto_queue_state
*state
;
55 struct tevent_queue_entry
*e
;
57 req
= tevent_req_create(mem_ctx
, &state
,
58 struct tdgram_sendto_queue_state
);
63 state
->caller
.ev
= ev
;
64 state
->caller
.dgram
= dgram
;
65 state
->caller
.buf
= buf
;
66 state
->caller
.len
= len
;
67 state
->caller
.dst
= dst
;
71 * we use tevent_queue_add_optimize_empty() with allow_direct
72 * in order to optimize for the empty queue case.
74 e
= tevent_queue_add_optimize_empty(
78 tdgram_sendto_queue_trigger
,
80 if (tevent_req_nomem(e
, req
)) {
81 return tevent_req_post(req
, ev
);
83 if (!tevent_req_is_in_progress(req
)) {
84 return tevent_req_post(req
, ev
);
90 static void tdgram_sendto_queue_trigger(struct tevent_req
*req
,
93 struct tdgram_sendto_queue_state
*state
= tevent_req_data(req
,
94 struct tdgram_sendto_queue_state
);
95 struct tevent_req
*subreq
;
97 subreq
= tdgram_sendto_send(state
,
103 if (tevent_req_nomem(subreq
, req
)) {
106 tevent_req_set_callback(subreq
, tdgram_sendto_queue_done
, req
);
109 static void tdgram_sendto_queue_done(struct tevent_req
*subreq
)
111 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
113 struct tdgram_sendto_queue_state
*state
= tevent_req_data(req
,
114 struct tdgram_sendto_queue_state
);
118 ret
= tdgram_sendto_recv(subreq
, &sys_errno
);
121 tevent_req_error(req
, sys_errno
);
126 tevent_req_done(req
);
129 ssize_t
tdgram_sendto_queue_recv(struct tevent_req
*req
, int *perrno
)
131 struct tdgram_sendto_queue_state
*state
= tevent_req_data(req
,
132 struct tdgram_sendto_queue_state
);
135 ret
= tsocket_simple_int_recv(req
, perrno
);
140 tevent_req_received(req
);
144 struct tstream_readv_pdu_state
{
145 /* this structs are owned by the caller */
147 struct tevent_context
*ev
;
148 struct tstream_context
*stream
;
149 tstream_readv_pdu_next_vector_t next_vector_fn
;
150 void *next_vector_private
;
154 * Each call to the callback resets iov and count
155 * the callback allocated the iov as child of our state,
156 * that means we are allowed to modify and free it.
158 * we should call the callback every time we filled the given
159 * vector and ask for a new vector. We return if the callback
162 struct iovec
*vector
;
166 * the total number of bytes we read,
167 * the return value of the _recv function
172 static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req
*req
);
173 static void tstream_readv_pdu_readv_done(struct tevent_req
*subreq
);
175 struct tevent_req
*tstream_readv_pdu_send(TALLOC_CTX
*mem_ctx
,
176 struct tevent_context
*ev
,
177 struct tstream_context
*stream
,
178 tstream_readv_pdu_next_vector_t next_vector_fn
,
179 void *next_vector_private
)
181 struct tevent_req
*req
;
182 struct tstream_readv_pdu_state
*state
;
184 req
= tevent_req_create(mem_ctx
, &state
,
185 struct tstream_readv_pdu_state
);
190 state
->caller
.ev
= ev
;
191 state
->caller
.stream
= stream
;
192 state
->caller
.next_vector_fn
= next_vector_fn
;
193 state
->caller
.next_vector_private
= next_vector_private
;
195 state
->vector
= NULL
;
197 state
->total_read
= 0;
199 tstream_readv_pdu_ask_for_next_vector(req
);
200 if (!tevent_req_is_in_progress(req
)) {
207 return tevent_req_post(req
, ev
);
210 static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req
*req
)
212 struct tstream_readv_pdu_state
*state
= tevent_req_data(req
,
213 struct tstream_readv_pdu_state
);
217 struct tevent_req
*subreq
;
218 bool optimize
= false;
219 bool save_optimize
= false;
221 if (state
->count
> 0) {
223 * This is not the first time we asked for a vector,
224 * which means parts of the pdu already arrived.
226 * In this case it make sense to enable
227 * a syscall/performance optimization if the
228 * low level tstream implementation supports it.
233 TALLOC_FREE(state
->vector
);
236 ret
= state
->caller
.next_vector_fn(state
->caller
.stream
,
237 state
->caller
.next_vector_private
,
238 state
, &state
->vector
, &state
->count
);
240 tevent_req_error(req
, errno
);
244 if (state
->count
== 0) {
245 tevent_req_done(req
);
249 for (i
=0; i
< state
->count
; i
++) {
250 size_t tmp
= to_read
;
251 tmp
+= state
->vector
[i
].iov_len
;
254 tevent_req_error(req
, EMSGSIZE
);
262 * this is invalid the next vector function should have
263 * reported count == 0.
266 tevent_req_error(req
, EINVAL
);
270 if (state
->total_read
+ to_read
< state
->total_read
) {
271 tevent_req_error(req
, EMSGSIZE
);
277 * If the low level stream is a bsd socket
278 * we will get syscall optimization.
280 * If it is not a bsd socket
281 * tstream_bsd_optimize_readv() just returns.
283 save_optimize
= tstream_bsd_optimize_readv(state
->caller
.stream
,
286 subreq
= tstream_readv_send(state
,
288 state
->caller
.stream
,
292 tstream_bsd_optimize_readv(state
->caller
.stream
,
295 if (tevent_req_nomem(subreq
, req
)) {
298 tevent_req_set_callback(subreq
, tstream_readv_pdu_readv_done
, req
);
301 static void tstream_readv_pdu_readv_done(struct tevent_req
*subreq
)
303 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
305 struct tstream_readv_pdu_state
*state
= tevent_req_data(req
,
306 struct tstream_readv_pdu_state
);
310 ret
= tstream_readv_recv(subreq
, &sys_errno
);
313 tevent_req_error(req
, sys_errno
);
317 state
->total_read
+= ret
;
319 /* ask the callback for a new vector we should fill */
320 tstream_readv_pdu_ask_for_next_vector(req
);
323 int tstream_readv_pdu_recv(struct tevent_req
*req
, int *perrno
)
325 struct tstream_readv_pdu_state
*state
= tevent_req_data(req
,
326 struct tstream_readv_pdu_state
);
329 ret
= tsocket_simple_int_recv(req
, perrno
);
331 ret
= state
->total_read
;
334 tevent_req_received(req
);
338 struct tstream_readv_pdu_queue_state
{
339 /* this structs are owned by the caller */
341 struct tevent_context
*ev
;
342 struct tstream_context
*stream
;
343 tstream_readv_pdu_next_vector_t next_vector_fn
;
344 void *next_vector_private
;
349 static void tstream_readv_pdu_queue_trigger(struct tevent_req
*req
,
351 static void tstream_readv_pdu_queue_done(struct tevent_req
*subreq
);
353 struct tevent_req
*tstream_readv_pdu_queue_send(TALLOC_CTX
*mem_ctx
,
354 struct tevent_context
*ev
,
355 struct tstream_context
*stream
,
356 struct tevent_queue
*queue
,
357 tstream_readv_pdu_next_vector_t next_vector_fn
,
358 void *next_vector_private
)
360 struct tevent_req
*req
;
361 struct tstream_readv_pdu_queue_state
*state
;
362 struct tevent_queue_entry
*e
;
364 req
= tevent_req_create(mem_ctx
, &state
,
365 struct tstream_readv_pdu_queue_state
);
370 state
->caller
.ev
= ev
;
371 state
->caller
.stream
= stream
;
372 state
->caller
.next_vector_fn
= next_vector_fn
;
373 state
->caller
.next_vector_private
= next_vector_private
;
377 * we use tevent_queue_add_optimize_empty() with allow_direct
378 * in order to optimize for the empty queue case.
380 e
= tevent_queue_add_optimize_empty(
384 tstream_readv_pdu_queue_trigger
,
386 if (tevent_req_nomem(e
, req
)) {
387 return tevent_req_post(req
, ev
);
389 if (!tevent_req_is_in_progress(req
)) {
390 return tevent_req_post(req
, ev
);
396 static void tstream_readv_pdu_queue_trigger(struct tevent_req
*req
,
399 struct tstream_readv_pdu_queue_state
*state
= tevent_req_data(req
,
400 struct tstream_readv_pdu_queue_state
);
401 struct tevent_req
*subreq
;
403 subreq
= tstream_readv_pdu_send(state
,
405 state
->caller
.stream
,
406 state
->caller
.next_vector_fn
,
407 state
->caller
.next_vector_private
);
408 if (tevent_req_nomem(subreq
, req
)) {
411 tevent_req_set_callback(subreq
, tstream_readv_pdu_queue_done
,req
);
414 static void tstream_readv_pdu_queue_done(struct tevent_req
*subreq
)
416 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
418 struct tstream_readv_pdu_queue_state
*state
= tevent_req_data(req
,
419 struct tstream_readv_pdu_queue_state
);
423 ret
= tstream_readv_pdu_recv(subreq
, &sys_errno
);
426 tevent_req_error(req
, sys_errno
);
431 tevent_req_done(req
);
434 int tstream_readv_pdu_queue_recv(struct tevent_req
*req
, int *perrno
)
436 struct tstream_readv_pdu_queue_state
*state
= tevent_req_data(req
,
437 struct tstream_readv_pdu_queue_state
);
440 ret
= tsocket_simple_int_recv(req
, perrno
);
445 tevent_req_received(req
);
/*
 * State for tstream_writev_queue_send/recv: one writev that is
 * serialized behind a tevent_queue.
 */
struct tstream_writev_queue_state {
	/* this structs are owned by the caller */
	struct {
		struct tevent_context *ev;
		struct tstream_context *stream;
		const struct iovec *vector;	/* borrowed from the caller */
		size_t count;			/* number of iovec entries */
	} caller;
	/* bytes written, reported by _recv() */
	int ret;
};

static void tstream_writev_queue_trigger(struct tevent_req *req,
					 void *private_data);
static void tstream_writev_queue_done(struct tevent_req *subreq);
464 struct tevent_req
*tstream_writev_queue_send(TALLOC_CTX
*mem_ctx
,
465 struct tevent_context
*ev
,
466 struct tstream_context
*stream
,
467 struct tevent_queue
*queue
,
468 const struct iovec
*vector
,
471 struct tevent_req
*req
;
472 struct tstream_writev_queue_state
*state
;
473 struct tevent_queue_entry
*e
;
475 req
= tevent_req_create(mem_ctx
, &state
,
476 struct tstream_writev_queue_state
);
481 state
->caller
.ev
= ev
;
482 state
->caller
.stream
= stream
;
483 state
->caller
.vector
= vector
;
484 state
->caller
.count
= count
;
488 * we use tevent_queue_add_optimize_empty() with allow_direct
489 * in order to optimize for the empty queue case.
491 e
= tevent_queue_add_optimize_empty(
495 tstream_writev_queue_trigger
,
497 if (tevent_req_nomem(e
, req
)) {
498 return tevent_req_post(req
, ev
);
500 if (!tevent_req_is_in_progress(req
)) {
501 return tevent_req_post(req
, ev
);
507 static void tstream_writev_queue_trigger(struct tevent_req
*req
,
510 struct tstream_writev_queue_state
*state
= tevent_req_data(req
,
511 struct tstream_writev_queue_state
);
512 struct tevent_req
*subreq
;
514 subreq
= tstream_writev_send(state
,
516 state
->caller
.stream
,
517 state
->caller
.vector
,
518 state
->caller
.count
);
519 if (tevent_req_nomem(subreq
, req
)) {
522 tevent_req_set_callback(subreq
, tstream_writev_queue_done
,req
);
525 static void tstream_writev_queue_done(struct tevent_req
*subreq
)
527 struct tevent_req
*req
= tevent_req_callback_data(subreq
,
529 struct tstream_writev_queue_state
*state
= tevent_req_data(req
,
530 struct tstream_writev_queue_state
);
534 ret
= tstream_writev_recv(subreq
, &sys_errno
);
537 tevent_req_error(req
, sys_errno
);
542 tevent_req_done(req
);
545 int tstream_writev_queue_recv(struct tevent_req
*req
, int *perrno
)
547 struct tstream_writev_queue_state
*state
= tevent_req_data(req
,
548 struct tstream_writev_queue_state
);
551 ret
= tsocket_simple_int_recv(req
, perrno
);
556 tevent_req_received(req
);