s3:libsmb: add cli_{query,set}_security_descriptor() which take sec_info flags
[Samba/gebeck_regimport.git] / lib / tsocket / tsocket_helpers.c
blob49c6840f87387361a2bfd1a7857b0724e3cb9487
1 /*
2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2009
6 ** NOTE! The following LGPL license applies to the tsocket
7 ** library. This does NOT imply that all of Samba is released
8 ** under the LGPL
10 This library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Lesser General Public
12 License as published by the Free Software Foundation; either
13 version 3 of the License, or (at your option) any later version.
15 This library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Lesser General Public License for more details.
20 You should have received a copy of the GNU Lesser General Public
21 License along with this library; if not, see <http://www.gnu.org/licenses/>.
24 #include "replace.h"
25 #include "system/filesys.h"
26 #include "tsocket.h"
27 #include "tsocket_internal.h"
struct tdgram_sendto_queue_state {
	/*
	 * All members of 'caller' are owned by the caller of
	 * tdgram_sendto_queue_send() and are only borrowed here.
	 */
	struct {
		struct tevent_context *ev;
		struct tdgram_context *dgram;
		const uint8_t *buf;
		size_t len;
		const struct tsocket_address *dst;
	} caller;
	/* number of bytes sent; the result of the _recv function */
	ssize_t ret;
};

static void tdgram_sendto_queue_trigger(struct tevent_req *req,
					void *private_data);
static void tdgram_sendto_queue_done(struct tevent_req *subreq);
45 struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx,
46 struct tevent_context *ev,
47 struct tdgram_context *dgram,
48 struct tevent_queue *queue,
49 const uint8_t *buf,
50 size_t len,
51 struct tsocket_address *dst)
53 struct tevent_req *req;
54 struct tdgram_sendto_queue_state *state;
55 struct tevent_queue_entry *e;
57 req = tevent_req_create(mem_ctx, &state,
58 struct tdgram_sendto_queue_state);
59 if (!req) {
60 return NULL;
63 state->caller.ev = ev;
64 state->caller.dgram = dgram;
65 state->caller.buf = buf;
66 state->caller.len = len;
67 state->caller.dst = dst;
68 state->ret = -1;
71 * we use tevent_queue_add_optimize_empty() with allow_direct
72 * in order to optimize for the empty queue case.
74 e = tevent_queue_add_optimize_empty(
75 queue,
76 ev,
77 req,
78 tdgram_sendto_queue_trigger,
79 NULL);
80 if (tevent_req_nomem(e, req)) {
81 return tevent_req_post(req, ev);
83 if (!tevent_req_is_in_progress(req)) {
84 return tevent_req_post(req, ev);
87 return req;
90 static void tdgram_sendto_queue_trigger(struct tevent_req *req,
91 void *private_data)
93 struct tdgram_sendto_queue_state *state = tevent_req_data(req,
94 struct tdgram_sendto_queue_state);
95 struct tevent_req *subreq;
97 subreq = tdgram_sendto_send(state,
98 state->caller.ev,
99 state->caller.dgram,
100 state->caller.buf,
101 state->caller.len,
102 state->caller.dst);
103 if (tevent_req_nomem(subreq, req)) {
104 return;
106 tevent_req_set_callback(subreq, tdgram_sendto_queue_done, req);
109 static void tdgram_sendto_queue_done(struct tevent_req *subreq)
111 struct tevent_req *req = tevent_req_callback_data(subreq,
112 struct tevent_req);
113 struct tdgram_sendto_queue_state *state = tevent_req_data(req,
114 struct tdgram_sendto_queue_state);
115 ssize_t ret;
116 int sys_errno;
118 ret = tdgram_sendto_recv(subreq, &sys_errno);
119 talloc_free(subreq);
120 if (ret == -1) {
121 tevent_req_error(req, sys_errno);
122 return;
124 state->ret = ret;
126 tevent_req_done(req);
129 ssize_t tdgram_sendto_queue_recv(struct tevent_req *req, int *perrno)
131 struct tdgram_sendto_queue_state *state = tevent_req_data(req,
132 struct tdgram_sendto_queue_state);
133 ssize_t ret;
135 ret = tsocket_simple_int_recv(req, perrno);
136 if (ret == 0) {
137 ret = state->ret;
140 tevent_req_received(req);
141 return ret;
144 struct tstream_readv_pdu_state {
145 /* this structs are owned by the caller */
146 struct {
147 struct tevent_context *ev;
148 struct tstream_context *stream;
149 tstream_readv_pdu_next_vector_t next_vector_fn;
150 void *next_vector_private;
151 } caller;
154 * Each call to the callback resets iov and count
155 * the callback allocated the iov as child of our state,
156 * that means we are allowed to modify and free it.
158 * we should call the callback every time we filled the given
159 * vector and ask for a new vector. We return if the callback
160 * ask for 0 bytes.
162 struct iovec *vector;
163 size_t count;
166 * the total number of bytes we read,
167 * the return value of the _recv function
169 int total_read;
172 static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req);
173 static void tstream_readv_pdu_readv_done(struct tevent_req *subreq);
175 struct tevent_req *tstream_readv_pdu_send(TALLOC_CTX *mem_ctx,
176 struct tevent_context *ev,
177 struct tstream_context *stream,
178 tstream_readv_pdu_next_vector_t next_vector_fn,
179 void *next_vector_private)
181 struct tevent_req *req;
182 struct tstream_readv_pdu_state *state;
184 req = tevent_req_create(mem_ctx, &state,
185 struct tstream_readv_pdu_state);
186 if (!req) {
187 return NULL;
190 state->caller.ev = ev;
191 state->caller.stream = stream;
192 state->caller.next_vector_fn = next_vector_fn;
193 state->caller.next_vector_private = next_vector_private;
195 state->vector = NULL;
196 state->count = 0;
197 state->total_read = 0;
199 tstream_readv_pdu_ask_for_next_vector(req);
200 if (!tevent_req_is_in_progress(req)) {
201 goto post;
204 return req;
206 post:
207 return tevent_req_post(req, ev);
210 static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req)
212 struct tstream_readv_pdu_state *state = tevent_req_data(req,
213 struct tstream_readv_pdu_state);
214 int ret;
215 size_t to_read = 0;
216 size_t i;
217 struct tevent_req *subreq;
218 bool optimize = false;
219 bool save_optimize = false;
221 if (state->count > 0) {
223 * This is not the first time we asked for a vector,
224 * which means parts of the pdu already arrived.
226 * In this case it make sense to enable
227 * a syscall/performance optimization if the
228 * low level tstream implementation supports it.
230 optimize = true;
233 TALLOC_FREE(state->vector);
234 state->count = 0;
236 ret = state->caller.next_vector_fn(state->caller.stream,
237 state->caller.next_vector_private,
238 state, &state->vector, &state->count);
239 if (ret == -1) {
240 tevent_req_error(req, errno);
241 return;
244 if (state->count == 0) {
245 tevent_req_done(req);
246 return;
249 for (i=0; i < state->count; i++) {
250 size_t tmp = to_read;
251 tmp += state->vector[i].iov_len;
253 if (tmp < to_read) {
254 tevent_req_error(req, EMSGSIZE);
255 return;
258 to_read = tmp;
262 * this is invalid the next vector function should have
263 * reported count == 0.
265 if (to_read == 0) {
266 tevent_req_error(req, EINVAL);
267 return;
270 if (state->total_read + to_read < state->total_read) {
271 tevent_req_error(req, EMSGSIZE);
272 return;
275 if (optimize) {
277 * If the low level stream is a bsd socket
278 * we will get syscall optimization.
280 * If it is not a bsd socket
281 * tstream_bsd_optimize_readv() just returns.
283 save_optimize = tstream_bsd_optimize_readv(state->caller.stream,
284 true);
286 subreq = tstream_readv_send(state,
287 state->caller.ev,
288 state->caller.stream,
289 state->vector,
290 state->count);
291 if (optimize) {
292 tstream_bsd_optimize_readv(state->caller.stream,
293 save_optimize);
295 if (tevent_req_nomem(subreq, req)) {
296 return;
298 tevent_req_set_callback(subreq, tstream_readv_pdu_readv_done, req);
301 static void tstream_readv_pdu_readv_done(struct tevent_req *subreq)
303 struct tevent_req *req = tevent_req_callback_data(subreq,
304 struct tevent_req);
305 struct tstream_readv_pdu_state *state = tevent_req_data(req,
306 struct tstream_readv_pdu_state);
307 int ret;
308 int sys_errno;
310 ret = tstream_readv_recv(subreq, &sys_errno);
311 if (ret == -1) {
312 tevent_req_error(req, sys_errno);
313 return;
316 state->total_read += ret;
318 /* ask the callback for a new vector we should fill */
319 tstream_readv_pdu_ask_for_next_vector(req);
322 int tstream_readv_pdu_recv(struct tevent_req *req, int *perrno)
324 struct tstream_readv_pdu_state *state = tevent_req_data(req,
325 struct tstream_readv_pdu_state);
326 int ret;
328 ret = tsocket_simple_int_recv(req, perrno);
329 if (ret == 0) {
330 ret = state->total_read;
333 tevent_req_received(req);
334 return ret;
337 struct tstream_readv_pdu_queue_state {
338 /* this structs are owned by the caller */
339 struct {
340 struct tevent_context *ev;
341 struct tstream_context *stream;
342 tstream_readv_pdu_next_vector_t next_vector_fn;
343 void *next_vector_private;
344 } caller;
345 int ret;
348 static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
349 void *private_data);
350 static void tstream_readv_pdu_queue_done(struct tevent_req *subreq);
352 struct tevent_req *tstream_readv_pdu_queue_send(TALLOC_CTX *mem_ctx,
353 struct tevent_context *ev,
354 struct tstream_context *stream,
355 struct tevent_queue *queue,
356 tstream_readv_pdu_next_vector_t next_vector_fn,
357 void *next_vector_private)
359 struct tevent_req *req;
360 struct tstream_readv_pdu_queue_state *state;
361 struct tevent_queue_entry *e;
363 req = tevent_req_create(mem_ctx, &state,
364 struct tstream_readv_pdu_queue_state);
365 if (!req) {
366 return NULL;
369 state->caller.ev = ev;
370 state->caller.stream = stream;
371 state->caller.next_vector_fn = next_vector_fn;
372 state->caller.next_vector_private = next_vector_private;
373 state->ret = -1;
376 * we use tevent_queue_add_optimize_empty() with allow_direct
377 * in order to optimize for the empty queue case.
379 e = tevent_queue_add_optimize_empty(
380 queue,
382 req,
383 tstream_readv_pdu_queue_trigger,
384 NULL);
385 if (tevent_req_nomem(e, req)) {
386 return tevent_req_post(req, ev);
388 if (!tevent_req_is_in_progress(req)) {
389 return tevent_req_post(req, ev);
392 return req;
395 static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
396 void *private_data)
398 struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
399 struct tstream_readv_pdu_queue_state);
400 struct tevent_req *subreq;
402 subreq = tstream_readv_pdu_send(state,
403 state->caller.ev,
404 state->caller.stream,
405 state->caller.next_vector_fn,
406 state->caller.next_vector_private);
407 if (tevent_req_nomem(subreq, req)) {
408 return;
410 tevent_req_set_callback(subreq, tstream_readv_pdu_queue_done ,req);
413 static void tstream_readv_pdu_queue_done(struct tevent_req *subreq)
415 struct tevent_req *req = tevent_req_callback_data(subreq,
416 struct tevent_req);
417 struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
418 struct tstream_readv_pdu_queue_state);
419 int ret;
420 int sys_errno;
422 ret = tstream_readv_pdu_recv(subreq, &sys_errno);
423 talloc_free(subreq);
424 if (ret == -1) {
425 tevent_req_error(req, sys_errno);
426 return;
428 state->ret = ret;
430 tevent_req_done(req);
433 int tstream_readv_pdu_queue_recv(struct tevent_req *req, int *perrno)
435 struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
436 struct tstream_readv_pdu_queue_state);
437 int ret;
439 ret = tsocket_simple_int_recv(req, perrno);
440 if (ret == 0) {
441 ret = state->ret;
444 tevent_req_received(req);
445 return ret;
struct tstream_writev_queue_state {
	/*
	 * All members of 'caller' are owned by the caller of
	 * tstream_writev_queue_send() and are only borrowed here.
	 */
	struct {
		struct tevent_context *ev;
		struct tstream_context *stream;
		const struct iovec *vector;
		size_t count;
	} caller;
	/* number of bytes written; the result of the _recv function */
	int ret;
};

static void tstream_writev_queue_trigger(struct tevent_req *req,
					 void *private_data);
static void tstream_writev_queue_done(struct tevent_req *subreq);
463 struct tevent_req *tstream_writev_queue_send(TALLOC_CTX *mem_ctx,
464 struct tevent_context *ev,
465 struct tstream_context *stream,
466 struct tevent_queue *queue,
467 const struct iovec *vector,
468 size_t count)
470 struct tevent_req *req;
471 struct tstream_writev_queue_state *state;
472 struct tevent_queue_entry *e;
474 req = tevent_req_create(mem_ctx, &state,
475 struct tstream_writev_queue_state);
476 if (!req) {
477 return NULL;
480 state->caller.ev = ev;
481 state->caller.stream = stream;
482 state->caller.vector = vector;
483 state->caller.count = count;
484 state->ret = -1;
487 * we use tevent_queue_add_optimize_empty() with allow_direct
488 * in order to optimize for the empty queue case.
490 e = tevent_queue_add_optimize_empty(
491 queue,
493 req,
494 tstream_writev_queue_trigger,
495 NULL);
496 if (tevent_req_nomem(e, req)) {
497 return tevent_req_post(req, ev);
499 if (!tevent_req_is_in_progress(req)) {
500 return tevent_req_post(req, ev);
503 return req;
506 static void tstream_writev_queue_trigger(struct tevent_req *req,
507 void *private_data)
509 struct tstream_writev_queue_state *state = tevent_req_data(req,
510 struct tstream_writev_queue_state);
511 struct tevent_req *subreq;
513 subreq = tstream_writev_send(state,
514 state->caller.ev,
515 state->caller.stream,
516 state->caller.vector,
517 state->caller.count);
518 if (tevent_req_nomem(subreq, req)) {
519 return;
521 tevent_req_set_callback(subreq, tstream_writev_queue_done ,req);
524 static void tstream_writev_queue_done(struct tevent_req *subreq)
526 struct tevent_req *req = tevent_req_callback_data(subreq,
527 struct tevent_req);
528 struct tstream_writev_queue_state *state = tevent_req_data(req,
529 struct tstream_writev_queue_state);
530 int ret;
531 int sys_errno;
533 ret = tstream_writev_recv(subreq, &sys_errno);
534 talloc_free(subreq);
535 if (ret == -1) {
536 tevent_req_error(req, sys_errno);
537 return;
539 state->ret = ret;
541 tevent_req_done(req);
544 int tstream_writev_queue_recv(struct tevent_req *req, int *perrno)
546 struct tstream_writev_queue_state *state = tevent_req_data(req,
547 struct tstream_writev_queue_state);
548 int ret;
550 ret = tsocket_simple_int_recv(req, perrno);
551 if (ret == 0) {
552 ret = state->ret;
555 tevent_req_received(req);
556 return ret;