/*
 * lib/tsocket/tsocket_helpers.c
 */
1 /*
2 Unix SMB/CIFS implementation.
4 Copyright (C) Stefan Metzmacher 2009
6 ** NOTE! The following LGPL license applies to the tsocket
7 ** library. This does NOT imply that all of Samba is released
8 ** under the LGPL
10 This library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Lesser General Public
12 License as published by the Free Software Foundation; either
13 version 3 of the License, or (at your option) any later version.
15 This library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Lesser General Public License for more details.
20 You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
24 #include "replace.h"
25 #include "system/filesys.h"
26 #include "tsocket.h"
27 #include "tsocket_internal.h"
29 struct tdgram_sendto_queue_state {
30 /* this structs are owned by the caller */
31 struct {
32 struct tevent_context *ev;
33 struct tdgram_context *dgram;
34 const uint8_t *buf;
35 size_t len;
36 const struct tsocket_address *dst;
37 } caller;
38 ssize_t ret;
/* queue trigger: runs once our entry reaches the head of the queue */
static void tdgram_sendto_queue_trigger(struct tevent_req *req,
					void *private_data);
/* completion callback for the tdgram_sendto_send() subrequest */
static void tdgram_sendto_queue_done(struct tevent_req *subreq);
45 struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx,
46 struct tevent_context *ev,
47 struct tdgram_context *dgram,
48 struct tevent_queue *queue,
49 const uint8_t *buf,
50 size_t len,
51 struct tsocket_address *dst)
53 struct tevent_req *req;
54 struct tdgram_sendto_queue_state *state;
55 struct tevent_queue_entry *e;
57 req = tevent_req_create(mem_ctx, &state,
58 struct tdgram_sendto_queue_state);
59 if (!req) {
60 return NULL;
63 state->caller.ev = ev;
64 state->caller.dgram = dgram;
65 state->caller.buf = buf;
66 state->caller.len = len;
67 state->caller.dst = dst;
68 state->ret = -1;
71 * we use tevent_queue_add_optimize_empty() with allow_direct
72 * in order to optimize for the empty queue case.
74 e = tevent_queue_add_optimize_empty(
75 queue,
76 ev,
77 req,
78 tdgram_sendto_queue_trigger,
79 NULL);
80 if (tevent_req_nomem(e, req)) {
81 return tevent_req_post(req, ev);
83 if (!tevent_req_is_in_progress(req)) {
84 return tevent_req_post(req, ev);
87 return req;
90 static void tdgram_sendto_queue_trigger(struct tevent_req *req,
91 void *private_data)
93 struct tdgram_sendto_queue_state *state = tevent_req_data(req,
94 struct tdgram_sendto_queue_state);
95 struct tevent_req *subreq;
97 subreq = tdgram_sendto_send(state,
98 state->caller.ev,
99 state->caller.dgram,
100 state->caller.buf,
101 state->caller.len,
102 state->caller.dst);
103 if (tevent_req_nomem(subreq, req)) {
104 return;
106 tevent_req_set_callback(subreq, tdgram_sendto_queue_done, req);
109 static void tdgram_sendto_queue_done(struct tevent_req *subreq)
111 struct tevent_req *req = tevent_req_callback_data(subreq,
112 struct tevent_req);
113 struct tdgram_sendto_queue_state *state = tevent_req_data(req,
114 struct tdgram_sendto_queue_state);
115 ssize_t ret;
116 int sys_errno;
118 ret = tdgram_sendto_recv(subreq, &sys_errno);
119 talloc_free(subreq);
120 if (ret == -1) {
121 tevent_req_error(req, sys_errno);
122 return;
124 state->ret = ret;
126 tevent_req_done(req);
129 ssize_t tdgram_sendto_queue_recv(struct tevent_req *req, int *perrno)
131 struct tdgram_sendto_queue_state *state = tevent_req_data(req,
132 struct tdgram_sendto_queue_state);
133 ssize_t ret;
135 ret = tsocket_simple_int_recv(req, perrno);
136 if (ret == 0) {
137 ret = state->ret;
140 tevent_req_received(req);
141 return ret;
144 struct tstream_readv_pdu_state {
145 /* this structs are owned by the caller */
146 struct {
147 struct tevent_context *ev;
148 struct tstream_context *stream;
149 tstream_readv_pdu_next_vector_t next_vector_fn;
150 void *next_vector_private;
151 } caller;
154 * Each call to the callback resets iov and count
155 * the callback allocated the iov as child of our state,
156 * that means we are allowed to modify and free it.
158 * we should call the callback every time we filled the given
159 * vector and ask for a new vector. We return if the callback
160 * ask for 0 bytes.
162 struct iovec *vector;
163 size_t count;
166 * the total number of bytes we read,
167 * the return value of the _recv function
169 int total_read;
/* ask the next-vector callback for a new vector and start the next readv */
static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req);
/* completion callback for the tstream_readv_send() subrequest */
static void tstream_readv_pdu_readv_done(struct tevent_req *subreq);
175 struct tevent_req *tstream_readv_pdu_send(TALLOC_CTX *mem_ctx,
176 struct tevent_context *ev,
177 struct tstream_context *stream,
178 tstream_readv_pdu_next_vector_t next_vector_fn,
179 void *next_vector_private)
181 struct tevent_req *req;
182 struct tstream_readv_pdu_state *state;
184 req = tevent_req_create(mem_ctx, &state,
185 struct tstream_readv_pdu_state);
186 if (!req) {
187 return NULL;
190 state->caller.ev = ev;
191 state->caller.stream = stream;
192 state->caller.next_vector_fn = next_vector_fn;
193 state->caller.next_vector_private = next_vector_private;
195 state->vector = NULL;
196 state->count = 0;
197 state->total_read = 0;
199 tstream_readv_pdu_ask_for_next_vector(req);
200 if (!tevent_req_is_in_progress(req)) {
201 goto post;
204 return req;
206 post:
207 return tevent_req_post(req, ev);
210 static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req)
212 struct tstream_readv_pdu_state *state = tevent_req_data(req,
213 struct tstream_readv_pdu_state);
214 int ret;
215 size_t to_read = 0;
216 size_t i;
217 struct tevent_req *subreq;
218 bool optimize = false;
219 bool save_optimize = false;
221 if (state->count > 0) {
223 * This is not the first time we asked for a vector,
224 * which means parts of the pdu already arrived.
226 * In this case it make sense to enable
227 * a syscall/performance optimization if the
228 * low level tstream implementation supports it.
230 optimize = true;
233 TALLOC_FREE(state->vector);
234 state->count = 0;
236 ret = state->caller.next_vector_fn(state->caller.stream,
237 state->caller.next_vector_private,
238 state, &state->vector, &state->count);
239 if (ret == -1) {
240 tevent_req_error(req, errno);
241 return;
244 if (state->count == 0) {
245 tevent_req_done(req);
246 return;
249 for (i=0; i < state->count; i++) {
250 size_t tmp = to_read;
251 tmp += state->vector[i].iov_len;
253 if (tmp < to_read) {
254 tevent_req_error(req, EMSGSIZE);
255 return;
258 to_read = tmp;
262 * this is invalid the next vector function should have
263 * reported count == 0.
265 if (to_read == 0) {
266 tevent_req_error(req, EINVAL);
267 return;
270 if (state->total_read + to_read < state->total_read) {
271 tevent_req_error(req, EMSGSIZE);
272 return;
275 if (optimize) {
277 * If the low level stream is a bsd socket
278 * we will get syscall optimization.
280 * If it is not a bsd socket
281 * tstream_bsd_optimize_readv() just returns.
283 save_optimize = tstream_bsd_optimize_readv(state->caller.stream,
284 true);
286 subreq = tstream_readv_send(state,
287 state->caller.ev,
288 state->caller.stream,
289 state->vector,
290 state->count);
291 if (optimize) {
292 tstream_bsd_optimize_readv(state->caller.stream,
293 save_optimize);
295 if (tevent_req_nomem(subreq, req)) {
296 return;
298 tevent_req_set_callback(subreq, tstream_readv_pdu_readv_done, req);
301 static void tstream_readv_pdu_readv_done(struct tevent_req *subreq)
303 struct tevent_req *req = tevent_req_callback_data(subreq,
304 struct tevent_req);
305 struct tstream_readv_pdu_state *state = tevent_req_data(req,
306 struct tstream_readv_pdu_state);
307 int ret;
308 int sys_errno;
310 ret = tstream_readv_recv(subreq, &sys_errno);
311 TALLOC_FREE(subreq);
312 if (ret == -1) {
313 tevent_req_error(req, sys_errno);
314 return;
317 state->total_read += ret;
319 /* ask the callback for a new vector we should fill */
320 tstream_readv_pdu_ask_for_next_vector(req);
323 int tstream_readv_pdu_recv(struct tevent_req *req, int *perrno)
325 struct tstream_readv_pdu_state *state = tevent_req_data(req,
326 struct tstream_readv_pdu_state);
327 int ret;
329 ret = tsocket_simple_int_recv(req, perrno);
330 if (ret == 0) {
331 ret = state->total_read;
334 tevent_req_received(req);
335 return ret;
338 struct tstream_readv_pdu_queue_state {
339 /* this structs are owned by the caller */
340 struct {
341 struct tevent_context *ev;
342 struct tstream_context *stream;
343 tstream_readv_pdu_next_vector_t next_vector_fn;
344 void *next_vector_private;
345 } caller;
346 int ret;
/* queue trigger: runs once our entry reaches the head of the queue */
static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
					    void *private_data);
/* completion callback for the tstream_readv_pdu_send() subrequest */
static void tstream_readv_pdu_queue_done(struct tevent_req *subreq);
353 struct tevent_req *tstream_readv_pdu_queue_send(TALLOC_CTX *mem_ctx,
354 struct tevent_context *ev,
355 struct tstream_context *stream,
356 struct tevent_queue *queue,
357 tstream_readv_pdu_next_vector_t next_vector_fn,
358 void *next_vector_private)
360 struct tevent_req *req;
361 struct tstream_readv_pdu_queue_state *state;
362 struct tevent_queue_entry *e;
364 req = tevent_req_create(mem_ctx, &state,
365 struct tstream_readv_pdu_queue_state);
366 if (!req) {
367 return NULL;
370 state->caller.ev = ev;
371 state->caller.stream = stream;
372 state->caller.next_vector_fn = next_vector_fn;
373 state->caller.next_vector_private = next_vector_private;
374 state->ret = -1;
377 * we use tevent_queue_add_optimize_empty() with allow_direct
378 * in order to optimize for the empty queue case.
380 e = tevent_queue_add_optimize_empty(
381 queue,
383 req,
384 tstream_readv_pdu_queue_trigger,
385 NULL);
386 if (tevent_req_nomem(e, req)) {
387 return tevent_req_post(req, ev);
389 if (!tevent_req_is_in_progress(req)) {
390 return tevent_req_post(req, ev);
393 return req;
396 static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
397 void *private_data)
399 struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
400 struct tstream_readv_pdu_queue_state);
401 struct tevent_req *subreq;
403 subreq = tstream_readv_pdu_send(state,
404 state->caller.ev,
405 state->caller.stream,
406 state->caller.next_vector_fn,
407 state->caller.next_vector_private);
408 if (tevent_req_nomem(subreq, req)) {
409 return;
411 tevent_req_set_callback(subreq, tstream_readv_pdu_queue_done ,req);
414 static void tstream_readv_pdu_queue_done(struct tevent_req *subreq)
416 struct tevent_req *req = tevent_req_callback_data(subreq,
417 struct tevent_req);
418 struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
419 struct tstream_readv_pdu_queue_state);
420 int ret;
421 int sys_errno;
423 ret = tstream_readv_pdu_recv(subreq, &sys_errno);
424 talloc_free(subreq);
425 if (ret == -1) {
426 tevent_req_error(req, sys_errno);
427 return;
429 state->ret = ret;
431 tevent_req_done(req);
434 int tstream_readv_pdu_queue_recv(struct tevent_req *req, int *perrno)
436 struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
437 struct tstream_readv_pdu_queue_state);
438 int ret;
440 ret = tsocket_simple_int_recv(req, perrno);
441 if (ret == 0) {
442 ret = state->ret;
445 tevent_req_received(req);
446 return ret;
449 struct tstream_writev_queue_state {
450 /* this structs are owned by the caller */
451 struct {
452 struct tevent_context *ev;
453 struct tstream_context *stream;
454 const struct iovec *vector;
455 size_t count;
456 } caller;
457 int ret;
/* queue trigger: runs once our entry reaches the head of the queue */
static void tstream_writev_queue_trigger(struct tevent_req *req,
					 void *private_data);
/* completion callback for the tstream_writev_send() subrequest */
static void tstream_writev_queue_done(struct tevent_req *subreq);
464 struct tevent_req *tstream_writev_queue_send(TALLOC_CTX *mem_ctx,
465 struct tevent_context *ev,
466 struct tstream_context *stream,
467 struct tevent_queue *queue,
468 const struct iovec *vector,
469 size_t count)
471 struct tevent_req *req;
472 struct tstream_writev_queue_state *state;
473 struct tevent_queue_entry *e;
475 req = tevent_req_create(mem_ctx, &state,
476 struct tstream_writev_queue_state);
477 if (!req) {
478 return NULL;
481 state->caller.ev = ev;
482 state->caller.stream = stream;
483 state->caller.vector = vector;
484 state->caller.count = count;
485 state->ret = -1;
488 * we use tevent_queue_add_optimize_empty() with allow_direct
489 * in order to optimize for the empty queue case.
491 e = tevent_queue_add_optimize_empty(
492 queue,
494 req,
495 tstream_writev_queue_trigger,
496 NULL);
497 if (tevent_req_nomem(e, req)) {
498 return tevent_req_post(req, ev);
500 if (!tevent_req_is_in_progress(req)) {
501 return tevent_req_post(req, ev);
504 return req;
507 static void tstream_writev_queue_trigger(struct tevent_req *req,
508 void *private_data)
510 struct tstream_writev_queue_state *state = tevent_req_data(req,
511 struct tstream_writev_queue_state);
512 struct tevent_req *subreq;
514 subreq = tstream_writev_send(state,
515 state->caller.ev,
516 state->caller.stream,
517 state->caller.vector,
518 state->caller.count);
519 if (tevent_req_nomem(subreq, req)) {
520 return;
522 tevent_req_set_callback(subreq, tstream_writev_queue_done ,req);
525 static void tstream_writev_queue_done(struct tevent_req *subreq)
527 struct tevent_req *req = tevent_req_callback_data(subreq,
528 struct tevent_req);
529 struct tstream_writev_queue_state *state = tevent_req_data(req,
530 struct tstream_writev_queue_state);
531 int ret;
532 int sys_errno;
534 ret = tstream_writev_recv(subreq, &sys_errno);
535 talloc_free(subreq);
536 if (ret == -1) {
537 tevent_req_error(req, sys_errno);
538 return;
540 state->ret = ret;
542 tevent_req_done(req);
545 int tstream_writev_queue_recv(struct tevent_req *req, int *perrno)
547 struct tstream_writev_queue_state *state = tevent_req_data(req,
548 struct tstream_writev_queue_state);
549 int ret;
551 ret = tsocket_simple_int_recv(req, perrno);
552 if (ret == 0) {
553 ret = state->ret;
556 tevent_req_received(req);
557 return ret;