source3/smbd/aio.c
/*
   Unix SMB/Netbios implementation.
   Version 3.0
   async_io read handling using POSIX async io.
   Copyright (C) Jeremy Allison 2005.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/util/tevent_unix.h"
#include "lib/tevent_wait.h"

/****************************************************************************
 Statics plus accessor functions.
*****************************************************************************/
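/*
 * Number of AIO requests currently in flight. The counter is incremented in
 * create_aio_extra() and decremented again by the aio_extra talloc
 * destructor below.
 */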
static int outstanding_aio_calls;

int get_outstanding_aio_calls(void)
{
        return outstanding_aio_calls;
}

void increment_outstanding_aio_calls(void)
{
        outstanding_aio_calls++;
}

void decrement_outstanding_aio_calls(void)
{
        outstanding_aio_calls--;
}

/****************************************************************************
 The buffer we keep around whilst an aio request is in process.
*****************************************************************************/

struct aio_extra {
        files_struct *fsp;
        struct smb_request *smbreq;
        DATA_BLOB outbuf;
        struct lock_struct lock;
        size_t nbyte;
        off_t offset;
        bool write_through;
};

/****************************************************************************
 Accessor function to return write_through state.
*****************************************************************************/

bool aio_write_through_requested(struct aio_extra *aio_ex)
{
        return aio_ex->write_through;
}

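/*
 * Keep the outstanding AIO count balanced: this destructor runs whenever an
 * aio_extra is freed, whether the request completed normally or was
 * abandoned.
 */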
static int aio_extra_destructor(struct aio_extra *aio_ex)
{
        decrement_outstanding_aio_calls();
        return 0;
}

/****************************************************************************
 Create the extended aio struct we must keep around for the lifetime
 of the aio call.
*****************************************************************************/

static struct aio_extra *create_aio_extra(TALLOC_CTX *mem_ctx,
                                        files_struct *fsp,
                                        size_t buflen)
{
        struct aio_extra *aio_ex = talloc_zero(mem_ctx, struct aio_extra);

        if (!aio_ex) {
                return NULL;
        }

        /* The output buffer stored in the aio_ex is the start of
           the smb return buffer. The buffer used in the acb
           is the start of the reply data portion of that buffer. */

        if (buflen) {
                aio_ex->outbuf = data_blob_talloc(aio_ex, NULL, buflen);
                if (!aio_ex->outbuf.data) {
                        TALLOC_FREE(aio_ex);
                        return NULL;
                }
        }
        talloc_set_destructor(aio_ex, aio_extra_destructor);
        aio_ex->fsp = fsp;
        increment_outstanding_aio_calls();
        return aio_ex;
}

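/*
 * Links a pending tevent_req to its files_struct. The destructor removes the
 * request from fsp->aio_requests again; once the last outstanding request is
 * gone it signals fsp->deferred_close so that a close waiting on in-flight
 * AIO can proceed.
 */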
struct aio_req_fsp_link {
        files_struct *fsp;
        struct tevent_req *req;
};

static int aio_del_req_from_fsp(struct aio_req_fsp_link *lnk)
{
        unsigned i;
        files_struct *fsp = lnk->fsp;
        struct tevent_req *req = lnk->req;

        for (i=0; i<fsp->num_aio_requests; i++) {
                if (fsp->aio_requests[i] == req) {
                        break;
                }
        }
        if (i == fsp->num_aio_requests) {
                DEBUG(1, ("req %p not found in fsp %p\n", req, fsp));
                return 0;
        }
        fsp->num_aio_requests -= 1;
        fsp->aio_requests[i] = fsp->aio_requests[fsp->num_aio_requests];

        if (fsp->num_aio_requests == 0) {
                tevent_wait_done(fsp->deferred_close);
        }
        return 0;
}

bool aio_add_req_to_fsp(files_struct *fsp, struct tevent_req *req)
{
        size_t array_len;
        struct aio_req_fsp_link *lnk;

        lnk = talloc(req, struct aio_req_fsp_link);
        if (lnk == NULL) {
                return false;
        }

        array_len = talloc_array_length(fsp->aio_requests);
        if (array_len <= fsp->num_aio_requests) {
                struct tevent_req **tmp;

                tmp = talloc_realloc(
                        fsp, fsp->aio_requests, struct tevent_req *,
                        fsp->num_aio_requests+1);
                if (tmp == NULL) {
                        TALLOC_FREE(lnk);
                        return false;
                }
                fsp->aio_requests = tmp;
        }
        fsp->aio_requests[fsp->num_aio_requests] = req;
        fsp->num_aio_requests += 1;

        lnk->fsp = fsp;
        lnk->req = req;
        talloc_set_destructor(lnk, aio_del_req_from_fsp);

        return true;
}

static void aio_pread_smb1_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMBreadX call.
*****************************************************************************/
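/*
 * The schedule_* functions below return NT_STATUS_RETRY when a request is
 * not suitable for AIO; the SMB1/SMB2 callers (outside this file) treat that
 * status as "fall back to the synchronous code path".
 */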
NTSTATUS schedule_aio_read_and_X(connection_struct *conn,
                             struct smb_request *smbreq,
                             files_struct *fsp, off_t startpos,
                             size_t smb_maxcnt)
{
        struct aio_extra *aio_ex;
        size_t bufsize;
        size_t min_aio_read_size = lp_aio_read_size(SNUM(conn));
        struct tevent_req *req;

        if (fsp->base_fsp != NULL) {
                /* No AIO on streams yet */
                DEBUG(10, ("AIO on streams not yet supported\n"));
                return NT_STATUS_RETRY;
        }

        if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size))
            && !SMB_VFS_AIO_FORCE(fsp)) {
                /* Too small a read for aio request. */
                DEBUG(10,("schedule_aio_read_and_X: read size (%u) too small "
                          "for minimum aio_read of %u\n",
                          (unsigned int)smb_maxcnt,
                          (unsigned int)min_aio_read_size ));
                return NT_STATUS_RETRY;
        }

        /* Only do this on non-chained and non-chaining reads not using the
         * write cache. */
        if (req_is_in_chain(smbreq) || (lp_write_cache_size(SNUM(conn)) != 0)) {
                return NT_STATUS_RETRY;
        }

        /* The following is safe from integer wrap as we've already checked
           smb_maxcnt is 128k or less. Wct is 12 for read replies */

        bufsize = smb_size + 12 * 2 + smb_maxcnt + 1 /* padding byte */;

        if ((aio_ex = create_aio_extra(NULL, fsp, bufsize)) == NULL) {
                DEBUG(10,("schedule_aio_read_and_X: malloc fail.\n"));
                return NT_STATUS_NO_MEMORY;
        }

        construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data);
        srv_set_message((char *)aio_ex->outbuf.data, 12, 0, True);
        SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */
        SCVAL(smb_buf(aio_ex->outbuf.data), 0, 0); /* padding byte */

        init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
                (uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK,
                &aio_ex->lock);

        /* Take the lock until the AIO completes. */
        if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
                TALLOC_FREE(aio_ex);
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        aio_ex->nbyte = smb_maxcnt;
        aio_ex->offset = startpos;

        req = SMB_VFS_PREAD_SEND(aio_ex, fsp->conn->sconn->ev_ctx,
                                 fsp,
                                 smb_buf(aio_ex->outbuf.data) + 1 /* pad */,
                                 smb_maxcnt, startpos);
        if (req == NULL) {
                DEBUG(0,("schedule_aio_read_and_X: aio_read failed. "
                         "Error %s\n", strerror(errno) ));
                SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
                TALLOC_FREE(aio_ex);
                return NT_STATUS_RETRY;
        }
        tevent_req_set_callback(req, aio_pread_smb1_done, aio_ex);

        if (!aio_add_req_to_fsp(fsp, req)) {
                DEBUG(1, ("Could not add req to fsp\n"));
                SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
                TALLOC_FREE(aio_ex);
                return NT_STATUS_RETRY;
        }

        aio_ex->smbreq = talloc_move(aio_ex, &smbreq);

        DEBUG(10,("schedule_aio_read_and_X: scheduled aio_read for file %s, "
                  "offset %.0f, len = %u (mid = %u)\n",
                  fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt,
                  (unsigned int)aio_ex->smbreq->mid ));

        return NT_STATUS_OK;
}

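/*
 * Completion handler for the SMB1 read: drop the strict lock, turn the pread
 * result into an SMBreadX reply (or an error packet) and send it out.
 */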
static void aio_pread_smb1_done(struct tevent_req *req)
{
        struct aio_extra *aio_ex = tevent_req_callback_data(
                req, struct aio_extra);
        files_struct *fsp = aio_ex->fsp;
        int outsize;
        char *outbuf = (char *)aio_ex->outbuf.data;
        char *data = smb_buf(outbuf) + 1 /* padding byte */;
        ssize_t nread;
        int err;

        nread = SMB_VFS_PREAD_RECV(req, &err);
        TALLOC_FREE(req);

        DEBUG(10, ("pread_recv returned %d, err = %s\n", (int)nread,
                   (nread == -1) ? strerror(err) : "no error"));

        if (fsp == NULL) {
                DEBUG( 3, ("aio_pread_smb1_done: file closed whilst "
                           "aio outstanding (mid[%llu]).\n",
                           (unsigned long long)aio_ex->smbreq->mid));
                TALLOC_FREE(aio_ex);
                return;
        }

        /* Unlock now we're done. */
        SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock);

        if (nread < 0) {
                DEBUG( 3, ("handle_aio_read_complete: file %s nread == %d. "
                           "Error = %s\n", fsp_str_dbg(fsp), (int)nread,
                           strerror(err)));

                ERROR_NT(map_nt_error_from_unix(err));
                outsize = srv_set_message(outbuf,0,0,true);
        } else {
                outsize = srv_set_message(outbuf, 12,
                                          nread + 1 /* padding byte */, false);
                SSVAL(outbuf,smb_vwv2, 0xFFFF); /* Remaining - must be * -1. */
                SSVAL(outbuf,smb_vwv5, nread);
                SSVAL(outbuf,smb_vwv6, smb_offset(data,outbuf));
                SSVAL(outbuf,smb_vwv7, ((nread >> 16) & 1));
                SSVAL(smb_buf(outbuf), -2, nread);

                aio_ex->fsp->fh->pos = aio_ex->offset + nread;
                aio_ex->fsp->fh->position_information = aio_ex->fsp->fh->pos;

                DEBUG( 3, ("handle_aio_read_complete file %s max=%d "
                           "nread=%d\n", fsp_str_dbg(fsp),
                           (int)aio_ex->nbyte, (int)nread ) );
        }

        smb_setlen(outbuf, outsize - 4);
        show_msg(outbuf);
        if (!srv_send_smb(aio_ex->smbreq->xconn, outbuf,
                          true, aio_ex->smbreq->seqnum+1,
                          IS_CONN_ENCRYPTED(fsp->conn), NULL)) {
                exit_server_cleanly("handle_aio_read_complete: srv_send_smb "
                                    "failed.");
        }

        DEBUG(10, ("handle_aio_read_complete: scheduled aio_read completed "
                   "for file %s, offset %.0f, len = %u\n",
                   fsp_str_dbg(fsp), (double)aio_ex->offset,
                   (unsigned int)nread));

        TALLOC_FREE(aio_ex);
}

struct pwrite_fsync_state {
        struct tevent_context *ev;
        files_struct *fsp;
        bool write_through;
        ssize_t nwritten;
};

static void pwrite_fsync_write_done(struct tevent_req *subreq);
static void pwrite_fsync_sync_done(struct tevent_req *subreq);

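/*
 * Composite request: issue the pwrite first and then, if "strict sync" is
 * set and either "sync always" is set or the client asked for write-through,
 * follow it with an fsync before signalling completion.
 */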
static struct tevent_req *pwrite_fsync_send(TALLOC_CTX *mem_ctx,
                                            struct tevent_context *ev,
                                            struct files_struct *fsp,
                                            const void *data,
                                            size_t n, off_t offset,
                                            bool write_through)
{
        struct tevent_req *req, *subreq;
        struct pwrite_fsync_state *state;

        req = tevent_req_create(mem_ctx, &state, struct pwrite_fsync_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->fsp = fsp;
        state->write_through = write_through;

        subreq = SMB_VFS_PWRITE_SEND(state, ev, fsp, data, n, offset);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, pwrite_fsync_write_done, req);
        return req;
}

static void pwrite_fsync_write_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct pwrite_fsync_state *state = tevent_req_data(
                req, struct pwrite_fsync_state);
        connection_struct *conn = state->fsp->conn;
        int err;
        bool do_sync;

        state->nwritten = SMB_VFS_PWRITE_RECV(subreq, &err);
        TALLOC_FREE(subreq);
        if (state->nwritten == -1) {
                tevent_req_error(req, err);
                return;
        }

        do_sync = (lp_strict_sync(SNUM(conn)) &&
                   (lp_sync_always(SNUM(conn)) || state->write_through));
        if (!do_sync) {
                tevent_req_done(req);
                return;
        }

        subreq = SMB_VFS_FSYNC_SEND(state, state->ev, state->fsp);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq, pwrite_fsync_sync_done, req);
}

static void pwrite_fsync_sync_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        int ret, err;

        ret = SMB_VFS_FSYNC_RECV(subreq, &err);
        TALLOC_FREE(subreq);
        if (ret == -1) {
                tevent_req_error(req, err);
                return;
        }
        tevent_req_done(req);
}

static ssize_t pwrite_fsync_recv(struct tevent_req *req, int *perr)
{
        struct pwrite_fsync_state *state = tevent_req_data(
                req, struct pwrite_fsync_state);

        if (tevent_req_is_unix_error(req, perr)) {
                return -1;
        }
        return state->nwritten;
}

static void aio_pwrite_smb1_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMBwriteX call.
*****************************************************************************/
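/*
 * Note on "aio write behind": the reply is sent to the client as soon as the
 * write has been scheduled, so a short or failed write can only be logged in
 * the completion handler - the client has already been told it succeeded.
 */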
NTSTATUS schedule_aio_write_and_X(connection_struct *conn,
                              struct smb_request *smbreq,
                              files_struct *fsp, const char *data,
                              off_t startpos,
                              size_t numtowrite)
{
        struct aio_extra *aio_ex;
        size_t bufsize;
        size_t min_aio_write_size = lp_aio_write_size(SNUM(conn));
        struct tevent_req *req;

        if (fsp->base_fsp != NULL) {
                /* No AIO on streams yet */
                DEBUG(10, ("AIO on streams not yet supported\n"));
                return NT_STATUS_RETRY;
        }

        if ((!min_aio_write_size || (numtowrite < min_aio_write_size))
            && !SMB_VFS_AIO_FORCE(fsp)) {
                /* Too small a write for aio request. */
                DEBUG(10,("schedule_aio_write_and_X: write size (%u) too "
                          "small for minimum aio_write of %u\n",
                          (unsigned int)numtowrite,
                          (unsigned int)min_aio_write_size ));
                return NT_STATUS_RETRY;
        }

        /* Only do this on non-chained and non-chaining writes not using the
         * write cache. */
        if (req_is_in_chain(smbreq) || (lp_write_cache_size(SNUM(conn)) != 0)) {
                return NT_STATUS_RETRY;
        }

        bufsize = smb_size + 6*2;

        if (!(aio_ex = create_aio_extra(NULL, fsp, bufsize))) {
                DEBUG(0,("schedule_aio_write_and_X: malloc fail.\n"));
                return NT_STATUS_NO_MEMORY;
        }
        aio_ex->write_through = BITSETW(smbreq->vwv+7,0);

        construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data);
        srv_set_message((char *)aio_ex->outbuf.data, 6, 0, True);
        SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */

        init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
                (uint64_t)startpos, (uint64_t)numtowrite, WRITE_LOCK,
                &aio_ex->lock);

        /* Take the lock until the AIO completes. */
        if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
                TALLOC_FREE(aio_ex);
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        aio_ex->nbyte = numtowrite;
        aio_ex->offset = startpos;

        req = pwrite_fsync_send(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
                                data, numtowrite, startpos,
                                aio_ex->write_through);
        if (req == NULL) {
                DEBUG(3,("schedule_aio_write_and_X: aio_write failed. "
                         "Error %s\n", strerror(errno) ));
                SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
                TALLOC_FREE(aio_ex);
                return NT_STATUS_RETRY;
        }
        tevent_req_set_callback(req, aio_pwrite_smb1_done, aio_ex);

        if (!aio_add_req_to_fsp(fsp, req)) {
                DEBUG(1, ("Could not add req to fsp\n"));
                SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
                TALLOC_FREE(aio_ex);
                return NT_STATUS_RETRY;
        }

        aio_ex->smbreq = talloc_move(aio_ex, &smbreq);

        /* This should actually be improved to span the write. */
        contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE);
        contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE);

        if (!aio_ex->write_through && !lp_sync_always(SNUM(fsp->conn))
            && fsp->aio_write_behind) {
                /* Lie to the client and immediately claim we finished the
                 * write. */
                SSVAL(aio_ex->outbuf.data,smb_vwv2,numtowrite);
                SSVAL(aio_ex->outbuf.data,smb_vwv4,(numtowrite>>16)&1);
                show_msg((char *)aio_ex->outbuf.data);
                if (!srv_send_smb(aio_ex->smbreq->xconn,
                                (char *)aio_ex->outbuf.data,
                                true, aio_ex->smbreq->seqnum+1,
                                IS_CONN_ENCRYPTED(fsp->conn),
                                &aio_ex->smbreq->pcd)) {
                        exit_server_cleanly("schedule_aio_write_and_X: "
                                            "srv_send_smb failed.");
                }
                DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write "
                          "behind for file %s\n", fsp_str_dbg(fsp)));
        }

        DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write for file "
                  "%s, offset %.0f, len = %u (mid = %u) "
                  "outstanding_aio_calls = %d\n",
                  fsp_str_dbg(fsp), (double)startpos, (unsigned int)numtowrite,
                  (unsigned int)aio_ex->smbreq->mid,
                  get_outstanding_aio_calls() ));

        return NT_STATUS_OK;
}
static void aio_pwrite_smb1_done(struct tevent_req *req)
{
        struct aio_extra *aio_ex = tevent_req_callback_data(
                req, struct aio_extra);
        files_struct *fsp = aio_ex->fsp;
        char *outbuf = (char *)aio_ex->outbuf.data;
        ssize_t numtowrite = aio_ex->nbyte;
        ssize_t nwritten;
        int err;

        nwritten = pwrite_fsync_recv(req, &err);
        TALLOC_FREE(req);

        DEBUG(10, ("pwrite_recv returned %d, err = %s\n", (int)nwritten,
                   (nwritten == -1) ? strerror(err) : "no error"));

        if (fsp == NULL) {
                DEBUG( 3, ("aio_pwrite_smb1_done: file closed whilst "
                           "aio outstanding (mid[%llu]).\n",
                           (unsigned long long)aio_ex->smbreq->mid));
                TALLOC_FREE(aio_ex);
                return;
        }

        /* Unlock now we're done. */
        SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock);

        mark_file_modified(fsp);

        if (fsp->aio_write_behind) {

                if (nwritten != numtowrite) {
                        if (nwritten == -1) {
                                DEBUG(5,("handle_aio_write_complete: "
                                         "aio_write_behind failed ! File %s "
                                         "is corrupt ! Error %s\n",
                                         fsp_str_dbg(fsp), strerror(err)));
                        } else {
                                DEBUG(0,("handle_aio_write_complete: "
                                         "aio_write_behind failed ! File %s "
                                         "is corrupt ! Wanted %u bytes but "
                                         "only wrote %d\n", fsp_str_dbg(fsp),
                                         (unsigned int)numtowrite,
                                         (int)nwritten ));
                        }
                } else {
                        DEBUG(10,("handle_aio_write_complete: "
                                  "aio_write_behind completed for file %s\n",
                                  fsp_str_dbg(fsp)));
                }
                /* TODO: should not return success in case of an error !!! */
                TALLOC_FREE(aio_ex);
                return;
        }

        /* We don't need outsize or set_message here as we've already set the
           fixed size length when we set up the aio call. */

        if (nwritten == -1) {
                DEBUG(3, ("handle_aio_write: file %s wanted %u bytes. "
                          "nwritten == %d. Error = %s\n",
                          fsp_str_dbg(fsp), (unsigned int)numtowrite,
                          (int)nwritten, strerror(err)));

                ERROR_NT(map_nt_error_from_unix(err));
                srv_set_message(outbuf,0,0,true);
        } else {
                SSVAL(outbuf,smb_vwv2,nwritten);
                SSVAL(outbuf,smb_vwv4,(nwritten>>16)&1);
                if (nwritten < (ssize_t)numtowrite) {
                        SCVAL(outbuf,smb_rcls,ERRHRD);
                        SSVAL(outbuf,smb_err,ERRdiskfull);
                }

                DEBUG(3,("handle_aio_write: %s, num=%d wrote=%d\n",
                         fsp_fnum_dbg(fsp), (int)numtowrite, (int)nwritten));

                aio_ex->fsp->fh->pos = aio_ex->offset + nwritten;
        }

        show_msg(outbuf);
        if (!srv_send_smb(aio_ex->smbreq->xconn, outbuf,
                          true, aio_ex->smbreq->seqnum+1,
                          IS_CONN_ENCRYPTED(fsp->conn),
                          NULL)) {
                exit_server_cleanly("handle_aio_write_complete: "
                                    "srv_send_smb failed.");
        }

        DEBUG(10, ("handle_aio_write_complete: scheduled aio_write completed "
                   "for file %s, offset %.0f, requested %u, written = %u\n",
                   fsp_str_dbg(fsp), (double)aio_ex->offset,
                   (unsigned int)numtowrite, (unsigned int)nwritten));

        TALLOC_FREE(aio_ex);
}

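/*
 * SMB2 cancel: the low-level VFS request is left running; clearing
 * aio_ex->fsp merely turns the _done handlers into no-ops so that nothing is
 * sent back for the cancelled request.
 */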
bool cancel_smb2_aio(struct smb_request *smbreq)
{
        struct smbd_smb2_request *smb2req = smbreq->smb2req;
        struct aio_extra *aio_ex = NULL;

        if (smb2req) {
                aio_ex = talloc_get_type(smbreq->async_priv,
                                         struct aio_extra);
        }

        if (aio_ex == NULL) {
                return false;
        }

        if (aio_ex->fsp == NULL) {
                return false;
        }

        /*
         * We let the aio request run. Setting fsp to NULL has the
         * effect that the _done routines don't send anything out.
         */

        aio_ex->fsp = NULL;
        return true;
}

static void aio_pread_smb2_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMB2 read call.
*****************************************************************************/
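/*
 * Unlike the SMB1 path, the read buffer is handed back to the caller via
 * *preadbuf and the locking context is the persistent open id rather than
 * the smbpid; the aio_extra hangs off smbreq->smb2req, so no talloc_move of
 * the smbreq is needed.
 */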
NTSTATUS schedule_smb2_aio_read(connection_struct *conn,
                                struct smb_request *smbreq,
                                files_struct *fsp,
                                TALLOC_CTX *ctx,
                                DATA_BLOB *preadbuf,
                                off_t startpos,
                                size_t smb_maxcnt)
{
        struct aio_extra *aio_ex;
        size_t min_aio_read_size = lp_aio_read_size(SNUM(conn));
        struct tevent_req *req;

        if (fsp->base_fsp != NULL) {
                /* No AIO on streams yet */
                DEBUG(10, ("AIO on streams not yet supported\n"));
                return NT_STATUS_RETRY;
        }

        if (fsp->op == NULL) {
                /* No AIO on internal opens. */
                return NT_STATUS_RETRY;
        }

        if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size))
            && !SMB_VFS_AIO_FORCE(fsp)) {
                /* Too small a read for aio request. */
                DEBUG(10,("smb2: read size (%u) too small "
                          "for minimum aio_read of %u\n",
                          (unsigned int)smb_maxcnt,
                          (unsigned int)min_aio_read_size ));
                return NT_STATUS_RETRY;
        }

        /* Only do this on reads not using the write cache. */
        if (lp_write_cache_size(SNUM(conn)) != 0) {
                return NT_STATUS_RETRY;
        }

        /* Create the out buffer. */
        *preadbuf = data_blob_talloc(ctx, NULL, smb_maxcnt);
        if (preadbuf->data == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) {
                return NT_STATUS_NO_MEMORY;
        }

        init_strict_lock_struct(fsp, fsp->op->global->open_persistent_id,
                (uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK,
                &aio_ex->lock);

        /* Take the lock until the AIO completes. */
        if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
                TALLOC_FREE(aio_ex);
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        aio_ex->nbyte = smb_maxcnt;
        aio_ex->offset = startpos;

        req = SMB_VFS_PREAD_SEND(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
                                 preadbuf->data, smb_maxcnt, startpos);
        if (req == NULL) {
                DEBUG(0, ("smb2: SMB_VFS_PREAD_SEND failed. "
                          "Error %s\n", strerror(errno)));
                SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
                TALLOC_FREE(aio_ex);
                return NT_STATUS_RETRY;
        }
        tevent_req_set_callback(req, aio_pread_smb2_done, aio_ex);

        if (!aio_add_req_to_fsp(fsp, req)) {
                DEBUG(1, ("Could not add req to fsp\n"));
                SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
                TALLOC_FREE(aio_ex);
                return NT_STATUS_RETRY;
        }

        /* We don't need talloc_move here as both aio_ex and
         * smbreq are children of smbreq->smb2req. */
        aio_ex->smbreq = smbreq;
        smbreq->async_priv = aio_ex;

        DEBUG(10,("smb2: scheduled aio_read for file %s, "
                  "offset %.0f, len = %u (mid = %u)\n",
                  fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt,
                  (unsigned int)aio_ex->smbreq->mid ));

        return NT_STATUS_OK;
}

static void aio_pread_smb2_done(struct tevent_req *req)
{
        struct aio_extra *aio_ex = tevent_req_callback_data(
                req, struct aio_extra);
        struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq;
        files_struct *fsp = aio_ex->fsp;
        NTSTATUS status;
        ssize_t nread;
        int err = 0;

        nread = SMB_VFS_PREAD_RECV(req, &err);
        TALLOC_FREE(req);

        DEBUG(10, ("pread_recv returned %d, err = %s\n", (int)nread,
                   (nread == -1) ? strerror(err) : "no error"));

        if (fsp == NULL) {
                DEBUG(3, ("%s: request cancelled (mid[%ju])\n",
                          __func__, (uintmax_t)aio_ex->smbreq->mid));
                TALLOC_FREE(aio_ex);
                tevent_req_nterror(subreq, NT_STATUS_INTERNAL_ERROR);
                return;
        }

        /* Unlock now we're done. */
        SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock);

        /* Common error or success code processing for async or sync
           read returns. */

        status = smb2_read_complete(subreq, nread, err);

        if (nread > 0) {
                fsp->fh->pos = aio_ex->offset + nread;
                fsp->fh->position_information = fsp->fh->pos;
        }

        DEBUG(10, ("smb2: scheduled aio_read completed "
                   "for file %s, offset %.0f, len = %u "
                   "(errcode = %d, NTSTATUS = %s)\n",
                   fsp_str_dbg(aio_ex->fsp),
                   (double)aio_ex->offset,
                   (unsigned int)nread,
                   err, nt_errstr(status)));

        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(subreq, status);
                return;
        }
        tevent_req_done(subreq);
}

static void aio_pwrite_smb2_done(struct tevent_req *req);
/****************************************************************************
 Set up an aio request from a SMB2 write call.
*****************************************************************************/
NTSTATUS schedule_aio_smb2_write(connection_struct *conn,
                                struct smb_request *smbreq,
                                files_struct *fsp,
                                uint64_t in_offset,
                                DATA_BLOB in_data,
                                bool write_through)
{
        struct aio_extra *aio_ex = NULL;
        size_t min_aio_write_size = lp_aio_write_size(SNUM(conn));
        struct tevent_req *req;

        if (fsp->base_fsp != NULL) {
                /* No AIO on streams yet */
                DEBUG(10, ("AIO on streams not yet supported\n"));
                return NT_STATUS_RETRY;
        }

        if (fsp->op == NULL) {
                /* No AIO on internal opens. */
                return NT_STATUS_RETRY;
        }

        if ((!min_aio_write_size || (in_data.length < min_aio_write_size))
            && !SMB_VFS_AIO_FORCE(fsp)) {
                /* Too small a write for aio request. */
                DEBUG(10,("smb2: write size (%u) too "
                          "small for minimum aio_write of %u\n",
                          (unsigned int)in_data.length,
                          (unsigned int)min_aio_write_size ));
                return NT_STATUS_RETRY;
        }

        /* Only do this on writes not using the write cache. */
        if (lp_write_cache_size(SNUM(conn)) != 0) {
                return NT_STATUS_RETRY;
        }

        if (smbreq->unread_bytes) {
                /* Can't do async with recvfile. */
                return NT_STATUS_RETRY;
        }

        if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) {
                return NT_STATUS_NO_MEMORY;
        }

        aio_ex->write_through = write_through;

        init_strict_lock_struct(fsp, fsp->op->global->open_persistent_id,
                in_offset, (uint64_t)in_data.length, WRITE_LOCK,
                &aio_ex->lock);

        /* Take the lock until the AIO completes. */
        if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
                TALLOC_FREE(aio_ex);
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        aio_ex->nbyte = in_data.length;
        aio_ex->offset = in_offset;

        req = pwrite_fsync_send(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
                                in_data.data, in_data.length, in_offset,
                                write_through);
        if (req == NULL) {
                DEBUG(3, ("smb2: SMB_VFS_PWRITE_SEND failed. "
                          "Error %s\n", strerror(errno)));
                SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
                TALLOC_FREE(aio_ex);
                return NT_STATUS_RETRY;
        }
        tevent_req_set_callback(req, aio_pwrite_smb2_done, aio_ex);

        if (!aio_add_req_to_fsp(fsp, req)) {
                DEBUG(1, ("Could not add req to fsp\n"));
                SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
                TALLOC_FREE(aio_ex);
                return NT_STATUS_RETRY;
        }

        /* We don't need talloc_move here as both aio_ex and
         * smbreq are children of smbreq->smb2req. */
        aio_ex->smbreq = smbreq;
        smbreq->async_priv = aio_ex;

        /* This should actually be improved to span the write. */
        contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE);
        contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE);

        /*
         * We don't want to do write behind due to ownership
         * issues of the request structs. Maybe add it if I
         * figure those out. JRA.
         */

        DEBUG(10,("smb2: scheduled aio_write for file "
                  "%s, offset %.0f, len = %u (mid = %u) "
                  "outstanding_aio_calls = %d\n",
                  fsp_str_dbg(fsp),
                  (double)in_offset,
                  (unsigned int)in_data.length,
                  (unsigned int)aio_ex->smbreq->mid,
                  get_outstanding_aio_calls() ));

        return NT_STATUS_OK;
}

static void aio_pwrite_smb2_done(struct tevent_req *req)
{
        struct aio_extra *aio_ex = tevent_req_callback_data(
                req, struct aio_extra);
        ssize_t numtowrite = aio_ex->nbyte;
        struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq;
        files_struct *fsp = aio_ex->fsp;
        NTSTATUS status;
        ssize_t nwritten;
        int err = 0;

        nwritten = pwrite_fsync_recv(req, &err);
        TALLOC_FREE(req);

        DEBUG(10, ("pwrite_recv returned %d, err = %s\n", (int)nwritten,
                   (nwritten == -1) ? strerror(err) : "no error"));

        if (fsp == NULL) {
                DEBUG(3, ("%s: request cancelled (mid[%ju])\n",
                          __func__, (uintmax_t)aio_ex->smbreq->mid));
                TALLOC_FREE(aio_ex);
                tevent_req_nterror(subreq, NT_STATUS_INTERNAL_ERROR);
                return;
        }

        /* Unlock now we're done. */
        SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock);

        mark_file_modified(fsp);

        status = smb2_write_complete_nosync(subreq, nwritten, err);

        DEBUG(10, ("smb2: scheduled aio_write completed "
                   "for file %s, offset %.0f, requested %u, "
                   "written = %u (errcode = %d, NTSTATUS = %s)\n",
                   fsp_str_dbg(fsp),
                   (double)aio_ex->offset,
                   (unsigned int)numtowrite,
                   (unsigned int)nwritten,
                   err, nt_errstr(status)));

        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(subreq, status);
                return;
        }
        tevent_req_done(subreq);
}