/*
   Unix SMB/Netbios implementation.
   Version 3.0
   async_io read handling using POSIX async io.
   Copyright (C) Jeremy Allison 2005.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/util/tevent_unix.h"
/****************************************************************************
 The buffer we keep around whilst an aio request is in process.
*****************************************************************************/

struct aio_extra {
	files_struct *fsp;		/* file the request operates on */
	struct smb_request *smbreq;	/* SMB1/SMB2 request being answered */
	DATA_BLOB outbuf;		/* preallocated SMB1 reply buffer */
	struct lock_struct lock;	/* byte-range lock held while the aio runs */
	size_t nbyte;			/* number of bytes requested */
	off_t offset;			/* file offset of the request */
	bool write_through;		/* client asked for write-through */
};
/****************************************************************************
 Accessor function to return write_through state.
*****************************************************************************/

bool aio_write_through_requested(struct aio_extra *aio_ex)
{
	return aio_ex->write_through;
}
/****************************************************************************
 Create the extended aio struct we must keep around for the lifetime
 of the aio call.
*****************************************************************************/

static struct aio_extra *create_aio_extra(TALLOC_CTX *mem_ctx,
					files_struct *fsp,
					size_t buflen)
{
	struct aio_extra *aio_ex = talloc_zero(mem_ctx, struct aio_extra);

	if (!aio_ex) {
		return NULL;
	}

	/* The output buffer stored in the aio_ex is the start of
	   the smb return buffer. The buffer used in the acb
	   is the start of the reply data portion of that buffer. */

	if (buflen) {
		aio_ex->outbuf = data_blob_talloc(aio_ex, NULL, buflen);
		if (!aio_ex->outbuf.data) {
			TALLOC_FREE(aio_ex);
			return NULL;
		}
	}
	aio_ex->fsp = fsp;
	return aio_ex;
}
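
/*
 * Illustrative sketch only (not compiled; variable names mirror
 * schedule_aio_read_and_X() further down): how the SMB1 read path sizes this
 * buffer and where the VFS read lands in it. The SMB header and the 12-word
 * vwv area sit at the start of outbuf; the read data is written one byte past
 * smb_buf() so it follows the padding byte.
 */
#if 0
	size_t bufsize = smb_size + 12 * 2 + smb_maxcnt + 1 /* padding byte */;
	struct aio_extra *aio_ex = create_aio_extra(NULL, fsp, bufsize);

	/* build the fixed reply header in aio_ex->outbuf.data, then: */
	req = SMB_VFS_PREAD_SEND(aio_ex, ev,
				 fsp,
				 smb_buf(aio_ex->outbuf.data) + 1 /* pad */,
				 smb_maxcnt, startpos);
#endif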
struct aio_req_fsp_link {
	files_struct *fsp;
	struct tevent_req *req;
};

static int aio_del_req_from_fsp(struct aio_req_fsp_link *lnk)
{
	unsigned i;
	files_struct *fsp = lnk->fsp;
	struct tevent_req *req = lnk->req;

	for (i=0; i<fsp->num_aio_requests; i++) {
		if (fsp->aio_requests[i] == req) {
			break;
		}
	}
	if (i == fsp->num_aio_requests) {
		DEBUG(1, ("req %p not found in fsp %p\n", req, fsp));
		return 0;
	}
	fsp->num_aio_requests -= 1;
	fsp->aio_requests[i] = fsp->aio_requests[fsp->num_aio_requests];

	if (fsp->num_aio_requests == 0) {
		TALLOC_FREE(fsp->aio_requests);
	}
	return 0;
}
bool aio_add_req_to_fsp(files_struct *fsp, struct tevent_req *req)
{
	size_t array_len;
	struct aio_req_fsp_link *lnk;

	lnk = talloc(req, struct aio_req_fsp_link);
	if (lnk == NULL) {
		return false;
	}

	array_len = talloc_array_length(fsp->aio_requests);
	if (array_len <= fsp->num_aio_requests) {
		struct tevent_req **tmp;

		if (fsp->num_aio_requests + 10 < 10) {
			/* Integer wrap. */
			TALLOC_FREE(lnk);
			return false;
		}

		/*
		 * Allocate in blocks of 10 so we don't allocate
		 * on every aio request.
		 */
		tmp = talloc_realloc(
			fsp, fsp->aio_requests, struct tevent_req *,
			fsp->num_aio_requests+10);
		if (tmp == NULL) {
			TALLOC_FREE(lnk);
			return false;
		}
		fsp->aio_requests = tmp;
	}
	fsp->aio_requests[fsp->num_aio_requests] = req;
	fsp->num_aio_requests += 1;

	lnk->fsp = fsp;
	lnk->req = req;
	talloc_set_destructor(lnk, aio_del_req_from_fsp);

	return true;
}
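
/*
 * Illustrative sketch only (not compiled; names are illustrative): the link
 * object above is a talloc child of the request, so bookkeeping is automatic.
 * Once a request has been added, freeing it (on completion or file close)
 * runs aio_del_req_from_fsp() via the destructor and drops it from
 * fsp->aio_requests without any explicit removal call.
 */
#if 0
	req = SMB_VFS_PREAD_SEND(aio_ex, ev, fsp, buf, n, offset);
	if (!aio_add_req_to_fsp(fsp, req)) {
		/* handle the failure; req is not tracked on the fsp */
	}

	/* later: */
	TALLOC_FREE(req);	/* destructor unlinks req from fsp->aio_requests */
#endif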
static void aio_pread_smb1_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMBreadX call.
*****************************************************************************/

NTSTATUS schedule_aio_read_and_X(connection_struct *conn,
			     struct smb_request *smbreq,
			     files_struct *fsp, off_t startpos,
			     size_t smb_maxcnt)
{
	struct aio_extra *aio_ex;
	size_t bufsize;
	size_t min_aio_read_size = lp_aio_read_size(SNUM(conn));
	struct tevent_req *req;
	bool ok;

	ok = vfs_valid_pread_range(startpos, smb_maxcnt);
	if (!ok) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a read for aio request. */
		DEBUG(10,("schedule_aio_read_and_X: read size (%u) too small "
			  "for minimum aio_read of %u\n",
			  (unsigned int)smb_maxcnt,
			  (unsigned int)min_aio_read_size ));
		return NT_STATUS_RETRY;
	}

	/* Only do this on non-chained and non-chaining reads */
	if (req_is_in_chain(smbreq)) {
		return NT_STATUS_RETRY;
	}

	/* The following is safe from integer wrap as we've already checked
	   smb_maxcnt is 128k or less. Wct is 12 for read replies */

	bufsize = smb_size + 12 * 2 + smb_maxcnt + 1 /* padding byte */;

	if ((aio_ex = create_aio_extra(NULL, fsp, bufsize)) == NULL) {
		DEBUG(10,("schedule_aio_read_and_X: malloc fail.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data);
	srv_set_message((char *)aio_ex->outbuf.data, 12, 0, True);
	SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */
	SCVAL(smb_buf(aio_ex->outbuf.data), 0, 0); /* padding byte */

	init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
		(uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK_CHECK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	aio_ex->nbyte = smb_maxcnt;
	aio_ex->offset = startpos;

	req = SMB_VFS_PREAD_SEND(aio_ex, fsp->conn->sconn->ev_ctx,
				 fsp,
				 smb_buf(aio_ex->outbuf.data) + 1 /* pad */,
				 smb_maxcnt, startpos);
	if (req == NULL) {
		DEBUG(0,("schedule_aio_read_and_X: aio_read failed. "
			 "Error %s\n", strerror(errno) ));
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	tevent_req_set_callback(req, aio_pread_smb1_done, aio_ex);

	if (!aio_add_req_to_fsp(fsp, req)) {
		DEBUG(1, ("Could not add req to fsp\n"));
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}

	aio_ex->smbreq = talloc_move(aio_ex, &smbreq);

	DEBUG(10,("schedule_aio_read_and_X: scheduled aio_read for file %s, "
		  "offset %.0f, len = %u (mid = %u)\n",
		  fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt,
		  (unsigned int)aio_ex->smbreq->mid ));

	return NT_STATUS_OK;
}
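
/*
 * Illustrative sketch only (not compiled; the caller-side variable names are
 * assumptions): NT_STATUS_RETRY from this scheduler means "not handled
 * asynchronously", so a caller is expected to fall back to its synchronous
 * read path, while NT_STATUS_OK means the reply will be sent later from
 * aio_pread_smb1_done().
 */
#if 0
	status = schedule_aio_read_and_X(conn, req, fsp, startpos, smb_maxcnt);
	if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/* do the read synchronously instead */
	} else if (!NT_STATUS_IS_OK(status)) {
		reply_nterror(req, status);	/* e.g. lock conflict */
	}
#endif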
static void aio_pread_smb1_done(struct tevent_req *req)
{
	struct aio_extra *aio_ex = tevent_req_callback_data(
		req, struct aio_extra);
	files_struct *fsp = aio_ex->fsp;
	size_t outsize;
	char *outbuf = (char *)aio_ex->outbuf.data;
	ssize_t nread;
	struct vfs_aio_state vfs_aio_state;

	nread = SMB_VFS_PREAD_RECV(req, &vfs_aio_state);
	TALLOC_FREE(req);

	DEBUG(10, ("pread_recv returned %d, err = %s\n", (int)nread,
		   (nread == -1) ? strerror(vfs_aio_state.error) : "no error"));

	if (fsp == NULL) {
		DEBUG( 3, ("aio_pread_smb1_done: file closed whilst "
			   "aio outstanding (mid[%llu]).\n",
			   (unsigned long long)aio_ex->smbreq->mid));
		TALLOC_FREE(aio_ex);
		return;
	}

	if (nread < 0) {
		DEBUG( 3, ("handle_aio_read_complete: file %s nread == %d. "
			   "Error = %s\n", fsp_str_dbg(fsp), (int)nread,
			   strerror(vfs_aio_state.error)));

		ERROR_NT(map_nt_error_from_unix(vfs_aio_state.error));
		outsize = srv_set_message(outbuf,0,0,true);
	} else {
		outsize = setup_readX_header(outbuf, nread);

		aio_ex->fsp->fh->pos = aio_ex->offset + nread;
		aio_ex->fsp->fh->position_information = aio_ex->fsp->fh->pos;

		DEBUG( 3, ("handle_aio_read_complete file %s max=%d "
			   "nread=%d\n", fsp_str_dbg(fsp),
			   (int)aio_ex->nbyte, (int)nread ) );
	}

	if (outsize <= 4) {
		DBG_INFO("Invalid outsize (%zu)\n", outsize);
		TALLOC_FREE(aio_ex);
		return;
	}
	outsize -= 4;
	_smb_setlen_large(outbuf, outsize);

	show_msg(outbuf);
	if (!srv_send_smb(aio_ex->smbreq->xconn, outbuf,
			  true, aio_ex->smbreq->seqnum+1,
			  IS_CONN_ENCRYPTED(fsp->conn), NULL)) {
		exit_server_cleanly("handle_aio_read_complete: srv_send_smb "
				    "failed.");
	}

	DEBUG(10, ("handle_aio_read_complete: scheduled aio_read completed "
		   "for file %s, offset %.0f, len = %u\n",
		   fsp_str_dbg(fsp), (double)aio_ex->offset,
		   (unsigned int)nread));

	TALLOC_FREE(aio_ex);
}
struct pwrite_fsync_state {
	struct tevent_context *ev;
	files_struct *fsp;
	bool write_through;
	ssize_t nwritten;
};

static void pwrite_fsync_write_done(struct tevent_req *subreq);
static void pwrite_fsync_sync_done(struct tevent_req *subreq);

static struct tevent_req *pwrite_fsync_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct files_struct *fsp,
					    const void *data,
					    size_t n, off_t offset,
					    bool write_through)
{
	struct tevent_req *req, *subreq;
	struct pwrite_fsync_state *state;
	bool ok;

	req = tevent_req_create(mem_ctx, &state, struct pwrite_fsync_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->fsp = fsp;
	state->write_through = write_through;

	ok = vfs_valid_pwrite_range(offset, n);
	if (!ok) {
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	if (n == 0) {
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	subreq = SMB_VFS_PWRITE_SEND(state, ev, fsp, data, n, offset);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, pwrite_fsync_write_done, req);
	return req;
}

static void pwrite_fsync_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct pwrite_fsync_state *state = tevent_req_data(
		req, struct pwrite_fsync_state);
	connection_struct *conn = state->fsp->conn;
	bool do_sync;
	struct vfs_aio_state vfs_aio_state;

	state->nwritten = SMB_VFS_PWRITE_RECV(subreq, &vfs_aio_state);
	TALLOC_FREE(subreq);
	if (state->nwritten == -1) {
		tevent_req_error(req, vfs_aio_state.error);
		return;
	}

	do_sync = (lp_strict_sync(SNUM(conn)) &&
		   (lp_sync_always(SNUM(conn)) || state->write_through));
	if (!do_sync) {
		tevent_req_done(req);
		return;
	}

	subreq = SMB_VFS_FSYNC_SEND(state, state->ev, state->fsp);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, pwrite_fsync_sync_done, req);
}

static void pwrite_fsync_sync_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	int ret;
	struct vfs_aio_state vfs_aio_state;

	ret = SMB_VFS_FSYNC_RECV(subreq, &vfs_aio_state);
	TALLOC_FREE(subreq);
	if (ret == -1) {
		tevent_req_error(req, vfs_aio_state.error);
		return;
	}
	tevent_req_done(req);
}

static ssize_t pwrite_fsync_recv(struct tevent_req *req, int *perr)
{
	struct pwrite_fsync_state *state = tevent_req_data(
		req, struct pwrite_fsync_state);

	if (tevent_req_is_unix_error(req, perr)) {
		return -1;
	}
	return state->nwritten;
}
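
/*
 * Illustrative sketch only (not compiled; my_write_done and the surrounding
 * names are assumptions): pwrite_fsync_send/recv is an ordinary tevent
 * request pair that wraps SMB_VFS_PWRITE_SEND and, when "strict sync"
 * applies, chains an SMB_VFS_FSYNC_SEND before completing. Callers use it
 * exactly like the write schedulers below do.
 */
#if 0
	subreq = pwrite_fsync_send(mem_ctx, ev, fsp, data, n, offset,
				   write_through);
	tevent_req_set_callback(subreq, my_write_done, state);

	/* later, in my_write_done(): */
	nwritten = pwrite_fsync_recv(subreq, &err);
	TALLOC_FREE(subreq);
#endif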
static void aio_pwrite_smb1_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMBwriteX call.
*****************************************************************************/

NTSTATUS schedule_aio_write_and_X(connection_struct *conn,
			      struct smb_request *smbreq,
			      files_struct *fsp, const char *data,
			      off_t startpos,
			      size_t numtowrite)
{
	struct aio_extra *aio_ex;
	size_t bufsize;
	size_t min_aio_write_size = lp_aio_write_size(SNUM(conn));
	struct tevent_req *req;

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_write_size || (numtowrite < min_aio_write_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a write for aio request. */
		DEBUG(10,("schedule_aio_write_and_X: write size (%u) too "
			  "small for minimum aio_write of %u\n",
			  (unsigned int)numtowrite,
			  (unsigned int)min_aio_write_size ));
		return NT_STATUS_RETRY;
	}

	/* Only do this on non-chained and non-chaining writes */
	if (req_is_in_chain(smbreq)) {
		return NT_STATUS_RETRY;
	}

	bufsize = smb_size + 6*2;

	if (!(aio_ex = create_aio_extra(NULL, fsp, bufsize))) {
		DEBUG(0,("schedule_aio_write_and_X: malloc fail.\n"));
		return NT_STATUS_NO_MEMORY;
	}
	aio_ex->write_through = BITSETW(smbreq->vwv+7,0);

	construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data);
	srv_set_message((char *)aio_ex->outbuf.data, 6, 0, True);
	SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */

	init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
		(uint64_t)startpos, (uint64_t)numtowrite, WRITE_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK_CHECK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	aio_ex->nbyte = numtowrite;
	aio_ex->offset = startpos;

	req = pwrite_fsync_send(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
				data, numtowrite, startpos,
				aio_ex->write_through);
	if (req == NULL) {
		DEBUG(3,("schedule_aio_write_and_X: aio_write failed. "
			 "Error %s\n", strerror(errno) ));
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	tevent_req_set_callback(req, aio_pwrite_smb1_done, aio_ex);

	if (!aio_add_req_to_fsp(fsp, req)) {
		DEBUG(1, ("Could not add req to fsp\n"));
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}

	aio_ex->smbreq = talloc_move(aio_ex, &smbreq);

	/* This should actually be improved to span the write. */
	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE);
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE);

	if (!aio_ex->write_through && !lp_sync_always(SNUM(fsp->conn))
	    && fsp->fsp_flags.aio_write_behind)
	{
		/* Lie to the client and immediately claim we finished the
		 * write. */
		SSVAL(aio_ex->outbuf.data,smb_vwv2,numtowrite);
		SSVAL(aio_ex->outbuf.data,smb_vwv4,(numtowrite>>16)&1);
		show_msg((char *)aio_ex->outbuf.data);
		if (!srv_send_smb(aio_ex->smbreq->xconn,
				(char *)aio_ex->outbuf.data,
				true, aio_ex->smbreq->seqnum+1,
				IS_CONN_ENCRYPTED(fsp->conn),
				&aio_ex->smbreq->pcd)) {
			exit_server_cleanly("schedule_aio_write_and_X: "
					    "srv_send_smb failed.");
		}
		DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write "
			  "behind for file %s\n", fsp_str_dbg(fsp)));
	}

	DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write for file "
		  "%s, offset %.0f, len = %u (mid = %u)\n",
		  fsp_str_dbg(fsp), (double)startpos, (unsigned int)numtowrite,
		  (unsigned int)aio_ex->smbreq->mid));

	return NT_STATUS_OK;
}
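
/*
 * Note (not part of the original file; parameter names and values are an
 * assumption-labelled example from smb.conf, not taken from this source):
 * whether the schedulers above fire at all is driven by share configuration
 * along the lines of
 *
 *	aio read size = 1
 *	aio write size = 1
 *	aio write behind = pattern-list
 *
 * which feeds lp_aio_read_size()/lp_aio_write_size() used in the size checks
 * above, and, for files matching the write-behind pattern, is expected to set
 * fsp->fsp_flags.aio_write_behind and so enable the "reply early" branch in
 * this function.
 */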
static void aio_pwrite_smb1_done(struct tevent_req *req)
{
	struct aio_extra *aio_ex = tevent_req_callback_data(
		req, struct aio_extra);
	files_struct *fsp = aio_ex->fsp;
	char *outbuf = (char *)aio_ex->outbuf.data;
	ssize_t numtowrite = aio_ex->nbyte;
	ssize_t nwritten;
	int err;

	nwritten = pwrite_fsync_recv(req, &err);
	TALLOC_FREE(req);

	DEBUG(10, ("pwrite_recv returned %d, err = %s\n", (int)nwritten,
		   (nwritten == -1) ? strerror(err) : "no error"));

	if (fsp == NULL) {
		DEBUG( 3, ("aio_pwrite_smb1_done: file closed whilst "
			   "aio outstanding (mid[%llu]).\n",
			   (unsigned long long)aio_ex->smbreq->mid));
		TALLOC_FREE(aio_ex);
		return;
	}

	mark_file_modified(fsp);

	if (fsp->fsp_flags.aio_write_behind) {

		if (nwritten != numtowrite) {
			if (nwritten == -1) {
				DEBUG(5,("handle_aio_write_complete: "
					 "aio_write_behind failed ! File %s "
					 "is corrupt ! Error %s\n",
					 fsp_str_dbg(fsp), strerror(err)));
			} else {
				DEBUG(0,("handle_aio_write_complete: "
					 "aio_write_behind failed ! File %s "
					 "is corrupt ! Wanted %u bytes but "
					 "only wrote %d\n", fsp_str_dbg(fsp),
					 (unsigned int)numtowrite,
					 (int)nwritten ));
			}
		} else {
			DEBUG(10,("handle_aio_write_complete: "
				  "aio_write_behind completed for file %s\n",
				  fsp_str_dbg(fsp)));
		}
		/* TODO: should not return success in case of an error !!! */
		TALLOC_FREE(aio_ex);
		return;
	}
	/* We don't need outsize or set_message here as we've already set the
	   fixed size length when we set up the aio call. */

	if (nwritten == -1) {
		DEBUG(3, ("handle_aio_write: file %s wanted %u bytes. "
			  "nwritten == %d. Error = %s\n",
			  fsp_str_dbg(fsp), (unsigned int)numtowrite,
			  (int)nwritten, strerror(err)));

		ERROR_NT(map_nt_error_from_unix(err));
		srv_set_message(outbuf,0,0,true);
	} else {
		SSVAL(outbuf,smb_vwv2,nwritten);
		SSVAL(outbuf,smb_vwv4,(nwritten>>16)&1);
		if (nwritten < (ssize_t)numtowrite) {
			SCVAL(outbuf,smb_rcls,ERRHRD);
			SSVAL(outbuf,smb_err,ERRdiskfull);
		}

		DEBUG(3,("handle_aio_write: %s, num=%d wrote=%d\n",
			 fsp_fnum_dbg(fsp), (int)numtowrite, (int)nwritten));

		aio_ex->fsp->fh->pos = aio_ex->offset + nwritten;
	}

	show_msg(outbuf);
	if (!srv_send_smb(aio_ex->smbreq->xconn, outbuf,
			  true, aio_ex->smbreq->seqnum+1,
			  IS_CONN_ENCRYPTED(fsp->conn),
			  NULL)) {
		exit_server_cleanly("handle_aio_write_complete: "
				    "srv_send_smb failed.");
	}

	DEBUG(10, ("handle_aio_write_complete: scheduled aio_write completed "
		   "for file %s, offset %.0f, requested %u, written = %u\n",
		   fsp_str_dbg(fsp), (double)aio_ex->offset,
		   (unsigned int)numtowrite, (unsigned int)nwritten));

	TALLOC_FREE(aio_ex);
}
bool cancel_smb2_aio(struct smb_request *smbreq)
{
	struct smbd_smb2_request *smb2req = smbreq->smb2req;
	struct aio_extra *aio_ex = NULL;

	if (smb2req) {
		aio_ex = talloc_get_type(smbreq->async_priv,
					 struct aio_extra);
	}

	if (aio_ex == NULL) {
		return false;
	}

	if (aio_ex->fsp == NULL) {
		return false;
	}

	/*
	 * We let the aio request run and don't try to cancel it which means
	 * processing of the SMB2 request must continue as normal, cf MS-SMB2
	 * 3.3.5.16:
	 *
	 *   If the target request is not successfully canceled, processing of
	 *   the target request MUST continue and no response is sent to the
	 *   cancel request.
	 */

	return false;
}
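
/*
 * Illustrative sketch only (not compiled; the caller-side shape is an
 * assumption): the unconditional "return false" above tells the SMB2 cancel
 * machinery that the in-flight read/write could not be cancelled, so per
 * MS-SMB2 3.3.5.16 the original request simply completes later and no reply
 * is sent to the CANCEL itself.
 */
#if 0
	if (!cancel_smb2_aio(smbreq)) {
		/* leave the aio running; the pending request answers later */
	}
#endif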
static void aio_pread_smb2_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMB2 read call.
*****************************************************************************/

NTSTATUS schedule_smb2_aio_read(connection_struct *conn,
				struct smb_request *smbreq,
				files_struct *fsp,
				TALLOC_CTX *ctx,
				DATA_BLOB *preadbuf,
				off_t startpos,
				size_t smb_maxcnt)
{
	struct aio_extra *aio_ex;
	size_t min_aio_read_size = lp_aio_read_size(SNUM(conn));
	struct tevent_req *req;
	bool ok;

	ok = vfs_valid_pread_range(startpos, smb_maxcnt);
	if (!ok) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if (fsp->op == NULL) {
		/* No AIO on internal opens. */
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a read for aio request. */
		DEBUG(10,("smb2: read size (%u) too small "
			  "for minimum aio_read of %u\n",
			  (unsigned int)smb_maxcnt,
			  (unsigned int)min_aio_read_size ));
		return NT_STATUS_RETRY;
	}

	if (smbd_smb2_is_compound(smbreq->smb2req)) {
		return NT_STATUS_RETRY;
	}

	/* Create the out buffer. */
	*preadbuf = data_blob_talloc(ctx, NULL, smb_maxcnt);
	if (preadbuf->data == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) {
		return NT_STATUS_NO_MEMORY;
	}

	init_strict_lock_struct(fsp, fsp->op->global->open_persistent_id,
		(uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK_CHECK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	aio_ex->nbyte = smb_maxcnt;
	aio_ex->offset = startpos;

	req = SMB_VFS_PREAD_SEND(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
				 preadbuf->data, smb_maxcnt, startpos);
	if (req == NULL) {
		DEBUG(0, ("smb2: SMB_VFS_PREAD_SEND failed. "
			  "Error %s\n", strerror(errno)));
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	tevent_req_set_callback(req, aio_pread_smb2_done, aio_ex);

	if (!aio_add_req_to_fsp(fsp, req)) {
		DEBUG(1, ("Could not add req to fsp\n"));
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}

	/* We don't need talloc_move here as both aio_ex and
	 * smbreq are children of smbreq->smb2req. */
	aio_ex->smbreq = smbreq;
	smbreq->async_priv = aio_ex;

	DEBUG(10,("smb2: scheduled aio_read for file %s, "
		  "offset %.0f, len = %u (mid = %u)\n",
		  fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt,
		  (unsigned int)aio_ex->smbreq->mid ));

	return NT_STATUS_OK;
}
static void aio_pread_smb2_done(struct tevent_req *req)
{
	struct aio_extra *aio_ex = tevent_req_callback_data(
		req, struct aio_extra);
	struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq;
	files_struct *fsp = aio_ex->fsp;
	NTSTATUS status;
	ssize_t nread;
	struct vfs_aio_state vfs_aio_state = { 0 };

	nread = SMB_VFS_PREAD_RECV(req, &vfs_aio_state);
	TALLOC_FREE(req);

	DEBUG(10, ("pread_recv returned %d, err = %s\n", (int)nread,
		   (nread == -1) ? strerror(vfs_aio_state.error) : "no error"));

	/* Common error or success code processing for async or sync
	   read returns. */

	status = smb2_read_complete(subreq, nread, vfs_aio_state.error);

	if (nread > 0) {
		fsp->fh->pos = aio_ex->offset + nread;
		fsp->fh->position_information = fsp->fh->pos;
	}

	DEBUG(10, ("smb2: scheduled aio_read completed "
		   "for file %s, offset %.0f, len = %u "
		   "(errcode = %d, NTSTATUS = %s)\n",
		   fsp_str_dbg(aio_ex->fsp),
		   (double)aio_ex->offset,
		   (unsigned int)nread,
		   vfs_aio_state.error, nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(subreq, status);
		return;
	}
	tevent_req_done(subreq);
}
static void aio_pwrite_smb2_done(struct tevent_req *req);
/****************************************************************************
 Set up an aio request from a SMB2 write call.
*****************************************************************************/
NTSTATUS schedule_aio_smb2_write(connection_struct *conn,
				struct smb_request *smbreq,
				files_struct *fsp,
				uint64_t in_offset,
				DATA_BLOB in_data,
				bool write_through)
{
	struct aio_extra *aio_ex = NULL;
	size_t min_aio_write_size = lp_aio_write_size(SNUM(conn));
	struct tevent_req *req;

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if (fsp->op == NULL) {
		/* No AIO on internal opens. */
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_write_size || (in_data.length < min_aio_write_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a write for aio request. */
		DEBUG(10,("smb2: write size (%u) too "
			  "small for minimum aio_write of %u\n",
			  (unsigned int)in_data.length,
			  (unsigned int)min_aio_write_size ));
		return NT_STATUS_RETRY;
	}

	if (smbd_smb2_is_compound(smbreq->smb2req)) {
		return NT_STATUS_RETRY;
	}

	if (smbreq->unread_bytes) {
		/* Can't do async with recvfile. */
		return NT_STATUS_RETRY;
	}

	if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) {
		return NT_STATUS_NO_MEMORY;
	}

	aio_ex->write_through = write_through;

	init_strict_lock_struct(fsp, fsp->op->global->open_persistent_id,
		in_offset, (uint64_t)in_data.length, WRITE_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK_CHECK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	aio_ex->nbyte = in_data.length;
	aio_ex->offset = in_offset;

	req = pwrite_fsync_send(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
				in_data.data, in_data.length, in_offset,
				write_through);
	if (req == NULL) {
		DEBUG(3, ("smb2: SMB_VFS_PWRITE_SEND failed. "
			  "Error %s\n", strerror(errno)));
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	tevent_req_set_callback(req, aio_pwrite_smb2_done, aio_ex);

	if (!aio_add_req_to_fsp(fsp, req)) {
		DEBUG(1, ("Could not add req to fsp\n"));
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}

	/* We don't need talloc_move here as both aio_ex and
	 * smbreq are children of smbreq->smb2req. */
	aio_ex->smbreq = smbreq;
	smbreq->async_priv = aio_ex;

	/* This should actually be improved to span the write. */
	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE);
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE);

	/*
	 * We don't want to do write behind due to ownership
	 * issues of the request structs. Maybe add it if I
	 * figure those out. JRA.
	 */

	DEBUG(10,("smb2: scheduled aio_write for file "
		  "%s, offset %.0f, len = %u (mid = %u)\n",
		  fsp_str_dbg(fsp),
		  (double)in_offset,
		  (unsigned int)in_data.length,
		  (unsigned int)aio_ex->smbreq->mid));

	return NT_STATUS_OK;
}
static void aio_pwrite_smb2_done(struct tevent_req *req)
{
	struct aio_extra *aio_ex = tevent_req_callback_data(
		req, struct aio_extra);
	ssize_t numtowrite = aio_ex->nbyte;
	struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq;
	files_struct *fsp = aio_ex->fsp;
	NTSTATUS status;
	ssize_t nwritten;
	int err = 0;

	nwritten = pwrite_fsync_recv(req, &err);
	TALLOC_FREE(req);

	DEBUG(10, ("pwrite_recv returned %d, err = %s\n", (int)nwritten,
		   (nwritten == -1) ? strerror(err) : "no error"));

	mark_file_modified(fsp);

	status = smb2_write_complete_nosync(subreq, nwritten, err);

	DEBUG(10, ("smb2: scheduled aio_write completed "
		   "for file %s, offset %.0f, requested %u, "
		   "written = %u (errcode = %d, NTSTATUS = %s)\n",
		   fsp_str_dbg(fsp),
		   (double)aio_ex->offset,
		   (unsigned int)numtowrite,
		   (unsigned int)nwritten,
		   err, nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(subreq, status);
		return;
	}
	tevent_req_done(subreq);
}