Ensure we don't do an SMB2 aio write if RECVFILE is active.
[Samba/bjacke.git] / source3 / smbd / aio.c
blob e8be408eaa1279963b07ecc1264a736a0ab350ca

/*
   Unix SMB/Netbios implementation.
   Version 3.0
   async_io read handling using POSIX async io.
   Copyright (C) Jeremy Allison 2005.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../lib/util/tevent_ntstatus.h"
#include "../lib/util/tevent_unix.h"
#include "lib/tevent_wait.h"

/****************************************************************************
 The buffer we keep around whilst an aio request is in process.
*****************************************************************************/

struct aio_extra {
	files_struct *fsp;
	struct smb_request *smbreq;
	DATA_BLOB outbuf;
	struct lock_struct lock;
	size_t nbyte;
	off_t offset;
	bool write_through;
};

/****************************************************************************
 Accessor function to return write_through state.
*****************************************************************************/

bool aio_write_through_requested(struct aio_extra *aio_ex)
{
	return aio_ex->write_through;
}

static int aio_extra_destructor(struct aio_extra *aio_ex)
{
	outstanding_aio_calls--;
	return 0;
}

/****************************************************************************
 Create the extended aio struct we must keep around for the lifetime
 of the aio call.
*****************************************************************************/

static struct aio_extra *create_aio_extra(TALLOC_CTX *mem_ctx,
					files_struct *fsp,
					size_t buflen)
{
	struct aio_extra *aio_ex = talloc_zero(mem_ctx, struct aio_extra);

	if (!aio_ex) {
		return NULL;
	}

	/* The output buffer stored in the aio_ex is the start of
	   the smb return buffer. The buffer used in the acb
	   is the start of the reply data portion of that buffer. */

	if (buflen) {
		aio_ex->outbuf = data_blob_talloc(aio_ex, NULL, buflen);
		if (!aio_ex->outbuf.data) {
			TALLOC_FREE(aio_ex);
			return NULL;
		}
	}
	talloc_set_destructor(aio_ex, aio_extra_destructor);
	aio_ex->fsp = fsp;
	outstanding_aio_calls++;
	return aio_ex;
}

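/*
 * Each in-flight aio request is linked to its files_struct so that a
 * close can find it. The talloc destructor aio_del_req_from_fsp() below
 * removes the request from fsp->aio_requests[] and, once the last
 * request is gone, signals fsp->deferred_close so a pending close can
 * proceed.
 */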
struct aio_req_fsp_link {
	files_struct *fsp;
	struct tevent_req *req;
};

static int aio_del_req_from_fsp(struct aio_req_fsp_link *lnk)
{
	unsigned i;
	files_struct *fsp = lnk->fsp;
	struct tevent_req *req = lnk->req;

	for (i=0; i<fsp->num_aio_requests; i++) {
		if (fsp->aio_requests[i] == req) {
			break;
		}
	}
	if (i == fsp->num_aio_requests) {
		DEBUG(1, ("req %p not found in fsp %p\n", req, fsp));
		return 0;
	}
	fsp->num_aio_requests -= 1;
	fsp->aio_requests[i] = fsp->aio_requests[fsp->num_aio_requests];

	if (fsp->num_aio_requests == 0) {
		tevent_wait_done(fsp->deferred_close);
	}
	return 0;
}

static bool aio_add_req_to_fsp(files_struct *fsp, struct tevent_req *req)
{
	size_t array_len;
	struct aio_req_fsp_link *lnk;

	lnk = talloc(req, struct aio_req_fsp_link);
	if (lnk == NULL) {
		return false;
	}

	array_len = talloc_array_length(fsp->aio_requests);
	if (array_len <= fsp->num_aio_requests) {
		struct tevent_req **tmp;

		tmp = talloc_realloc(
			fsp, fsp->aio_requests, struct tevent_req *,
			fsp->num_aio_requests+1);
		if (tmp == NULL) {
			TALLOC_FREE(lnk);
			return false;
		}
		fsp->aio_requests = tmp;
	}
	fsp->aio_requests[fsp->num_aio_requests] = req;
	fsp->num_aio_requests += 1;

	lnk->fsp = fsp;
	lnk->req = req;
	talloc_set_destructor(lnk, aio_del_req_from_fsp);

	return true;
}

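/*
 * Note on return values: the schedule_* functions below return
 * NT_STATUS_RETRY when a request is not suitable for async handling
 * (too small, chained, write cache in use, too many aio requests
 * already in flight); the caller then services the request
 * synchronously instead.
 */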
static void aio_pread_smb1_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMBreadX call.
*****************************************************************************/

NTSTATUS schedule_aio_read_and_X(connection_struct *conn,
				struct smb_request *smbreq,
				files_struct *fsp, off_t startpos,
				size_t smb_maxcnt)
{
	struct aio_extra *aio_ex;
	size_t bufsize;
	size_t min_aio_read_size = lp_aio_read_size(SNUM(conn));
	struct tevent_req *req;

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a read for aio request. */
		DEBUG(10,("schedule_aio_read_and_X: read size (%u) too small "
			  "for minimum aio_read of %u\n",
			  (unsigned int)smb_maxcnt,
			  (unsigned int)min_aio_read_size ));
		return NT_STATUS_RETRY;
	}

	/* Only do this on non-chained and non-chaining reads not using the
	 * write cache. */
	if (req_is_in_chain(smbreq) || (lp_write_cache_size(SNUM(conn)) != 0)) {
		return NT_STATUS_RETRY;
	}

	if (outstanding_aio_calls >= aio_pending_size) {
		DEBUG(10,("schedule_aio_read_and_X: Already have %d aio "
			  "activities outstanding.\n",
			  outstanding_aio_calls ));
		return NT_STATUS_RETRY;
	}

	/* The following is safe from integer wrap as we've already checked
	   smb_maxcnt is 128k or less. Wct is 12 for read replies */

	bufsize = smb_size + 12 * 2 + smb_maxcnt;

	if ((aio_ex = create_aio_extra(NULL, fsp, bufsize)) == NULL) {
		DEBUG(10,("schedule_aio_read_and_X: malloc fail.\n"));
		return NT_STATUS_NO_MEMORY;
	}

	construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data);
	srv_set_message((char *)aio_ex->outbuf.data, 12, 0, True);
	SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */

	init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
		(uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	aio_ex->nbyte = smb_maxcnt;
	aio_ex->offset = startpos;

	req = SMB_VFS_PREAD_SEND(aio_ex, fsp->conn->sconn->ev_ctx,
				 fsp, smb_buf(aio_ex->outbuf.data),
				 smb_maxcnt, startpos);
	if (req == NULL) {
		DEBUG(0,("schedule_aio_read_and_X: aio_read failed. "
			 "Error %s\n", strerror(errno) ));
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	tevent_req_set_callback(req, aio_pread_smb1_done, aio_ex);

	if (!aio_add_req_to_fsp(fsp, req)) {
		DEBUG(1, ("Could not add req to fsp\n"));
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}

	aio_ex->smbreq = talloc_move(aio_ex, &smbreq);

	DEBUG(10,("schedule_aio_read_and_X: scheduled aio_read for file %s, "
		  "offset %.0f, len = %u (mid = %u)\n",
		  fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt,
		  (unsigned int)aio_ex->smbreq->mid ));

	return NT_STATUS_OK;
}

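/*
 * Completion callback for the SMB1 read case: the reply header was
 * already built into aio_ex->outbuf by schedule_aio_read_and_X(), so
 * this only fills in the byte counts (or an error), releases the strict
 * lock and sends the packet. A NULL fsp means the file was closed while
 * the aio was outstanding, in which case nothing is sent.
 */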
static void aio_pread_smb1_done(struct tevent_req *req)
{
	struct aio_extra *aio_ex = tevent_req_callback_data(
		req, struct aio_extra);
	files_struct *fsp = aio_ex->fsp;
	int outsize;
	char *outbuf = (char *)aio_ex->outbuf.data;
	char *data = smb_buf(outbuf);
	ssize_t nread;
	int err;

	nread = SMB_VFS_PREAD_RECV(req, &err);
	TALLOC_FREE(req);

	DEBUG(10, ("pread_recv returned %d, err = %s\n", (int)nread,
		   (nread == -1) ? strerror(err) : "no error"));

	if (fsp == NULL) {
		DEBUG( 3, ("aio_pread_smb1_done: file closed whilst "
			   "aio outstanding (mid[%llu]).\n",
			   (unsigned long long)aio_ex->smbreq->mid));
		TALLOC_FREE(aio_ex);
		return;
	}

	/* Unlock now we're done. */
	SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock);

	if (nread < 0) {
		DEBUG( 3, ("handle_aio_read_complete: file %s nread == %d. "
			   "Error = %s\n", fsp_str_dbg(fsp), (int)nread,
			   strerror(err)));

		ERROR_NT(map_nt_error_from_unix(err));
		outsize = srv_set_message(outbuf,0,0,true);
	} else {
		outsize = srv_set_message(outbuf, 12, nread, False);
		SSVAL(outbuf,smb_vwv2, 0xFFFF); /* Remaining - must be * -1. */
		SSVAL(outbuf,smb_vwv5, nread);
		SSVAL(outbuf,smb_vwv6, smb_offset(data,outbuf));
		SSVAL(outbuf,smb_vwv7, ((nread >> 16) & 1));
		SSVAL(smb_buf(outbuf), -2, nread);

		aio_ex->fsp->fh->pos = aio_ex->offset + nread;
		aio_ex->fsp->fh->position_information = aio_ex->fsp->fh->pos;

		DEBUG( 3, ("handle_aio_read_complete file %s max=%d "
			   "nread=%d\n", fsp_str_dbg(fsp),
			   (int)aio_ex->nbyte, (int)nread ) );
	}

	smb_setlen(outbuf, outsize - 4);
	show_msg(outbuf);
	if (!srv_send_smb(aio_ex->smbreq->sconn, outbuf,
			  true, aio_ex->smbreq->seqnum+1,
			  IS_CONN_ENCRYPTED(fsp->conn), NULL)) {
		exit_server_cleanly("handle_aio_read_complete: srv_send_smb "
				    "failed.");
	}

	DEBUG(10, ("handle_aio_read_complete: scheduled aio_read completed "
		   "for file %s, offset %.0f, len = %u\n",
		   fsp_str_dbg(fsp), (double)aio_ex->offset,
		   (unsigned int)nread));

	TALLOC_FREE(aio_ex);
}

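/*
 * pwrite_fsync_send/recv wrap SMB_VFS_PWRITE_SEND with an optional
 * SMB_VFS_FSYNC_SEND: after the write completes, an fsync is issued
 * when "strict sync" is set and either "sync always" is set or the
 * client requested write-through. Both the SMB1 and SMB2 write paths
 * below use this composite request.
 */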
struct pwrite_fsync_state {
	struct tevent_context *ev;
	files_struct *fsp;
	bool write_through;
	ssize_t nwritten;
};

static void pwrite_fsync_write_done(struct tevent_req *subreq);
static void pwrite_fsync_sync_done(struct tevent_req *subreq);

static struct tevent_req *pwrite_fsync_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct files_struct *fsp,
					    const void *data,
					    size_t n, off_t offset,
					    bool write_through)
{
	struct tevent_req *req, *subreq;
	struct pwrite_fsync_state *state;

	req = tevent_req_create(mem_ctx, &state, struct pwrite_fsync_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->fsp = fsp;
	state->write_through = write_through;

	subreq = SMB_VFS_PWRITE_SEND(state, ev, fsp, data, n, offset);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, pwrite_fsync_write_done, req);
	return req;
}

static void pwrite_fsync_write_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct pwrite_fsync_state *state = tevent_req_data(
		req, struct pwrite_fsync_state);
	connection_struct *conn = state->fsp->conn;
	int err;
	bool do_sync;

	state->nwritten = SMB_VFS_PWRITE_RECV(subreq, &err);
	TALLOC_FREE(subreq);
	if (state->nwritten == -1) {
		tevent_req_error(req, err);
		return;
	}

	do_sync = (lp_strict_sync(SNUM(conn)) &&
		   (lp_syncalways(SNUM(conn)) || state->write_through));
	if (!do_sync) {
		tevent_req_done(req);
		return;
	}

	subreq = SMB_VFS_FSYNC_SEND(state, state->ev, state->fsp);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, pwrite_fsync_sync_done, req);
}

static void pwrite_fsync_sync_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	int ret, err;

	ret = SMB_VFS_FSYNC_RECV(subreq, &err);
	TALLOC_FREE(subreq);
	if (ret == -1) {
		tevent_req_error(req, err);
		return;
	}
	tevent_req_done(req);
}

static ssize_t pwrite_fsync_recv(struct tevent_req *req, int *perr)
{
	struct pwrite_fsync_state *state = tevent_req_data(
		req, struct pwrite_fsync_state);

	if (tevent_req_is_unix_error(req, perr)) {
		return -1;
	}
	return state->nwritten;
}

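/*
 * Usage pattern (see schedule_aio_write_and_X() and
 * schedule_aio_smb2_write() below): the caller issues
 * pwrite_fsync_send(), registers a completion callback with
 * tevent_req_set_callback(), and picks up the result in that callback
 * via pwrite_fsync_recv(), which returns the number of bytes written or
 * -1 with the errno-style error in *perr.
 */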
static void aio_pwrite_smb1_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMBwriteX call.
*****************************************************************************/

NTSTATUS schedule_aio_write_and_X(connection_struct *conn,
				struct smb_request *smbreq,
				files_struct *fsp, const char *data,
				off_t startpos,
				size_t numtowrite)
{
	struct aio_extra *aio_ex;
	size_t bufsize;
	size_t min_aio_write_size = lp_aio_write_size(SNUM(conn));
	struct tevent_req *req;

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_write_size || (numtowrite < min_aio_write_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a write for aio request. */
		DEBUG(10,("schedule_aio_write_and_X: write size (%u) too "
			  "small for minimum aio_write of %u\n",
			  (unsigned int)numtowrite,
			  (unsigned int)min_aio_write_size ));
		return NT_STATUS_RETRY;
	}

	/* Only do this on non-chained and non-chaining writes not using the
	 * write cache. */
	if (req_is_in_chain(smbreq) || (lp_write_cache_size(SNUM(conn)) != 0)) {
		return NT_STATUS_RETRY;
	}

	if (outstanding_aio_calls >= aio_pending_size) {
		DEBUG(3,("schedule_aio_write_and_X: Already have %d aio "
			 "activities outstanding.\n",
			 outstanding_aio_calls ));
		DEBUG(10,("schedule_aio_write_and_X: failed to schedule "
			  "aio_write for file %s, offset %.0f, len = %u "
			  "(mid = %u)\n",
			  fsp_str_dbg(fsp), (double)startpos,
			  (unsigned int)numtowrite,
			  (unsigned int)smbreq->mid ));
		return NT_STATUS_RETRY;
	}

	bufsize = smb_size + 6*2;

	if (!(aio_ex = create_aio_extra(NULL, fsp, bufsize))) {
		DEBUG(0,("schedule_aio_write_and_X: malloc fail.\n"));
		return NT_STATUS_NO_MEMORY;
	}
	aio_ex->write_through = BITSETW(smbreq->vwv+7,0);

	construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data);
	srv_set_message((char *)aio_ex->outbuf.data, 6, 0, True);
	SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */

	init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
		(uint64_t)startpos, (uint64_t)numtowrite, WRITE_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	aio_ex->nbyte = numtowrite;
	aio_ex->offset = startpos;

	req = pwrite_fsync_send(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
				data, numtowrite, startpos,
				aio_ex->write_through);
	if (req == NULL) {
		DEBUG(3,("schedule_aio_write_and_X: aio_write failed. "
			 "Error %s\n", strerror(errno) ));
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	tevent_req_set_callback(req, aio_pwrite_smb1_done, aio_ex);

	if (!aio_add_req_to_fsp(fsp, req)) {
		DEBUG(1, ("Could not add req to fsp\n"));
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}

	aio_ex->smbreq = talloc_move(aio_ex, &smbreq);

	/* This should actually be improved to span the write. */
	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE);
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE);

	if (!aio_ex->write_through && !lp_syncalways(SNUM(fsp->conn))
	    && fsp->aio_write_behind) {
		/* Lie to the client and immediately claim we finished the
		 * write. */
		SSVAL(aio_ex->outbuf.data,smb_vwv2,numtowrite);
		SSVAL(aio_ex->outbuf.data,smb_vwv4,(numtowrite>>16)&1);
		show_msg((char *)aio_ex->outbuf.data);
		if (!srv_send_smb(aio_ex->smbreq->sconn,
				  (char *)aio_ex->outbuf.data,
				  true, aio_ex->smbreq->seqnum+1,
				  IS_CONN_ENCRYPTED(fsp->conn),
				  &aio_ex->smbreq->pcd)) {
			exit_server_cleanly("schedule_aio_write_and_X: "
					    "srv_send_smb failed.");
		}
		DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write "
			  "behind for file %s\n", fsp_str_dbg(fsp)));
	}

	DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write for file "
		  "%s, offset %.0f, len = %u (mid = %u) "
		  "outstanding_aio_calls = %d\n",
		  fsp_str_dbg(fsp), (double)startpos, (unsigned int)numtowrite,
		  (unsigned int)aio_ex->smbreq->mid, outstanding_aio_calls ));

	return NT_STATUS_OK;
}

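/*
 * Completion callback for the SMB1 write case. For "write behind"
 * handles the reply has already been sent by schedule_aio_write_and_X(),
 * so errors can only be logged here; otherwise the prepared reply is
 * completed with the byte counts (or an error) and sent to the client.
 */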
static void aio_pwrite_smb1_done(struct tevent_req *req)
{
	struct aio_extra *aio_ex = tevent_req_callback_data(
		req, struct aio_extra);
	files_struct *fsp = aio_ex->fsp;
	char *outbuf = (char *)aio_ex->outbuf.data;
	ssize_t numtowrite = aio_ex->nbyte;
	ssize_t nwritten;
	int err;

	nwritten = pwrite_fsync_recv(req, &err);
	TALLOC_FREE(req);

	DEBUG(10, ("pwrite_recv returned %d, err = %s\n", (int)nwritten,
		   (nwritten == -1) ? strerror(err) : "no error"));

	if (fsp == NULL) {
		DEBUG( 3, ("aio_pwrite_smb1_done: file closed whilst "
			   "aio outstanding (mid[%llu]).\n",
			   (unsigned long long)aio_ex->smbreq->mid));
		TALLOC_FREE(aio_ex);
		return;
	}

	/* Unlock now we're done. */
	SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock);

	mark_file_modified(fsp);

	if (fsp->aio_write_behind) {

		if (nwritten != numtowrite) {
			if (nwritten == -1) {
				DEBUG(5,("handle_aio_write_complete: "
					 "aio_write_behind failed ! File %s "
					 "is corrupt ! Error %s\n",
					 fsp_str_dbg(fsp), strerror(err)));
			} else {
				DEBUG(0,("handle_aio_write_complete: "
					 "aio_write_behind failed ! File %s "
					 "is corrupt ! Wanted %u bytes but "
					 "only wrote %d\n", fsp_str_dbg(fsp),
					 (unsigned int)numtowrite,
					 (int)nwritten ));
			}
		} else {
			DEBUG(10,("handle_aio_write_complete: "
				  "aio_write_behind completed for file %s\n",
				  fsp_str_dbg(fsp)));
		}
		/* TODO: should not return success in case of an error !!! */
		TALLOC_FREE(aio_ex);
		return;
	}

	/* We don't need outsize or set_message here as we've already set the
	   fixed size length when we set up the aio call. */

	if (nwritten == -1) {
		DEBUG(3, ("handle_aio_write: file %s wanted %u bytes. "
			  "nwritten == %d. Error = %s\n",
			  fsp_str_dbg(fsp), (unsigned int)numtowrite,
			  (int)nwritten, strerror(err)));

		ERROR_NT(map_nt_error_from_unix(err));
		srv_set_message(outbuf,0,0,true);
	} else {
		SSVAL(outbuf,smb_vwv2,nwritten);
		SSVAL(outbuf,smb_vwv4,(nwritten>>16)&1);
		if (nwritten < (ssize_t)numtowrite) {
			SCVAL(outbuf,smb_rcls,ERRHRD);
			SSVAL(outbuf,smb_err,ERRdiskfull);
		}

		DEBUG(3,("handle_aio_write: %s, num=%d wrote=%d\n",
			 fsp_fnum_dbg(fsp), (int)numtowrite, (int)nwritten));

		aio_ex->fsp->fh->pos = aio_ex->offset + nwritten;
	}

	show_msg(outbuf);
	if (!srv_send_smb(aio_ex->smbreq->sconn, outbuf,
			  true, aio_ex->smbreq->seqnum+1,
			  IS_CONN_ENCRYPTED(fsp->conn),
			  NULL)) {
		exit_server_cleanly("handle_aio_write_complete: "
				    "srv_send_smb failed.");
	}

	DEBUG(10, ("handle_aio_write_complete: scheduled aio_write completed "
		   "for file %s, offset %.0f, requested %u, written = %u\n",
		   fsp_str_dbg(fsp), (double)aio_ex->offset,
		   (unsigned int)numtowrite, (unsigned int)nwritten));

	TALLOC_FREE(aio_ex);
}

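/*
 * SMB2 cancellation: the underlying VFS request cannot be aborted, so
 * cancelling only detaches the fsp from the aio_extra. The request is
 * left to run to completion and the _done handlers then throw the
 * result away instead of replying.
 */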
bool cancel_smb2_aio(struct smb_request *smbreq)
{
	struct smbd_smb2_request *smb2req = smbreq->smb2req;
	struct aio_extra *aio_ex = NULL;

	if (smb2req) {
		aio_ex = talloc_get_type(smbreq->async_priv,
					 struct aio_extra);
	}

	if (aio_ex == NULL) {
		return false;
	}

	if (aio_ex->fsp == NULL) {
		return false;
	}

	/*
	 * We let the aio request run. Setting fsp to NULL has the
	 * effect that the _done routines don't send anything out.
	 */

	aio_ex->fsp = NULL;
	return true;
}

static void aio_pread_smb2_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMB2 read call.
*****************************************************************************/

NTSTATUS schedule_smb2_aio_read(connection_struct *conn,
				struct smb_request *smbreq,
				files_struct *fsp,
				TALLOC_CTX *ctx,
				DATA_BLOB *preadbuf,
				off_t startpos,
				size_t smb_maxcnt)
{
	struct aio_extra *aio_ex;
	size_t min_aio_read_size = lp_aio_read_size(SNUM(conn));
	struct tevent_req *req;

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a read for aio request. */
		DEBUG(10,("smb2: read size (%u) too small "
			  "for minimum aio_read of %u\n",
			  (unsigned int)smb_maxcnt,
			  (unsigned int)min_aio_read_size ));
		return NT_STATUS_RETRY;
	}

	/* Only do this on reads not using the write cache. */
	if (lp_write_cache_size(SNUM(conn)) != 0) {
		return NT_STATUS_RETRY;
	}

	if (outstanding_aio_calls >= aio_pending_size) {
		DEBUG(10,("smb2: Already have %d aio "
			  "activities outstanding.\n",
			  outstanding_aio_calls ));
		return NT_STATUS_RETRY;
	}

	/* Create the out buffer. */
	*preadbuf = data_blob_talloc(ctx, NULL, smb_maxcnt);
	if (preadbuf->data == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) {
		return NT_STATUS_NO_MEMORY;
	}

	init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
		(uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	aio_ex->nbyte = smb_maxcnt;
	aio_ex->offset = startpos;

	req = SMB_VFS_PREAD_SEND(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
				 preadbuf->data, smb_maxcnt, startpos);
	if (req == NULL) {
		DEBUG(0, ("smb2: SMB_VFS_PREAD_SEND failed. "
			  "Error %s\n", strerror(errno)));
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	tevent_req_set_callback(req, aio_pread_smb2_done, aio_ex);

	if (!aio_add_req_to_fsp(fsp, req)) {
		DEBUG(1, ("Could not add req to fsp\n"));
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}

	/* We don't need talloc_move here as both aio_ex and
	 * smbreq are children of smbreq->smb2req. */
	aio_ex->smbreq = smbreq;
	smbreq->async_priv = aio_ex;

	DEBUG(10,("smb2: scheduled aio_read for file %s, "
		  "offset %.0f, len = %u (mid = %u)\n",
		  fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt,
		  (unsigned int)aio_ex->smbreq->mid ));

	return NT_STATUS_OK;
}

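/*
 * Completion callback for the SMB2 read case: the result is fed into
 * smb2_read_complete() on the pending SMB2 read subrequest, which is
 * then finished with tevent_req_done() or tevent_req_nterror().
 */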
static void aio_pread_smb2_done(struct tevent_req *req)
{
	struct aio_extra *aio_ex = tevent_req_callback_data(
		req, struct aio_extra);
	struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq;
	files_struct *fsp = aio_ex->fsp;
	NTSTATUS status;
	ssize_t nread;
	int err = 0;

	nread = SMB_VFS_PREAD_RECV(req, &err);
	TALLOC_FREE(req);

	DEBUG(10, ("pread_recv returned %d, err = %s\n", (int)nread,
		   (nread == -1) ? strerror(err) : "no error"));

	if (fsp == NULL) {
		DEBUG( 3, ("aio_pread_smb2_done: file closed whilst "
			   "aio outstanding (mid[%llu]).\n",
			   (unsigned long long)aio_ex->smbreq->mid));
		TALLOC_FREE(aio_ex);
		return;
	}

	/* Unlock now we're done. */
	SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock);

	/* Common error or success code processing for async or sync
	   read returns. */

	status = smb2_read_complete(subreq, nread, err);

	if (nread > 0) {
		fsp->fh->pos = aio_ex->offset + nread;
		fsp->fh->position_information = fsp->fh->pos;
	}

	DEBUG(10, ("smb2: scheduled aio_read completed "
		   "for file %s, offset %.0f, len = %u "
		   "(errcode = %d, NTSTATUS = %s)\n",
		   fsp_str_dbg(aio_ex->fsp),
		   (double)aio_ex->offset,
		   (unsigned int)nread,
		   err, nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(subreq, status);
		return;
	}
	tevent_req_done(subreq);
}

static void aio_pwrite_smb2_done(struct tevent_req *req);

/****************************************************************************
 Set up an aio request from a SMB2write call.
*****************************************************************************/

NTSTATUS schedule_aio_smb2_write(connection_struct *conn,
				struct smb_request *smbreq,
				files_struct *fsp,
				uint64_t in_offset,
				DATA_BLOB in_data,
				bool write_through)
{
	struct aio_extra *aio_ex = NULL;
	size_t min_aio_write_size = lp_aio_write_size(SNUM(conn));
	struct tevent_req *req;

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_write_size || (in_data.length < min_aio_write_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a write for aio request. */
		DEBUG(10,("smb2: write size (%u) too "
			  "small for minimum aio_write of %u\n",
			  (unsigned int)in_data.length,
			  (unsigned int)min_aio_write_size ));
		return NT_STATUS_RETRY;
	}

	/* Only do this on writes not using the write cache. */
	if (lp_write_cache_size(SNUM(conn)) != 0) {
		return NT_STATUS_RETRY;
	}

	if (outstanding_aio_calls >= aio_pending_size) {
		DEBUG(3,("smb2: Already have %d aio "
			 "activities outstanding.\n",
			 outstanding_aio_calls ));
		return NT_STATUS_RETRY;
	}

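	/*
	 * This is the "Ensure we don't do an SMB2 aio write if RECVFILE
	 * is active" check: unread_bytes != 0 means the write payload is
	 * still on the socket waiting for a recvfile-style transfer, so
	 * it cannot be handed to an async pwrite. Returning
	 * NT_STATUS_RETRY makes the caller fall back to the synchronous
	 * write path.
	 */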
	if (smbreq->unread_bytes) {
		/* Can't do async with recvfile. */
		return NT_STATUS_RETRY;
	}

	if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) {
		return NT_STATUS_NO_MEMORY;
	}

	aio_ex->write_through = write_through;

	init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
		in_offset, (uint64_t)in_data.length, WRITE_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	aio_ex->nbyte = in_data.length;
	aio_ex->offset = in_offset;

	req = pwrite_fsync_send(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
				in_data.data, in_data.length, in_offset,
				write_through);
	if (req == NULL) {
		DEBUG(3, ("smb2: SMB_VFS_PWRITE_SEND failed. "
			  "Error %s\n", strerror(errno)));
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	tevent_req_set_callback(req, aio_pwrite_smb2_done, aio_ex);

	if (!aio_add_req_to_fsp(fsp, req)) {
		DEBUG(1, ("Could not add req to fsp\n"));
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}

	/* We don't need talloc_move here as both aio_ex and
	 * smbreq are children of smbreq->smb2req. */
	aio_ex->smbreq = smbreq;
	smbreq->async_priv = aio_ex;

	/* This should actually be improved to span the write. */
	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE);
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE);

	/*
	 * We don't want to do write behind due to ownership
	 * issues of the request structs. Maybe add it if I
	 * figure those out. JRA.
	 */

	DEBUG(10,("smb2: scheduled aio_write for file "
		  "%s, offset %.0f, len = %u (mid = %u) "
		  "outstanding_aio_calls = %d\n",
		  fsp_str_dbg(fsp),
		  (double)in_offset,
		  (unsigned int)in_data.length,
		  (unsigned int)aio_ex->smbreq->mid,
		  outstanding_aio_calls ));

	return NT_STATUS_OK;
}

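/*
 * Completion callback for the SMB2 write case. Any fsync required by
 * write-through or "sync always" has already been done inside
 * pwrite_fsync, hence the _nosync variant of smb2_write_complete() is
 * used before finishing the pending SMB2 subrequest.
 */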
static void aio_pwrite_smb2_done(struct tevent_req *req)
{
	struct aio_extra *aio_ex = tevent_req_callback_data(
		req, struct aio_extra);
	ssize_t numtowrite = aio_ex->nbyte;
	struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq;
	files_struct *fsp = aio_ex->fsp;
	NTSTATUS status;
	ssize_t nwritten;
	int err = 0;

	nwritten = pwrite_fsync_recv(req, &err);
	TALLOC_FREE(req);

	DEBUG(10, ("pwrite_recv returned %d, err = %s\n", (int)nwritten,
		   (nwritten == -1) ? strerror(err) : "no error"));

	if (fsp == NULL) {
		DEBUG( 3, ("aio_pwrite_smb2_done: file closed whilst "
			   "aio outstanding (mid[%llu]).\n",
			   (unsigned long long)aio_ex->smbreq->mid));
		TALLOC_FREE(aio_ex);
		return;
	}

	/* Unlock now we're done. */
	SMB_VFS_STRICT_UNLOCK(fsp->conn, fsp, &aio_ex->lock);

	status = smb2_write_complete_nosync(subreq, nwritten, err);

	DEBUG(10, ("smb2: scheduled aio_write completed "
		   "for file %s, offset %.0f, requested %u, "
		   "written = %u (errcode = %d, NTSTATUS = %s)\n",
		   fsp_str_dbg(fsp),
		   (double)aio_ex->offset,
		   (unsigned int)numtowrite,
		   (unsigned int)nwritten,
		   err, nt_errstr(status)));

	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(subreq, status);
		return;
	}
	tevent_req_done(subreq);
}