libsmb: Remove unused setup_stat_from_stat_ex()
[Samba.git] / source3 / smbd / smb2_aio.c
blob88aa68d218f5fa9d539fbb52afd2209f8ebcf6f3
/*
   Unix SMB/Netbios implementation.
   Version 3.0
   async_io read handling using POSIX async io.
   Copyright (C) Jeremy Allison 2005.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
21 #include "includes.h"
22 #include "smbd/smbd.h"
23 #include "smbd/globals.h"
24 #include "../lib/util/tevent_ntstatus.h"
25 #include "../lib/util/tevent_unix.h"
27 /****************************************************************************
28 Accessor function to return write_through state.
29 *****************************************************************************/
31 bool aio_write_through_requested(struct aio_extra *aio_ex)
33 return aio_ex->write_through;
36 /****************************************************************************
37 Create the extended aio struct we must keep around for the lifetime
38 of the aio call.
39 *****************************************************************************/
41 struct aio_extra *create_aio_extra(TALLOC_CTX *mem_ctx,
42 files_struct *fsp,
43 size_t buflen)
45 struct aio_extra *aio_ex = talloc_zero(mem_ctx, struct aio_extra);
47 if (!aio_ex) {
48 return NULL;
51 /* The output buffer stored in the aio_ex is the start of
52 the smb return buffer. The buffer used in the acb
53 is the start of the reply data portion of that buffer. */
55 if (buflen) {
56 aio_ex->outbuf = data_blob_talloc(aio_ex, NULL, buflen);
57 if (!aio_ex->outbuf.data) {
58 TALLOC_FREE(aio_ex);
59 return NULL;
62 aio_ex->fsp = fsp;
63 return aio_ex;
66 struct aio_req_fsp_link {
67 #ifdef DEVELOPER
68 struct smbd_server_connection *sconn;
69 #endif
70 files_struct *fsp;
71 struct tevent_req *req;
74 static int aio_del_req_from_fsp(struct aio_req_fsp_link *lnk)
76 unsigned i;
77 files_struct *fsp = lnk->fsp;
78 struct tevent_req *req = lnk->req;
80 #ifdef DEVELOPER
81 struct files_struct *ifsp = NULL;
82 bool found = false;
85 * When this is called, lnk->fsp must still exist
86 * on the files list for this connection. Panic if not.
88 for (ifsp = lnk->sconn->files; ifsp; ifsp = ifsp->next) {
89 if (ifsp == fsp) {
90 found = true;
93 if (!found) {
94 smb_panic("orphaned lnk on fsp aio list.\n");
96 #endif
98 for (i=0; i<fsp->num_aio_requests; i++) {
99 if (fsp->aio_requests[i] == req) {
100 break;
103 if (i == fsp->num_aio_requests) {
104 DEBUG(1, ("req %p not found in fsp %p\n", req, fsp));
105 return 0;
107 fsp->num_aio_requests -= 1;
108 fsp->aio_requests[i] = fsp->aio_requests[fsp->num_aio_requests];
110 if (fsp->num_aio_requests == 0) {
111 TALLOC_FREE(fsp->aio_requests);
113 return 0;
116 bool aio_add_req_to_fsp(files_struct *fsp, struct tevent_req *req)
118 size_t array_len;
119 struct aio_req_fsp_link *lnk;
121 lnk = talloc(req, struct aio_req_fsp_link);
122 if (lnk == NULL) {
123 return false;
126 array_len = talloc_array_length(fsp->aio_requests);
127 if (array_len <= fsp->num_aio_requests) {
128 struct tevent_req **tmp;
130 if (fsp->num_aio_requests + 10 < 10) {
131 /* Integer wrap. */
132 TALLOC_FREE(lnk);
133 return false;
137 * Allocate in blocks of 10 so we don't allocate
138 * on every aio request.
140 tmp = talloc_realloc(
141 fsp, fsp->aio_requests, struct tevent_req *,
142 fsp->num_aio_requests+10);
143 if (tmp == NULL) {
144 TALLOC_FREE(lnk);
145 return false;
147 fsp->aio_requests = tmp;
149 fsp->aio_requests[fsp->num_aio_requests] = req;
150 fsp->num_aio_requests += 1;
152 lnk->fsp = fsp;
153 lnk->req = req;
154 #ifdef DEVELOPER
155 lnk->sconn = fsp->conn->sconn;
156 #endif
157 talloc_set_destructor(lnk, aio_del_req_from_fsp);
159 return true;
162 struct pwrite_fsync_state {
163 struct tevent_context *ev;
164 files_struct *fsp;
165 bool write_through;
166 ssize_t nwritten;
169 static void pwrite_fsync_write_done(struct tevent_req *subreq);
170 static void pwrite_fsync_sync_done(struct tevent_req *subreq);
172 struct tevent_req *pwrite_fsync_send(TALLOC_CTX *mem_ctx,
173 struct tevent_context *ev,
174 struct files_struct *fsp,
175 const void *data,
176 size_t n, off_t offset,
177 bool write_through)
179 struct tevent_req *req, *subreq;
180 struct pwrite_fsync_state *state;
181 bool ok;
183 req = tevent_req_create(mem_ctx, &state, struct pwrite_fsync_state);
184 if (req == NULL) {
185 return NULL;
187 state->ev = ev;
188 state->fsp = fsp;
189 state->write_through = write_through;
191 ok = vfs_valid_pwrite_range(offset, n);
192 if (!ok) {
193 tevent_req_error(req, EINVAL);
194 return tevent_req_post(req, ev);
197 if (n == 0) {
198 tevent_req_done(req);
199 return tevent_req_post(req, ev);
202 subreq = SMB_VFS_PWRITE_SEND(state, ev, fsp, data, n, offset);
203 if (tevent_req_nomem(subreq, req)) {
204 return tevent_req_post(req, ev);
206 tevent_req_set_callback(subreq, pwrite_fsync_write_done, req);
207 return req;
210 static void pwrite_fsync_write_done(struct tevent_req *subreq)
212 struct tevent_req *req = tevent_req_callback_data(
213 subreq, struct tevent_req);
214 struct pwrite_fsync_state *state = tevent_req_data(
215 req, struct pwrite_fsync_state);
216 connection_struct *conn = state->fsp->conn;
217 bool do_sync;
218 struct vfs_aio_state vfs_aio_state;
220 state->nwritten = SMB_VFS_PWRITE_RECV(subreq, &vfs_aio_state);
221 TALLOC_FREE(subreq);
222 if (state->nwritten == -1) {
223 tevent_req_error(req, vfs_aio_state.error);
224 return;
227 do_sync = (lp_strict_sync(SNUM(conn)) &&
228 (lp_sync_always(SNUM(conn)) || state->write_through));
229 if (!do_sync) {
230 tevent_req_done(req);
231 return;
234 subreq = SMB_VFS_FSYNC_SEND(state, state->ev, state->fsp);
235 if (tevent_req_nomem(subreq, req)) {
236 return;
238 tevent_req_set_callback(subreq, pwrite_fsync_sync_done, req);
241 static void pwrite_fsync_sync_done(struct tevent_req *subreq)
243 struct tevent_req *req = tevent_req_callback_data(
244 subreq, struct tevent_req);
245 int ret;
246 struct vfs_aio_state vfs_aio_state;
248 ret = SMB_VFS_FSYNC_RECV(subreq, &vfs_aio_state);
249 TALLOC_FREE(subreq);
250 if (ret == -1) {
251 tevent_req_error(req, vfs_aio_state.error);
252 return;
254 tevent_req_done(req);
257 ssize_t pwrite_fsync_recv(struct tevent_req *req, int *perr)
259 struct pwrite_fsync_state *state = tevent_req_data(
260 req, struct pwrite_fsync_state);
262 if (tevent_req_is_unix_error(req, perr)) {
263 return -1;
265 return state->nwritten;
268 bool cancel_smb2_aio(struct smb_request *smbreq)
270 struct smbd_smb2_request *smb2req = smbreq->smb2req;
271 struct aio_extra *aio_ex = NULL;
273 if (smb2req) {
274 aio_ex = talloc_get_type(smbreq->async_priv,
275 struct aio_extra);
278 if (aio_ex == NULL) {
279 return false;
282 if (aio_ex->fsp == NULL) {
283 return false;
287 * We let the aio request run and don't try to cancel it which means
288 * processing of the SMB2 request must continue as normal, cf MS-SMB2
289 * 3.3.5.16:
291 * If the target request is not successfully canceled, processing of
292 * the target request MUST continue and no response is sent to the
293 * cancel request.
296 return false;
299 static void aio_pread_smb2_done(struct tevent_req *req);
301 /****************************************************************************
302 Set up an aio request from a SMB2 read call.
303 *****************************************************************************/
305 NTSTATUS schedule_smb2_aio_read(connection_struct *conn,
306 struct smb_request *smbreq,
307 files_struct *fsp,
308 TALLOC_CTX *ctx,
309 DATA_BLOB *preadbuf,
310 off_t startpos,
311 size_t smb_maxcnt)
313 struct aio_extra *aio_ex;
314 size_t min_aio_read_size = lp_aio_read_size(SNUM(conn));
315 struct tevent_req *req;
316 bool is_compound = false;
317 bool is_last_in_compound = false;
318 bool ok;
320 ok = vfs_valid_pread_range(startpos, smb_maxcnt);
321 if (!ok) {
322 return NT_STATUS_INVALID_PARAMETER;
325 if (fsp_is_alternate_stream(fsp)) {
326 DEBUG(10, ("AIO on streams not yet supported\n"));
327 return NT_STATUS_RETRY;
330 if (fsp->op == NULL) {
331 /* No AIO on internal opens. */
332 return NT_STATUS_RETRY;
335 if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size))
336 && !SMB_VFS_AIO_FORCE(fsp)) {
337 /* Too small a read for aio request. */
338 DEBUG(10,("smb2: read size (%u) too small "
339 "for minimum aio_read of %u\n",
340 (unsigned int)smb_maxcnt,
341 (unsigned int)min_aio_read_size ));
342 return NT_STATUS_RETRY;
345 is_compound = smbd_smb2_is_compound(smbreq->smb2req);
346 is_last_in_compound = smbd_smb2_is_last_in_compound(smbreq->smb2req);
348 if (is_compound && !is_last_in_compound) {
350 * Only allow going async if this is the last
351 * request in a compound.
353 return NT_STATUS_RETRY;
356 /* Create the out buffer. */
357 *preadbuf = data_blob_talloc(ctx, NULL, smb_maxcnt);
358 if (preadbuf->data == NULL) {
359 return NT_STATUS_NO_MEMORY;
362 if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) {
363 return NT_STATUS_NO_MEMORY;
366 init_strict_lock_struct(fsp,
367 fsp->op->global->open_persistent_id,
368 (uint64_t)startpos,
369 (uint64_t)smb_maxcnt,
370 READ_LOCK,
371 lp_posix_cifsu_locktype(fsp),
372 &aio_ex->lock);
374 /* Take the lock until the AIO completes. */
375 if (!SMB_VFS_STRICT_LOCK_CHECK(conn, fsp, &aio_ex->lock)) {
376 TALLOC_FREE(aio_ex);
377 return NT_STATUS_FILE_LOCK_CONFLICT;
380 aio_ex->nbyte = smb_maxcnt;
381 aio_ex->offset = startpos;
383 req = SMB_VFS_PREAD_SEND(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
384 preadbuf->data, smb_maxcnt, startpos);
385 if (req == NULL) {
386 DEBUG(0, ("smb2: SMB_VFS_PREAD_SEND failed. "
387 "Error %s\n", strerror(errno)));
388 TALLOC_FREE(aio_ex);
389 return NT_STATUS_RETRY;
391 tevent_req_set_callback(req, aio_pread_smb2_done, aio_ex);
393 if (!aio_add_req_to_fsp(fsp, req)) {
394 DEBUG(1, ("Could not add req to fsp\n"));
395 TALLOC_FREE(aio_ex);
396 return NT_STATUS_RETRY;
399 /* We don't need talloc_move here as both aio_ex and
400 * smbreq are children of smbreq->smb2req. */
401 aio_ex->smbreq = smbreq;
402 smbreq->async_priv = aio_ex;
404 DEBUG(10,("smb2: scheduled aio_read for file %s, "
405 "offset %.0f, len = %u (mid = %u)\n",
406 fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt,
407 (unsigned int)aio_ex->smbreq->mid ));
409 return NT_STATUS_OK;
412 static void aio_pread_smb2_done(struct tevent_req *req)
414 struct aio_extra *aio_ex = tevent_req_callback_data(
415 req, struct aio_extra);
416 struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq;
417 files_struct *fsp = aio_ex->fsp;
418 NTSTATUS status;
419 ssize_t nread;
420 struct vfs_aio_state vfs_aio_state = { 0 };
422 nread = SMB_VFS_PREAD_RECV(req, &vfs_aio_state);
423 TALLOC_FREE(req);
425 DEBUG(10, ("pread_recv returned %d, err = %s\n", (int)nread,
426 (nread == -1) ? strerror(vfs_aio_state.error) : "no error"));
428 /* Common error or success code processing for async or sync
429 read returns. */
431 status = smb2_read_complete(subreq, nread, vfs_aio_state.error);
433 if (nread > 0) {
434 fh_set_pos(fsp->fh, aio_ex->offset + nread);
435 fh_set_position_information(fsp->fh,
436 fh_get_pos(fsp->fh));
439 DEBUG(10, ("smb2: scheduled aio_read completed "
440 "for file %s, offset %.0f, len = %u "
441 "(errcode = %d, NTSTATUS = %s)\n",
442 fsp_str_dbg(aio_ex->fsp),
443 (double)aio_ex->offset,
444 (unsigned int)nread,
445 vfs_aio_state.error, nt_errstr(status)));
447 if (tevent_req_nterror(subreq, status)) {
448 return;
450 tevent_req_done(subreq);
453 static void aio_pwrite_smb2_done(struct tevent_req *req);
455 /****************************************************************************
456 Set up an aio request from a SMB2write call.
457 *****************************************************************************/
459 NTSTATUS schedule_aio_smb2_write(connection_struct *conn,
460 struct smb_request *smbreq,
461 files_struct *fsp,
462 uint64_t in_offset,
463 DATA_BLOB in_data,
464 bool write_through)
466 struct aio_extra *aio_ex = NULL;
467 size_t min_aio_write_size = lp_aio_write_size(SNUM(conn));
468 struct tevent_req *req;
469 bool is_compound = false;
470 bool is_last_in_compound = false;
472 if (fsp_is_alternate_stream(fsp)) {
473 /* No AIO on streams yet */
474 DEBUG(10, ("AIO on streams not yet supported\n"));
475 return NT_STATUS_RETRY;
478 if (fsp->op == NULL) {
479 /* No AIO on internal opens. */
480 return NT_STATUS_RETRY;
483 if ((!min_aio_write_size || (in_data.length < min_aio_write_size))
484 && !SMB_VFS_AIO_FORCE(fsp)) {
485 /* Too small a write for aio request. */
486 DEBUG(10,("smb2: write size (%u) too "
487 "small for minimum aio_write of %u\n",
488 (unsigned int)in_data.length,
489 (unsigned int)min_aio_write_size ));
490 return NT_STATUS_RETRY;
493 is_compound = smbd_smb2_is_compound(smbreq->smb2req);
494 is_last_in_compound = smbd_smb2_is_last_in_compound(smbreq->smb2req);
496 if (is_compound && !is_last_in_compound) {
498 * Only allow going async if this is the last
499 * request in a compound.
501 return NT_STATUS_RETRY;
504 if (smbreq->unread_bytes) {
505 /* Can't do async with recvfile. */
506 return NT_STATUS_RETRY;
509 if (!(aio_ex = create_aio_extra(smbreq->smb2req, fsp, 0))) {
510 return NT_STATUS_NO_MEMORY;
513 aio_ex->write_through = write_through;
515 init_strict_lock_struct(fsp,
516 fsp->op->global->open_persistent_id,
517 in_offset,
518 (uint64_t)in_data.length,
519 WRITE_LOCK,
520 lp_posix_cifsu_locktype(fsp),
521 &aio_ex->lock);
523 /* Take the lock until the AIO completes. */
524 if (!SMB_VFS_STRICT_LOCK_CHECK(conn, fsp, &aio_ex->lock)) {
525 TALLOC_FREE(aio_ex);
526 return NT_STATUS_FILE_LOCK_CONFLICT;
529 aio_ex->nbyte = in_data.length;
530 aio_ex->offset = in_offset;
532 req = pwrite_fsync_send(aio_ex, fsp->conn->sconn->ev_ctx, fsp,
533 in_data.data, in_data.length, in_offset,
534 write_through);
535 if (req == NULL) {
536 DEBUG(3, ("smb2: SMB_VFS_PWRITE_SEND failed. "
537 "Error %s\n", strerror(errno)));
538 TALLOC_FREE(aio_ex);
539 return NT_STATUS_RETRY;
541 tevent_req_set_callback(req, aio_pwrite_smb2_done, aio_ex);
543 if (!aio_add_req_to_fsp(fsp, req)) {
544 DEBUG(1, ("Could not add req to fsp\n"));
545 TALLOC_FREE(aio_ex);
546 return NT_STATUS_RETRY;
549 /* We don't need talloc_move here as both aio_ex and
550 * smbreq are children of smbreq->smb2req. */
551 aio_ex->smbreq = smbreq;
552 smbreq->async_priv = aio_ex;
554 /* This should actually be improved to span the write. */
555 contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE);
556 contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE);
559 * We don't want to do write behind due to ownership
560 * issues of the request structs. Maybe add it if I
561 * figure those out. JRA.
564 DEBUG(10,("smb2: scheduled aio_write for file "
565 "%s, offset %.0f, len = %u (mid = %u)\n",
566 fsp_str_dbg(fsp),
567 (double)in_offset,
568 (unsigned int)in_data.length,
569 (unsigned int)aio_ex->smbreq->mid));
571 return NT_STATUS_OK;
574 static void aio_pwrite_smb2_done(struct tevent_req *req)
576 struct aio_extra *aio_ex = tevent_req_callback_data(
577 req, struct aio_extra);
578 ssize_t numtowrite = aio_ex->nbyte;
579 struct tevent_req *subreq = aio_ex->smbreq->smb2req->subreq;
580 files_struct *fsp = aio_ex->fsp;
581 NTSTATUS status;
582 ssize_t nwritten;
583 int err = 0;
585 nwritten = pwrite_fsync_recv(req, &err);
586 TALLOC_FREE(req);
588 DEBUG(10, ("pwrite_recv returned %d, err = %s\n", (int)nwritten,
589 (nwritten == -1) ? strerror(err) : "no error"));
591 mark_file_modified(fsp);
593 status = smb2_write_complete_nosync(subreq, nwritten, err);
595 DEBUG(10, ("smb2: scheduled aio_write completed "
596 "for file %s, offset %.0f, requested %u, "
597 "written = %u (errcode = %d, NTSTATUS = %s)\n",
598 fsp_str_dbg(fsp),
599 (double)aio_ex->offset,
600 (unsigned int)numtowrite,
601 (unsigned int)nwritten,
602 err, nt_errstr(status)));
604 if (tevent_req_nterror(subreq, status)) {
605 return;
607 tevent_req_done(subreq);