Add fake-validate option
source4/ntvfs/proxy/vfs_proxy.c
/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.
*/

#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx

#define PROXY_NTIOCTL_MAXDATA 0x20000
#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"
struct proxy_file {
        struct proxy_file *prev, *next;
        uint16_t fnum;
        struct ntvfs_handle *h;
        struct cache_file_entry *cache;
        int readahead_pending;
};
/* this is stored in ntvfs_private */
struct proxy_private {
        struct smbcli_tree *tree;
        struct smbcli_transport *transport;
        struct ntvfs_module_context *ntvfs;
        struct async_info *pending;
        struct proxy_file *files;
        bool map_generic;
        bool map_trans2;
        bool cache_enabled;
        int cache_readahead; /* default read-ahead window size */
        int cache_readaheadblock; /* size of each read-ahead request */
        ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
        char *remote_server;
        char *remote_share;
        struct cache_context *cache;
        int readahead_spare; /* amount of pending non-user generated requests */
        bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
        bool fake_valid; /* useful for testing, treat the cache as already validated */
        uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
};
struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
        struct async_info *next, *prev;
        struct proxy_private *proxy;
        struct ntvfs_request *req;
        struct smbcli_request *c_req;
        struct proxy_file *f;
        struct async_info_map *chain;
        void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
        struct async_info_map *next, *prev;
        NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
        void *parms1;
        void *parms2;
        struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
        void* io;
        const struct ndr_interface_call *calls;
        const struct ndr_interface_table *table;
        uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
        const struct ndr_interface_call* call;
        void* struct_ptr;
};
#define SETUP_PID private->tree->session->pid = req->smbpid

#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
        if ((h = ntvfs_find_handle(private->ntvfs, req, r->in.fnum)) && \
            (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
                r->in.fnum = f->fnum; \
        } else { \
                r->out.result = NT_STATUS_INVALID_HANDLE; \
                return NT_STATUS_OK; \
        } \
} while (0)

#define SETUP_FILE_HERE(f) do { \
        f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
        if (!f) return NT_STATUS_INVALID_HANDLE; \
        io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
        struct proxy_file *f; \
        SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
        SETUP_PID; \
        SETUP_FILE; \
} while (0)
/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)

#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in KiB; scaled by 1024 at connect time, so 256 => 256 KiB */

#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

#define PROXY_FAKE_VALID "proxy:fake-valid"
#define PROXY_FAKE_VALID_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true
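
/* Illustrative smb.conf share using the options above (sketch only; it
   assumes this backend is selected as the ntvfs handler named "proxy",
   and the server/share names are placeholders):

   [cached-data]
        ntvfs handler = proxy
        proxy:server = bigserver
        proxy:share = data
        proxy:cache-enabled = yes
        proxy:cache-readahead = 32768
        proxy:cache-readaheadblock = 4096
        proxy:fake-oplock = no
        proxy:fake-valid = no
*/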
/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
        ((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
         && (strcmp("A:",private->tree->device)==0))
/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
                              struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
        struct smbcli_tree *tree,
        struct ntvfs_module_context *ntvfs,
        const struct ndr_interface_table *table,
        uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
                                               union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
                            union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
                                                union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
                             union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);
/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
        struct proxy_private *private = p_private;
        NTSTATUS status;
        struct ntvfs_handle *h = NULL;
        struct proxy_file *f;

        for (f=private->files; f; f=f->next) {
                if (f->fnum != fnum) continue;
                h = f->h;
                break;
        }

        if (!h) {
                DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
                return true;
        }

        /* If we don't have an oplock, then we can't rely on the cache */
        cache_handle_stale(f);

        DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
        status = ntvfs_send_oplock_break(private->ntvfs, h, level);
        if (!NT_STATUS_IS_OK(status)) return false;
        return true;
}
/*
  get the file handle from the client's fnum (from ntvfs/ipc/vfs_ipc.c at metze's suggestion)
*/
static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
                                              struct ntvfs_request *req,
                                              uint16_t fnum)
{
        DATA_BLOB key;
        uint16_t _fnum;

        /*
         * the fnum is already in host byteorder
         * but ntvfs_handle_search_by_wire_key() expects
         * network byteorder
         */
        SSVAL(&_fnum, 0, fnum);
        key = data_blob_const(&_fnum, 2);

        return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
}
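
/* Illustrative example: a client fnum of 0x1234 yields the two-byte wire key
   { 0x34, 0x12 }, since SSVAL stores the value in SMB (little-endian) wire
   order. */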
/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
                              struct ntvfs_request *req, const char *sharename)
{
        NTSTATUS status;
        struct proxy_private *private;
        const char *host, *user, *pass, *domain, *remote_share;
        struct smb_composite_connect io;
        struct composite_context *creq;
        struct share_config *scfg = ntvfs->ctx->config;
        int nttrans_fnum;

        struct cli_credentials *credentials;
        bool machine_account;

        /* Here we need to determine which server to connect to.
         * For now we use parametric options, type proxy.
         * Later we will use security=server and auth_server.c.
         */
        host = share_string_option(scfg, PROXY_SERVER, NULL);
        user = share_string_option(scfg, PROXY_USER, NULL);
        pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
        domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
        remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
        if (!remote_share) {
                remote_share = sharename;
        }

        machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

        private = talloc_zero(ntvfs, struct proxy_private);
        if (!private) {
                return NT_STATUS_NO_MEMORY;
        }

        ntvfs->private_data = private;

        if (!host) {
                DEBUG(1,("PROXY backend: You must supply server\n"));
                return NT_STATUS_INVALID_PARAMETER;
        }

        if (user && pass) {
                DEBUG(5, ("PROXY backend: Using specified password\n"));
                credentials = cli_credentials_init(private);
                if (!credentials) {
                        return NT_STATUS_NO_MEMORY;
                }
                cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
                cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
                if (domain) {
                        cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
                }
                cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
        } else if (machine_account) {
                DEBUG(5, ("PROXY backend: Using machine account\n"));
                credentials = cli_credentials_init(private);
                cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
                if (domain) {
                        cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
                }
                status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
        } else if (req->session_info->credentials) {
                DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
                credentials = req->session_info->credentials;
        } else {
                DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* connect to the server, using the smbd event context */
        io.in.dest_host = host;
        io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
        io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
        io.in.called_name = host;
        io.in.credentials = credentials;
        io.in.fallback_to_anonymous = false;
        io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
        io.in.service = remote_share;
        io.in.service_type = "?????";
        io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
        io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
        lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
        lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

        creq = smb_composite_connect_send(&io, private,
                                          lp_resolve_context(ntvfs->ctx->lp_ctx),
                                          ntvfs->ctx->event_ctx);
        status = smb_composite_connect_recv(creq, private);
        NT_STATUS_NOT_OK_RETURN(status);

        private->tree = io.out.tree;

        private->transport = private->tree->session->transport;
        SETUP_PID;
        private->ntvfs = ntvfs;

        ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
        NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
        ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
        NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

        /* we need to receive oplock break requests from the server */
        smbcli_oplock_handler(private->transport, oplock_handler, private);

        private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

        private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

        private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

        if (strcmp("A:",private->tree->device)==0) {
                private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
                private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
                private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
                                                                 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
                private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
                private->fake_valid = share_bool_option(scfg, PROXY_FAKE_VALID, PROXY_FAKE_VALID_DEFAULT);
                private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
                private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
                DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
                         remote_share, private->tree->device,private->tree->fs_type,
                         (private->cache_enabled)?"enabled":"disabled",
                         private->cache_readahead));
        } else {
                private->cache_enabled = false;
                DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
                         remote_share, private->tree->device,private->tree->fs_type));
        }

        private->remote_server = strlower_talloc(private, host);
        private->remote_share = strlower_talloc(private, remote_share);

        /* some proxy operations will not be performed on files, so open a handle
           now that we can use for such things. We won't bother to close it on
           shutdown, as the remote server ought to be able to close it for us
           and we might be shutting down because the remote server went away and
           so we don't want to delay further */
        nttrans_fnum=smbcli_nt_create_full(private->tree, "\\",
                                           NTCREATEX_FLAGS_OPEN_DIRECTORY,
                                           SEC_FILE_READ_DATA,
                                           FILE_ATTRIBUTE_NORMAL,
                                           NTCREATEX_SHARE_ACCESS_MASK,
                                           NTCREATEX_DISP_OPEN,
                                           NTCREATEX_OPTIONS_DIRECTORY,
                                           NTCREATEX_IMPERSONATION_IMPERSONATION);
        if (nttrans_fnum < 0) {
                DEBUG(5,("Could not open handle for ntioctl %d\n",private->nttrans_fnum));
                return NT_STATUS_UNSUCCESSFUL;
        }
        private->nttrans_fnum=nttrans_fnum;
        DEBUG(5,("Got nttrans handle %d\n",private->nttrans_fnum));

        return NT_STATUS_OK;
}
/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
        struct proxy_private *private = ntvfs->private_data;
        struct async_info *a, *an;

        /* first cleanup pending requests */
        for (a=private->pending; a; a = an) {
                an = a->next;
                smbcli_request_destroy(a->c_req);
                talloc_free(a);
        }

        talloc_free(private);
        ntvfs->private_data = NULL;

        return NT_STATUS_OK;
}
/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
        DLIST_REMOVE(async->proxy->pending, async);
        return 0;
}

/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct ntvfs_request *req = async->req;
        req->async_states->status = smbcli_request_simple_recv(c_req);
        talloc_free(async);
        req->async_states->send_fn(req);
}
/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
        type=check; \
        t=t; \
} while (0)

/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
        if (!c_req) return (error); \
        TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
        { \
                struct async_info *async; \
                async = talloc(req, struct async_info); \
                if (!async) return (error); \
                async->parms = io; \
                async->req = req; \
                async->f = file; \
                async->proxy = private; \
                async->c_req = c_req; \
                async->chain = achain; \
                DLIST_ADD(private->pending, async); \
                c_req->async.private = async; \
                talloc_set_destructor(async, async_info_destructor); \
        } \
        c_req->async.fn = async_fn; \
} while (0)

#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
        if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
        TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
        { \
                struct async_info *async; \
                async = talloc(req, struct async_info); \
                if (!async) return NT_STATUS_NO_MEMORY; \
                async->parms = io; \
                async->req = req; \
                async->f = file; \
                async->proxy = private; \
                async->c_req = c_req; \
                DLIST_ADD(private->pending, async); \
                c_req->async.private = async; \
                talloc_set_destructor(async, async_info_destructor); \
        } \
        c_req->async.fn = async_fn; \
        req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
        return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   And if async->c_req is NULL then an earlier chain has already rec'd the
   request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record
   static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed
   and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *)
   chained async_info_map will be in c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
        if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
        ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
        if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
        ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
        req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
        return NT_STATUS_OK; \
} while(0)
        DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
                 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL,\
                 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
                 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
                 file, file?"file":"null", file?"file":"null", #async_fn)); \

#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
        if (! creq) return (error); \
        { \
                struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
                if (! async_map) return (error); \
                async_map->async=talloc(async_map, struct async_info); \
                if (! async_map->async) return (error); \
                async_map->parms1=io1; \
                async_map->parms2=io2; \
                async_map->fn=async_fn; \
                async_map->async->parms = io1; \
                async_map->async->req = req; \
                async_map->async->f = file; \
                async_map->async->proxy = private; \
                async_map->async->c_req = creq; \
                /* If async_chain_handler is installed, get the list from param */ \
                if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
                        struct async_info *i=creq->async.private; \
                        DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
                } else if (creq->async.fn) { \
                        /* incompatible handler installed */ \
                        return (error); \
                } else { \
                        DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
                } \
        } \
} while(0)
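
/* Illustrative usage sketch (comment only, not part of the build; the names
   my_decode and my_cleanup are hypothetical):

        c_req = smb_raw_read_send(private->tree, io);
        // first link decodes the wire reply exactly once
        ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, my_decode, NT_STATUS_INTERNAL_ERROR);
        // later links see async->c_req == NULL and just post-process
        ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, my_cleanup, NT_STATUS_INTERNAL_ERROR);
        // install the chain manager and mark the request async
        ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);

   where a link has the shape:

        static NTSTATUS my_decode(struct async_info *async, void *io1, void *io2, NTSTATUS status)
        {
                if (async->c_req) status = smb_raw_read_recv(async->c_req, io1);
                return status;
        }
*/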
/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
        (io->generic.level == RAW_OPEN_NTCREATEX && \
         io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID)\
        ?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window))\
        :(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
                             struct ntvfs_request *req, union smb_unlink *unl)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        /* see if the front end will allow us to perform this
           function asynchronously. */
        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_unlink(private->tree, unl);
        }

        c_req = smb_raw_unlink_send(private->tree, unl);

        SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct ntvfs_request *req = async->req;
        req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
        talloc_free(async);
        req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
                            struct ntvfs_request *req, union smb_ioctl *io)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
            && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
                return proxy_rpclite(ntvfs, req, io);
        }

        SETUP_PID_AND_FILE;

        /* see if the front end will allow us to perform this
           function asynchronously. */
        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_ioctl(private->tree, req, io);
        }

        c_req = smb_raw_ioctl_send(private->tree, io);

        ASYNC_RECV_TAIL(io, async_ioctl);
}
/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
                              struct ntvfs_request *req, union smb_chkpath *cp)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_chkpath(private->tree, cp);
        }

        c_req = smb_raw_chkpath_send(private->tree, cp);

        SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async qpathinfo replies
*/
static void async_qpathinfo(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct ntvfs_request *req = async->req;
        req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
        talloc_free(async);
        req->async_states->send_fn(req);
}

/*
  return info on a pathname
*/
static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
                                struct ntvfs_request *req, union smb_fileinfo *info)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_pathinfo(private->tree, req, info);
        }

        c_req = smb_raw_pathinfo_send(private->tree, info);

        ASYNC_RECV_TAIL(info, async_qpathinfo);
}
/*
  a handler for async qfileinfo replies
*/
static void async_qfileinfo(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct ntvfs_request *req = async->req;
        req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
        talloc_free(async);
        req->async_states->send_fn(req);
}

/*
  query info on an open file
*/
static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
                                struct ntvfs_request *req, union smb_fileinfo *io)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID_AND_FILE;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_fileinfo(private->tree, req, io);
        }

        c_req = smb_raw_fileinfo_send(private->tree, io);

        ASYNC_RECV_TAIL(io, async_qfileinfo);
}
/*
  set info on a pathname
*/
static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
                                  struct ntvfs_request *req, union smb_setfileinfo *st)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_setpathinfo(private->tree, st);
        }

        c_req = smb_raw_setpathinfo_send(private->tree, st);

        SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async open replies
*/
static void async_open(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct proxy_private *proxy = async->proxy;
        struct ntvfs_request *req = async->req;
        struct proxy_file *f = async->f;
        union smb_open *io = async->parms;
        union smb_handle *file;

        talloc_free(async);
        req->async_states->status = smb_raw_open_recv(c_req, req, io);
        SMB_OPEN_OUT_FILE(io, file);
        f->fnum = file->fnum;
        file->ntvfs = NULL;
        if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
        req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
        if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
        file->ntvfs = f->h;
        DLIST_ADD(proxy->files, f);

        if (proxy->cache_enabled) {
                bool oplock=(io->generic.out.oplock_level != OPLOCK_NONE) || proxy->fake_oplock;
                f->cache=cache_open(proxy->cache, f, io, oplock, proxy->cache_readahead);
                if (proxy->fake_valid) {
                        cache_handle_validated(f, cache_handle_len(f));
                }
                if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
        }

failed:
        req->async_states->send_fn(req);
}
/*
  open a file
*/
static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
                           struct ntvfs_request *req, union smb_open *io)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;
        struct ntvfs_handle *h;
        struct proxy_file *f;
        NTSTATUS status;

        SETUP_PID;

        if (io->generic.level != RAW_OPEN_GENERIC &&
            private->map_generic) {
                return ntvfs_map_open(ntvfs, req, io);
        }

        status = ntvfs_handle_new(ntvfs, req, &h);
        NT_STATUS_NOT_OK_RETURN(status);

        f = talloc_zero(h, struct proxy_file);
        NT_STATUS_HAVE_NO_MEMORY(f);
        f->h = h;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                union smb_handle *file;

                status = smb_raw_open(private->tree, req, io);
                NT_STATUS_NOT_OK_RETURN(status);

                SMB_OPEN_OUT_FILE(io, file);
                f->fnum = file->fnum;
                file->ntvfs = NULL;
                status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
                NT_STATUS_NOT_OK_RETURN(status);
                file->ntvfs = f->h;
                DLIST_ADD(private->files, f);

                if (private->cache_enabled) {
                        bool oplock=(io->generic.out.oplock_level != OPLOCK_NONE) || private->fake_oplock;

                        f->cache=cache_open(private->cache, f, io, oplock, private->cache_readahead);
                        if (private->fake_valid) {
                                cache_handle_validated(f, cache_handle_len(f));
                        }
                        if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
                }

                return NT_STATUS_OK;
        }

        c_req = smb_raw_open_send(private->tree, io);

        ASYNC_RECV_TAIL_F(io, async_open, f);
}
/*
  create a directory
*/
static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
                            struct ntvfs_request *req, union smb_mkdir *md)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_mkdir(private->tree, md);
        }

        c_req = smb_raw_mkdir_send(private->tree, md);

        SIMPLE_ASYNC_TAIL;
}

/*
  remove a directory
*/
static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
                            struct ntvfs_request *req, struct smb_rmdir *rd)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_rmdir(private->tree, rd);
        }
        c_req = smb_raw_rmdir_send(private->tree, rd);

        SIMPLE_ASYNC_TAIL;
}
/*
  rename a set of files
*/
static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
                             struct ntvfs_request *req, union smb_rename *ren)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_rename(private->tree, ren);
        }

        c_req = smb_raw_rename_send(private->tree, ren);

        SIMPLE_ASYNC_TAIL;
}

/*
  copy a set of files
*/
static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
                           struct ntvfs_request *req, struct smb_copy *cp)
{
        return NT_STATUS_NOT_SUPPORTED;
}
/* we only define this separately so we can easily spot read calls in
   pending based on ( c_req->private.fn == async_read_handler ) */
static void async_read_handler(struct smbcli_request *c_req)
{
        async_chain_handler(c_req);
}

NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
        struct proxy_private *private = async->proxy;
        struct smbcli_request *c_req = async->c_req;
        struct proxy_file *f = async->f;
        union smb_read *io = async->parms;

        /* if request is not already received by a chained handler, read it */
        if (c_req) status=smb_raw_read_recv(c_req, async->parms);

        DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
                 f->readahead_pending, private->readahead_spare));

        f->readahead_pending--;
        private->readahead_spare++;

        DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
                 f->readahead_pending, private->readahead_spare));

        return status;
}
/*
  a handler for async read replies - speculative read-aheads.
  It merely saves in the cache. The async chain handler will call send_fn if
  there is one, or if sync_chain_handler is used the send_fn is called by
  the ntvfs back end.
*/
NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
        struct smbcli_request *c_req = async->c_req;
        struct proxy_file *f = async->f;
        union smb_read *io = async->parms;

        /* if request is not already received by a chained handler, read it */
        if (c_req) status=smb_raw_read_recv(c_req, async->parms);

        DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
                 get_friendly_nt_error_msg(status)));

        NT_STATUS_NOT_OK_RETURN(status);

        /* if it was a validate read we don't need to save anything unless it failed.
           Until we use proxy_Read structs we can't tell, so guess */
        if (io->generic.out.nread == io->generic.in.maxcnt &&
            io->generic.in.mincnt < io->generic.in.maxcnt) {
                /* looks like a validate read, just move the validate pointer, the
                   original read-request has already been satisfied from cache */
                DEBUG(3,("%s megavalidate succeeded, validate to %lld\n",__FUNCTION__,
                         io->generic.in.offset + io->generic.out.nread));
                cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
        } else {
                DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
                cache_handle_save(f, io->generic.out.data,
                                  io->generic.out.nread,
                                  io->generic.in.offset);
        }

        DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
        return status;
}
/* handler for fragmented reads */
NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
        struct smbcli_request *c_req = async->c_req;
        struct ntvfs_request *req = async->req;
        struct proxy_file *f = async->f;
        struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
        /* this is the io against which the fragment is to be applied */
        union smb_read *io = talloc_get_type_abort(io1, union smb_read);
        /* this is the io for the read that issued the callback */
        union smb_read *io_frag = fragment->io_frag; /* async->parms; */
        struct async_read_fragments* fragments=fragment->fragments;

        /* if request is not already received by a chained handler, read it */
#warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
        if (c_req) status=smb_raw_read_recv(c_req, io_frag);

        DEBUG(3,("\n\n%s async_read status: %s\n",__FUNCTION__,
                 get_friendly_nt_error_msg(status)));

        fragment->status = status;

        /* remove fragment from fragments */
        DLIST_REMOVE(fragments->fragments, fragment);

#warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
        /* in which case we will want to collate all responses and return a valid read
           for the leading NT_STATUS_OK fragments */

        /* did this one fail, inducing a general fragments failure? */
        if (!NT_STATUS_IS_OK(fragment->status)) {
                /* preserve the status of the fragment with the smallest offset
                   when we can work out how */
                if (NT_STATUS_IS_OK(fragments->status)) {
                        fragments->status=fragment->status;
                }

                cache_handle_novalidate(f);
                DEBUG(5,("** Devalidated proxy due to read failure\n"));
        } else {
                /* No fragments have yet failed, keep collecting responses */
                ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
                /* Find memcpy window, copy data from the io_frag to the io */
                off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
                /* used to use mincnt */
                off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
                off_t end_offset=MIN(io_extent, extent);
                /* ASSERT(start_offset <= end_offset) */
                /* ASSERT(start_offset <= io_extent) */
                if (start_offset >= io_extent) {
                        DEBUG(3,("useless read-ahead tagged on to: %s",__location__));
                } else {
                        uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
                        uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
                        /* src == dst in cases where we did not latch onto someone else's
                           read, but are handling our own */
                        if (src != dst)
                                memcpy(dst, src, end_offset - start_offset);
                }

                /* There should be a better way to detect, but it needs the proxy rpc struct
                   not the smb_read struct */
                if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
                        DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
                                 (long long) io_frag->generic.out.nread,
                                 (long long) io_frag->generic.in.mincnt,
                                 (long long) io_frag->generic.in.maxcnt));
                        cache_handle_novalidate(f);
                }

                /* We broke up the original read. If not enough of this sub-read has
                   been read, and then some of the next block, it could leave holes!
                   We will only acknowledge up to the first partial read, and treat
                   it as a small read. If the server can return NT_STATUS_OK for a partial
                   read so can we, so we preserve the response.
                   "enough" is all of it (maxcnt), except on the last block, when it has to
                   be enough to fill io->generic.in.mincnt. We know it is the last block
                   if nread is small but we could fill io->generic.in.mincnt */
                if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
                    end_offset < io->generic.in.offset + io->generic.in.mincnt) {
                        DEBUG(4,("Fragmented read only partially successful\n"));

                        /* Shrink the master nread (or grow to this size if we are the first partial) */
                        if (! fragments->partial ||
                            (io->generic.in.offset + io->generic.out.nread) > extent) {
                                io->generic.out.nread = extent - io->generic.in.offset;
                        }

                        /* stop any further successes from extending the partial read */
                        fragments->partial=true;
                } else {
                        /* only grow the master nwritten if we haven't logged a partial write */
                        if (! fragments->partial &&
                            (io->generic.in.offset + io->generic.out.nread) < extent ) {
                                io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
                        }
                }
        }

        /* Was it the last fragment, or do we know enough to send a response? */
        if (! fragments->fragments) {
                DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
                         io->generic.out.nread, io->generic.in.mincnt,
                         get_friendly_nt_error_msg(fragments->status)));
                if (fragments->async) {
                        req->async_states->status=fragments->status;
                        DEBUG(5,("Fragments async response sending\n"));
#warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
                        /* esp. as they may be attached to by other reads. Maybe attachees should be taking a reference, but how will they
                           know the top level they need to take a reference to.. */
#warning should really queue a sender here, not call it
                        req->async_states->send_fn(req);
                        DEBUG(5,("Async response sent\n"));
                } else {
                        DEBUG(5,("Fragments SYNC return\n"));
                }
        }

        /* because a c_req may be shared by many req, chained handlers must return
           a status pertaining to the general validity of this specific c_req, not
           to their own private processing of the c_req for the benefit of their req
           which is returned in fragments->status
        */
        return status;
}
/* Issue read-ahead X bytes where X is the window size calculation based on
   server_latency * server_session_bandwidth
   where latency is the idle (link) latency and bandwidth is less than or equal
   to the actual bandwidth available to the server.
   Read-ahead should honour locked areas in whatever way is necessary (who knows?)
   read_ahead is defined here and not in the cache engine because it requires too
   much knowledge of private structures
*/
/* The concept is buggy unless we can tell the next proxy that these are
   read-aheads, otherwise chained proxy setups will each read-ahead of the
   read-ahead which can put a larger load on the final server.
   Also we probably need to distinguish between
   * cache-less read-ahead
   * cache-revalidating read-ahead
*/
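
/* Illustrative window arithmetic (example figures only): with an idle link
   latency of 20 ms and 10 MB/s of bandwidth available to the server, the
   bandwidth-delay product suggests a read-ahead window of about
   0.020 s * 10000000 B/s = 200000 bytes, i.e. roughly 200 KB in flight. */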
NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
                    union smb_read *io, ssize_t as_read)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_tree *tree = private->tree;
        struct cache_file_entry *cache;
        off_t next_position; /* this read offset+length+window */
        off_t end_position; /* position we read-ahead to */
        off_t cache_populated;
        off_t read_position, new_extent;

        if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
        DEBUG(5,("A\n"));
        if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
        DEBUG(5,("B\n"));
        cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
        DEBUG(5,("C\n"));
        /* don't read-ahead if we are in bulk validate mode */
        if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
        DEBUG(5,("D\n"));
        /* if we can't trust what we read-ahead anyway then don't bother although
         * if delta-reads are enabled we can do so in order to get something to
         * delta against */
        DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
                                 (long long int)(cache_len(cache)),
                                 (long long int)(cache->readahead_extent),
                                 (long long int)(as_read),
                                 cache->readahead_window,private->cache_readahead));
        if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
                DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
                                         cache->status));
                return NT_STATUS_UNSUCCESSFUL;
        }

        /* as_read is the mincnt bytes of a request being made or the
           out.nread of completed sync requests
           Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
           then this may often NOT be the case if readahead_window < requestsize; so we will
           get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
           all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
           this and have failed sparse writes adjust the cache->readahead_extent back to actual size */

        /* predict the file pointer's next position */
        next_position=io->generic.in.offset + as_read;
        DEBUG(5,("Next position: %lld (%lld + %lld)\n",
                 (long long int)next_position,
                 (long long int)io->generic.in.offset,
                 (long long int)as_read));
        /* calculate the limit of the validated or requested cache */
        cache_populated=MAX(cache->validated_extent, cache->readahead_extent);

        /* will the new read take us beyond the current extent without gaps? */
        if (cache_populated < io->generic.in.offset) {
                /* this read-ahead is a read-behind-pointer */
                new_extent=cache_populated;
        } else {
                new_extent=MAX(next_position, cache_populated);
        }

        /* as far as we can tell new_extent is the smallest offset that doesn't
           have a pending read request on it. Of course if we got a short read then
           we will have a cache-gap which we can't handle and need to read from
           a shrunk readahead_extent, which we don't currently handle */
        read_position=new_extent;

        /* of course if we know how big the remote file is we should limit at that */
        /* we should also mark-out which read-ahead requests are pending so that we
         * don't repeat them while they are in-transit. */
        /* we can't really use next_position until we can have caches with holes
           UNLESS next_position < new_extent, because a next_position well before
           new_extent is no reason to extend it further; we only want to extend it
           with read-aheads if we have cause to suppose the read-ahead data will
           be wanted, i.e. the next_position is near new_extent.
           So we can't justify reading beyond window+next_position, but if
           next_position is leaving gaps, we use new_extent instead */
        end_position=MIN(new_extent, next_position) + cache->readahead_window;
        DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
                 (long long int)read_position,
                 (long long int)(next_position + cache->readahead_window),
                 cache->readahead_window,
                 (long long int)end_position,
                 private->readahead_spare));
        /* do we even need to read? */
        if (! (read_position < end_position)) return NT_STATUS_OK;

        /* readahead_spare is for the whole session (mid/tid?) and may need sharing
           out over files and other tree-connects or something */
        while (read_position < end_position &&
               private->readahead_spare > 0) {
                struct smbcli_request *c_req = NULL;
                ssize_t read_remaining = end_position - read_position;
                ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
                                         MIN(read_remaining, private->cache_readaheadblock));
                void *req = NULL; /* for the ASYNC_RECV_TAIL_F_ORPHAN macro */
                uint8_t* data;
                union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);

                if (! io_copy)
                        return NT_STATUS_NO_MEMORY;

#warning we are ignoring read_for_execute as far as the cache goes
                io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
                io_copy->generic.in.offset=read_position;
                io_copy->generic.in.mincnt=read_block;
                io_copy->generic.in.maxcnt=read_block;
                /* what is generic.in.remaining for? */
                io_copy->generic.in.remaining = MIN(65535,read_remaining);
                io_copy->generic.out.nread=0;

#warning someone must own io_copy, tree, maybe?
                data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
                DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
                if (! data) {
                        talloc_free(io_copy);
                        return NT_STATUS_NO_MEMORY;
                }
                io_copy->generic.out.data=data;

                /* are we able to pull anything from the cache to validate this read-ahead?
                   NOTE: there is no point in reading ahead merely to re-validate the
                   cache if we don't have oplocks and can't save it....
                   ... or maybe there is if we think a read will come that can be matched
                   up to this response while it is still on the wire */
#warning so we need to distinguish between pipe-line read-ahead and revalidation
                if (/*(cache->status & CACHE_READ)!=0 && */
                    cache_len(cache) >
                    (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
                    cache->validated_extent <
                    (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
                        ssize_t pre_fill;

                        pre_fill = cache_raw_read(cache, data,
                                                  io_copy->generic.in.offset,
                                                  io_copy->generic.in.maxcnt);
                        DEBUG(5,("Data read into %p %d\n",data, pre_fill));
                        if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
                                io_copy->generic.out.nread=pre_fill;
                                read_block=pre_fill;
                        }
                }

                c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);

                if (c_req) {
                        private->readahead_spare--;
                        f->readahead_pending++;
                        DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
                        if (cache->readahead_extent < read_position+read_block)
                                cache->readahead_extent=read_position+read_block;
                        ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
                        /* so we can decrease read-ahead counter for this session */
                        ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
                        ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

                        /* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
                        talloc_steal(c_req->async.private, c_req);
                        talloc_steal(c_req->async.private, io_copy);
                        read_position+=read_block;
                } else {
                        DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
                        talloc_free(io_copy);
                        break;
                }
        }

        DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
        return NT_STATUS_OK;
}
struct proxy_validate_parts_parts {
        struct proxy_Read* r;
        struct ntvfs_request *req;
        struct proxy_file *f;
        struct async_read_fragments *fragments;
        off_t offset;
        ssize_t remaining;
        bool complete;
        declare_checksum(digest);
        struct MD5Context context;
};
NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
                                     struct proxy_validate_parts_parts *parts);

/* this will be the new struct proxy_Read based read function, for now
   it just deals with non-cache-based validate to a regular server */
static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
                               struct ntvfs_request *req,
                               struct proxy_Read *r,
                               struct proxy_file *f)
{
        struct proxy_private *private = ntvfs->private_data;
        struct proxy_validate_parts_parts *parts;
        struct async_read_fragments *fragments;
        NTSTATUS status;

        if (!f) return NT_STATUS_INVALID_HANDLE;

        DEBUG(5,("%s: fnum=%d\n",__FUNCTION__,f->fnum));

        parts = talloc_zero(req, struct proxy_validate_parts_parts);
        DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
        NT_STATUS_HAVE_NO_MEMORY(parts);

        fragments = talloc_zero(parts, struct async_read_fragments);
        NT_STATUS_HAVE_NO_MEMORY(fragments);

        parts->fragments=fragments;

        parts->r=r;
        parts->f=f;
        parts->req=req;
        /* processed offset */
        parts->offset=r->in.offset;
        parts->remaining=r->in.maxcnt;
        fragments->async=true;

        MD5Init (&parts->context);

        /* start a read-loop which will continue in the callback until it is
           all done */
        status=proxy_validate_parts(ntvfs, parts);
        if (parts->complete) {
                /* Make sure we are not async */
                DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
                return proxy_validate_complete(parts);
        }

        /* Assert if status!=NT_STATUS_OK then parts->complete==true */
        req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
        DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
        return status;
}
NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
{
        NTSTATUS status;
        struct proxy_Read* r=parts->r;
        MD5Final(parts->digest, &parts->context);

        status = parts->fragments->status;
        r->out.result = status;
        r->out.response.generic.count=r->out.nread;

        DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg (status),
                 r->out.response.generic.count));

        DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
        dump_data (5, r->in.digest.digest, sizeof(parts->digest));
        DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
        dump_data (5, parts->digest, sizeof(parts->digest));

        if (NT_STATUS_IS_OK(status) &&
            (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
                r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
                DEBUG(5,("======= VALIDATED FINE \n\n\n"));
        } else if (r->in.flags & PROXY_USE_ZLIB) {
                ssize_t size = r->out.response.generic.count;
                DEBUG(5,("======= VALIDATED WRONG \n\n\n"));
                if (compress_block(r->out.response.generic.data, &size) ) {
                        r->out.flags|=PROXY_USE_ZLIB;
                        r->out.response.compress.count=size;
                        r->out.response.compress.data=r->out.response.generic.data;
                        DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
                                 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
                }
        }

        /* assert: this must only be true if we are in a callback */
        if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
                /* we are async complete, we need to call the sendfn */
                parts->req->async_states->status=status;
                DEBUG(5,("Fragments async response sending\n"));

                parts->req->async_states->send_fn(parts->req);
                return NT_STATUS_OK;
        }
        return status;
}
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
        struct smbcli_request *c_req = async->c_req;
        struct ntvfs_request *req = async->req;
        struct proxy_file *f = async->f;
        struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
        struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
        /* this is the io against which the fragment is to be applied */
        struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
        struct proxy_Read* r=parts->r;
        /* this is the io for the read that issued the callback */
        union smb_read *io_frag = fragment->io_frag;
        struct async_read_fragments* fragments=fragment->fragments;

        DEBUG(5,("%s: parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));
        /* if request is not already received by a chained handler, read it */
        if (c_req) status=smb_raw_read_recv(c_req, io_frag);
        DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));

        fragment->status=status;

        if (NT_STATUS_IS_OK(status)) {
                /* TODO: if we are not sequentially "next", queue until we can apply fragments in order */
                /* log this data in r->out.generic.data */
                ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
                /* Find memcpy window, copy data from the io_frag to the io */
                off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
                /* Don't want to go past mincnt */
                off_t io_extent=r->in.offset + r->in.mincnt;
                off_t end_offset=MIN(io_extent, extent);

                /* ASSERT(start_offset <= end_offset) */
                /* ASSERT(start_offset <= io_extent) */
                if (! (start_offset >= io_extent)) {
                        uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
                        uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
                        /* src == dst in cases where we did not latch onto someone else's
                           read, but are handling our own */
                        if (src != dst)
                                memcpy(dst, src, end_offset - start_offset);
                        r->out.nread=end_offset - r->in.offset;
                }

                MD5Update(&parts->context, io_frag->generic.out.data,
                          io_frag->generic.out.nread);

                parts->fragments->status=status;
                status=proxy_validate_parts(ntvfs, parts);
        } else {
                parts->fragments->status=status;
        }

        DLIST_REMOVE(fragments->fragments, fragment);
        /* this will free the io_frag too */
        talloc_free(fragment);

        if (parts->complete || NT_STATUS_IS_ERR(status)) {
                /* this will call sendfn, the chain handler won't know... but
                   should have no more handlers queued */
                return proxy_validate_complete(parts);
        }

        return NT_STATUS_OK;
}
/* continue a read loop, possibly from a callback */
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
                                     struct proxy_validate_parts_parts *parts)
{
        struct proxy_private *private = ntvfs->private_data;
        union smb_read *io_frag;
        struct async_read_fragment *fragment;
        struct smbcli_request *c_req = NULL;
        ssize_t size=private->tree->session->transport->negotiate.max_xmit \
                     - (MIN_SMB_SIZE+32);

        /* Have we already read enough? */
        if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
                parts->complete=true;
                return NT_STATUS_OK;
        }

        size=MIN(size, parts->remaining);

        fragment=talloc_zero(parts->fragments, struct async_read_fragment);
        NT_STATUS_HAVE_NO_MEMORY(fragment);

        io_frag = talloc_zero(fragment, union smb_read);
        NT_STATUS_HAVE_NO_MEMORY(io_frag);

        io_frag->generic.out.data = talloc_size(io_frag, size);
        NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);

        io_frag->generic.level = RAW_READ_GENERIC;
        io_frag->generic.in.file.fnum = parts->r->in.fnum;
        io_frag->generic.in.offset = parts->offset;
        io_frag->generic.in.mincnt = size;
        io_frag->generic.in.maxcnt = size;
        io_frag->generic.in.remaining = 0;
#warning maybe true is more permissive?
        io_frag->generic.in.read_for_execute = false;

        //c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
        c_req = smb_raw_read_send(private->tree, io_frag);
        NT_STATUS_HAVE_NO_MEMORY(c_req);

        parts->offset+=size;
        parts->remaining-=size;
        fragment->c_req = c_req;
        fragment->io_frag = io_frag;
        fragment->fragments=parts->fragments;
        DLIST_ADD(parts->fragments->fragments, fragment);

        { void* req=NULL;
                ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
                ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
        }

        DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));

        return NT_STATUS_OK;
}
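
/* Illustrative fragment arithmetic (example figures only): with a negotiated
   max_xmit of 16644, each fragment reads 16644 - (MIN_SMB_SIZE+32) bytes, so
   a 256 KiB validate (proxy:validate-size = 256) is split into roughly 16
   sequential fragment reads, each folded into the running MD5. */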
1535 read from a file
1537 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
1538 struct ntvfs_request *req, union smb_read *io)
1540 struct proxy_private *private = ntvfs->private_data;
1541 struct smbcli_request *c_req;
1542 struct proxy_file *f;
1543 struct async_read_fragments *fragments=NULL;
1544 /* how much of read-from-cache is certainly valid */
1545 ssize_t valid=0;
1546 off_t offset=io->generic.in.offset+valid;
1547 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
1549 SETUP_PID;
1551 if (io->generic.level != RAW_READ_GENERIC &&
1552 private->map_generic) {
1553 return ntvfs_map_read(ntvfs, req, io);
1556 SETUP_FILE_HERE(f);
1558 DEBUG(3,("%s offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
1559 io->generic.in.offset,
1560 io->generic.in.mincnt,
1561 io->generic.in.maxcnt));
1562 io->generic.out.nread=0;
1563 /* attempt to read from cache. if nread becomes non-zero then we
1564 have cache to validate. Instead of returning "valid" value, cache_read
1565 should probably return an async_read_fragment structure */
1567 if (private->cache_enabled) {
1568 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
1570 if (NT_STATUS_IS_OK(status)) {
1571 /* if we read enough valid data, return it */
1572 if (valid > 0 && valid>=io->generic.in.mincnt) {
1573 /* valid will not be bigger than maxcnt */
1574 io->generic.out.nread=valid;
1575 DEBUG(1,("Read from cache offset=%d size=%d\n",
1576 (int)(io->generic.in.offset),
1577 (int)(io->generic.out.nread)) );
1578 return status;
1583 fragments=talloc_zero(req, struct async_read_fragments);
1584 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
1585 /* See if there are pending reads that would satisfy this request
1586 We have a validated read up to io->generic.out.nread. Anything between
1587 this and mincnt MUST be read, but we could first try and attach to
1588 any pending read-ahead on the same file.
1589 If those read-aheads fail we will re-issue a regular read from the
1590 callback handler and hope it hasn't taken too long. */
1592 /* offset is the extent of the file from which we still need to find
1593 matching read-requests. */
1594 offset=io->generic.in.offset+valid;
1595 /* limit is the byte beyond the last byte for which we need a request.
1596 This used to be mincnt, but is now maxcnt to cope with validate reads.
1597 Maybe we can switch back to mincnt when proxy_read struct is used
1598 instead of smb_read.
1600 limit=io->generic.in.offset+io->generic.in.maxcnt;
1602 while (offset < limit) {
1603 /* Should look for the read-ahead with offset <= in.offset+out.nread
1604 with the longest span, but there is only likely to be one anyway so
1605 just take the first */
1606 struct async_info* pending=private->pending;
1607 union smb_read *readahead_io=NULL;
1608 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
1609 while(pending) {
1610 if (pending->c_req->async.fn == async_read_handler) {
1611 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
1612 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
1614 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
1615 readahead_io->generic.in.offset <= offset &&
1616 readahead_io->generic.in.offset +
1617 readahead_io->generic.in.mincnt > offset) break;
1619 readahead_io=NULL;
1620 pending=pending->next;
1622 /* ASSERT(readahead_io == pending->c_req->async.params) */
1623 if (pending && readahead_io) {
1624 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1625 fragment->fragments=fragments;
1626 fragment->io_frag=readahead_io;
1627 fragment->c_req = pending->c_req;
1628 /* we found one, so attach to it. We DO need a talloc_reference
1629 because the original send_fn might be called before ALL chained
1630 handlers, and our handler will call its own send_fn first. ugh.
1631 Maybe we need to separate reverse-mapping callbacks from data users? */
1632 /* Note: the read-ahead io is passed as io, and our req io is
1633 in io_frag->io */
1634 //talloc_reference(req, pending->req);
1635 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
1636 readahead_io->generic.in.offset,
1637 readahead_io->generic.in.mincnt));
1638 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
1639 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1640 DEBUG(5,("Attached OK\n"));
1641 #warning we don't want to return if we fail to attach, just break
1642 DLIST_ADD(fragments->fragments, fragment);
1643 /* updated offset for which we have reads */
1644 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
1645 } else {
1646 /* there are no pending reads to fill this so issue one up to
1647 the maximum supported read size. We could see when the next
1648 pending read is (if any) and only read up till there... later...
1649 Issue a fragment request for what is left, clone io.
1650 In the case that there were no fragments this will be the original read
1651 but with a cloned io struct */
1652 off_t next_offset;
1653 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
1654 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1655 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
1656 ssize_t offset_inc=offset-io_frag->generic.in.offset;
1657 /* 250 is a guess at ndr rpc overheads */
1658 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
1659 private->tree->session->transport->negotiate.max_xmit) \
1660 - (MIN_SMB_SIZE+32);
1661 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
1662 readsize=MIN(limit-offset, readsize);
1664 DEBUG(5,("Issuing direct read\n"));
1665 /* reduce the cached read (if any). nread is unsigned */
1666 if (io_frag->generic.out.nread > offset_inc) {
1667 io_frag->generic.out.nread-=offset_inc;
1668 /* don't make nread buffer look too big */
1669 if (io_frag->generic.out.nread > readsize)
1670 io_frag->generic.out.nread = readsize;
1671 } else {
1672 io_frag->generic.out.nread=0;
1674 /* adjust the data pointer so we read to the right place */
1675 io_frag->generic.out.data+=offset_inc;
1676 io_frag->generic.in.offset=offset;
1677 io_frag->generic.in.maxcnt=readsize;
1678 /* we don't mind mincnt being smaller if this is the last frag,
1679 and we can already handle it being bigger but not yet reached...
1680 The spell would be:
1681 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
1683 io_frag->generic.in.mincnt=readsize;
1684 fragment->fragments=fragments;
1685 fragment->io_frag=io_frag;
1686 #warning attach to send_fn handler
1687 /* what if someone attaches to us? Our send_fn is called from our
1688 chained handler which will be before their handler and io will
1689 already be freed. We need to keep a reference to the io and the data
1690 but we don't know where it came from in order to take a reference.
1691 We need therefore to tackle calling of send_fn AFTER all other handlers */
1693 /* Calculate next offset (in advance) */
1694 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
1696 /* if we are (going to be) the last fragment and we are in VALIDATE
1697 mode, see if we can do a bulk validate now.
1698 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
1699 don't do a validate on a receive validate read
1701 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
1702 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
1703 ssize_t length=private->cache_validatesize;
1704 declare_checksum(digest);
1706 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
1707 length, (unsigned long long) offset));
1708 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
1709 /* no point in doing it if md5'd length < current out.nread
1710 remember: out.data contains this request's cached response
1711 if validate succeeds */
1712 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
1713 /* upgrade the read, allocate the proxy_read struct here
1714 and fill in the extras, no more out-of-band stuff */
1715 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
1716 dump_data (5, digest, sizeof(digest));
1718 r=talloc_zero(io_frag, struct proxy_Read);
1719 memcpy(r->in.digest.digest, digest, sizeof(digest));
1720 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
1721 io_frag->generic.in.maxcnt = length;
1722 /* the proxy send function will calculate the checksum based on *data */
1723 } else {
1724 /* not enough in cache to make it worthwhile anymore */
1725 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
1726 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
1728 cache_handle_novalidate(f);
1732 } else {
1733 if (f->cache && f->cache->status & CACHE_VALIDATE) {
1734 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
1735 (long long) next_offset,
1736 (long long) limit));
1740 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
1741 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
1742 io_frag->generic.in.maxcnt));
1743 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
1744 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
1745 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
1746 fragment->c_req=c_req;
1747 DLIST_ADD(fragments->fragments, fragment);
1748 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
1749 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1750 DEBUG(5,("Frag response chained\n"));
1751 /* normally we would only install the chain_handler if we wanted async
1752 response, but as it is the async_read_fragment handler that calls send_fn
1753 based on fragments->async, instead of async_chain_handler, we don't
1754 need to worry about this call completing async'ly while we are
1755 waiting on the other attached calls. Otherwise we would not attach
1756 the async_chain_handler (via async_read_handler) because of the wait
1757 below */
1758 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
1759 void* req=NULL;
1760 /* call async_chain_handler not read handler so that folk can't
1761 attach to it, till we solve the problem above */
1762 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1764 offset = next_offset;
1766 DEBUG(5,("Next fragment\n"));
1769 /* do we still need a final fragment? Issue a read */
1771 DEBUG(5,("No frags left to read\n"));
1774 /* issue new round of read-aheads */
1775 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
1776 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
1777 DEBUG(5,("== Done Read aheads\n"));
1779 /* If we have fragments but we are not called async, we must sync-wait on them */
1780 /* did we map the entire request to pending reads? */
1781 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1782 struct async_read_fragment *fragment;
1783 DEBUG(5,("Sync waiting\n"));
1784 /* fragment gets freed during the chain_handler, so we start at
1785 the top each time */
1786 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
1787 /* Any fragments async handled while we sync-wait on one
1788 will remove themselves from the list and not get sync waited */
1789 sync_chain_handler(fragment->c_req);
1790 /* if we have a non-ok result AND we know we have all the responses
1791 up to extent, then we could quit the loop early and change the
1792 fragments->async to true so the final irrelevant responses would
1793 come async and we could send our response now - but we don't
1794 track that detail until we have cache-maps that we can use to
1795 track the responded fragments and combine responded linear extents
1796 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
1798 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
1799 return fragments->status;
1802 DEBUG(5,("Async returning\n"));
1803 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1804 return NT_STATUS_OK;
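/* A minimal sketch of the matching rule used in the loop above, split out
   for exposition only (proxy_read tests it inline): an in-flight
   read-ahead can satisfy the byte at 'offset' if it is for the same fnum
   and its [offset, offset+mincnt) span covers that byte. */
static bool readahead_covers_sketch(const union smb_read *readahead_io,
                                    uint16_t fnum, off_t offset)
{
        return readahead_io->generic.in.file.fnum == fnum &&
               readahead_io->generic.in.offset <= offset &&
               readahead_io->generic.in.offset +
               readahead_io->generic.in.mincnt > offset;
}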
1808 a handler to de-fragment async write replies back to one request.
1809 Can cope with out-of-order async responses by waiting for all responses
1810 in the NT_STATUS_OK case so that nwritten is properly adjusted
1812 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1814 struct smbcli_request *c_req = async->c_req;
1815 struct ntvfs_request *req = async->req;
1816 struct proxy_file *f=async->f;
1817 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
1818 /* this is the io against which the fragment is to be applied */
1819 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
1820 /* this is the io for the write that issued the callback */
1821 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
1822 struct async_write_fragments* fragments=fragment->fragments;
1823 ssize_t extent=0;
1825 /* if request is not already received by a chained handler, read it */
1826 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
1827 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
1829 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
1830 get_friendly_nt_error_msg(status)));
1832 fragment->status = status;
1834 DLIST_REMOVE(fragments->fragments, fragment);
1836 /* did this one fail? */
1837 if (! NT_STATUS_IS_OK(fragment->status)) {
1838 if (NT_STATUS_IS_OK(fragments->status)) {
1839 fragments->status=fragment->status;
1841 } else {
1842 /* No fragments have yet failed, keep collecting responses */
1843 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
1845 /* we broke up the write so it could all be written. If only some has
1846 been written of this block, and then some of the next block,
1847 it could leave unwritten holes! We will only acknowledge up to the
1848 first partial write, and let the client deal with it.
1849 If server can return NT_STATUS_OK for a partial write so can we */
1850 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
1851 DEBUG(4,("Fragmented write only partially successful\n"));
1853 /* Shrink the master nwritten */
1854 if ( ! fragments->partial ||
1855 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
1856 io->generic.out.nwritten = extent - io->generic.in.offset;
1858 /* stop any further successes from extending the partial write */
1859 fragments->partial=true;
1860 } else {
1861 /* only grow the master nwritten if we haven't logged a partial write */
1862 if (! fragments->partial &&
1863 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
1864 io->generic.out.nwritten = extent - io->generic.in.offset;
1869 /* if this was the last fragment, clean up */
1870 if (! fragments->fragments) {
1871 DEBUG(5,("Async write re-fragmented with %d of %d\n",
1872 io->generic.out.nwritten,
1873 io->generic.in.count));
1874 if (NT_STATUS_IS_OK(fragments->status)) {
1875 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
1876 io->generic.in.offset);
1878 if (fragments->async) {
1879 req->async_states->status=fragments->status;
1880 #warning it's not good freeing early if other pending requests have io allocated against this request which will now be freed
1881 req->async_states->send_fn(req);
1882 DEBUG(5,("Async response sent\n"));
1883 } else {
1884 DEBUG(5,("Fragments SYNC return\n"));
1888 return status;
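/* A minimal sketch (not wired in) of the nwritten accounting performed
   above: until the first partial fragment the master nwritten may only
   grow; after one it may only shrink, so we never acknowledge bytes that
   lie beyond the first unwritten hole. */
static void write_extent_fold_sketch(union smb_write *io, off_t extent,
                                     bool *partial, bool frag_was_partial)
{
        off_t acked = io->generic.in.offset + io->generic.out.nwritten;
        if (frag_was_partial) {
                /* shrink to the first partially-written fragment */
                if (! *partial || acked > extent) {
                        io->generic.out.nwritten = extent - io->generic.in.offset;
                }
                *partial = true;
        } else if (! *partial && acked < extent) {
                /* grow while every fragment so far completed fully */
                io->generic.out.nwritten = extent - io->generic.in.offset;
        }
}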
1892 a handler for async write replies
1894 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1896 struct smbcli_request *c_req = async->c_req;
1897 struct ntvfs_request *req = async->req;
1898 struct proxy_file *f=async->f;
1899 union smb_write *io=async->parms;
1901 if (c_req)
1902 status = smb_raw_write_recv(c_req, async->parms);
1904 cache_handle_save(f, io->generic.in.data,
1905 io->generic.out.nwritten,
1906 io->generic.in.offset);
1908 return status;
1912 write to a file
1914 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
1915 struct ntvfs_request *req, union smb_write *io)
1917 struct proxy_private *private = ntvfs->private_data;
1918 struct smbcli_request *c_req;
1919 struct proxy_file *f;
1921 SETUP_PID;
1923 if (io->generic.level != RAW_WRITE_GENERIC &&
1924 private->map_generic) {
1925 return ntvfs_map_write(ntvfs, req, io);
1927 SETUP_FILE_HERE(f);
1929 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
1930 #warning ERROR get rid of this
1931 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1932 NTSTATUS status;
1933 if (PROXY_REMOTE_SERVER(private)) {
1934 /* Do a proxy write */
1935 status=proxy_smb_raw_write(ntvfs, io, f);
1936 } else if (io->generic.in.count >
1937 private->tree->session->transport->negotiate.max_xmit) {
1939 /* smbcli_write can deal with large writes, which are bigger than
1940 tree->session->transport->negotiate.max_xmit */
1941 ssize_t size=smbcli_write(private->tree,
1942 io->generic.in.file.fnum,
1943 io->generic.in.wmode,
1944 io->generic.in.data,
1945 io->generic.in.offset,
1946 io->generic.in.count);
1948 if (size==io->generic.in.count || size > 0) {
1949 io->generic.out.nwritten=size;
1950 status=NT_STATUS_OK;
1951 } else {
1952 status=NT_STATUS_UNSUCCESSFUL;
1954 } else {
1955 status=smb_raw_write(private->tree, io);
1958 /* Save write in cache */
1959 if (NT_STATUS_IS_OK(status)) {
1960 cache_handle_save(f, io->generic.in.data,
1961 io->generic.out.nwritten,
1962 io->generic.in.offset);
1965 return status;
1968 /* smb_raw_write_send can't deal with large writes, which are bigger than
1969 tree->session->transport->negotiate.max_xmit so we have to break it up
1970 trying to preserve the async nature of the call as much as possible */
1971 if (PROXY_REMOTE_SERVER(private)) {
1972 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
1973 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
1974 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1975 } else if (io->generic.in.count <=
1976 private->tree->session->transport->negotiate.max_xmit) {
1977 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
1978 c_req = smb_raw_write_send(private->tree, io);
1979 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1980 } else {
1981 ssize_t remaining = io->generic.in.count;
1982 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
1983 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
1984 int done = 0;
1985 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
1987 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
1988 __FUNCTION__, io->generic.in.count,
1989 private->tree->session->transport->negotiate.max_xmit));
1991 fragments->io = io;
1992 io->generic.out.nwritten=0;
1993 io->generic.out.remaining=0;
1995 do {
1996 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
1997 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
1998 ssize_t size = MIN(block, remaining);
2000 fragment->fragments = fragments;
2001 fragment->io_frag = io_frag;
2003 io_frag->generic.level = io->generic.level;
2004 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
2005 io_frag->generic.in.wmode = io->generic.in.wmode;
2006 io_frag->generic.in.count = size;
2007 io_frag->generic.in.offset = io->generic.in.offset + done;
2008 io_frag->generic.in.data = io->generic.in.data + done;
2010 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
2011 if (! c_req) {
2012 /* let pending requests clean-up when ready */
2013 fragments->status=NT_STATUS_UNSUCCESSFUL;
2014 talloc_steal(NULL, fragments);
2015 DEBUG(3,("Can't send request fragment\n"));
2016 return NT_STATUS_UNSUCCESSFUL;
2019 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
2020 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
2021 fragment->c_req=c_req;
2022 DLIST_ADD(fragments->fragments, fragment);
2024 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
2025 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
2026 DEBUG(5,("Frag response chained\n"));
2028 remaining -= size;
2029 done += size;
2030 } while(remaining > 0);
2032 /* this strategy has the callback chain attached to each c_req, so we
2033 don't use the ASYNC_RECV_TAIL* to install a general one */
2036 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
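/* A minimal sketch of the chunking arithmetic used above, with a
   hypothetical issue_fn standing in for proxy_smb_raw_write_send and the
   handler chaining: fragments tile the buffer exactly because offset and
   data advance together by 'done'. */
static void write_chunking_sketch(const union smb_write *io, ssize_t block,
                                  void (*issue_fn)(off_t offset,
                                                   const uint8_t *data,
                                                   ssize_t size))
{
        ssize_t remaining = io->generic.in.count;
        ssize_t done = 0;
        do {
                ssize_t size = MIN(block, remaining);
                issue_fn(io->generic.in.offset + done,
                         io->generic.in.data + done, size);
                remaining -= size;
                done += size;
        } while (remaining > 0);
}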
2040 a handler for async seek replies
2042 static void async_seek(struct smbcli_request *c_req)
2044 struct async_info *async = c_req->async.private;
2045 struct ntvfs_request *req = async->req;
2046 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
2047 talloc_free(async);
2048 req->async_states->send_fn(req);
2052 seek in a file
2054 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
2055 struct ntvfs_request *req,
2056 union smb_seek *io)
2058 struct proxy_private *private = ntvfs->private_data;
2059 struct smbcli_request *c_req;
2061 SETUP_PID_AND_FILE;
2063 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2064 return smb_raw_seek(private->tree, io);
2067 c_req = smb_raw_seek_send(private->tree, io);
2069 ASYNC_RECV_TAIL(io, async_seek);
2073 flush a file
2075 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
2076 struct ntvfs_request *req,
2077 union smb_flush *io)
2079 struct proxy_private *private = ntvfs->private_data;
2080 struct smbcli_request *c_req;
2082 SETUP_PID;
2083 switch (io->generic.level) {
2084 case RAW_FLUSH_FLUSH:
2085 SETUP_FILE;
2086 break;
2087 case RAW_FLUSH_ALL:
2088 io->generic.in.file.fnum = 0xFFFF;
2089 break;
2090 case RAW_FLUSH_SMB2:
2091 return NT_STATUS_INVALID_LEVEL;
2094 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2095 return smb_raw_flush(private->tree, io);
2098 c_req = smb_raw_flush_send(private->tree, io);
2100 SIMPLE_ASYNC_TAIL;
2104 close a file
2106 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
2107 struct ntvfs_request *req, union smb_close *io)
2109 struct proxy_private *private = ntvfs->private_data;
2110 struct smbcli_request *c_req;
2111 struct proxy_file *f;
2112 union smb_close io2;
2114 SETUP_PID;
2116 if (io->generic.level != RAW_CLOSE_GENERIC &&
2117 private->map_generic) {
2118 return ntvfs_map_close(ntvfs, req, io);
2120 SETUP_FILE_HERE(f);
2121 /* Note, we aren't freeing f, or its h, here. Should we?
2122 even if file-close fails, we'll remove it from the list,
2123 what else would we do? Maybe we should not remove until
2124 after the proxied call completes? */
2125 DLIST_REMOVE(private->files, f);
2127 /* possibly samba can't do RAW_CLOSE_SEND yet */
2128 if (! (c_req = smb_raw_close_send(private->tree, io))) {
2129 if (io->generic.level == RAW_CLOSE_GENERIC) {
2130 ZERO_STRUCT(io2);
2131 io2.close.level = RAW_CLOSE_CLOSE;
2132 io2.close.in.file = io->generic.in.file;
2133 io2.close.in.write_time = io->generic.in.write_time;
2134 io = &io2;
2136 c_req = smb_raw_close_send(private->tree, io);
2139 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2140 return smbcli_request_simple_recv(c_req);
2143 SIMPLE_ASYNC_TAIL;
2147 exit - closing files open by the pid
2149 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
2150 struct ntvfs_request *req)
2152 struct proxy_private *private = ntvfs->private_data;
2153 struct smbcli_request *c_req;
2155 SETUP_PID;
2157 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2158 return smb_raw_exit(private->tree->session);
2161 c_req = smb_raw_exit_send(private->tree->session);
2163 SIMPLE_ASYNC_TAIL;
2167 logoff - closing files open by the user
2169 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
2170 struct ntvfs_request *req)
2172 /* we can't do this right in the proxy backend .... */
2173 return NT_STATUS_OK;
2177 setup for an async call - nothing to do yet
2179 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
2180 struct ntvfs_request *req,
2181 void *private)
2183 return NT_STATUS_OK;
2187 cancel an async call
2189 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
2190 struct ntvfs_request *req)
2192 struct proxy_private *private = ntvfs->private_data;
2193 struct async_info *a;
2195 /* find the matching request */
2196 for (a=private->pending;a;a=a->next) {
2197 if (a->req == req) {
2198 break;
2202 if (a == NULL) {
2203 return NT_STATUS_INVALID_PARAMETER;
2206 return smb_raw_ntcancel(a->c_req);
2210 lock a byte range
2212 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
2213 struct ntvfs_request *req, union smb_lock *io)
2215 struct proxy_private *private = ntvfs->private_data;
2216 struct smbcli_request *c_req;
2218 SETUP_PID;
2220 if (io->generic.level != RAW_LOCK_GENERIC &&
2221 private->map_generic) {
2222 return ntvfs_map_lock(ntvfs, req, io);
2224 SETUP_FILE;
2226 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2227 return smb_raw_lock(private->tree, io);
2230 c_req = smb_raw_lock_send(private->tree, io);
2231 SIMPLE_ASYNC_TAIL;
2235 set info on a open file
2237 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
2238 struct ntvfs_request *req,
2239 union smb_setfileinfo *io)
2241 struct proxy_private *private = ntvfs->private_data;
2242 struct smbcli_request *c_req;
2244 SETUP_PID_AND_FILE;
2246 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2247 return smb_raw_setfileinfo(private->tree, io);
2249 c_req = smb_raw_setfileinfo_send(private->tree, io);
2251 SIMPLE_ASYNC_TAIL;
2256 a handler for async fsinfo replies
2258 static void async_fsinfo(struct smbcli_request *c_req)
2260 struct async_info *async = c_req->async.private;
2261 struct ntvfs_request *req = async->req;
2262 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, async->parms);
2263 talloc_free(async);
2264 req->async_states->send_fn(req);
2268 return filesystem space info
2270 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
2271 struct ntvfs_request *req, union smb_fsinfo *fs)
2273 struct proxy_private *private = ntvfs->private_data;
2274 struct smbcli_request *c_req;
2276 SETUP_PID;
2278 /* QFS Proxy */
2279 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
2280 fs->proxy_info.out.major_version=1;
2281 fs->proxy_info.out.minor_version=0;
2282 fs->proxy_info.out.capability=0;
2283 return NT_STATUS_OK;
2286 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2287 return smb_raw_fsinfo(private->tree, req, fs);
2290 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
2292 ASYNC_RECV_TAIL(fs, async_fsinfo);
2296 return print queue info
2298 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
2299 struct ntvfs_request *req, union smb_lpq *lpq)
2301 return NT_STATUS_NOT_SUPPORTED;
2305 list files in a directory matching a wildcard pattern
2307 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
2308 struct ntvfs_request *req, union smb_search_first *io,
2309 void *search_private,
2310 bool (*callback)(void *, const union smb_search_data *))
2312 struct proxy_private *private = ntvfs->private_data;
2314 SETUP_PID;
2316 return smb_raw_search_first(private->tree, req, io, search_private, callback);
2319 /* continue a search */
2320 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
2321 struct ntvfs_request *req, union smb_search_next *io,
2322 void *search_private,
2323 bool (*callback)(void *, const union smb_search_data *))
2325 struct proxy_private *private = ntvfs->private_data;
2327 SETUP_PID;
2329 return smb_raw_search_next(private->tree, req, io, search_private, callback);
2332 /* close a search */
2333 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
2334 struct ntvfs_request *req, union smb_search_close *io)
2336 struct proxy_private *private = ntvfs->private_data;
2338 SETUP_PID;
2340 return smb_raw_search_close(private->tree, io);
2344 a handler for async trans2 replies
2346 static void async_trans2(struct smbcli_request *c_req)
2348 struct async_info *async = c_req->async.private;
2349 struct ntvfs_request *req = async->req;
2350 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
2351 talloc_free(async);
2352 req->async_states->send_fn(req);
2355 /* raw trans2 */
2356 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
2357 struct ntvfs_request *req,
2358 struct smb_trans2 *trans2)
2360 struct proxy_private *private = ntvfs->private_data;
2361 struct smbcli_request *c_req;
2363 if (private->map_trans2) {
2364 return NT_STATUS_NOT_IMPLEMENTED;
2367 SETUP_PID;
2368 #warning we should be mapping file handles here
2370 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2371 return smb_raw_trans2(private->tree, req, trans2);
2374 c_req = smb_raw_trans2_send(private->tree, trans2);
2376 ASYNC_RECV_TAIL(trans2, async_trans2);
2380 /* SMBtrans - not used on file shares */
2381 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
2382 struct ntvfs_request *req,
2383 struct smb_trans2 *trans2)
2385 return NT_STATUS_ACCESS_DENIED;
2389 a handler for async change notify replies
2391 static void async_changenotify(struct smbcli_request *c_req)
2393 struct async_info *async = c_req->async.private;
2394 struct ntvfs_request *req = async->req;
2395 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
2396 talloc_free(async);
2397 req->async_states->send_fn(req);
2400 /* change notify request - always async */
2401 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
2402 struct ntvfs_request *req,
2403 union smb_notify *io)
2405 struct proxy_private *private = ntvfs->private_data;
2406 struct smbcli_request *c_req;
2407 int saved_timeout = private->transport->options.request_timeout;
2408 struct proxy_file *f;
2410 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
2411 return NT_STATUS_NOT_IMPLEMENTED;
2414 SETUP_PID;
2416 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
2417 if (!f) return NT_STATUS_INVALID_HANDLE;
2418 io->nttrans.in.file.fnum = f->fnum;
2420 /* this request doesn't make sense unless it's async */
2421 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2422 return NT_STATUS_INVALID_PARAMETER;
2425 /* we must not timeout on notify requests - they wait
2426 forever */
2427 private->transport->options.request_timeout = 0;
2429 c_req = smb_raw_changenotify_send(private->tree, io);
2431 private->transport->options.request_timeout = saved_timeout;
2433 ASYNC_RECV_TAIL(io, async_changenotify);
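/* A minimal sketch (not called) of the save/restore pattern above, so
   that only the change-notify request escapes the transport's normal
   request timeout. */
static struct smbcli_request *notify_send_without_timeout_sketch(
                struct proxy_private *private, union smb_notify *io)
{
        int saved_timeout = private->transport->options.request_timeout;
        struct smbcli_request *c_req;
        private->transport->options.request_timeout = 0; /* wait forever */
        c_req = smb_raw_changenotify_send(private->tree, io);
        private->transport->options.request_timeout = saved_timeout;
        return c_req;
}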
2437 * A handler for converting from rpc struct replies to ntioctl
2439 static NTSTATUS proxy_rpclite_map_async_send(
2440 struct ntvfs_module_context *ntvfs,
2441 struct ntvfs_request *req,
2442 void *io1, void *io2, NTSTATUS status)
2444 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
2445 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
2446 void* r=rpclite_send->struct_ptr;
2447 struct ndr_push* push;
2448 const struct ndr_interface_call* call=rpclite_send->call;
2449 enum ndr_err_code ndr_err;
2450 DATA_BLOB ndr;
2452 talloc_free(rpclite_send);
2454 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2455 NT_STATUS_HAVE_NO_MEMORY(push);
2457 if (0) {
2458 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2461 ndr_err = call->ndr_push(push, NDR_OUT, r);
2462 status=ndr_map_error2ntstatus(ndr_err);
2464 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2465 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2466 nt_errstr(status)));
2467 return status;
2470 ndr=ndr_push_blob(push);
2471 //if (ndr.length > io->ntioctl.in.max_data) {
2472 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
2473 io->ntioctl.in.max_data, ndr.data));
2474 io->ntioctl.out.blob=ndr;
2475 return status;
2479 * A handler for sending async rpclite Read replies that were mapped to union smb_read
2481 static NTSTATUS rpclite_proxy_Read_map_async_send(
2482 struct ntvfs_module_context *ntvfs,
2483 struct ntvfs_request *req,
2484 void *io1, void *io2, NTSTATUS status)
2486 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
2487 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
2489 /* status here is a result of proxy_read, it doesn't reflect the status
2490 of the rpc transport or related calls, just the read operation */
2491 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2492 r->out.result=status;
2494 if (! NT_STATUS_IS_OK(status)) {
2495 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
2496 r->out.nread=0;
2497 r->out.flags=0;
2498 } else {
2499 ssize_t size=io->readx.out.nread;
2500 r->out.flags=0;
2501 r->out.nread=io->readx.out.nread;
2503 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
2504 declare_checksum(digest);
2505 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
2507 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
2508 dump_data (5, digest, sizeof(digest));
2509 DEBUG(5,("Cached digest\n"));
2510 dump_data (5, r->in.digest.digest, sizeof(digest));
2512 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
2513 r->out.flags=PROXY_USE_CACHE;
2514 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
2515 (long long)r->out.nread));
2516 if (r->in.flags & PROXY_VALIDATE) {
2517 r->out.flags |= PROXY_VALIDATE;
2518 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
2519 (long long)r->out.nread, (long long) io->readx.out.nread));
2521 goto done;
2523 DEBUG(5,("Cache does not match\n"));
2526 if (r->in.flags & PROXY_VALIDATE) {
2527 /* validate failed, shrink read to mincnt - so we don't fill the link */
2528 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
2529 r->in.maxcnt, r->out.nread, MIN(r->out.nread, r->in.mincnt)));
2530 r->out.nread=MIN(r->out.nread, r->in.mincnt);
2531 size=r->out.nread;
2534 if (r->in.flags & PROXY_USE_ZLIB) {
2535 if (compress_block(io->readx.out.data, &size) ) {
2536 r->out.flags|=PROXY_USE_ZLIB;
2537 r->out.response.compress.count=size;
2538 r->out.response.compress.data=io->readx.out.data;
2539 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2540 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
2541 goto done;
2545 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
2546 r->out.response.generic.count=io->readx.out.nread;
2547 r->out.response.generic.data=io->readx.out.data;
2550 done:
2552 /* Or should we return NT_STATUS_OK ?*/
2553 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
2555 /* the rpc transport succeeded even if the operation did not */
2556 return NT_STATUS_OK;
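/* A minimal sketch (not called) of the cache-hit test above: checksum
   the data just read and compare with the digest the client supplied;
   on a match only PROXY_USE_CACHE need be returned, not the bulk data. */
static bool proxy_read_cache_hit_sketch(struct proxy_Read *r,
                                        uint8_t *data, ssize_t nread)
{
        declare_checksum(digest);
        checksum_block(digest, data, nread);
        return memcmp(digest, r->in.digest.digest, sizeof(digest)) == 0;
}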
2560 * RPC implementation of Read
2562 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
2563 struct ntvfs_request *req, struct proxy_Read *r)
2565 struct proxy_private *private = ntvfs->private_data;
2566 union smb_read* io=talloc(req, union smb_read);
2567 NTSTATUS status;
2568 struct proxy_file *f;
2569 struct ntvfs_handle *h;
2571 NT_STATUS_HAVE_NO_MEMORY(io);
2573 /* if next hop is a proxy just repeat this call also handle VALIDATE check
2574 that means have own callback handlers too... */
2575 SETUP_PID;
2577 RPCLITE_SETUP_FILE_HERE(f, h);
2579 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
2580 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
2581 DEBUG(5,("Anticipated digest\n"));
2582 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
2584 /* If the remote end is a proxy, just fix up the file handle and pass through,
2585 but update cache on the way back
2586 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2589 /* prepare for response */
2590 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
2591 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
2593 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2594 return proxy_validate(ntvfs, req, r, f);
2597 /* pack up an smb_read request and dispatch here */
2598 io->readx.level=RAW_READ_READX;
2599 io->readx.in.file.ntvfs=h;
2600 io->readx.in.mincnt=r->in.mincnt;
2601 io->readx.in.maxcnt=r->in.maxcnt;
2602 io->readx.in.offset=r->in.offset;
2603 io->readx.in.remaining=r->in.remaining;
2604 /* and something to hold the answer */
2605 io->readx.out.data=r->out.response.generic.data;
2607 /* so we get to pack the io->*.out response */
2608 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
2609 NT_STATUS_NOT_OK_RETURN(status);
2611 /* so the read will get processed normally */
2612 return proxy_read(ntvfs, req, io);
2616 * A handler for sending async rpclite Write replies
2618 static NTSTATUS rpclite_proxy_Write_map_async_send(
2619 struct ntvfs_module_context *ntvfs,
2620 struct ntvfs_request *req,
2621 void *io1, void *io2, NTSTATUS status)
2623 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
2624 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
2626 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2627 r->out.result=status;
2629 r->out.nwritten=io->writex.out.nwritten;
2630 r->out.remaining=io->writex.out.remaining;
2632 /* the rpc transport succeeded even if the operation did not */
2633 return NT_STATUS_OK;
2637 * RPC implementation of write
2639 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
2640 struct ntvfs_request *req, struct proxy_Write *r)
2642 struct proxy_private *private = ntvfs->private_data;
2643 union smb_write* io=talloc(req, union smb_write);
2644 NTSTATUS status;
2645 struct proxy_file* f;
2646 struct ntvfs_handle *h;
2648 SETUP_PID;
2650 RPCLITE_SETUP_FILE_HERE(f,h);
2652 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
2653 r->in.count, r->in.offset, r->in.fnum));
2655 /* pack up an smb_write request and dispatch here */
2656 io->writex.level=RAW_WRITE_WRITEX;
2657 io->writex.in.file.ntvfs=h;
2658 io->writex.in.offset=r->in.offset;
2659 io->writex.in.wmode=r->in.mode;
2660 io->writex.in.count=r->in.count;
2662 /* and the data */
2663 if (PROXY_USE_ZLIB & r->in.flags) {
2664 ssize_t count=r->in.data.generic.count;
2665 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
2666 &count, r->in.count);
2667 if (count != r->in.count || !io->writex.in.data) {
2668 /* Didn't uncompress properly, but the RPC layer worked */
2669 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
2670 return NT_STATUS_OK;
2672 } else {
2673 io->writex.in.data=r->in.data.generic.data;
2676 /* so we get to pack the io->*.out response */
2677 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
2678 NT_STATUS_NOT_OK_RETURN(status);
2680 /* so the write will get processed normally */
2681 return proxy_write(ntvfs, req, io);
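/* A minimal sketch (not called) of the payload check above: a
   PROXY_USE_ZLIB write must uncompress to exactly in.count bytes or the
   operation fails with NT_STATUS_BAD_COMPRESSION_BUFFER while the RPC
   transport itself still reports success. */
static NTSTATUS write_payload_unpack_sketch(TALLOC_CTX *ctx,
                                            struct proxy_Write *r,
                                            uint8_t **data_out)
{
        if (PROXY_USE_ZLIB & r->in.flags) {
                ssize_t count = r->in.data.generic.count;
                *data_out = uncompress_block_talloc(ctx,
                                r->in.data.compress.data, &count, r->in.count);
                if (count != r->in.count || ! *data_out) {
                        return NT_STATUS_BAD_COMPRESSION_BUFFER;
                }
        } else {
                *data_out = r->in.data.generic.data;
        }
        return NT_STATUS_OK;
}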
2684 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
2685 back from rpc struct to ntioctl */
2686 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
2687 struct ntvfs_request *req, union smb_ioctl *io)
2689 struct proxy_private *private = ntvfs->private_data;
2690 DATA_BLOB *request;
2691 struct ndr_syntax_id* syntax_id;
2692 uint32_t opnum;
2693 const struct ndr_interface_table *table;
2694 struct ndr_pull* pull;
2695 void* r;
2696 NTSTATUS status;
2697 struct async_rpclite_send *rpclite_send;
2698 enum ndr_err_code ndr_err;
2700 SETUP_PID;
2702 /* We don't care about io->generic.in.file, ntvfs layer already proved it was valid,
2703 our operations will have the fnum embedded in them anyway */
2704 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,io->ntioctl.in.blob.length));
2705 /* unpack the NDR */
2706 request=&io->ntioctl.in.blob;
2708 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2709 NT_STATUS_HAVE_NO_MEMORY(pull);
2710 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2711 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
2713 /* the blob is 4-aligned because it was memcpy'd */
2714 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
2715 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
2717 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
2718 status=ndr_map_error2ntstatus(ndr_err);
2719 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2720 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
2721 return status;
2724 /* now find the struct ndr_interface_table * for this syntax_id */
2725 table=ndr_table_by_uuid(&syntax_id->uuid);
2726 if (! table) {
2727 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
2728 return NT_STATUS_NO_GUID_TRANSLATION;
2731 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
2732 status=ndr_map_error2ntstatus(ndr_err);
2733 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2734 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
2735 return status;
2737 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
2739 DEBUG(10,("rpc request data:\n"));
2740 dump_data(10, pull->data, pull->data_size);
2742 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
2743 table->calls[opnum].name);
2744 NT_STATUS_HAVE_NO_MEMORY(r);
2746 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
2747 status=ndr_map_error2ntstatus(ndr_err);
2748 DEBUG(5,("%s opnum %d pulled status %s\n",__FUNCTION__,opnum,get_friendly_nt_error_msg (status)));
2749 NT_STATUS_NOT_OK_RETURN(status);
2751 rpclite_send=talloc(req, struct async_rpclite_send);
2752 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
2753 rpclite_send->call=&table->calls[opnum];
2754 rpclite_send->struct_ptr=r;
2755 /* need to push conversion function to convert from r to io */
2756 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
2758 /* Magically despatch the call based on syntax_id, table and opnum.
2759 But there is no table of handlers... so until there is, despatch by hand */
2760 if (0==strcasecmp(table->name,"rpcproxy")) {
2761 switch(opnum) {
2762 case(NDR_PROXY_READ):
2763 status=rpclite_proxy_Read(ntvfs, req, r);
2764 break;
2765 case(NDR_PROXY_WRITE):
2766 status=rpclite_proxy_Write(ntvfs, req, r);
2767 break;
2768 default:
2769 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
2770 return NT_STATUS_PROCEDURE_NOT_FOUND;
2772 } else {
2773 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
2774 GUID_string(debug_ctx(),&syntax_id->uuid)));
2775 return NT_STATUS_NO_GUID_TRANSLATION;
2778 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
2779 the handler status is in r->out.result */
2780 return ntvfs_map_async_finish(req, status);
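/* A minimal sketch of the blob layout proxy_rpclite above unpicks and
   smbcli_ndr_request_ntioctl_send below builds: an ndr_syntax_id, then a
   uint32 opnum, then the NDR_IN-marshalled call arguments, in one blob. */
static enum ndr_err_code rpclite_push_header_sketch(struct ndr_push *push,
                const struct ndr_interface_table *table, uint32_t opnum)
{
        enum ndr_err_code ndr_err;
        ndr_err = ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
        if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) return ndr_err;
        /* the opnum selects table->calls[opnum] at the receiving end */
        return ndr_push_uint32(push, NDR_SCALARS, opnum);
}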
2783 /* unpack the ntioctl to make some rpc_struct */
2784 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2786 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2787 struct proxy_private *proxy=async->proxy;
2788 struct smbcli_request *c_req = async->c_req;
2789 void* r=io1;
2790 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
2791 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
2792 const struct ndr_interface_call *calls=info->calls;
2793 enum ndr_err_code ndr_err;
2794 DATA_BLOB *response;
2795 struct ndr_pull* pull;
2797 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
2798 DEBUG(5,("%s op %s ntioctl: %s\n",
2799 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2800 NT_STATUS_NOT_OK_RETURN(status);
2802 if (c_req) {
2803 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
2804 status = smb_raw_ioctl_recv(c_req, io, io);
2805 #define SESSION_INFO proxy->remote_server, proxy->remote_share
2806 /* This status is the ntioctl wrapper status */
2807 if (! NT_STATUS_IS_OK(status)) {
2808 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
2809 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2810 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
2811 return NT_STATUS_UNSUCCESSFUL;
2815 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
2817 response=&io->ntioctl.out.blob;
2818 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2819 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2821 NT_STATUS_HAVE_NO_MEMORY(pull);
2823 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
2824 #warning can we free pull here?
2825 status=ndr_map_error2ntstatus(ndr_err);
2827 DEBUG(5,("END %s op status %s\n",
2828 __FUNCTION__, get_friendly_nt_error_msg(status)));
2829 return status;
2833 send an ntioctl request based on an NDR encoding.
2835 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
2836 struct smbcli_tree *tree,
2837 struct ntvfs_module_context *ntvfs,
2838 const struct ndr_interface_table *table,
2839 uint32_t opnum,
2840 void *r)
2842 struct proxy_private *private = ntvfs->private_data;
2843 struct smbcli_request * c_req;
2844 struct ndr_push *push;
2845 NTSTATUS status;
2846 DATA_BLOB request;
2847 enum ndr_err_code ndr_err;
2848 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
2851 /* set up for an ndr_push_* call; we can't free push until the message
2852 actually hits the wire */
2853 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2854 if (!push) return NULL;
2856 /* first push interface table identifiers */
2857 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
2858 status=ndr_map_error2ntstatus(ndr_err);
2860 if (! NT_STATUS_IS_OK(status)) return NULL;
2862 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
2863 status=ndr_map_error2ntstatus(ndr_err);
2864 if (! NT_STATUS_IS_OK(status)) return NULL;
2866 if (0) {
2867 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2870 /* push the structure into a blob */
2871 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
2872 status=ndr_map_error2ntstatus(ndr_err);
2873 if (!NT_STATUS_IS_OK(status)) {
2874 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2875 nt_errstr(status)));
2876 return NULL;
2879 /* retrieve the blob */
2880 request = ndr_push_blob(push);
2882 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
2883 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
2884 io->ntioctl.in.file.fnum=private->nttrans_fnum;
2885 io->ntioctl.in.fsctl=false;
2886 io->ntioctl.in.filter=0;
2887 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
2888 io->ntioctl.in.blob=request;
2890 DEBUG(10,("smbcli_request packet:\n"));
2891 dump_data(10, request.data, request.length);
2893 c_req = smb_raw_ioctl_send(tree, io);
2895 if (! c_req) {
2896 return NULL;
2899 dump_data(10, c_req->out.data, c_req->out.data_size);
2901 { void* req=NULL;
2902 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
2903 info->io=io;
2904 info->table=table;
2905 info->opnum=opnum;
2906 info->calls=&table->calls[opnum];
2907 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
2910 return c_req;
2914 client helpers, mapping between proxy RPC calls and smbcli_* calls.
2918 * If the sync_chain_handler is called directly it unplugs the async handler
2919 which (as well as preventing loops) will also avoid req->send_fn being
2920 called - which is also nice! */
2921 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
2923 struct async_info *async=NULL;
2924 /* the first callback which will actually receive the c_req response */
2925 struct async_info_map *async_map;
2926 NTSTATUS status=NT_STATUS_OK;
2927 struct async_info_map** chain;
2929 DEBUG(5,("%s\n",__FUNCTION__));
2930 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
2932 /* If there is a handler installed, it is using async_info to chain */
2933 if (c_req->async.fn) {
2934 /* not safe to talloc_free async if send_fn has been called for the request
2935 against which async was allocated, so steal it (and free below) or neither */
2936 async = talloc_get_type_abort(c_req->async.private, struct async_info);
2937 talloc_steal(NULL, async);
2938 chain=&async->chain;
2939 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2940 } else {
2941 chain=(struct async_info_map**)&c_req->async.private;
2942 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2945 /* unplug c_req->async.fn as if a callback handler calls smb_*_recv
2946 in order to receive the response, smbcli_transport_finish_recv will
2947 call us again and then call the c-req->async.fn
2948 Perhaps we should merely call smbcli_request_receive() IF
2949 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
2950 help multi-part replies... except all parts are received before
2951 callback if a handler WAS set */
2952 c_req->async.fn=NULL;
2954 /* Should we raise an error? Should we simple_recv? */
2955 while(async_map) {
2956 /* remove this one from the list before we call. We do this in case
2957 some callbacks free their async_map but also so that callbacks
2958 can navigate the async_map chain to add additional callbacks to
2959 the end - e.g. so that tag-along reads can call send_fn after
2960 the send_fn of the request they tagged along to, thus preserving
2961 the async response order - which may be a waste of time? */
2962 DLIST_REMOVE(*chain, async_map);
2964 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
2965 if (async_map->fn) {
2966 status=async_map->fn(async_map->async,
2967 async_map->parms1, async_map->parms2, status);
2969 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
2970 /* Note: the callback may have added to the chain */
2971 #warning Async_maps have a null talloc_context, it is unclear who should own them
2972 /* it can't be c_req as it stops us chaining more than one, maybe it
2973 should be req but there isn't always a req. However sync_chain_handler
2974 will always free it if called */
2975 DEBUG(6,("Will free async map %p\n",async_map));
2976 #warning put me back
2977 talloc_free(async_map);
2978 DEBUG(6,("Free'd async_map\n"));
2979 if (*chain)
2980 async_map=talloc_get_type_abort(*chain, struct async_info_map);
2981 else
2982 async_map=NULL;
2983 DEBUG(6,("Switch to async_map %p\n",async_map));
2985 /* The first callback will have read c_req, thus talloc_free'ing it,
2986 so we don't let the other callbacks get hurt playing with it */
2987 if (async_map && async_map->async)
2988 async_map->async->c_req=NULL;
2991 talloc_free(async);
2993 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2994 return status;
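/* A minimal sketch of a chained callback as installed by
   ADD_ASYNC_RECV_TAIL (hypothetical, for exposition only): each entry
   gets the running status, the first may receive the wire reply via
   c_req, and the returned status is handed on to the next entry. */
static NTSTATUS example_chained_handler_sketch(struct async_info *async,
                                               void* io1, void* io2,
                                               NTSTATUS status)
{
        if (async->c_req) {
                /* the first handler in the chain would smb_*_recv here */
        }
        /* io1/io2 arrive as the parms1/parms2 given when chaining */
        return status;
}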
2997 /* If the async handler is called, then the send_fn is called */
2998 static void async_chain_handler(struct smbcli_request *c_req)
3000 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
3001 struct ntvfs_request *req = async->req;
3002 NTSTATUS status;
3004 if (c_req->state <= SMBCLI_REQUEST_RECV) {
3005 /* Looks like the async handler has been called sync'ly */
3006 smb_panic("async_chain_handler called sync'ly");
3009 status=sync_chain_handler(c_req);
3011 /* Should we insist that a chain'd handler does this?
3012 Which makes it hard to intercept the data by adding handlers
3013 before the send_fn handler sends it... */
3014 if (req) {
3015 req->async_states->status=status;
3016 req->async_states->send_fn(req);
3020 /* unpack the rpc struct to make some smb_write */
3021 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
3022 void* io1, void* io2, NTSTATUS status)
3024 union smb_write* io =talloc_get_type(io1, union smb_write);
3025 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
3027 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
3028 get_friendly_nt_error_msg (status)));
3029 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
3030 NT_STATUS_NOT_OK_RETURN(status);
3032 status=r->out.result;
3033 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
3034 NT_STATUS_NOT_OK_RETURN(status);
3036 io->generic.out.remaining = r->out.remaining;
3037 io->generic.out.nwritten = r->out.nwritten;
3039 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
3040 get_friendly_nt_error_msg (status)));
3041 return status;
3044 /* upgrade from smb to NDR and then send.
3045 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
3046 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
3047 union smb_write *io,
3048 struct proxy_file *f)
3050 struct proxy_private *private = ntvfs->private_data;
3051 struct smbcli_tree *tree=private->tree;
3053 if (PROXY_REMOTE_SERVER(private)) {
3054 struct smbcli_request *c_req;
3055 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
3056 ssize_t size;
3058 if (! r) return NULL;
3060 size=io->generic.in.count;
3061 /* upgrade the write */
3062 r->in.fnum = io->generic.in.file.fnum;
3063 r->in.offset = io->generic.in.offset;
3064 r->in.count = io->generic.in.count;
3065 r->in.mode = io->generic.in.wmode;
3066 // r->in.remaining = io->generic.in.remaining;
3067 #warning remove this
3068 /* prepare to lie */
3069 r->out.nwritten=r->in.count;
3070 r->out.remaining=0;
3072 /* try to compress */
3073 #warning compress!
3074 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
3075 if (r->in.data.compress.data) {
3076 r->in.data.compress.count=size;
3077 r->in.flags = PROXY_USE_ZLIB;
3078 } else {
3079 r->in.flags = 0;
3080 /* we'll honour const, honest gov */
3081 r->in.data.generic.data=discard_const(io->generic.in.data);
3082 r->in.data.generic.count=io->generic.in.count;
3085 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
3086 ntvfs,
3087 &ndr_table_rpcproxy,
3088 NDR_PROXY_WRITE, r);
3089 if (! c_req) return NULL;
3091 /* yeah, filthy abuse of f */
3092 { void* req=NULL;
3093 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
3096 return c_req;
3097 } else {
3098 return smb_raw_write_send(tree, io);
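/* A minimal sketch (not called) of the compress-or-fall-back choice made
   above: compress_block_talloc returns NULL when compression isn't
   worthwhile, in which case the raw buffer is sent and PROXY_USE_ZLIB
   stays clear. */
static void write_compress_choice_sketch(struct proxy_Write *r,
                                         const union smb_write *io)
{
        ssize_t size = io->generic.in.count;
        r->in.data.compress.data =
                compress_block_talloc(r, io->generic.in.data, &size);
        if (r->in.data.compress.data) {
                r->in.data.compress.count = size;
                r->in.flags = PROXY_USE_ZLIB;
        } else {
                r->in.flags = 0;
                r->in.data.generic.data = discard_const(io->generic.in.data);
                r->in.data.generic.count = io->generic.in.count;
        }
}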
3102 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
3103 union smb_write *io,
3104 struct proxy_file *f)
3106 struct proxy_private *proxy = ntvfs->private_data;
3107 struct smbcli_tree *tree=proxy->tree;
3109 if (PROXY_REMOTE_SERVER(proxy)) {
3110 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3111 return sync_chain_handler(c_req);
3112 } else {
3113 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
3114 return smb_raw_write_recv(c_req, io);
3118 /* unpack the rpc struct to make some smb_read response */
3119 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
3120 void* io1, void* io2, NTSTATUS status)
3122 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
3123 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
3125 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
3126 get_friendly_nt_error_msg(status)));
3127 NT_STATUS_NOT_OK_RETURN(status);
3129 status=r->out.result;
3130 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
3131 get_friendly_nt_error_msg(status)));
3132 NT_STATUS_NOT_OK_RETURN(status);
3134 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
3135 io->generic.out.compaction_mode = 0;
3137 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
3138 /* Use the io we already setup!
3139 if out.flags & PROXY_VALIDATE, we may need to validate more in
3140 cache than r->out.nread would suggest, see io->generic.out.nread */
3141 if (r->out.flags & PROXY_VALIDATE)
3142 io->generic.out.nread=io->generic.in.maxcnt;
3143 DEBUG(5,("Using cached data: size=%lld\n",
3144 (long long) io->generic.out.nread));
3145 return status;
3148 if (r->in.flags & PROXY_VALIDATE) {
3149 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
3150 /* turn off validate on this file */
3151 //cache_handle_novalidate(f);
3152 #warning turn off validate on this file - do an nread<maxcnt later
3155 if (r->in.flags & PROXY_USE_CACHE) {
3156 DEBUG(5,("Cached data did not match\n"));
3159 io->generic.out.nread = r->out.nread;
3161 /* we may need to uncompress */
3162 if (r->out.flags & PROXY_USE_ZLIB) {
3163 ssize_t size=r->out.response.compress.count;
3164 if (! uncompress_block_to(io->generic.out.data,
3165 r->out.response.compress.data, &size,
3166 io->generic.in.maxcnt) ||
3167 size != r->out.nread) {
3168 io->generic.out.nread=size;
3169 status=NT_STATUS_INVALID_USER_BUFFER;
3171 } else if (io->generic.out.data != r->out.response.generic.data) {
3172 //Assert(r->out.nread == r->out.generic.out.count);
3173 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
3176 return status;
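
/* Illustration only (never compiled): uncompress_block_to() as used above
   could be a thin wrapper around zlib uncompress(); the actual helper is
   defined earlier in this file and may differ. On entry *size is the
   compressed length, on exit the number of bytes produced, which the
   caller above checks against r->out.nread. */
#if 0
static bool example_uncompress_block_to(uint8_t *dest, const uint8_t *src,
					ssize_t *size, size_t maxcnt)
{
	uLongf dlen = maxcnt;	/* never write past the caller's buffer */
	if (uncompress(dest, &dlen, src, *size) != Z_OK) {
		return false;
	}
	*size = dlen;
	return true;
}
#endif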
/* Warning: assumes that if io->generic.out.nread is not zero, then some
   data has been pre-read into io->generic.out.data and can be used for
   proxy<->proxy optimized reads */
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io,
					       struct proxy_file *f,
					       struct proxy_Read *r)
{
	struct proxy_private *private = ntvfs->private_data;
#warning we are using out.nread as an out-of-band parameter
	if (PROXY_REMOTE_SERVER(private)) {

		struct smbcli_request *c_req;
		if (! r) {
			r=talloc_zero(io, struct proxy_Read);
		}

		if (! r) return NULL;

		r->in.fnum = io->generic.in.file.fnum;
		r->in.read_for_execute=io->generic.in.read_for_execute;
		r->in.offset = io->generic.in.offset;
		r->in.mincnt = io->generic.in.mincnt;
		r->in.maxcnt = io->generic.in.maxcnt;
		r->in.remaining = io->generic.in.remaining;
		r->in.flags |= PROXY_USE_ZLIB;
		if (! (r->in.flags & PROXY_VALIDATE) &&
		    io->generic.out.data && io->generic.out.nread > 0) {
			/* maybe we should limit digest size to MIN(nread, maxcnt) to
			   permit the caller to provide a larger nread as part of
			   a split read */
			checksum_block(r->in.digest.digest, io->generic.out.data,
				       io->generic.out.nread);

			if (io->generic.out.nread > r->in.maxcnt) {
				DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
			} else {
				r->in.mincnt = io->generic.out.nread;
				r->in.maxcnt = io->generic.out.nread;
				r->in.flags |= PROXY_USE_CACHE;
				/* PROXY_VALIDATE will have been set by caller */
			}
		}

		if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
			DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
			dump_data(3, r->in.digest.digest, sizeof(r->in.digest.digest));
		}

		c_req = smbcli_ndr_request_ntioctl_send(private->tree,
							ntvfs,
							&ndr_table_rpcproxy,
							NDR_PROXY_READ, r);
		if (! c_req) return NULL;

		{ void* req=NULL;
			ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
		}

		return c_req;
	} else {
		return smb_raw_read_send(private->tree, io);
	}
}
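
/* Illustration of the out-of-band contract described above: a caller that
   already holds plausibly-current bytes points io->generic.out.data at
   them and sets out.nread before calling proxy_smb_raw_read_send(), so
   only a digest of that data crosses the wire. Hypothetical sketch; the
   example_ name is not part of this file. */
#if 0
static struct smbcli_request *example_cached_read_send(struct ntvfs_module_context *ntvfs,
						       union smb_read *io,
						       struct proxy_file *f,
						       uint8_t *cached, size_t cached_len)
{
	io->generic.out.data = cached;
	io->generic.out.nread = cached_len;	/* out-of-band: "I already have this much" */
	return proxy_smb_raw_read_send(ntvfs, io, f, NULL);
}
#endif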
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io,
			    struct proxy_file *f)
{
	struct proxy_private *proxy = ntvfs->private_data;
	struct smbcli_tree *tree=proxy->tree;

	if (PROXY_REMOTE_SERVER(proxy)) {
		struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
		return sync_chain_handler(c_req);
	} else {
		struct smbcli_request *c_req = smb_raw_read_send(tree, io);
		return smb_raw_read_recv(c_req, io);
	}
}
/*
  initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
*/
NTSTATUS ntvfs_proxy_init(void)
{
	NTSTATUS ret;
	struct ntvfs_ops ops;
	NTVFS_CURRENT_CRITICAL_SIZES(vers);

	ZERO_STRUCT(ops);

	/* fill in the name and type */
	ops.name = "proxy";
	ops.type = NTVFS_DISK;

	/* fill in all the operations */
	ops.connect = proxy_connect;
	ops.disconnect = proxy_disconnect;
	ops.unlink = proxy_unlink;
	ops.chkpath = proxy_chkpath;
	ops.qpathinfo = proxy_qpathinfo;
	ops.setpathinfo = proxy_setpathinfo;
	ops.open = proxy_open;
	ops.mkdir = proxy_mkdir;
	ops.rmdir = proxy_rmdir;
	ops.rename = proxy_rename;
	ops.copy = proxy_copy;
	ops.ioctl = proxy_ioctl;
	ops.read = proxy_read;
	ops.write = proxy_write;
	ops.seek = proxy_seek;
	ops.flush = proxy_flush;
	ops.close = proxy_close;
	ops.exit = proxy_exit;
	ops.lock = proxy_lock;
	ops.setfileinfo = proxy_setfileinfo;
	ops.qfileinfo = proxy_qfileinfo;
	ops.fsinfo = proxy_fsinfo;
	ops.lpq = proxy_lpq;
	ops.search_first = proxy_search_first;
	ops.search_next = proxy_search_next;
	ops.search_close = proxy_search_close;
	ops.trans = proxy_trans;
	ops.logoff = proxy_logoff;
	ops.async_setup = proxy_async_setup;
	ops.cancel = proxy_cancel;
	ops.notify = proxy_notify;
	ops.trans2 = proxy_trans2;

	/* register ourselves with the NTVFS subsystem. We register
	   under the name 'proxy'. */
	ret = ntvfs_register(&ops, &vers);

	if (!NT_STATUS_IS_OK(ret)) {
		DEBUG(0,("Failed to register PROXY backend!\n"));
	}

	return ret;
}
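
/* Example (assumed) smb.conf usage once this backend is registered; the
   "ntvfs handler" option selects the backend by the name registered above,
   while the module-specific options naming the remote server and share
   are handled elsewhere in this file and may differ:

	[proxied]
		ntvfs handler = proxy
*/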