Basic validate-on-read
/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.
*/
#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx

#define PROXY_NTIOCTL_MAXDATA 0x20000
#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"
struct proxy_file {
	struct proxy_file *prev, *next;
	uint16_t fnum;
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	int readahead_pending;
};
/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead; /* default read-ahead window size */
	int cache_readaheadblock; /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read of this size on the remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	int readahead_spare; /* amount of pending non-user generated requests */
	bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
};
struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev;
	struct proxy_private *proxy;
	struct ntvfs_request *req;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	struct async_info_map *chain;
	void *parms;
};
/* used to chain async callbacks */
struct async_info_map {
	struct async_info_map *next, *prev;
	NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
	void *parms1;
	void *parms2;
	struct async_info *async;
};
struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};
/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};
#define SETUP_PID private->tree->session->pid = req->smbpid

#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)
#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in KiB; multiplied by 1024 at tree connect, so 256 = 256KiB */

#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true
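
/*
  Purely as an illustration of how the parametric options above are meant to
  be consumed, a share definition might look like the following smb.conf
  fragment. The share name, host name and the handler name "proxy" are
  assumptions for this sketch (the handler name is guessed from the option
  prefix, it is not stated in this file):

	[proxied]
		ntvfs handler = proxy
		proxy:server = fileserver.example.com
		proxy:share = data
		proxy:use-machine-account = yes
		proxy:cache-enabled = yes
		proxy:cache-readahead = 32768
		proxy:cache-readaheadblock = 4096
*/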
/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	 && (strcmp("A:",private->tree->device)==0))
/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
	struct smbcli_tree *tree,
	struct ntvfs_module_context *ntvfs,
	uint16_t fnum, const struct ndr_interface_table *table,
	uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
						union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
			     union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);
/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
	struct proxy_private *private = p_private;
	NTSTATUS status;
	struct ntvfs_handle *h = NULL;
	struct proxy_file *f;

	for (f=private->files; f; f=f->next) {
		if (f->fnum != fnum) continue;
		h = f->h;
		break;
	}

	if (!h) {
		DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
		return true;
	}

	/* If we don't have an oplock, then we can't rely on the cache */
	cache_handle_stale(f);

	DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
	status = ntvfs_send_oplock_break(private->ntvfs, h, level);
	if (!NT_STATUS_IS_OK(status)) return false;
	return true;
}
/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, const char *sharename)
{
	NTSTATUS status;
	struct proxy_private *private;
	const char *host, *user, *pass, *domain, *remote_share;
	struct smb_composite_connect io;
	struct composite_context *creq;
	struct share_config *scfg = ntvfs->ctx->config;

	struct cli_credentials *credentials;
	bool machine_account;

	/* Here we need to determine which server to connect to.
	 * For now we use parametric options, type proxy.
	 * Later we will use security=server and auth_server.c.
	 */
	host = share_string_option(scfg, PROXY_SERVER, NULL);
	user = share_string_option(scfg, PROXY_USER, NULL);
	pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
	domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
	remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
	if (!remote_share) {
		remote_share = sharename;
	}

	machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

	private = talloc_zero(ntvfs, struct proxy_private);
	if (!private) {
		return NT_STATUS_NO_MEMORY;
	}

	ntvfs->private_data = private;

	if (!host) {
		DEBUG(1,("PROXY backend: You must supply server\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (user && pass) {
		DEBUG(5, ("PROXY backend: Using specified password\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		DEBUG(5, ("PROXY backend: Using machine account\n"));
		credentials = cli_credentials_init(private);
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (req->session_info->credentials) {
		DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
		credentials = req->session_info->credentials;
	} else {
		DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* connect to the server, using the smbd event context */
	io.in.dest_host = host;
	io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
	io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
	io.in.called_name = host;
	io.in.credentials = credentials;
	io.in.fallback_to_anonymous = false;
	io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
	io.in.service = remote_share;
	io.in.service_type = "?????";
	io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
	io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
	lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
	lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

	creq = smb_composite_connect_send(&io, private,
					  lp_resolve_context(ntvfs->ctx->lp_ctx),
					  ntvfs->ctx->event_ctx);
	status = smb_composite_connect_recv(creq, private);
	NT_STATUS_NOT_OK_RETURN(status);

	private->tree = io.out.tree;

	private->transport = private->tree->session->transport;
	SETUP_PID;
	private->ntvfs = ntvfs;

	ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
	ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

	/* we need to receive oplock break requests from the server */
	smbcli_oplock_handler(private->transport, oplock_handler, private);

	private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

	private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

	private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

	if (strcmp("A:",private->tree->device)==0) {
		private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
		private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
		private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
								 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
		private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
		private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
		private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
		DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
			 remote_share, private->tree->device,private->tree->fs_type,
			 (private->cache_enabled)?"enabled":"disabled",
			 private->cache_readahead));
	} else {
		private->cache_enabled = false;
		DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
			 remote_share, private->tree->device,private->tree->fs_type));
	}

	private->remote_server = strlower_talloc(private, host);
	private->remote_share = strlower_talloc(private, remote_share);

	return NT_STATUS_OK;
}
/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct async_info *a, *an;

	/* first cleanup pending requests */
	for (a=private->pending; a; a = an) {
		an = a->next;
		smbcli_request_destroy(a->c_req);
		talloc_free(a);
	}

	talloc_free(private);
	ntvfs->private_data = NULL;

	return NT_STATUS_OK;
}
/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
	DLIST_REMOVE(async->proxy->pending, async);
	return 0;
}
/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smbcli_request_simple_recv(c_req);
	talloc_free(async);
	req->async_states->send_fn(req);
}
/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)
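
/*
  A minimal sketch of what TYPE_CHECK is for: declaring the expected
  function-pointer type and assigning the candidate to it makes the compiler
  reject a handler with the wrong signature at macro-expansion time. The
  names good_handler/bad_handler are hypothetical, used only to illustrate:

	void good_handler(struct smbcli_request *r);
	int bad_handler(int x);
	TYPE_CHECK(void (*t)(struct smbcli_request *), good_handler);  <- compiles
	TYPE_CHECK(void (*t)(struct smbcli_request *), bad_handler);   <- compile error
*/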
/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return (error); \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		async->chain = achain; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
} while (0)
#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
	if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return NT_STATUS_NO_MEMORY; \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
/* managers for chained async-callback.
   The model of async handlers has changed.
   Backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   and if async->c_req is NULL then an earlier handler in the chain has
   already received the request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record:
     static void async_chain_handler(struct smbcli_request *c_req).
   It is safe (and often desirable) to call ADD_ASYNC_RECV_TAIL before the
   chain manager is installed.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *);
   the chained async_info_map will be in c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)
	DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
		 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL,\
		 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
		 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
		 file, file?"file":"null", file?"file":"null", #async_fn)); \
#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
	if (! creq) return (error); \
	{ \
		struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
		if (! async_map) return (error); \
		async_map->async=talloc(async_map, struct async_info); \
		if (! async_map->async) return (error); \
		async_map->parms1=io1; \
		async_map->parms2=io2; \
		async_map->fn=async_fn; \
		async_map->async->parms = io1; \
		async_map->async->req = req; \
		async_map->async->f = file; \
		async_map->async->proxy = private; \
		async_map->async->c_req = creq; \
		/* If async_chain_handler is installed, get the list from param */ \
		if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
			struct async_info *i=creq->async.private; \
			DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
		} else if (creq->async.fn) { \
			/* incompatible handler installed */ \
			return (error); \
		} else { \
			DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
		} \
	} \
} while(0)
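
/*
  A usage sketch of the chaining model described above. The handler names
  my_decoder/my_logger are hypothetical, shown only to illustrate the required
  NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS) shape:

	c_req = smb_raw_read_send(private->tree, io);
	ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, my_decoder, NT_STATUS_INTERNAL_ERROR);
	ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, my_logger, NT_STATUS_INTERNAL_ERROR);
	ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);

  async_chain_handler then calls each chained fn in turn; the first handler to
  run while async->c_req is still non-NULL receives the wire response, and
  later handlers see c_req==NULL and work from the already-received result,
  as the model comment above describes.
*/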
/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	(io->generic.level == RAW_OPEN_NTCREATEX && \
	 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) \
	?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window)) \
	:(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}

	c_req = smb_raw_unlink_send(private->tree, unl);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}
/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
	    && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
		return proxy_rpclite(ntvfs, req, io);
	}

	SETUP_PID_AND_FILE;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_ioctl(private->tree, req, io);
	}

	c_req = smb_raw_ioctl_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_ioctl);
}
/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_chkpath *cp)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_chkpath(private->tree, cp);
	}

	c_req = smb_raw_chkpath_send(private->tree, cp);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async qpathinfo replies
*/
static void async_qpathinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}
/*
  return info on a pathname
*/
static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *info)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_pathinfo(private->tree, req, info);
	}

	c_req = smb_raw_pathinfo_send(private->tree, info);

	ASYNC_RECV_TAIL(info, async_qpathinfo);
}
/*
  a handler for async qfileinfo replies
*/
static void async_qfileinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}
/*
  query info on an open file
*/
static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID_AND_FILE;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_fileinfo(private->tree, req, io);
	}

	c_req = smb_raw_fileinfo_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_qfileinfo);
}
/*
  set info on a pathname
*/
static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req, union smb_setfileinfo *st)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_setpathinfo(private->tree, st);
	}

	c_req = smb_raw_setpathinfo_send(private->tree, st);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async open replies
*/
static void async_open(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct proxy_private *proxy = async->proxy;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	union smb_open *io = async->parms;
	union smb_handle *file;

	talloc_free(async);
	req->async_states->status = smb_raw_open_recv(c_req, req, io);
	SMB_OPEN_OUT_FILE(io, file);
	f->fnum = file->fnum;
	file->ntvfs = NULL;
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	file->ntvfs = f->h;
	DLIST_ADD(proxy->files, f);

	if (proxy->cache_enabled) {
		bool oplock=(io->generic.out.oplock_level != OPLOCK_NONE) || proxy->fake_oplock;
		f->cache=cache_open(proxy->cache, f, io, oplock, proxy->cache_readahead);
		if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
	}

failed:
	req->async_states->send_fn(req);
}
/*
  open a file
*/
static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, union smb_open *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	struct ntvfs_handle *h;
	struct proxy_file *f;
	NTSTATUS status;

	SETUP_PID;

	if (io->generic.level != RAW_OPEN_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_open(ntvfs, req, io);
	}

	status = ntvfs_handle_new(ntvfs, req, &h);
	NT_STATUS_NOT_OK_RETURN(status);

	f = talloc_zero(h, struct proxy_file);
	NT_STATUS_HAVE_NO_MEMORY(f);
	f->h = h;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		union smb_handle *file;

		status = smb_raw_open(private->tree, req, io);
		NT_STATUS_NOT_OK_RETURN(status);

		SMB_OPEN_OUT_FILE(io, file);
		f->fnum = file->fnum;
		file->ntvfs = NULL;
		status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
		NT_STATUS_NOT_OK_RETURN(status);
		file->ntvfs = f->h;
		DLIST_ADD(private->files, f);

		if (private->cache_enabled) {
			bool oplock=(io->generic.out.oplock_level != OPLOCK_NONE) || private->fake_oplock;

			f->cache=cache_open(private->cache, f, io, oplock, private->cache_readahead);
			if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
		}

		return NT_STATUS_OK;
	}

	c_req = smb_raw_open_send(private->tree, io);

	ASYNC_RECV_TAIL_F(io, async_open, f);
}
/*
  create a directory
*/
static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_mkdir *md)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_mkdir(private->tree, md);
	}

	c_req = smb_raw_mkdir_send(private->tree, md);

	SIMPLE_ASYNC_TAIL;
}
/*
  remove a directory
*/
static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, struct smb_rmdir *rd)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rmdir(private->tree, rd);
	}
	c_req = smb_raw_rmdir_send(private->tree, rd);

	SIMPLE_ASYNC_TAIL;
}
/*
  rename a set of files
*/
static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_rename *ren)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rename(private->tree, ren);
	}

	c_req = smb_raw_rename_send(private->tree, ren);

	SIMPLE_ASYNC_TAIL;
}
/*
  copy a set of files
*/
static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, struct smb_copy *cp)
{
	return NT_STATUS_NOT_SUPPORTED;
}
/* we only define this separately so we can easily spot read calls in
   pending based on ( c_req->private.fn == async_read_handler ) */
static void async_read_handler(struct smbcli_request *c_req)
{
	async_chain_handler(c_req);
}
NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct proxy_private *private = async->proxy;
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	f->readahead_pending--;
	private->readahead_spare++;

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	return status;
}
/*
  a handler for async read replies - speculative read-aheads.
  It merely saves in the cache. The async chain handler will call send_fn if
  there is one, or if sync_chain_handler is used the send_fn is called by
  the ntvfs back end.
*/
NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	NT_STATUS_NOT_OK_RETURN(status);

	/* if it was a validate read we don't need to save anything unless it failed.
	   Until we use proxy_Read structs we can't tell, so guess */
	if (io->generic.out.nread == io->generic.in.maxcnt &&
	    io->generic.in.mincnt < io->generic.in.maxcnt) {
		/* looks like a validate read, just move the validate pointer; the
		   original read-request has already been satisfied from cache */
		DEBUG(3,("%s megavalidate succeeded, validate to %lld\n",__FUNCTION__,
			 io->generic.in.offset + io->generic.out.nread));
		cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
	} else {
		DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
		cache_handle_save(f, io->generic.out.data,
				  io->generic.out.nread,
				  io->generic.in.offset);
	}

	DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}
/* handler for fragmented reads */
NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	union smb_read *io = talloc_get_type_abort(io1, union smb_read);
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag; /* async->parms; */
	struct async_read_fragments* fragments=fragment->fragments;

	/* if request is not already received by a chained handler, read it */
#warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);

	DEBUG(3,("\n\n%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	fragment->status = status;

	/* remove fragment from fragments */
	DLIST_REMOVE(fragments->fragments, fragment);

#warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
	/* in which case we will want to collate all responses and return a valid read
	   for the leading NT_STATUS_OK fragments */

	/* did this one fail, inducing a general fragments failure? */
	if (!NT_STATUS_IS_OK(fragment->status)) {
		/* preserve the status of the fragment with the smallest offset
		   when we can work out how */
		if (NT_STATUS_IS_OK(fragments->status)) {
			fragments->status=fragment->status;
		}

		cache_handle_novalidate(f);
		DEBUG(5,("** Devalidated proxy due to read failure\n"));
	} else {
		/* No fragments have yet failed, keep collecting responses */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
		/* used to use mincnt */
		off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
		off_t end_offset=MIN(io_extent, extent);
		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (start_offset >= io_extent) {
			DEBUG(3,("useless read-ahead tagged on to: %s",__location__));
		} else {
			uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
		}

		/* There should be a better way to detect, but it needs the proxy rpc struct,
		   not the smb_read struct */
		if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
			DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
				 (long long) io_frag->generic.out.nread,
				 (long long) io_frag->generic.in.mincnt,
				 (long long) io_frag->generic.in.maxcnt));
			cache_handle_novalidate(f);
		}

		/* We broke up the original read. If not enough of this sub-read has
		   been read, and then some of the next block has, it could leave holes!
		   We will only acknowledge up to the first partial read, and treat
		   it as a small read. If the server can return NT_STATUS_OK for a partial
		   read so can we, so we preserve the response.
		   "enough" is all of it (maxcnt), except on the last block, when it has to
		   be enough to fill io->generic.in.mincnt. We know it is the last block
		   if nread is small but we could fill io->generic.in.mincnt */
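		/* Worked example of the rule above (numbers invented for
		   illustration): a master read with mincnt=64k split into two
		   32k fragments. If the first fragment returns only 20k, bytes
		   20k..32k are a hole, so nread is clamped to 20k and
		   fragments->partial is set; a later fragment that returns all
		   of 32k..64k must then not extend nread past the hole. */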
		if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
		    end_offset < io->generic.in.offset + io->generic.in.mincnt) {
			DEBUG(4,("Fragmented read only partially successful\n"));

			/* Shrink the master nread (or grow to this size if we are the first partial) */
			if (! fragments->partial ||
			    (io->generic.in.offset + io->generic.out.nread) > extent) {
				io->generic.out.nread = extent - io->generic.in.offset;
			}

			/* stop any further successes from extending the partial read */
			fragments->partial=true;
		} else {
			/* only grow the master nread if we haven't logged a partial read */
			if (! fragments->partial &&
			    (io->generic.in.offset + io->generic.out.nread) < extent ) {
				io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
			}
		}
	}

	/* Was it the last fragment, or do we know enough to send a response? */
	if (! fragments->fragments) {
		DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
			 io->generic.out.nread, io->generic.in.mincnt,
			 get_friendly_nt_error_msg(fragments->status)));
		if (fragments->async) {
			req->async_states->status=fragments->status;
			DEBUG(5,("Fragments async response sending\n"));
#warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
			/* esp. as they may be attached to by other reads. Maybe attachers should be taking a reference, but how will they
			   know the top level they need to take a reference to.. */
#warning should really queue a sender here, not call it
			req->async_states->send_fn(req);
			DEBUG(5,("Async response sent\n"));
		} else {
			DEBUG(5,("Fragments SYNC return\n"));
		}
	}

	/* because a c_req may be shared by many req, chained handlers must return
	   a status pertaining to the general validity of this specific c_req, not
	   to their own private processing of the c_req for the benefit of their req,
	   which is returned in fragments->status
	*/
	return status;
}
/* Issue read-ahead X bytes where X is the window size calculation based on
   server_latency * server_session_bandwidth
   where latency is the idle (link) latency and bandwidth is less than or
   equal to the actual bandwidth available to the server.
   Read-ahead should honour locked areas in whatever way is necessary (who knows?)
   read_ahead is defined here and not in the cache engine because it requires too
   much knowledge of private structures
*/
/* The concept is buggy unless we can tell the next proxy that these are
   read-aheads, otherwise chained proxy setups will each read-ahead of the
   read-ahead, which can put a larger load on the final server.
   Also we probably need to distinguish between
   * cache-less read-ahead
   * cache-revalidating read-ahead
*/
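/* Back-of-envelope example of that window calculation (figures invented for
   illustration): with ~20ms idle link latency and ~10MB/s of usable server
   bandwidth, the bandwidth-delay product is 0.020s * 10MB/s = 200KB, so a
   read-ahead window much smaller than that leaves the link idle between
   replies. */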
NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
		    union smb_read *io, ssize_t as_read)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_tree *tree = private->tree;
	struct cache_file_entry *cache;
	off_t next_position; /* this read offset+length+window */
	off_t end_position;  /* position we read-ahead to */
	off_t cache_populated;
	off_t read_position, new_extent;

	if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("A\n"));
	if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("B\n"));
	cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
	DEBUG(5,("C\n"));
	/* don't read-ahead if we are in bulk validate mode */
	if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("D\n"));
	/* if we can't trust what we read-ahead anyway then don't bother, although
	 * if delta-reads are enabled we can do so in order to get something to
	 * delta against */
	DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
				 (long long int)(cache_len(cache)),
				 (long long int)(cache->readahead_extent),
				 (long long int)(as_read),
				 cache->readahead_window,private->cache_readahead));
	if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
		DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
					 cache->status));
		return NT_STATUS_UNSUCCESSFUL;
	}

	/* as_read is the mincnt bytes of a request being made, or the
	   out.nread of completed sync requests.
	   Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
	   then this may often NOT be the case if readahead_window < requestsize; so we will
	   get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
	   all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
	   this and have failed sparse writes adjust the cache->readahead_extent back to actual size */

	/* predict the file pointer's next position */
	next_position=io->generic.in.offset + as_read;
	DEBUG(5,("Next position: %lld (%lld + %lld)\n",
		 (long long int)next_position,
		 (long long int)io->generic.in.offset,
		 (long long int)as_read));
	/* calculate the limit of the validated or requested cache */
	cache_populated=MAX(cache->validated_extent, cache->readahead_extent);

	/* will the new read take us beyond the current extent without gaps? */
	if (cache_populated < io->generic.in.offset) {
		/* this read-ahead is a read-behind-pointer */
		new_extent=cache_populated;
	} else {
		new_extent=MAX(next_position, cache_populated);
	}

	/* as far as we can tell new_extent is the smallest offset that doesn't
	   have a pending read request on it. Of course if we got a short read then
	   we will have a cache-gap which we can't handle and need to read from
	   a shrunk readahead_extent, which we don't currently handle */
	read_position=new_extent;

	/* of course if we know how big the remote file is we should limit at that */
	/* we should also mark-out which read-ahead requests are pending so that we
	 * don't repeat them while they are in-transit. */
	/* we can't really use next_position until we can have caches with holes
	   UNLESS next_position < new_extent, because a next_position well before
	   new_extent is no reason to extend it further; we only want to extend
	   with read-aheads if we have cause to suppose the read-ahead data will
	   be wanted, i.e. the next_position is near new_extent.
	   So we can't justify reading beyond window+next_position, but if
	   next_position is leaving gaps, we use new_extent instead */
	end_position=MIN(new_extent, next_position) + cache->readahead_window;
	DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
		 (long long int)read_position,
		 (long long int)(next_position + cache->readahead_window),
		 cache->readahead_window,
		 (long long int)end_position,
		 private->readahead_spare));
	/* do we even need to read? */
	if (! (read_position < end_position)) return NT_STATUS_OK;
	/* readahead_spare is for the whole session (mid/tid?) and may need sharing
	   out over files and other tree-connects or something */
	while (read_position < end_position &&
	       private->readahead_spare > 0) {
		struct smbcli_request *c_req = NULL;
		ssize_t read_remaining = end_position - read_position;
		ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
					 MIN(read_remaining, private->cache_readaheadblock));
		void *req = NULL; /* for the ASYNC_RECV_TAIL_F_ORPHAN macro */
		uint8_t* data;
		union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);

		if (! io_copy)
			return NT_STATUS_NO_MEMORY;

#warning we are ignoring read_for_execute as far as the cache goes
		io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
		io_copy->generic.in.offset=read_position;
		io_copy->generic.in.mincnt=read_block;
		io_copy->generic.in.maxcnt=read_block;
		/* what is generic.in.remaining for? */
		io_copy->generic.in.remaining = MIN(65535,read_remaining);
		io_copy->generic.out.nread=0;

#warning someone must own io_copy, tree, maybe?
		data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
		DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
		if (! data) {
			talloc_free(io_copy);
			return NT_STATUS_NO_MEMORY;
		}
		io_copy->generic.out.data=data;

		/* are we able to pull anything from the cache to validate this read-ahead?
		   NOTE: there is no point in reading ahead merely to re-validate the
		   cache if we don't have oplocks and can't save it....
		   ... or maybe there is if we think a read will come that can be matched
		   up to this response while it is still on the wire */
#warning so we need to distinguish between pipe-line read-ahead and revalidation
		if (/*(cache->status & CACHE_READ)!=0 && */
		    cache_len(cache) >
		    (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
		    cache->validated_extent <
		    (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
			ssize_t pre_fill;

			pre_fill = cache_raw_read(cache, data,
						  io_copy->generic.in.offset,
						  io_copy->generic.in.maxcnt);
			DEBUG(5,("Data read into %p %d\n",data, pre_fill));
			if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
				io_copy->generic.out.nread=pre_fill;
				read_block=pre_fill;
			}
		}

		c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);

		if (c_req) {
			private->readahead_spare--;
			f->readahead_pending++;
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
			if (cache->readahead_extent < read_position+read_block)
				cache->readahead_extent=read_position+read_block;
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
			/* so we can decrease read-ahead counter for this session */
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
			ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

			/* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
			talloc_steal(c_req->async.private, c_req);
			talloc_steal(c_req->async.private, io_copy);
			read_position+=read_block;
		} else {
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
			talloc_free(io_copy);
			break;
		}
	}

	DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
	return NT_STATUS_OK;
}
struct proxy_validate_parts_parts {
	struct proxy_Read* r;
	struct ntvfs_request *req;
	struct proxy_file *f;
	struct async_read_fragments *fragments;
	off_t offset;
	ssize_t remaining;
	bool complete;
	declare_checksum(digest);
	struct MD5Context context;
};

NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
				     struct proxy_validate_parts_parts *parts);
/* this will be the new struct proxy_Read based read function; for now
   it just deals with non-cached based validate to a regular server */
static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
			       struct ntvfs_request *req,
			       struct proxy_Read *r,
			       union smb_handle *file)
{
	struct proxy_private *private = ntvfs->private_data;
	struct proxy_validate_parts_parts *parts;
	struct async_read_fragments *fragments;
	struct proxy_file *f;
	NTSTATUS status;

	f = ntvfs_handle_get_backend_data(file->ntvfs, ntvfs);
	if (!f) return NT_STATUS_INVALID_HANDLE;
	r->in.fnum = f->fnum;

	DEBUG(5,("%s: fnum=%d\n",__FUNCTION__,f->fnum));

	parts = talloc_zero(req, struct proxy_validate_parts_parts);
	DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
	NT_STATUS_HAVE_NO_MEMORY(parts);

	fragments = talloc_zero(parts, struct async_read_fragments);
	NT_STATUS_HAVE_NO_MEMORY(fragments);

	parts->fragments=fragments;

	parts->r=r;
	parts->f=f;
	parts->req=req;
	/* processed offset */
	parts->offset=r->in.offset;
	parts->remaining=r->in.maxcnt;
	fragments->async=true;

	MD5Init (&parts->context);

	/* start a read-loop which will continue in the callback until it is
	   all done */
	status=proxy_validate_parts(ntvfs, parts);
	if (parts->complete) {
		/* Make sure we are not async */
		DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
		return proxy_validate_complete(parts);
	}

	/* Assert if status!=NT_STATUS_OK then parts->complete==true */
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
	DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
	return status;
}
NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
{
	NTSTATUS status;
	struct proxy_Read* r=parts->r;
	MD5Final(parts->digest, &parts->context);

	status = parts->fragments->status;
	r->out.result = status;
	r->out.response.generic.count=r->out.nread;

	DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg (status),
		 r->out.response.generic.count));

	DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
	dump_data (5, r->in.digest.digest, sizeof(parts->digest));
	DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
	dump_data (5, parts->digest, sizeof(parts->digest));

	if (NT_STATUS_IS_OK(status) &&
	    (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
		r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
		DEBUG(5,("======= VALIDATED FINE \n\n\n"));
	} else if (r->in.flags & PROXY_USE_ZLIB) {
		ssize_t size = r->out.response.generic.count;
		DEBUG(5,("======= VALIDATED WRONG \n\n\n"));
		if (compress_block(r->out.response.generic.data, &size) ) {
			r->out.flags|=PROXY_USE_ZLIB;
			r->out.response.compress.count=size;
			r->out.response.compress.data=r->out.response.generic.data;
			DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
				 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
		}
	}

	/* assert: this must only be true if we are in a callback */
	if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
		/* we are async complete, we need to call the sendfn */
		parts->req->async_states->status=status;
		DEBUG(5,("Fragments async response sending\n"));

		parts->req->async_states->send_fn(parts->req);
		return NT_STATUS_OK;
	}
	return status;
}
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
	struct proxy_Read* r=parts->r;
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag;
	struct async_read_fragments* fragments=fragment->fragments;

	DEBUG(5,("%s: parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));
	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);
	DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));

	fragment->status=status;

	if (NT_STATUS_IS_OK(status)) {
		/* TODO: If we are not sequentially "next", queue until we can do it */
		/* log this data in r->out.generic.data */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
		/* Don't want to go past mincnt */
		off_t io_extent=r->in.offset + r->in.mincnt;
		off_t end_offset=MIN(io_extent, extent);

		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (! (start_offset >= io_extent)) {
			uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
			r->out.nread=end_offset - r->in.offset;
		}

		MD5Update(&parts->context, io_frag->generic.out.data,
			  io_frag->generic.out.nread);

		parts->fragments->status=status;
		status=proxy_validate_parts(ntvfs, parts);
	} else {
		parts->fragments->status=status;
	}

	DLIST_REMOVE(fragments->fragments, fragment);
	/* this will free the io_frag too */
	talloc_free(fragment);

	if (parts->complete || NT_STATUS_IS_ERR(status)) {
		/* this will call sendfn; the chain handler won't know... but
		   it should have no more handlers queued */
		return proxy_validate_complete(parts);
	}

	return NT_STATUS_OK;
}
/* continue a read loop, possibly from a callback */
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
				     struct proxy_validate_parts_parts *parts)
{
	struct proxy_private *private = ntvfs->private_data;
	union smb_read *io_frag;
	struct async_read_fragment *fragment;
	struct smbcli_request *c_req = NULL;
	ssize_t size=private->tree->session->transport->negotiate.max_xmit \
		     - (MIN_SMB_SIZE+32);

	/* Have we already read enough? */
	if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
		parts->complete=true;
		return NT_STATUS_OK;
	}

	size=MIN(size, parts->remaining);

	fragment=talloc_zero(parts->fragments, struct async_read_fragment);
	NT_STATUS_HAVE_NO_MEMORY(fragment);

	io_frag = talloc_zero(fragment, union smb_read);
	NT_STATUS_HAVE_NO_MEMORY(io_frag);

	io_frag->generic.out.data = talloc_size(io_frag, size);
	NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);

	io_frag->generic.level = RAW_READ_GENERIC;
	io_frag->generic.in.file.fnum = parts->r->in.fnum;
	io_frag->generic.in.offset = parts->offset;
	io_frag->generic.in.mincnt = size;
	io_frag->generic.in.maxcnt = size;
	io_frag->generic.in.remaining = 0;
#warning maybe true is more permissive?
	io_frag->generic.in.read_for_execute = false;

	//c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
	c_req = smb_raw_read_send(private->tree, io_frag);
	NT_STATUS_HAVE_NO_MEMORY(c_req);

	parts->offset+=size;
	parts->remaining-=size;
	fragment->c_req = c_req;
	fragment->io_frag = io_frag;
	fragment->fragments=parts->fragments;
	DLIST_ADD(parts->fragments->fragments, fragment);

	{ void* req=NULL;
		ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
		ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
	}

	DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));

	return NT_STATUS_OK;
}
1474 read from a file
1476 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
1477 struct ntvfs_request *req, union smb_read *io)
1479 struct proxy_private *private = ntvfs->private_data;
1480 struct smbcli_request *c_req;
1481 struct proxy_file *f;
1482 struct async_read_fragments *fragments=NULL;
1483 /* how much of read-from-cache is certainly valid */
1484 ssize_t valid=0;
1485 off_t offset=io->generic.in.offset+valid;
1486 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
1488 SETUP_PID;
1490 if (io->generic.level != RAW_READ_GENERIC &&
1491 private->map_generic) {
1492 return ntvfs_map_read(ntvfs, req, io);
1495 SETUP_FILE_HERE(f);
1497 DEBUG(3,("%s offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
1498 io->generic.in.offset,
1499 io->generic.in.mincnt,
1500 io->generic.in.maxcnt));
1501 io->generic.out.nread=0;
1502 /* attempt to read from cache. if nread becomes non-zero then we
1503 have cache to validate. Instead of returning "valid" value, cache_read
1504 should probably return an async_read_fragment structure */
1506 if (private->cache_enabled) {
1507 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
1509 if (NT_STATUS_IS_OK(status)) {
1510 /* if we read enough valid data, return it */
1511 if (valid > 0 && valid>=io->generic.in.mincnt) {
1512 /* valid will not be bigger than maxcnt */
1513 io->generic.out.nread=valid;
1514 DEBUG(1,("Read from cache offset=%d size=%d\n",
1515 (int)(io->generic.in.offset),
1516 (int)(io->generic.out.nread)) );
1517 return status;
1522 fragments=talloc_zero(req, struct async_read_fragments);
1523 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
1524 /* See if there are pending reads that would satisfy this request
1525 We have a validated read up to io->generic.out.nread. Anything between
1526 this and mincnt MUST be read, but we could first try and attach to
1527 any pending read-ahead on the same file.
1528 If those read-aheads fail we will re-issue a regular read from the
1529 callback handler and hope it hasn't taken too long. */
1531 /* offset is the extentof the file from which we still need to find
1532 matching read-requests. */
1533 offset=io->generic.in.offset+valid;
1534 /* limit is the byte beyond the last byte for which we need a request.
1535 This used to be mincnt, but is now maxcnt to cope with validate reads.
1536 Maybe we can switch back to mincnt when proxy_read struct is used
1537 instead of smb_read. */
1539 limit=io->generic.in.offset+io->generic.in.maxcnt;
1541 while (offset < limit) {
1542 /* Should look for the read-ahead with offset <= in.offset+out.nread
1543 with the longest span, but there is only likely to be one anyway so
1544 just take the first */
1545 struct async_info* pending=private->pending;
1546 union smb_read *readahead_io=NULL;
1547 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
1548 while(pending) {
1549 if (pending->c_req->async.fn == async_read_handler) {
1550 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
1551 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
1553 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
1554 readahead_io->generic.in.offset <= offset &&
1555 readahead_io->generic.in.offset +
1556 readahead_io->generic.in.mincnt > offset) break;
1558 readahead_io=NULL;
1559 pending=pending->next;
1561 /* ASSERT(readahead_io == pending->c_req->async.params) */
1562 if (pending && readahead_io) {
1563 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1564 fragment->fragments=fragments;
1565 fragment->io_frag=readahead_io;
1566 fragment->c_req = pending->c_req;
1567 /* we found one, so attach to it. We DO need a talloc_reference
1568 because the original send_fn might be called before ALL chained
1569 handlers, and our handler will call its own send_fn first. ugh.
1570 Maybe we need to separate reverse-mapping callbacks from data users? */
1571 /* Note: the read-ahead io is passed as io, and our req io is
1572 in io_frag->io */
1573 //talloc_reference(req, pending->req);
1574 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
1575 readahead_io->generic.in.offset,
1576 readahead_io->generic.in.mincnt));
1577 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
1578 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1579 DEBUG(5,("Attached OK\n"));
1580 #warning we don't want to return if we fail to attach, just break
1581 DLIST_ADD(fragments->fragments, fragment);
1582 /* updated offset for which we have reads */
1583 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
1584 } else {
1585 /* there are no pending reads to fill this so issue one up to
1586 the maximum supported read size. We could see when the next
1587 pending read is (if any) and only read up till there... later...
1588 Issue a fragment request for what is left, clone io.
1589 In the case that there were no fragments this will be the original read
1590 but with a cloned io struct */
1591 off_t next_offset;
1592 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
1593 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1594 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
1595 ssize_t offset_inc=offset-io_frag->generic.in.offset;
1596 /* 250 is a guess at ndr rpc overheads; a sizing sketch follows this function */
1597 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
1598 private->tree->session->transport->negotiate.max_xmit) \
1599 - (MIN_SMB_SIZE+32);
1600 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
1601 readsize=MIN(limit-offset, readsize);
1603 DEBUG(5,("Issuing direct read\n"));
1604 /* reduce the cached read (if any). nread is unsigned */
1605 if (io_frag->generic.out.nread > offset_inc) {
1606 io_frag->generic.out.nread-=offset_inc;
1607 /* don't make nread buffer look too big */
1608 if (io_frag->generic.out.nread > readsize)
1609 io_frag->generic.out.nread = readsize;
1610 } else {
1611 io_frag->generic.out.nread=0;
1613 /* adjust the data pointer so we read to the right place */
1614 io_frag->generic.out.data+=offset_inc;
1615 io_frag->generic.in.offset=offset;
1616 io_frag->generic.in.maxcnt=readsize;
1617 /* we don't mind mincnt being smaller if this is the last frag,
1618 and we already handle it being bigger but not reached...
1619 The spell would be:
1620 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt); */
1622 io_frag->generic.in.mincnt=readsize;
1623 fragment->fragments=fragments;
1624 fragment->io_frag=io_frag;
1625 #warning attach to send_fn handler
1626 /* what if someone attaches to us? Our send_fn is called from our
1627 chained handler which will be before their handler and io will
1628 already be freed. We need to keep a reference to the io and the data
1629 but we don't know where it came from in order to take a reference.
1630 We therefore need to tackle calling send_fn AFTER all other handlers */
1632 /* Calculate next offset (in advance) */
1633 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
1635 /* if we are (going to be) the last fragment and we are in VALIDATE
1636 mode, see if we can do a bulk validate now.
1637 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
1638 don't do a validate on a receive validate read */
1640 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
1641 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
1642 ssize_t length=private->cache_validatesize;
1643 declare_checksum(digest);
1645 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
1646 length, (unsigned long long) offset));
1647 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
1648 /* no point in doing it if md5'd length < current out.nread
1649 remember: out.data contains this requests cached response
1650 if validate succeeds */
1651 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
1652 /* upgrade the read, allocate the proxy_read struct here
1653 and fill in the extras, no more out-of-band stuff */
1654 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
1655 dump_data (5, digest, sizeof(digest));
1657 r=talloc_zero(io_frag, struct proxy_Read);
1658 memcpy(r->in.digest.digest, digest, sizeof(digest));
1659 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
1660 io_frag->generic.in.maxcnt = length;
1661 /* the proxy send function will calculate the checksum based on *data */
1662 } else {
1663 /* not enough in cache to make it worthwhile anymore */
1664 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x len=%lld\n",
1665 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0,
1666 (unsigned long long)length));
1667 cache_handle_novalidate(f);
1668 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
1669 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
1671 } else {
1672 if (f->cache && f->cache->status & CACHE_VALIDATE) {
1673 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
1674 (long long) next_offset,
1675 (long long) limit));
1679 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
1680 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
1681 io_frag->generic.in.maxcnt));
1682 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
1683 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
1684 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
1685 fragment->c_req=c_req;
1686 DLIST_ADD(fragments->fragments, fragment);
1687 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
1688 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1689 DEBUG(5,("Frag response chained\n"));
1690 /* normally we would only install the chain_handler if we wanted async
1691 response, but as it is the async_read_fragment handler that calls send_fn
1692 based on fragments->async, instead of async_chain_handler, we don't
1693 need to worry about this call completing async'ly while we are
1694 waiting on the other attached calls. Otherwise we would not attach
1695 the async_chain_handler (via async_read_handler) because of the wait
1696 below */
1697 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
1698 void* req=NULL;
1699 /* call async_chain_handler not the read handler so that folk can't
1700 attach to it, till we solve the problem above */
1701 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1703 offset = next_offset;
1705 DEBUG(5,("Next fragment\n"));
1708 /* do we still need a final fragment? Issue a read */
1710 DEBUG(5,("No frags left to read\n"));
1713 /* issue new round of read-aheads */
1714 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
1715 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
1716 DEBUG(5,("== Done Read aheads\n"));
1718 /* If we have fragments but we are not called async, we must sync-wait on them */
1719 /* did we map the entire request to pending reads? */
1720 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1721 struct async_read_fragment *fragment;
1722 DEBUG(5,("Sync waiting\n"));
1723 /* fragment gets freed during the chain_handler so we start at
1724 the top each time */
1725 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
1726 /* Any fragments async handled while we sync-wait on one
1727 will remove themselves from the list and not get sync waited */
1728 sync_chain_handler(fragment->c_req);
1729 /* if we have a non-ok result AND we know we have all the responses
1730 up to extent, then we could quit the loop early and change the
1731 fragments->async to true so the final irrelevant responses would
1732 come async and we could send our response now - but we don't
1733 track that detail until we have cache-maps that we can use to
1734 track the responded fragments and combine their linear extents
1735 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
1737 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
1738 return fragments->status;
1741 DEBUG(5,("Async returning\n"));
1742 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1743 return NT_STATUS_OK;
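/* A sizing sketch (not part of the build): how the fragment size used by
   proxy_read() above falls out of the negotiated limits. The worked numbers
   assume MIN_SMB_SIZE is 35 and a negotiated max_xmit of 4356; both are
   illustrative assumptions. */
#if 0
static ssize_t frag_readsize(ssize_t max_xmit, off_t offset, off_t limit)
{
	/* bounded by the ntioctl payload cap and the negotiated buffer,
	   less a guess at header overhead (see the #warning above) */
	ssize_t readsize = MIN(PROXY_NTIOCTL_MAXDATA, max_xmit) - (MIN_SMB_SIZE+32);
	/* a single SMB read carries a 16-bit byte count */
	if (readsize > 0xFFFF) readsize = 0xFFFF;
	/* never read past the last byte we still need */
	return MIN(limit - offset, readsize);
}
/* e.g. max_xmit=4356 gives 4356-(35+32)=4289 bytes per fragment, so a
   1MiB read becomes ceil(1048576/4289) = 245 chained fragments */
#endif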
1747 /* a handler to de-fragment async write replies back to one request.
1748 Can cope with out-of-order async responses by waiting for all responses
1749 on an NT_STATUS_OK case so that nwritten is properly adjusted */
1751 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1753 struct smbcli_request *c_req = async->c_req;
1754 struct ntvfs_request *req = async->req;
1755 struct proxy_file *f=async->f;
1756 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
1757 /* this is the io against which the fragment is to be applied */
1758 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
1759 /* this is the io for the write that issued the callback */
1760 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
1761 struct async_write_fragments* fragments=fragment->fragments;
1762 ssize_t extent=0;
1764 /* if request is not already received by a chained handler, read it */
1765 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
1766 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
1768 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
1769 get_friendly_nt_error_msg(status)));
1771 fragment->status = status;
1773 DLIST_REMOVE(fragments->fragments, fragment);
1775 /* did this one fail? */
1776 if (! NT_STATUS_IS_OK(fragment->status)) {
1777 if (NT_STATUS_IS_OK(fragments->status)) {
1778 fragments->status=fragment->status;
1780 } else {
1781 /* No fragments have yet failed, keep collecting responses */
1782 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
1784 /* we broke up the write so it could all be written. If only some of
1785 this block has been written, and then some of the next block,
1786 it could leave unwritten holes! We will only acknowledge up to the
1787 first partial write, and let the client deal with it.
1788 If the server can return NT_STATUS_OK for a partial write, so can we
(see the sketch after this handler) */
1789 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
1790 DEBUG(4,("Fragmented write only partially successful\n"));
1792 /* Shrink the master nwritten */
1793 if ( ! fragments->partial ||
1794 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
1795 io->generic.out.nwritten = extent - io->generic.in.offset;
1797 /* stop any further successes from extending the partial write */
1798 fragments->partial=true;
1799 } else {
1800 /* only grow the master nwritten if we haven't logged a partial write */
1801 if (! fragments->partial &&
1802 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
1803 io->generic.out.nwritten = extent - io->generic.in.offset;
1808 /* if this was the last fragment, clean up */
1809 if (! fragments->fragments) {
1810 DEBUG(5,("Async write re-fragmented with %d of %d\n",
1811 io->generic.out.nwritten,
1812 io->generic.in.count));
1813 if (NT_STATUS_IS_OK(fragments->status)) {
1814 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
1815 io->generic.in.offset);
1817 if (fragments->async) {
1818 req->async_states->status=fragments->status;
1819 #warning it's not good freeing early if other pending requests have io allocated against this request which will now be freed
1820 req->async_states->send_fn(req);
1821 DEBUG(5,("Async response sent\n"));
1822 } else {
1823 DEBUG(5,("Fragments SYNC return\n"));
1827 return status;
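/* A minimal sketch (not part of the build) of the nwritten accounting rule
   applied above: fragments may complete out of order, but we only ever
   acknowledge up to the first short write, so one partial fragment freezes
   nwritten and later successes cannot extend it. */
#if 0
static void account_fragment(union smb_write *io, bool *partial,
			     off_t frag_offset, ssize_t frag_requested,
			     ssize_t frag_written)
{
	ssize_t extent = frag_offset + frag_written;
	if (frag_written != frag_requested) {
		/* shrink: acknowledge only up to the first hole */
		if (! *partial ||
		    (io->generic.in.offset + io->generic.out.nwritten) > extent) {
			io->generic.out.nwritten = extent - io->generic.in.offset;
		}
		*partial = true;
	} else if (! *partial &&
		   (io->generic.in.offset + io->generic.out.nwritten) < extent) {
		/* grow: no hole seen yet, extend the acknowledged extent */
		io->generic.out.nwritten = extent - io->generic.in.offset;
	}
}
#endif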
1831 /* a handler for async write replies */
1833 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1835 struct smbcli_request *c_req = async->c_req;
1836 struct ntvfs_request *req = async->req;
1837 struct proxy_file *f=async->f;
1838 union smb_write *io=async->parms;
1840 if (c_req)
1841 status = smb_raw_write_recv(c_req, async->parms);
1843 cache_handle_save(f, io->generic.in.data,
1844 io->generic.out.nwritten,
1845 io->generic.in.offset);
1847 return status;
1851 /* write to a file */
1853 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
1854 struct ntvfs_request *req, union smb_write *io)
1856 struct proxy_private *private = ntvfs->private_data;
1857 struct smbcli_request *c_req;
1858 struct proxy_file *f;
1860 SETUP_PID;
1862 if (io->generic.level != RAW_WRITE_GENERIC &&
1863 private->map_generic) {
1864 return ntvfs_map_write(ntvfs, req, io);
1866 SETUP_FILE_HERE(f);
1868 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
1869 #warning ERROR get rid of this
1870 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1871 NTSTATUS status;
1872 if (PROXY_REMOTE_SERVER(private)) {
1873 /* Do a proxy write */
1874 status=proxy_smb_raw_write(ntvfs, io, f);
1875 } else if (io->generic.in.count >
1876 private->tree->session->transport->negotiate.max_xmit) {
1878 /* smbcli_write can deal with large writes, which are bigger than
1879 tree->session->transport->negotiate.max_xmit */
1880 ssize_t size=smbcli_write(private->tree,
1881 io->generic.in.file.fnum,
1882 io->generic.in.wmode,
1883 io->generic.in.data,
1884 io->generic.in.offset,
1885 io->generic.in.count);
1887 if (size==io->generic.in.count || size > 0) {
1888 io->generic.out.nwritten=size;
1889 status=NT_STATUS_OK;
1890 } else {
1891 status=NT_STATUS_UNSUCCESSFUL;
1893 } else {
1894 status=smb_raw_write(private->tree, io);
1897 /* Save write in cache */
1898 if (NT_STATUS_IS_OK(status)) {
1899 cache_handle_save(f, io->generic.in.data,
1900 io->generic.out.nwritten,
1901 io->generic.in.offset);
1904 return status;
1907 /* smb_raw_write_send can't deal with large writes, which are bigger than
1908 tree->session->transport->negotiate.max_xmit so we have to break it up
1909 trying to preserve the async nature of the call as much as possible
(a chunking sketch follows this function) */
1910 if (PROXY_REMOTE_SERVER(private)) {
1911 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
1912 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
1913 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1914 } else if (io->generic.in.count <=
1915 private->tree->session->transport->negotiate.max_xmit) {
1916 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
1917 c_req = smb_raw_write_send(private->tree, io);
1918 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1919 } else {
1920 ssize_t remaining = io->generic.in.count;
1921 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
1922 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
1923 int done = 0;
1924 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
1926 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
1927 __FUNCTION__, io->generic.in.count,
1928 private->tree->session->transport->negotiate.max_xmit));
1930 fragments->io = io;
1931 io->generic.out.nwritten=0;
1932 io->generic.out.remaining=0;
1934 do {
1935 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
1936 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
1937 ssize_t size = MIN(block, remaining);
1939 fragment->fragments = fragments;
1940 fragment->io_frag = io_frag;
1942 io_frag->generic.level = io->generic.level;
1943 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
1944 io_frag->generic.in.wmode = io->generic.in.wmode;
1945 io_frag->generic.in.count = size;
1946 io_frag->generic.in.offset = io->generic.in.offset + done;
1947 io_frag->generic.in.data = io->generic.in.data + done;
1949 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
1950 if (! c_req) {
1951 /* let pending requests clean-up when ready */
1952 fragments->status=NT_STATUS_UNSUCCESSFUL;
1953 talloc_steal(NULL, fragments);
1954 DEBUG(3,("Can't send request fragment\n"));
1955 return NT_STATUS_UNSUCCESSFUL;
1958 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
1959 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
1960 fragment->c_req=c_req;
1961 DLIST_ADD(fragments->fragments, fragment);
1963 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1964 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
1965 DEBUG(5,("Frag response chained\n"));
1967 remaining -= size;
1968 done += size;
1969 } while(remaining > 0);
1971 /* this strategy has the callback chain attached to each c_req, so we
1972 don't use the ASYNC_RECV_TAIL* to install a general one */
1975 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
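/* A chunking sketch (not part of the build) of the fragmentation loop above.
   The block size of 4289 in the example is an assumption, derived from a
   max_xmit of 4356 and MIN_SMB_SIZE of 35 as on the read path. */
#if 0
static int count_write_fragments(ssize_t count, ssize_t block)
{
	int frags = 0;
	ssize_t remaining = count;
	while (remaining > 0) {
		ssize_t size = MIN(block, remaining);
		remaining -= size;
		frags++;
	}
	return frags;
}
/* e.g. count=100000, block=4289: 23 full fragments of 4289 bytes plus a
   final fragment of 1353 bytes, so count_write_fragments() returns 24 */
#endif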
1979 /* a handler for async seek replies */
1981 static void async_seek(struct smbcli_request *c_req)
1983 struct async_info *async = c_req->async.private;
1984 struct ntvfs_request *req = async->req;
1985 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
1986 talloc_free(async);
1987 req->async_states->send_fn(req);
1991 /* seek in a file */
1993 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
1994 struct ntvfs_request *req,
1995 union smb_seek *io)
1997 struct proxy_private *private = ntvfs->private_data;
1998 struct smbcli_request *c_req;
2000 SETUP_PID_AND_FILE;
2002 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2003 return smb_raw_seek(private->tree, io);
2006 c_req = smb_raw_seek_send(private->tree, io);
2008 ASYNC_RECV_TAIL(io, async_seek);
2012 /* flush a file */
2014 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
2015 struct ntvfs_request *req,
2016 union smb_flush *io)
2018 struct proxy_private *private = ntvfs->private_data;
2019 struct smbcli_request *c_req;
2021 SETUP_PID;
2022 switch (io->generic.level) {
2023 case RAW_FLUSH_FLUSH:
2024 SETUP_FILE;
2025 break;
2026 case RAW_FLUSH_ALL:
2027 io->generic.in.file.fnum = 0xFFFF;
2028 break;
2029 case RAW_FLUSH_SMB2:
2030 return NT_STATUS_INVALID_LEVEL;
2033 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2034 return smb_raw_flush(private->tree, io);
2037 c_req = smb_raw_flush_send(private->tree, io);
2039 SIMPLE_ASYNC_TAIL;
2043 /* close a file */
2045 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
2046 struct ntvfs_request *req, union smb_close *io)
2048 struct proxy_private *private = ntvfs->private_data;
2049 struct smbcli_request *c_req;
2050 struct proxy_file *f;
2051 union smb_close io2;
2053 SETUP_PID;
2055 if (io->generic.level != RAW_CLOSE_GENERIC &&
2056 private->map_generic) {
2057 return ntvfs_map_close(ntvfs, req, io);
2059 SETUP_FILE_HERE(f);
2060 /* Note, we aren't freeing f, or its h, here. Should we?
2061 even if file-close fails, we'll remove it from the list,
2062 what else would we do? Maybe we should not remove until
2063 after the proxied call completes? */
2064 DLIST_REMOVE(private->files, f);
2066 /* possibly samba can't do RAW_CLOSE_SEND yet */
2067 if (! (c_req = smb_raw_close_send(private->tree, io))) {
2068 if (io->generic.level == RAW_CLOSE_GENERIC) {
2069 ZERO_STRUCT(io2);
2070 io2.close.level = RAW_CLOSE_CLOSE;
2071 io2.close.in.file = io->generic.in.file;
2072 io2.close.in.write_time = io->generic.in.write_time;
2073 io = &io2;
2075 c_req = smb_raw_close_send(private->tree, io);
2078 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2079 return smbcli_request_simple_recv(c_req);
2082 SIMPLE_ASYNC_TAIL;
2086 /* exit - closing files open by the pid */
2088 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
2089 struct ntvfs_request *req)
2091 struct proxy_private *private = ntvfs->private_data;
2092 struct smbcli_request *c_req;
2094 SETUP_PID;
2096 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2097 return smb_raw_exit(private->tree->session);
2100 c_req = smb_raw_exit_send(private->tree->session);
2102 SIMPLE_ASYNC_TAIL;
2106 /* logoff - closing files open by the user */
2108 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
2109 struct ntvfs_request *req)
2111 /* we can't do this right in the proxy backend .... */
2112 return NT_STATUS_OK;
2116 /* setup for an async call - nothing to do yet */
2118 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
2119 struct ntvfs_request *req,
2120 void *private)
2122 return NT_STATUS_OK;
2126 /* cancel an async call */
2128 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
2129 struct ntvfs_request *req)
2131 struct proxy_private *private = ntvfs->private_data;
2132 struct async_info *a;
2134 /* find the matching request */
2135 for (a=private->pending;a;a=a->next) {
2136 if (a->req == req) {
2137 break;
2141 if (a == NULL) {
2142 return NT_STATUS_INVALID_PARAMETER;
2145 return smb_raw_ntcancel(a->c_req);
2149 /* lock a byte range */
2151 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
2152 struct ntvfs_request *req, union smb_lock *io)
2154 struct proxy_private *private = ntvfs->private_data;
2155 struct smbcli_request *c_req;
2157 SETUP_PID;
2159 if (io->generic.level != RAW_LOCK_GENERIC &&
2160 private->map_generic) {
2161 return ntvfs_map_lock(ntvfs, req, io);
2163 SETUP_FILE;
2165 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2166 return smb_raw_lock(private->tree, io);
2169 c_req = smb_raw_lock_send(private->tree, io);
2170 SIMPLE_ASYNC_TAIL;
2174 /* set info on an open file */
2176 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
2177 struct ntvfs_request *req,
2178 union smb_setfileinfo *io)
2180 struct proxy_private *private = ntvfs->private_data;
2181 struct smbcli_request *c_req;
2183 SETUP_PID_AND_FILE;
2185 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2186 return smb_raw_setfileinfo(private->tree, io);
2188 c_req = smb_raw_setfileinfo_send(private->tree, io);
2190 SIMPLE_ASYNC_TAIL;
2195 /* a handler for async fsinfo replies */
2197 static void async_fsinfo(struct smbcli_request *c_req)
2199 struct async_info *async = c_req->async.private;
2200 struct ntvfs_request *req = async->req;
2201 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, async->parms);
2202 talloc_free(async);
2203 req->async_states->send_fn(req);
2207 /* return filesystem space info */
2209 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
2210 struct ntvfs_request *req, union smb_fsinfo *fs)
2212 struct proxy_private *private = ntvfs->private_data;
2213 struct smbcli_request *c_req;
2215 SETUP_PID;
2217 /* QFS Proxy */
2218 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
2219 fs->proxy_info.out.major_version=1;
2220 fs->proxy_info.out.minor_version=0;
2221 fs->proxy_info.out.capability=0;
2222 return NT_STATUS_OK;
2225 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2226 return smb_raw_fsinfo(private->tree, req, fs);
2229 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
2231 ASYNC_RECV_TAIL(fs, async_fsinfo);
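/* An illustrative sketch (not part of the build) of how a downstream client
   might use the RAW_QFS_PROXY_INFO level answered above to detect a proxy.
   That a non-proxy server fails this level is an assumption; the
   smb_raw_fsinfo() signature follows its use in proxy_fsinfo() above. */
#if 0
static bool probe_proxy_capability(struct smbcli_tree *tree, TALLOC_CTX *mem_ctx)
{
	union smb_fsinfo fs;
	ZERO_STRUCT(fs);
	fs.generic.level = RAW_QFS_PROXY_INFO;
	if (! NT_STATUS_IS_OK(smb_raw_fsinfo(tree, mem_ctx, &fs))) {
		return false; /* remote end did not answer the proxy level */
	}
	return fs.proxy_info.out.major_version == 1;
}
#endif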
2235 /* return print queue info */
2237 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
2238 struct ntvfs_request *req, union smb_lpq *lpq)
2240 return NT_STATUS_NOT_SUPPORTED;
2244 /* list files in a directory matching a wildcard pattern */
2246 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
2247 struct ntvfs_request *req, union smb_search_first *io,
2248 void *search_private,
2249 bool (*callback)(void *, const union smb_search_data *))
2251 struct proxy_private *private = ntvfs->private_data;
2253 SETUP_PID;
2255 return smb_raw_search_first(private->tree, req, io, search_private, callback);
2258 /* continue a search */
2259 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
2260 struct ntvfs_request *req, union smb_search_next *io,
2261 void *search_private,
2262 bool (*callback)(void *, const union smb_search_data *))
2264 struct proxy_private *private = ntvfs->private_data;
2266 SETUP_PID;
2268 return smb_raw_search_next(private->tree, req, io, search_private, callback);
2271 /* close a search */
2272 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
2273 struct ntvfs_request *req, union smb_search_close *io)
2275 struct proxy_private *private = ntvfs->private_data;
2277 SETUP_PID;
2279 return smb_raw_search_close(private->tree, io);
2283 /* a handler for async trans2 replies */
2285 static void async_trans2(struct smbcli_request *c_req)
2287 struct async_info *async = c_req->async.private;
2288 struct ntvfs_request *req = async->req;
2289 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
2290 talloc_free(async);
2291 req->async_states->send_fn(req);
2294 /* raw trans2 */
2295 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
2296 struct ntvfs_request *req,
2297 struct smb_trans2 *trans2)
2299 struct proxy_private *private = ntvfs->private_data;
2300 struct smbcli_request *c_req;
2302 if (private->map_trans2) {
2303 return NT_STATUS_NOT_IMPLEMENTED;
2306 SETUP_PID;
2307 #warning we should be mapping file handles here
2309 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2310 return smb_raw_trans2(private->tree, req, trans2);
2313 c_req = smb_raw_trans2_send(private->tree, trans2);
2315 ASYNC_RECV_TAIL(trans2, async_trans2);
2319 /* SMBtrans - not used on file shares */
2320 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
2321 struct ntvfs_request *req,
2322 struct smb_trans2 *trans2)
2324 return NT_STATUS_ACCESS_DENIED;
2328 /* a handler for async change notify replies */
2330 static void async_changenotify(struct smbcli_request *c_req)
2332 struct async_info *async = c_req->async.private;
2333 struct ntvfs_request *req = async->req;
2334 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
2335 talloc_free(async);
2336 req->async_states->send_fn(req);
2339 /* change notify request - always async */
2340 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
2341 struct ntvfs_request *req,
2342 union smb_notify *io)
2344 struct proxy_private *private = ntvfs->private_data;
2345 struct smbcli_request *c_req;
2346 int saved_timeout = private->transport->options.request_timeout;
2347 struct proxy_file *f;
2349 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
2350 return NT_STATUS_NOT_IMPLEMENTED;
2353 SETUP_PID;
2355 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
2356 if (!f) return NT_STATUS_INVALID_HANDLE;
2357 io->nttrans.in.file.fnum = f->fnum;
2359 /* this request doesn't make sense unless it's async */
2360 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2361 return NT_STATUS_INVALID_PARAMETER;
2364 /* we must not time out on notify requests - they wait
2365 forever */
2366 private->transport->options.request_timeout = 0;
2368 c_req = smb_raw_changenotify_send(private->tree, io);
2370 private->transport->options.request_timeout = saved_timeout;
2372 ASYNC_RECV_TAIL(io, async_changenotify);
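/* A sketch (not part of the build) of the save/restore idiom used above,
   pulled out as a helper: notify requests must never time out, but the
   zeroed timeout must not leak into later requests on the same transport. */
#if 0
static struct smbcli_request *changenotify_send_no_timeout(
	struct proxy_private *private, union smb_notify *io)
{
	int saved_timeout = private->transport->options.request_timeout;
	struct smbcli_request *c_req;
	private->transport->options.request_timeout = 0; /* wait forever */
	c_req = smb_raw_changenotify_send(private->tree, io);
	private->transport->options.request_timeout = saved_timeout;
	return c_req;
}
#endif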
2376 /* A handler for converting from rpc struct replies to ntioctl */
2378 static NTSTATUS proxy_rpclite_map_async_send(
2379 struct ntvfs_module_context *ntvfs,
2380 struct ntvfs_request *req,
2381 void *io1, void *io2, NTSTATUS status)
2383 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
2384 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
2385 void* r=rpclite_send->struct_ptr;
2386 struct ndr_push* push;
2387 const struct ndr_interface_call* call=rpclite_send->call;
2388 enum ndr_err_code ndr_err;
2389 DATA_BLOB ndr;
2391 talloc_free(rpclite_send);
2393 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2394 NT_STATUS_HAVE_NO_MEMORY(push);
2396 if (0) {
2397 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2400 ndr_err = call->ndr_push(push, NDR_OUT, r);
2401 status=ndr_map_error2ntstatus(ndr_err);
2403 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2404 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2405 nt_errstr(status)));
2406 return status;
2409 ndr=ndr_push_blob(push);
2410 //if (ndr.length > io->ntioctl.in.max_data) {
2411 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
2412 io->ntioctl.in.max_data, ndr.data));
2413 io->ntioctl.out.blob=ndr;
2414 return status;
2418 /* A handler for sending async rpclite Read replies that were mapped to union smb_read */
2420 static NTSTATUS rpclite_proxy_Read_map_async_send(
2421 struct ntvfs_module_context *ntvfs,
2422 struct ntvfs_request *req,
2423 void *io1, void *io2, NTSTATUS status)
2425 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
2426 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
2428 /* status here is a result of proxy_read, it doesn't reflect the status
2429 of the rpc transport or related calls, just the read operation */
2430 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2431 r->out.result=status;
2433 if (! NT_STATUS_IS_OK(status)) {
2434 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
2435 r->out.nread=0;
2436 r->out.flags=0;
2437 } else {
2438 ssize_t size=io->readx.out.nread;
2439 r->out.flags=0;
2440 r->out.nread=io->readx.out.nread;
2442 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
2443 declare_checksum(digest);
2444 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
2446 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
2447 dump_data (5, digest, sizeof(digest));
2448 DEBUG(5,("Cached digest\n"));
2449 dump_data (5, r->in.digest.digest, sizeof(digest));
2451 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
2452 r->out.flags=PROXY_USE_CACHE;
2453 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
2454 (long long)r->out.nread));
2455 if (r->in.flags & PROXY_VALIDATE) {
2456 r->out.flags |= PROXY_VALIDATE;
2457 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
2458 (long long)r->out.nread, (long long) io->readx.out.nread));
2460 goto done;
2462 DEBUG(5,("Cache does not match\n"));
2465 if (r->in.flags & PROXY_VALIDATE) {
2466 /* validate failed, shrink read to mincnt - so we don't fill the link */
2467 r->out.nread=MIN(r->out.nread, r->in.mincnt);
2468 size=r->out.nread;
2469 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
2470 r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
2473 if (r->in.flags & PROXY_USE_ZLIB) {
2474 if (compress_block(io->readx.out.data, &size) ) {
2475 r->out.flags|=PROXY_USE_ZLIB;
2476 r->out.response.compress.count=size;
2477 r->out.response.compress.data=io->readx.out.data;
2478 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2479 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
2480 goto done;
2484 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
2485 r->out.response.generic.count=io->readx.out.nread;
2486 r->out.response.generic.data=io->readx.out.data;
2489 done:
2491 /* Or should we return NT_STATUS_OK ?*/
2492 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
2494 /* the rpc transport succeeded even if the operation did not */
2495 return NT_STATUS_OK;
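/* Summary of the reply encoding chosen above (added for clarity):
   digest matches the client's       -> out.flags |= PROXY_USE_CACHE, no payload
     ...and PROXY_VALIDATE requested -> out.flags |= PROXY_VALIDATE as well
   digest differs, PROXY_VALIDATE    -> nread shrunk to mincnt before sending
   zlib makes the payload smaller    -> out.flags |= PROXY_USE_ZLIB, compressed
   otherwise                         -> raw data in out.response.generic */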
2499 /* RPC implementation of Read */
2501 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
2502 struct ntvfs_request *req, struct proxy_Read *r,
2503 union smb_handle file)
2505 struct proxy_private *private = ntvfs->private_data;
2506 union smb_read* io=talloc(req, union smb_read);
2507 NTSTATUS status;
2509 NT_STATUS_HAVE_NO_MEMORY(io);
2510 /* if the next hop is a proxy just repeat this call; also handle the VALIDATE
2511 check, which means having our own callback handlers too... */
2512 SETUP_PID;
2514 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
2515 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
2516 DEBUG(5,("Anticipated digest\n"));
2517 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
2519 /* If the remote end is a proxy, just fix up the file handle and pass through,
2520 but update cache on the way back
2521 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2524 /* prepare for response */
2525 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
2526 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
2528 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2529 return proxy_validate(ntvfs, req, r, &file);
2532 /* pack up an smb_read request and dispatch here */
2533 io->readx.level=RAW_READ_READX;
2534 io->readx.in.file=file;
2535 io->readx.in.mincnt=r->in.mincnt;
2536 io->readx.in.maxcnt=r->in.maxcnt;
2537 io->readx.in.offset=r->in.offset;
2538 io->readx.in.remaining=r->in.remaining;
2539 /* and something to hold the answer */
2540 io->readx.out.data=r->out.response.generic.data;
2542 /* so we get to pack the io->*.out response */
2543 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
2544 NT_STATUS_NOT_OK_RETURN(status);
2546 /* so the read will get processed normally */
2547 return proxy_read(ntvfs, req, io);
2551 /* A handler for sending async rpclite Write replies */
2553 static NTSTATUS rpclite_proxy_Write_map_async_send(
2554 struct ntvfs_module_context *ntvfs,
2555 struct ntvfs_request *req,
2556 void *io1, void *io2, NTSTATUS status)
2558 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
2559 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
2561 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2562 r->out.result=status;
2564 r->out.nwritten=io->writex.out.nwritten;
2565 r->out.remaining=io->writex.out.remaining;
2567 /* the rpc transport succeeded even if the operation did not */
2568 return NT_STATUS_OK;
2572 /* RPC implementation of write */
2574 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
2575 struct ntvfs_request *req, struct proxy_Write *r,
2576 union smb_handle file)
2578 struct proxy_private *private = ntvfs->private_data;
2579 union smb_write* io=talloc(req, union smb_write);
2580 NTSTATUS status;
2582 SETUP_PID;
2584 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
2585 r->in.count, r->in.offset, r->in.fnum));
2587 /* pack up an smb_write request and dispatch here */
2588 io->writex.level=RAW_WRITE_WRITEX;
2589 io->writex.in.file=file;
2590 io->writex.in.offset=r->in.offset;
2591 io->writex.in.wmode=r->in.mode;
2592 io->writex.in.count=r->in.count;
2594 /* and the data */
2595 if (PROXY_USE_ZLIB & r->in.flags) {
2596 ssize_t count=r->in.data.generic.count;
2597 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
2598 &count, r->in.count);
2599 if (count != r->in.count || !io->writex.in.data) {
2600 /* Didn't uncompress properly, but the RPC layer worked */
2601 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
2602 return NT_STATUS_OK;
2604 } else {
2605 io->writex.in.data=r->in.data.generic.data;
2608 /* so we get to pack the io->*.out response */
2609 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
2610 NT_STATUS_NOT_OK_RETURN(status);
2612 /* so the write will get processed normally */
2613 return proxy_write(ntvfs, req, io);
2616 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
2617 back from rpc struct to ntioctl */
2618 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
2619 struct ntvfs_request *req, union smb_ioctl *io)
2621 struct proxy_private *private = ntvfs->private_data;
2622 DATA_BLOB *request;
2623 struct ndr_syntax_id* syntax_id;
2624 uint32_t opnum;
2625 const struct ndr_interface_table *table;
2626 struct ndr_pull* pull;
2627 void* r;
2628 NTSTATUS status;
2629 struct async_rpclite_send *rpclite_send;
2630 enum ndr_err_code ndr_err;
2632 SETUP_PID;
2634 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,io->ntioctl.in.blob.length));
2635 /* unpack the NDR */
2636 request=&io->ntioctl.in.blob;
2638 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2639 NT_STATUS_HAVE_NO_MEMORY(pull);
2640 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2641 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
2643 /* the blob is 4-aligned because it was memcpy'd */
2644 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
2645 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
2647 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
2648 status=ndr_map_error2ntstatus(ndr_err);
2649 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2650 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
2651 return status;
2654 /* now find the struct ndr_interface_table * for this syntax_id */
2655 table=ndr_table_by_uuid(&syntax_id->uuid);
2656 if (! table) {
2657 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
2658 return NT_STATUS_NO_GUID_TRANSLATION;
2661 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
2662 status=ndr_map_error2ntstatus(ndr_err);
2663 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2664 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
2665 return status;
2667 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
2669 DEBUG(10,("rpc request data:\n"));
2670 dump_data(10, pull->data, pull->data_size);
2672 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
2673 table->calls[opnum].name);
2674 NT_STATUS_HAVE_NO_MEMORY(r);
2676 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
2677 status=ndr_map_error2ntstatus(ndr_err);
2678 DEBUG(5,("%s opnum %d pulled status %s\n",__FUNCTION__,opnum,get_friendly_nt_error_msg (status)));
2679 NT_STATUS_NOT_OK_RETURN(status);
2681 rpclite_send=talloc(req, struct async_rpclite_send);
2682 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
2683 rpclite_send->call=&table->calls[opnum];
2684 rpclite_send->struct_ptr=r;
2685 /* need to push conversion function to convert from r to io */
2686 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
2688 /* Magically despatch the call based on syntax_id, table and opnum.
2689 But there is no table of handlers yet (a sketch follows this function)... so until then */
2690 if (0==strcasecmp(table->name,"rpcproxy")) {
2691 switch(opnum) {
2692 case(NDR_PROXY_READ):
2693 status=rpclite_proxy_Read(ntvfs, req, r, io->generic.in.file);
2694 break;
2695 case(NDR_PROXY_WRITE):
2696 status=rpclite_proxy_Write(ntvfs, req, r, io->generic.in.file);
2697 break;
2698 default:
2699 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
2700 return NT_STATUS_PROCEDURE_NOT_FOUND;
2702 } else {
2703 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
2704 GUID_string(debug_ctx(),&syntax_id->uuid)));
2705 return NT_STATUS_NO_GUID_TRANSLATION;
2708 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
2709 the handler status is in r->out.result */
2710 return ntvfs_map_async_finish(req, status);
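/* An illustrative sketch (not part of the build) of the handler table the
   comment above wishes for; rpclite_dispatch_fn and the table are
   hypothetical. The casts paper over the typed first argument of each
   handler; a real table would use small wrapper functions instead. */
#if 0
typedef NTSTATUS (*rpclite_dispatch_fn)(struct ntvfs_module_context *ntvfs,
					struct ntvfs_request *req,
					void *r, union smb_handle file);
static const struct {
	uint32_t opnum;
	rpclite_dispatch_fn fn;
} rpclite_dispatch[] = {
	{ NDR_PROXY_READ,  (rpclite_dispatch_fn)rpclite_proxy_Read  },
	{ NDR_PROXY_WRITE, (rpclite_dispatch_fn)rpclite_proxy_Write },
};
#endif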
2713 /* unpack the ntioctl to make some rpc_struct */
2714 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2716 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2717 struct proxy_private *proxy=async->proxy;
2718 struct smbcli_request *c_req = async->c_req;
2719 void* r=io1;
2720 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
2721 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
2722 const struct ndr_interface_call *calls=info->calls;
2723 enum ndr_err_code ndr_err;
2724 DATA_BLOB *response;
2725 struct ndr_pull* pull;
2727 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
2728 DEBUG(5,("%s op %s ntioctl: %s\n",
2729 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2730 NT_STATUS_NOT_OK_RETURN(status);
2732 if (c_req) {
2733 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
2734 status = smb_raw_ioctl_recv(c_req, io, io);
2735 #define SESSION_INFO proxy->remote_server, proxy->remote_share
2736 /* This status is the ntioctl wrapper status */
2737 if (! NT_STATUS_IS_OK(status)) {
2738 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
2739 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2740 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
2741 return NT_STATUS_UNSUCCESSFUL;
2745 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
2747 response=&io->ntioctl.out.blob;
2748 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2749 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2751 NT_STATUS_HAVE_NO_MEMORY(pull);
2753 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
2754 #warning can we free pull here?
2755 status=ndr_map_error2ntstatus(ndr_err);
2757 DEBUG(5,("END %s op status %s\n",
2758 __FUNCTION__, get_friendly_nt_error_msg(status)));
2759 return status;
2763 /* send an ntioctl request based on an NDR encoding */
2765 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
2766 struct smbcli_tree *tree,
2767 struct ntvfs_module_context *ntvfs,
2768 uint16_t fnum,
2769 const struct ndr_interface_table *table,
2770 uint32_t opnum,
2771 void *r)
2773 struct proxy_private *private = ntvfs->private_data;
2774 struct smbcli_request * c_req;
2775 struct ndr_push *push;
2776 NTSTATUS status;
2777 DATA_BLOB request;
2778 enum ndr_err_code ndr_err;
2779 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
2782 /* setup for an ndr_push_* call; we can't free push until the message
2783 actually hits the wire */
2784 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2785 if (!push) return NULL;
2787 /* first push interface table identifiers */
2788 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
2789 status=ndr_map_error2ntstatus(ndr_err);
2791 if (! NT_STATUS_IS_OK(status)) return NULL;
2793 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
2794 status=ndr_map_error2ntstatus(ndr_err);
2795 if (! NT_STATUS_IS_OK(status)) return NULL;
2797 if (0) {
2798 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2801 /* push the structure into a blob */
2802 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
2803 status=ndr_map_error2ntstatus(ndr_err);
2804 if (!NT_STATUS_IS_OK(status)) {
2805 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2806 nt_errstr(status)));
2807 return NULL;
2810 /* retrieve the blob */
2811 request = ndr_push_blob(push);
2813 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
2814 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
2815 io->ntioctl.in.file.fnum=fnum;
2816 io->ntioctl.in.fsctl=false;
2817 io->ntioctl.in.filter=0;
2818 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
2819 io->ntioctl.in.blob=request;
2821 DEBUG(10,("smbcli_request packet:\n"));
2822 dump_data(10, request.data, request.length);
2824 c_req = smb_raw_ioctl_send(tree, io);
2826 if (! c_req) {
2827 return NULL;
2830 dump_data(10, c_req->out.data, c_req->out.data_size);
2832 { void* req=NULL;
2833 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
2834 info->io=io;
2835 info->table=table;
2836 info->opnum=opnum;
2837 info->calls=&table->calls[opnum];
2838 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
2841 return c_req;
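/* Wire format of the rpclite request built above (added for clarity):
     [ndr_syntax_id][uint32 opnum][NDR_IN-pushed call arguments]
   carried as the blob of an NT IOCTL with function FSCTL_UFOPROXY_RPCLITE,
   with the response blob bounded by PROXY_NTIOCTL_MAXDATA. */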
2845 /* client helpers, mapping between proxy RPC calls and smbcli_* calls. */
2849 /* If the sync_chain_handler is called directly it unplugs the async handler
2850 which (as well as preventing loops) will also avoid req->send_fn being
2851 called - which is also nice! */
2852 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
2854 struct async_info *async=NULL;
2855 /* the first callback which will actually receive the c_req response */
2856 struct async_info_map *async_map;
2857 NTSTATUS status=NT_STATUS_OK;
2858 struct async_info_map** chain;
2860 DEBUG(5,("%s\n",__FUNCTION__));
2861 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
2863 /* If there is a handler installed, it is using async_info to chain */
2864 if (c_req->async.fn) {
2865 /* not safe to talloc_free async if send_fn has been called for the request
2866 against which async was allocated, so steal it (and free below) or neither */
2867 async = talloc_get_type_abort(c_req->async.private, struct async_info);
2868 talloc_steal(NULL, async);
2869 chain=&async->chain;
2870 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2871 } else {
2872 chain=(struct async_info_map**)&c_req->async.private;
2873 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2876 /* unplug c_req->async.fn since, if a callback handler calls smb_*_recv
2877 in order to receive the response, smbcli_transport_finish_recv will
2878 call us again and then call the c_req->async.fn
2879 Perhaps we should merely call smbcli_request_receive() IF
2880 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
2881 help multi-part replies... except all parts are received before
2882 callback if a handler WAS set */
2883 c_req->async.fn=NULL;
2885 /* Should we raise an error? Should we simple_recv? */
2886 while(async_map) {
2887 /* remove this one from the list before we call. We do this in case
2888 some callbacks free their async_map but also so that callbacks
2889 can navigate the async_map chain to add additional callbacks to
2890 the end - e.g. so that tag-along reads can call send_fn after
2891 the send_fn of the request they tagged along to, thus preserving
2892 the async response order - which may be a waste of time? */
2893 DLIST_REMOVE(*chain, async_map);
2895 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
2896 if (async_map->fn) {
2897 status=async_map->fn(async_map->async,
2898 async_map->parms1, async_map->parms2, status);
2900 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
2901 /* Note: the callback may have added to the chain */
2902 #warning Async_maps have a null talloc_context, it is unclear who should own them
2903 /* it can't be c_req as it stops us chaining more than one, maybe it
2904 should be req but there isn't always a req. However sync_chain_handler
2905 will always free it if called */
2906 DEBUG(6,("Will free async map %p\n",async_map));
2907 #warning put me back
2908 talloc_free(async_map);
2909 DEBUG(6,("Free'd async_map\n"));
2910 if (*chain)
2911 async_map=talloc_get_type_abort(*chain, struct async_info_map);
2912 else
2913 async_map=NULL;
2914 DEBUG(6,("Switch to async_map %p\n",async_map));
2916 /* The first callback will have read c_req, thus talloc_free'ing it,
2917 so we don't let the other callbacks get hurt playing with it */
2918 if (async_map && async_map->async)
2919 async_map->async->c_req=NULL;
2922 talloc_free(async);
2924 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2925 return status;
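/* A usage sketch (not part of the build) of the chain machinery above:
   queue a request, chain a receive handler with ADD_ASYNC_RECV_TAIL as
   elsewhere in this file, then drain the chain synchronously. my_handler
   is hypothetical. */
#if 0
static NTSTATUS example_sync_call(struct smbcli_tree *tree,
				  union smb_write *io, struct proxy_file *f)
{
	struct smbcli_request *c_req = smb_raw_write_send(tree, io);
	void* req=NULL; /* dummy for the macro, as elsewhere in this file */
	ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, my_handler, NT_STATUS_INTERNAL_ERROR);
	/* no async state available here, so sync-wait on the whole chain */
	return sync_chain_handler(c_req);
}
#endif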
2928 /* If the async handler is called, then the send_fn is called */
2929 static void async_chain_handler(struct smbcli_request *c_req)
2931 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
2932 struct ntvfs_request *req = async->req;
2933 NTSTATUS status;
2935 if (c_req->state <= SMBCLI_REQUEST_RECV) {
2936 /* Looks like the async handler has been called synchronously */
2937 smb_panic("async_chain_handler called synchronously on a request");
2940 status=sync_chain_handler(c_req);
2942 /* Should we insist that a chain'd handler does this?
2943 Which makes it hard to intercept the data by adding handlers
2944 before the send_fn handler sends it... */
2945 if (req) {
2946 req->async_states->status=status;
2947 req->async_states->send_fn(req);
2951 /* unpack the rpc struct to make some smb_write */
2952 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
2953 void* io1, void* io2, NTSTATUS status)
2955 union smb_write* io =talloc_get_type(io1, union smb_write);
2956 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
2958 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
2959 get_friendly_nt_error_msg (status)));
2960 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
2961 NT_STATUS_NOT_OK_RETURN(status);
2963 status=r->out.result;
2964 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2965 NT_STATUS_NOT_OK_RETURN(status);
2967 io->generic.out.remaining = r->out.remaining;
2968 io->generic.out.nwritten = r->out.nwritten;
2970 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
2971 get_friendly_nt_error_msg (status)));
2972 return status;
2975 /* upgrade from smb to NDR and then send.
2976 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
2977 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
2978 union smb_write *io,
2979 struct proxy_file *f)
2981 struct proxy_private *private = ntvfs->private_data;
2982 struct smbcli_tree *tree=private->tree;
2984 if (PROXY_REMOTE_SERVER(private)) {
2985 struct smbcli_request *c_req;
2986 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
2987 ssize_t size;
2989 if (! r) return NULL;
2991 size=io->generic.in.count;
2992 /* upgrade the write */
2993 r->in.fnum = io->generic.in.file.fnum;
2994 r->in.offset = io->generic.in.offset;
2995 r->in.count = io->generic.in.count;
2996 r->in.mode = io->generic.in.wmode;
2997 // r->in.remaining = io->generic.in.remaining;
2998 #warning remove this
2999 /* prepare to lie */
3000 r->out.nwritten=r->in.count;
3001 r->out.remaining=0;
3003 /* try to compress */
3004 #warning compress!
3005 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
3006 if (r->in.data.compress.data) {
3007 r->in.data.compress.count=size;
3008 r->in.flags = PROXY_USE_ZLIB;
3009 } else {
3010 r->in.flags = 0;
3011 /* we'll honour const, honest gov */
3012 r->in.data.generic.data=discard_const(io->generic.in.data);
3013 r->in.data.generic.count=io->generic.in.count;
3016 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
3017 ntvfs,
3018 io->generic.in.file.fnum,
3019 &ndr_table_rpcproxy,
3020 NDR_PROXY_WRITE, r);
3021 if (! c_req) return NULL;
3023 /* yeah, filthy abuse of f */
3024 { void* req=NULL;
3025 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
3028 return c_req;
3029 } else {
3030 return smb_raw_write_send(tree, io);
3034 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
3035 union smb_write *io,
3036 struct proxy_file *f)
3038 struct proxy_private *proxy = ntvfs->private_data;
3039 struct smbcli_tree *tree=proxy->tree;
3041 if (PROXY_REMOTE_SERVER(proxy)) {
3042 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3043 return sync_chain_handler(c_req);
3044 } else {
3045 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
3046 return smb_raw_write_recv(c_req, io);
3050 /* unpack the rpc struct to make some smb_read response */
3051 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
3052 void* io1, void* io2, NTSTATUS status)
3054 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
3055 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
3057 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
3058 get_friendly_nt_error_msg(status)));
3059 NT_STATUS_NOT_OK_RETURN(status);
3061 status=r->out.result;
3062 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
3063 get_friendly_nt_error_msg(status)));
3064 NT_STATUS_NOT_OK_RETURN(status);
3066 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
3067 io->generic.out.compaction_mode = 0;
3069 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
3070 /* Use the io we already setup!
3071 if out.flags & PROXY_VALIDATE, we may need to validate more in
3072 cache than r->out.nread would suggest, see io->generic.out.nread */
3073 if (r->out.flags & PROXY_VALIDATE)
3074 io->generic.out.nread=io->generic.in.maxcnt;
3075 DEBUG(5,("Using cached data: size=%lld\n",
3076 (long long) io->generic.out.nread));
3077 return status;
3080 if (r->in.flags & PROXY_VALIDATE) {
3081 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
3082 /* turn off validate on this file */
3083 //cache_handle_novalidate(f);
3084 #warning turn off validate on this file - do an nread<maxcnt later
3087 if (r->in.flags & PROXY_USE_CACHE) {
3088 DEBUG(5,("Cached data did not match\n"));
3091 io->generic.out.nread = r->out.nread;
3093 /* we may need to uncompress */
3094 if (r->out.flags & PROXY_USE_ZLIB) {
3095 ssize_t size=r->out.response.compress.count;
3096 if (! uncompress_block_to(io->generic.out.data,
3097 r->out.response.compress.data, &size,
3098 io->generic.in.maxcnt) ||
3099 size != r->out.nread) {
3100 io->generic.out.nread=size;
3101 status=NT_STATUS_INVALID_USER_BUFFER;
3103 } else if (io->generic.out.data != r->out.response.generic.data) {
3104 //Assert(r->out.nread == r->out.generic.out.count);
3105 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
3108 return status;
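/* PROXY_* flag semantics on the read path as handled above (added for
   clarity):
     in.flags  PROXY_USE_CACHE - client sent a digest of its pre-read data
     in.flags  PROXY_VALIDATE  - the digest covers cached data to validate
     out.flags PROXY_USE_CACHE - digest matched; payload omitted, use cache
     out.flags PROXY_VALIDATE  - validation held, so all of in.maxcnt is good
     out.flags PROXY_USE_ZLIB  - the returned payload is zlib-compressed */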
3111 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
3112 data has been pre-read into io->generic.out.data and can be used for
3113 proxy<->proxy optimized reads */
3114 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
3115 union smb_read *io,
3116 struct proxy_file *f,
3117 struct proxy_Read *r)
3119 struct proxy_private *private = ntvfs->private_data;
3120 #warning we are using out.nread as an out-of-band parameter
3121 if (PROXY_REMOTE_SERVER(private)) {
3123 struct smbcli_request *c_req;
3124 if (! r) {
3125 r=talloc_zero(io, struct proxy_Read);
3128 if (! r) return NULL;
3130 r->in.fnum = io->generic.in.file.fnum;
3131 r->in.read_for_execute=io->generic.in.read_for_execute;
3132 r->in.offset = io->generic.in.offset;
3133 r->in.mincnt = io->generic.in.mincnt;
3134 r->in.maxcnt = io->generic.in.maxcnt;
3135 r->in.remaining = io->generic.in.remaining;
3136 r->in.flags |= PROXY_USE_ZLIB;
3137 if (! (r->in.flags & PROXY_VALIDATE) &&
3138 io->generic.out.data && io->generic.out.nread > 0) {
3139 /* maybe we should limit digest size to MIN(nread, maxcnt) to
3140 permit the caller to provide a larger nread as part of
3141 a split read */
3142 checksum_block(r->in.digest.digest, io->generic.out.data,
3143 io->generic.out.nread);
3145 if (io->generic.out.nread > r->in.maxcnt) {
3146 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
3147 } else {
3148 r->in.mincnt = io->generic.out.nread;
3149 r->in.maxcnt = io->generic.out.nread;
3150 r->in.flags |= PROXY_USE_CACHE;
3151 /* PROXY_VALIDATE will have been set by caller */
3155 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
3156 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
3157 dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
3160 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
3161 ntvfs,
3162 io->generic.in.file.fnum,
3163 &ndr_table_rpcproxy,
3164 NDR_PROXY_READ, r);
3165 if (! c_req) return NULL;
3167 { void* req=NULL;
3168 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
3171 return c_req;
3172 } else {
3173 return smb_raw_read_send(private->tree, io);
3177 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
3178 union smb_read *io,
3179 struct proxy_file *f)
3181 struct proxy_private *proxy = ntvfs->private_data;
3182 struct smbcli_tree *tree=proxy->tree;
3184 if (PROXY_REMOTE_SERVER(proxy)) {
3185 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
3186 return sync_chain_handler(c_req);
3187 } else {
3188 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
3189 return smb_raw_read_recv(c_req, io);
3195 /* initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem */
3197 NTSTATUS ntvfs_proxy_init(void)
3199 NTSTATUS ret;
3200 struct ntvfs_ops ops;
3201 NTVFS_CURRENT_CRITICAL_SIZES(vers);
3203 ZERO_STRUCT(ops);
3205 /* fill in the name and type */
3206 ops.name = "proxy";
3207 ops.type = NTVFS_DISK;
3209 /* fill in all the operations */
3210 ops.connect = proxy_connect;
3211 ops.disconnect = proxy_disconnect;
3212 ops.unlink = proxy_unlink;
3213 ops.chkpath = proxy_chkpath;
3214 ops.qpathinfo = proxy_qpathinfo;
3215 ops.setpathinfo = proxy_setpathinfo;
3216 ops.open = proxy_open;
3217 ops.mkdir = proxy_mkdir;
3218 ops.rmdir = proxy_rmdir;
3219 ops.rename = proxy_rename;
3220 ops.copy = proxy_copy;
3221 ops.ioctl = proxy_ioctl;
3222 ops.read = proxy_read;
3223 ops.write = proxy_write;
3224 ops.seek = proxy_seek;
3225 ops.flush = proxy_flush;
3226 ops.close = proxy_close;
3227 ops.exit = proxy_exit;
3228 ops.lock = proxy_lock;
3229 ops.setfileinfo = proxy_setfileinfo;
3230 ops.qfileinfo = proxy_qfileinfo;
3231 ops.fsinfo = proxy_fsinfo;
3232 ops.lpq = proxy_lpq;
3233 ops.search_first = proxy_search_first;
3234 ops.search_next = proxy_search_next;
3235 ops.search_close = proxy_search_close;
3236 ops.trans = proxy_trans;
3237 ops.logoff = proxy_logoff;
3238 ops.async_setup = proxy_async_setup;
3239 ops.cancel = proxy_cancel;
3240 ops.notify = proxy_notify;
3241 ops.trans2 = proxy_trans2;
3243 /* register ourselves with the NTVFS subsystem. We register
3244 under the name 'proxy'. */
3245 ret = ntvfs_register(&ops, &vers);
3247 if (!NT_STATUS_IS_OK(ret)) {
3248 DEBUG(0,("Failed to register PROXY backend!\n"));
3251 return ret;
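/* Usage note (an assumption based on the standard Samba4 NTVFS mechanism,
   not something this file defines): once registered, the backend would be
   selected per share with the smb.conf "ntvfs handler" parameter, e.g.

     [cachedshare]
         ntvfs handler = proxy

   with the upstream server and share coming from the proxy-specific
   options read at connect time (cf. remote_server/remote_share above). */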