/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.
*/

#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx

#define PROXY_NTIOCTL_MAXDATA 0x20000

#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"

struct proxy_file {
	struct proxy_file *prev, *next;
	uint16_t fnum;
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	int readahead_pending;
};

/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead; /* default read-ahead window size */
	int cache_readaheadblock; /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	int readahead_spare; /* amount of pending non-user generated requests */
	bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
	uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
};

struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev;
	struct proxy_private *proxy;
	struct ntvfs_request *req;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	struct async_info_map *chain;
	void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
	struct async_info_map *next, *prev;
	NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
	void *parms1;
	void *parms2;
	struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};

#define SETUP_PID private->tree->session->pid = req->smbpid

#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)

#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in KB; scaled by 1024 at tree connect, so 256 means 256KB */

#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true

/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	 && (strcmp("A:",(private)->tree->device)==0))
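
/* Added note: the remote end is taken to be another proxy when it negotiated
   CAP_COMPRESSION and exposed a disk ("A:") device; proxy_connect below uses
   the same "A:" device check to decide whether to enable caching. */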

/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
	struct smbcli_tree *tree,
	struct ntvfs_module_context *ntvfs,
	uint16_t fnum, const struct ndr_interface_table *table,
	uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
						union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
			     union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);

/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
	struct proxy_private *private = p_private;
	NTSTATUS status;
	struct ntvfs_handle *h = NULL;
	struct proxy_file *f;

	for (f=private->files; f; f=f->next) {
		if (f->fnum != fnum) continue;
		h = f->h;
		break;
	}

	if (!h) {
		DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
		return true;
	}

	/* If we don't have an oplock, then we can't rely on the cache */
	cache_handle_stale(f);

	DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
	status = ntvfs_send_oplock_break(private->ntvfs, h, level);
	if (!NT_STATUS_IS_OK(status)) return false;
	return true;
}

/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, const char *sharename)
{
	NTSTATUS status;
	struct proxy_private *private;
	const char *host, *user, *pass, *domain, *remote_share;
	struct smb_composite_connect io;
	struct composite_context *creq;
	struct share_config *scfg = ntvfs->ctx->config;
	int nttrans_fnum;

	struct cli_credentials *credentials;
	bool machine_account;

	/* Here we need to determine which server to connect to.
	 * For now we use parametric options, type proxy.
	 * Later we will use security=server and auth_server.c.
	 */
	host = share_string_option(scfg, PROXY_SERVER, NULL);
	user = share_string_option(scfg, PROXY_USER, NULL);
	pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
	domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
	remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
	if (!remote_share) {
		remote_share = sharename;
	}

	machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

	private = talloc_zero(ntvfs, struct proxy_private);
	if (!private) {
		return NT_STATUS_NO_MEMORY;
	}

	ntvfs->private_data = private;

	if (!host) {
		DEBUG(1,("PROXY backend: You must supply server\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (user && pass) {
		DEBUG(5, ("PROXY backend: Using specified password\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		DEBUG(5, ("PROXY backend: Using machine account\n"));
		credentials = cli_credentials_init(private);
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (req->session_info->credentials) {
		DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
		credentials = req->session_info->credentials;
	} else {
		DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* connect to the server, using the smbd event context */
	io.in.dest_host = host;
	io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
	io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
	io.in.called_name = host;
	io.in.credentials = credentials;
	io.in.fallback_to_anonymous = false;
	io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
	io.in.service = remote_share;
	io.in.service_type = "?????";
	io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
	io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
	lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
	lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

	creq = smb_composite_connect_send(&io, private,
					  lp_resolve_context(ntvfs->ctx->lp_ctx),
					  ntvfs->ctx->event_ctx);
	status = smb_composite_connect_recv(creq, private);
	NT_STATUS_NOT_OK_RETURN(status);

	private->tree = io.out.tree;

	private->transport = private->tree->session->transport;
	SETUP_PID;
	private->ntvfs = ntvfs;

	ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
	ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

	/* we need to receive oplock break requests from the server */
	smbcli_oplock_handler(private->transport, oplock_handler, private);

	private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

	private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

	private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

	if (strcmp("A:",private->tree->device)==0) {
		private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
		private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
		private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
								 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
		private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
		private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
		private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
		DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
			 remote_share, private->tree->device, private->tree->fs_type,
			 (private->cache_enabled)?"enabled":"disabled",
			 private->cache_readahead));
	} else {
		private->cache_enabled = false;
		DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
			 remote_share, private->tree->device, private->tree->fs_type));
	}

	private->remote_server = strlower_talloc(private, host);
	private->remote_share = strlower_talloc(private, remote_share);

	/* some proxy operations will not be performed on files, so open a handle
	   now that we can use for such things. We won't bother to close it on
	   shutdown, as the remote server ought to be able to close it for us
	   and we might be shutting down because the remote server went away and
	   so we don't want to delay further */
	nttrans_fnum = smbcli_nt_create_full(private->tree, "\\",
					     NTCREATEX_FLAGS_OPEN_DIRECTORY,
					     SEC_FILE_READ_DATA,
					     FILE_ATTRIBUTE_NORMAL,
					     NTCREATEX_SHARE_ACCESS_MASK,
					     NTCREATEX_DISP_OPEN,
					     NTCREATEX_OPTIONS_DIRECTORY,
					     NTCREATEX_IMPERSONATION_IMPERSONATION);
	if (nttrans_fnum < 0) {
		DEBUG(5,("Could not open handle for ntioctl %d\n", nttrans_fnum));
		return NT_STATUS_UNSUCCESSFUL;
	}
	private->nttrans_fnum = nttrans_fnum;
	DEBUG(5,("Got nttrans handle %d\n", private->nttrans_fnum));

	return NT_STATUS_OK;
}

/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct async_info *a, *an;

	/* first cleanup pending requests */
	for (a=private->pending; a; a = an) {
		an = a->next;
		smbcli_request_destroy(a->c_req);
		talloc_free(a);
	}

	talloc_free(private);
	ntvfs->private_data = NULL;

	return NT_STATUS_OK;
}

/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
	DLIST_REMOVE(async->proxy->pending, async);
	return 0;
}

/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smbcli_request_simple_recv(c_req);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)
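
/* Added note (not in the original source): TYPE_CHECK gives a compile-time
   signature check by declaring a dummy variable of the expected type and
   assigning the candidate to it, e.g.:

	static void good_fn(struct smbcli_request *c_req);
	static int bad_fn(void);
	TYPE_CHECK(void (*t)(struct smbcli_request *), good_fn);  // compiles
	TYPE_CHECK(void (*t)(struct smbcli_request *), bad_fn);   // compile error

   The `t=t;` suppresses the unused-variable warning. */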

/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return (error); \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		async->chain = achain; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
} while (0)

#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
	if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return NT_STATUS_NO_MEMORY; \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
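
/* Added note (not in the original source): these tail macros rely on
   variable capture - they expect locals named c_req, req and private to be
   in scope at the point of use, and ASYNC_RECV_TAIL_F returns from the
   calling function, so it must be the last statement of a backend handler. */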

/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   If async->c_req is NULL then an earlier handler in the chain has already
   received the request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record:
     static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed
   and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *);
   the chained async_info_map will be in c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)

#if 0 /* verbose tracing of ADD_ASYNC_RECV_TAIL arguments, disabled */
	DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__,
		 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL,
		 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL,
		 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL,
		 file, file?"file":"null", file?"file":"null", #async_fn));
#endif

#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
	if (! creq) return (error); \
	{ \
		struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
		if (! async_map) return (error); \
		async_map->async=talloc(async_map, struct async_info); \
		if (! async_map->async) return (error); \
		async_map->parms1=io1; \
		async_map->parms2=io2; \
		async_map->fn=async_fn; \
		async_map->async->parms = io1; \
		async_map->async->req = req; \
		async_map->async->f = file; \
		async_map->async->proxy = private; \
		async_map->async->c_req = creq; \
		/* If async_chain_handler is installed, get the list from param */ \
		if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
			struct async_info *i=creq->async.private; \
			DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
		} else if (creq->async.fn) { \
			/* incompatible handler installed */ \
			return (error); \
		} else { \
			DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
		} \
	} \
} while(0)
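
/* Added usage sketch (mirrors the read-ahead code later in this file):
   queue chained decoders on one client request, then install the generic
   chain manager:

	c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
	ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
	ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
	ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);
*/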

/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	(io->generic.level == RAW_OPEN_NTCREATEX && \
	 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) \
	?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window)) \
	:(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
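
/* Added usage sketch (this is how async_open/proxy_open below call it):

	bool oplock = (io->generic.out.oplock_level != OPLOCK_NONE) || private->fake_oplock;
	f->cache = cache_open(private->cache, f, io, oplock, private->cache_readahead);
*/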

/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}

	c_req = smb_raw_unlink_send(private->tree, unl);

	SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
	    && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
		return proxy_rpclite(ntvfs, req, io);
	}

	SETUP_PID_AND_FILE;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_ioctl(private->tree, req, io);
	}

	c_req = smb_raw_ioctl_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_ioctl);
}

/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_chkpath *cp)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_chkpath(private->tree, cp);
	}

	c_req = smb_raw_chkpath_send(private->tree, cp);

	SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async qpathinfo replies
*/
static void async_qpathinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  return info on a pathname
*/
static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *info)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_pathinfo(private->tree, req, info);
	}

	c_req = smb_raw_pathinfo_send(private->tree, info);

	ASYNC_RECV_TAIL(info, async_qpathinfo);
}

/*
  a handler for async qfileinfo replies
*/
static void async_qfileinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  query info on an open file
*/
static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID_AND_FILE;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_fileinfo(private->tree, req, io);
	}

	c_req = smb_raw_fileinfo_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_qfileinfo);
}

/*
  set info on a pathname
*/
static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req, union smb_setfileinfo *st)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_setpathinfo(private->tree, st);
	}

	c_req = smb_raw_setpathinfo_send(private->tree, st);

	SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async open replies
*/
static void async_open(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct proxy_private *proxy = async->proxy;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	union smb_open *io = async->parms;
	union smb_handle *file;

	talloc_free(async);
	req->async_states->status = smb_raw_open_recv(c_req, req, io);
	SMB_OPEN_OUT_FILE(io, file);
	f->fnum = file->fnum;
	file->ntvfs = NULL;
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	file->ntvfs = f->h;
	DLIST_ADD(proxy->files, f);

	if (proxy->cache_enabled) {
		bool oplock = (io->generic.out.oplock_level != OPLOCK_NONE) || proxy->fake_oplock;
		f->cache = cache_open(proxy->cache, f, io, oplock, proxy->cache_readahead);
		if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
	}

failed:
	req->async_states->send_fn(req);
}

/*
  open a file
*/
static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, union smb_open *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	struct ntvfs_handle *h;
	struct proxy_file *f;
	NTSTATUS status;

	SETUP_PID;

	if (io->generic.level != RAW_OPEN_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_open(ntvfs, req, io);
	}

	status = ntvfs_handle_new(ntvfs, req, &h);
	NT_STATUS_NOT_OK_RETURN(status);

	f = talloc_zero(h, struct proxy_file);
	NT_STATUS_HAVE_NO_MEMORY(f);
	f->h = h;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		union smb_handle *file;

		status = smb_raw_open(private->tree, req, io);
		NT_STATUS_NOT_OK_RETURN(status);

		SMB_OPEN_OUT_FILE(io, file);
		f->fnum = file->fnum;
		file->ntvfs = NULL;
		status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
		NT_STATUS_NOT_OK_RETURN(status);
		file->ntvfs = f->h;
		DLIST_ADD(private->files, f);

		if (private->cache_enabled) {
			bool oplock = (io->generic.out.oplock_level != OPLOCK_NONE) || private->fake_oplock;

			f->cache = cache_open(private->cache, f, io, oplock, private->cache_readahead);
			if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
		}

		return NT_STATUS_OK;
	}

	c_req = smb_raw_open_send(private->tree, io);

	ASYNC_RECV_TAIL_F(io, async_open, f);
}

/*
  create a directory
*/
static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_mkdir *md)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_mkdir(private->tree, md);
	}

	c_req = smb_raw_mkdir_send(private->tree, md);

	SIMPLE_ASYNC_TAIL;
}

/*
  remove a directory
*/
static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, struct smb_rmdir *rd)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rmdir(private->tree, rd);
	}
	c_req = smb_raw_rmdir_send(private->tree, rd);

	SIMPLE_ASYNC_TAIL;
}

/*
  rename a set of files
*/
static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_rename *ren)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rename(private->tree, ren);
	}

	c_req = smb_raw_rename_send(private->tree, ren);

	SIMPLE_ASYNC_TAIL;
}

/*
  copy a set of files
*/
static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, struct smb_copy *cp)
{
	return NT_STATUS_NOT_SUPPORTED;
}

/* we only define this separately so we can easily spot read calls in
   pending based on ( c_req->async.fn == async_read_handler ) */
static void async_read_handler(struct smbcli_request *c_req)
{
	async_chain_handler(c_req);
}

NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct proxy_private *private = async->proxy;
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	f->readahead_pending--;
	private->readahead_spare++;

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	return status;
}

/*
  a handler for async read replies - speculative read-aheads.
  It merely saves in the cache. The async chain handler will call send_fn if
  there is one, or if sync_chain_handler is used the send_fn is called by
  the ntvfs back end.
*/
NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	NT_STATUS_NOT_OK_RETURN(status);

	/* if it was a validate read we don't want to save anything unless it failed.
	   Until we use proxy_Read structs we can't tell, so guess */
	if (io->generic.out.nread == io->generic.in.maxcnt &&
	    io->generic.in.mincnt < io->generic.in.maxcnt) {
		/* looks like a validate read, just move the validate pointer, the
		   original read-request has already been satisfied from cache */
		DEBUG(3,("%s megavalidate succeeded, validate to %lld\n",__FUNCTION__,
			 io->generic.in.offset + io->generic.out.nread));
		cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
	} else {
		DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
		cache_handle_save(f, io->generic.out.data,
				  io->generic.out.nread,
				  io->generic.in.offset);
	}

	DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}

/* handler for fragmented reads */
NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	union smb_read *io = talloc_get_type_abort(io1, union smb_read);
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag; /* async->parms; */
	struct async_read_fragments* fragments=fragment->fragments;

	/* if request is not already received by a chained handler, read it */
#warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);

	DEBUG(3,("\n\n%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	fragment->status = status;

	/* remove fragment from fragments */
	DLIST_REMOVE(fragments->fragments, fragment);

#warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
	/* in which case we will want to collate all responses and return a valid read
	   for the leading NT_STATUS_OK fragments */

	/* did this one fail, inducing a general fragments failure? */
	if (!NT_STATUS_IS_OK(fragment->status)) {
		/* preserve the status of the fragment with the smallest offset
		   when we can work out how */
		if (NT_STATUS_IS_OK(fragments->status)) {
			fragments->status=fragment->status;
		}

		cache_handle_novalidate(f);
		DEBUG(5,("** Devalidated proxy due to read failure\n"));
	} else {
		/* No fragments have yet failed, keep collecting responses */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
		/* used to use mincnt */
		off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
		off_t end_offset=MIN(io_extent, extent);
		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (start_offset >= io_extent) {
			DEBUG(3,("useless read-ahead tagged on to: %s",__location__));
		} else {
			uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
		}

		/* There should be a better way to detect, but it needs the proxy rpc struct
		   not the smb_read struct */
		if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
			DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
				 (long long) io_frag->generic.out.nread,
				 (long long) io_frag->generic.in.mincnt,
				 (long long) io_frag->generic.in.maxcnt));
			cache_handle_novalidate(f);
		}

		/* We broke up the original read. If not enough of this sub-read has
		   been read, and then some of the next block, it could leave holes!
		   We will only acknowledge up to the first partial read, and treat
		   it as a small read. If the server can return NT_STATUS_OK for a partial
		   read so can we, so we preserve the response.
		   "enough" is all of it (maxcnt), except on the last block, when it has to
		   be enough to fill io->generic.in.mincnt. We know it is the last block
		   if nread is small but we could fill io->generic.in.mincnt */
		if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
		    end_offset < io->generic.in.offset + io->generic.in.mincnt) {
			DEBUG(4,("Fragmented read only partially successful\n"));

			/* Shrink the master nread (or grow to this size if we are the first partial) */
			if (! fragments->partial ||
			    (io->generic.in.offset + io->generic.out.nread) > extent) {
				io->generic.out.nread = extent - io->generic.in.offset;
			}

			/* stop any further successes from extending the partial read */
			fragments->partial=true;
		} else {
			/* only grow the master nwritten if we haven't logged a partial write */
			if (! fragments->partial &&
			    (io->generic.in.offset + io->generic.out.nread) < extent ) {
				io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
			}
		}
	}

	/* Was it the last fragment, or do we know enough to send a response? */
	if (! fragments->fragments) {
		DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
			 io->generic.out.nread, io->generic.in.mincnt,
			 get_friendly_nt_error_msg(fragments->status)));
		if (fragments->async) {
			req->async_states->status=fragments->status;
			DEBUG(5,("Fragments async response sending\n"));
#warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
			/* esp. as they may be attached to by other reads. Maybe attachees should be taking a reference,
			   but how will they know the top level they need to take a reference to.. */
#warning should really queue a sender here, not call it
			req->async_states->send_fn(req);
			DEBUG(5,("Async response sent\n"));
		} else {
			DEBUG(5,("Fragments SYNC return\n"));
		}
	}

	/* because a c_req may be shared by many req, chained handlers must return
	   a status pertaining to the general validity of this specific c_req, not
	   to their own private processing of the c_req for the benefit of their req,
	   which is returned in fragments->status
	*/
	return status;
}

/* Issue read-ahead X bytes where X is the window size calculation based on
   server_latency * server_session_bandwidth
   where latency is the idle (link) latency and bandwidth is less than or
   equal to the actual bandwidth available to the server.
   Read-ahead should honour locked areas in whatever way is necessary (who knows?)
   read_ahead is defined here and not in the cache engine because it requires too
   much knowledge of private structures
*/
/* The concept is buggy unless we can tell the next proxy that these are
   read-aheads, otherwise chained proxy setups will each read-ahead of the
   read-ahead which can put a larger load on the final server.
   Also we probably need to distinguish between
   * cache-less read-ahead
   * cache-revalidating read-ahead
*/
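
/* Added worked example (illustrative figures, not measurements from this
   code): with ~20ms of idle link latency and ~10Mbyte/s of bandwidth
   available to the server, the window would be 0.020 * 10485760 = ~200Kbyte,
   i.e. keep roughly 200Kbyte of read-ahead in flight to cover the round trip. */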
NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
		    union smb_read *io, ssize_t as_read)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_tree *tree = private->tree;
	struct cache_file_entry *cache;
	off_t next_position; /* this read offset+length+window */
	off_t end_position; /* position we read-ahead to */
	off_t cache_populated;
	off_t read_position, new_extent;

	if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("A\n"));
	if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("B\n"));
	cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
	DEBUG(5,("C\n"));
	/* don't read-ahead if we are in bulk validate mode */
	if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("D\n"));
	/* if we can't trust what we read-ahead anyway then don't bother although
	 * if delta-reads are enabled we can do so in order to get something to
	 * delta against */
	DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
				 (long long int)(cache_len(cache)),
				 (long long int)(cache->readahead_extent),
				 (long long int)(as_read),
				 cache->readahead_window,private->cache_readahead));
	if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
		DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
					 cache->status));
		return NT_STATUS_UNSUCCESSFUL;
	}

	/* as_read is the mincnt bytes of a request being made or the
	   out.nread of completed sync requests
	   Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
	   then this may often NOT be the case if readahead_window < requestsize; so we will
	   get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
	   all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
	   this and have failed sparse writes adjust the cache->readahead_extent back to actual size */

	/* predict the file pointer's next position */
	next_position=io->generic.in.offset + as_read;
	DEBUG(5,("Next position: %lld (%lld + %lld)\n",
		 (long long int)next_position,
		 (long long int)io->generic.in.offset,
		 (long long int)as_read));
	/* calculate the limit of the validated or requested cache */
	cache_populated=MAX(cache->validated_extent, cache->readahead_extent);

	/* will the new read take us beyond the current extent without gaps? */
	if (cache_populated < io->generic.in.offset) {
		/* this read-ahead is a read-behind-pointer */
		new_extent=cache_populated;
	} else {
		new_extent=MAX(next_position, cache_populated);
	}

	/* as far as we can tell new_extent is the smallest offset that doesn't
	   have a pending read request on it. Of course if we got a short read then
	   we will have a cache-gap which we can't handle and need to read from
	   a shrunk readahead_extent, which we don't currently handle */
	read_position=new_extent;

	/* of course if we know how big the remote file is we should limit at that */
	/* we should also mark-out which read-ahead requests are pending so that we
	 * don't repeat them while they are in-transit. */
	/* we can't really use next_position until we can have caches with holes
	   UNLESS next_position < new_extent, because a next_position well before
	   new_extent is no reason to extend it further, we only want to extend
	   with read-aheads if we have cause to suppose the read-ahead data will
	   be wanted, i.e. the next_position is near new_extent.
	   So we can't justify reading beyond window+next_position, but if
	   next_position is leaving gaps, we use new_extent instead */
	end_position=MIN(new_extent, next_position) + cache->readahead_window;
	DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
		 (long long int)read_position,
		 (long long int)(next_position + cache->readahead_window),
		 cache->readahead_window,
		 (long long int)end_position,
		 private->readahead_spare));
	/* do we even need to read? */
	if (! (read_position < end_position)) return NT_STATUS_OK;

	/* readahead_spare is for the whole session (mid/tid?) and may need sharing
	   out over files and other tree-connects or something */
	while (read_position < end_position &&
	       private->readahead_spare > 0) {
		struct smbcli_request *c_req = NULL;
		ssize_t read_remaining = end_position - read_position;
		ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
					 MIN(read_remaining, private->cache_readaheadblock));
		void *req = NULL; /* for the ASYNC_RECV_TAIL_F_ORPHAN macro */
		uint8_t* data;
		union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);

		if (! io_copy)
			return NT_STATUS_NO_MEMORY;

#warning we are ignoring read_for_execute as far as the cache goes
		io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
		io_copy->generic.in.offset=read_position;
		io_copy->generic.in.mincnt=read_block;
		io_copy->generic.in.maxcnt=read_block;
		/* what is generic.in.remaining for? */
		io_copy->generic.in.remaining = MIN(65535,read_remaining);
		io_copy->generic.out.nread=0;

#warning someone must own io_copy, tree, maybe?
		data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
		DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
		if (! data) {
			talloc_free(io_copy);
			return NT_STATUS_NO_MEMORY;
		}
		io_copy->generic.out.data=data;

		/* are we able to pull anything from the cache to validate this read-ahead?
		   NOTE: there is no point in reading ahead merely to re-validate the
		   cache if we don't have oplocks and can't save it....
		   ... or maybe there is if we think a read will come that can be matched
		   up to this response while it is still on the wire */
#warning so we need to distinguish between pipe-line read-ahead and revalidation
		if (/*(cache->status & CACHE_READ)!=0 && */
		    cache_len(cache) >
		    (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
		    cache->validated_extent <
		    (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
			ssize_t pre_fill;

			pre_fill = cache_raw_read(cache, data,
						  io_copy->generic.in.offset,
						  io_copy->generic.in.maxcnt);
			DEBUG(5,("Data read into %p %d\n",data, pre_fill));
			if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
				io_copy->generic.out.nread=pre_fill;
				read_block=pre_fill;
			}
		}

		c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);

		if (c_req) {
			private->readahead_spare--;
			f->readahead_pending++;
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
			if (cache->readahead_extent < read_position+read_block)
				cache->readahead_extent=read_position+read_block;
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
			/* so we can decrease read-ahead counter for this session */
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
			ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

			/* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
			talloc_steal(c_req->async.private, c_req);
			talloc_steal(c_req->async.private, io_copy);
			read_position+=read_block;
		} else {
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
			talloc_free(io_copy);
			break;
		}
	}

	DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
	return NT_STATUS_OK;
}

struct proxy_validate_parts_parts {
	struct proxy_Read* r;
	struct ntvfs_request *req;
	struct proxy_file *f;
	struct async_read_fragments *fragments;
	off_t offset;
	ssize_t remaining;
	bool complete;
	declare_checksum(digest);
	struct MD5Context context;
};

NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
				     struct proxy_validate_parts_parts *parts);

/* this will be the new struct proxy_Read based read function, for now
   it just deals with non-cached based validate to a regular server */
static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
			       struct ntvfs_request *req,
			       struct proxy_Read *r,
			       union smb_handle *file)
{
	struct proxy_private *private = ntvfs->private_data;
	struct proxy_validate_parts_parts *parts;
	struct async_read_fragments *fragments;
	struct proxy_file *f;
	NTSTATUS status;

	f = ntvfs_handle_get_backend_data(file->ntvfs, ntvfs);
	if (!f) return NT_STATUS_INVALID_HANDLE;
	r->in.fnum = f->fnum;

	DEBUG(5,("%s: fnum=%d\n",__FUNCTION__,f->fnum));

	parts = talloc_zero(req, struct proxy_validate_parts_parts);
	DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
	NT_STATUS_HAVE_NO_MEMORY(parts);

	fragments = talloc_zero(parts, struct async_read_fragments);
	NT_STATUS_HAVE_NO_MEMORY(fragments);

	parts->fragments=fragments;

	parts->r=r;
	parts->f=f;
	parts->req=req;
	/* processed offset */
	parts->offset=r->in.offset;
	parts->remaining=r->in.maxcnt;
	fragments->async=true;

	MD5Init(&parts->context);

	/* start a read-loop which will continue in the callback until it is
	   all done */
	status=proxy_validate_parts(ntvfs, parts);
	if (parts->complete) {
		/* Make sure we are not async */
		DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
		return proxy_validate_complete(parts);
	}

	/* Assert if status!=NT_STATUS_OK then parts->complete==true */
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
	DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
	return status;
}

NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
{
	NTSTATUS status;
	struct proxy_Read* r=parts->r;

	MD5Final(parts->digest, &parts->context);

	status = parts->fragments->status;
	r->out.result = status;
	r->out.response.generic.count=r->out.nread;

	DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg(status),
		 r->out.response.generic.count));

	DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
	dump_data(5, r->in.digest.digest, sizeof(parts->digest));
	DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
	dump_data(5, parts->digest, sizeof(parts->digest));

	if (NT_STATUS_IS_OK(status) &&
	    (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
		r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
		DEBUG(5,("======= VALIDATED FINE \n\n\n"));
	} else if (r->in.flags & PROXY_USE_ZLIB) {
		ssize_t size = r->out.response.generic.count;
		DEBUG(5,("======= VALIDATED WRONG \n\n\n"));
		if (compress_block(r->out.response.generic.data, &size) ) {
			r->out.flags|=PROXY_USE_ZLIB;
			r->out.response.compress.count=size;
			r->out.response.compress.data=r->out.response.generic.data;
			DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
				 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
		}
	}

	/* assert: this must only be true if we are in a callback */
	if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
		/* we are async complete, we need to call the sendfn */
		parts->req->async_states->status=status;
		DEBUG(5,("Fragments async response sending\n"));

		parts->req->async_states->send_fn(parts->req);
		return NT_STATUS_OK;
	}

	return status;
}

NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
	struct proxy_Read* r=parts->r;
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag;
	struct async_read_fragments* fragments=fragment->fragments;

	DEBUG(5,("%s: parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));
	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);
	DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));

	fragment->status=status;

	if (NT_STATUS_IS_OK(status)) {
		/* TODO: If we are not sequentially "next", queue until we can do it */
		/* log this data in r->out.generic.data */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
		/* Don't want to go past mincnt */
		off_t io_extent=r->in.offset + r->in.mincnt;
		off_t end_offset=MIN(io_extent, extent);

		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (! (start_offset >= io_extent)) {
			uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
			r->out.nread=end_offset - r->in.offset;
		}

		MD5Update(&parts->context, io_frag->generic.out.data,
			  io_frag->generic.out.nread);

		parts->fragments->status=status;
		status=proxy_validate_parts(ntvfs, parts);
	} else {
		parts->fragments->status=status;
	}

	DLIST_REMOVE(fragments->fragments, fragment);
	/* this will free the io_frag too */
	talloc_free(fragment);

	if (parts->complete || NT_STATUS_IS_ERR(status)) {
		/* this will call sendfn, the chain handler won't know... but
		   should have no more handlers queued */
		return proxy_validate_complete(parts);
	}

	return NT_STATUS_OK;
}

/* continue a read loop, possibly from a callback */
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
				     struct proxy_validate_parts_parts *parts)
{
	struct proxy_private *private = ntvfs->private_data;
	union smb_read *io_frag;
	struct async_read_fragment *fragment;
	struct smbcli_request *c_req = NULL;
	ssize_t size=private->tree->session->transport->negotiate.max_xmit
		- (MIN_SMB_SIZE+32);

	/* Have we already read enough? */
	if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
		parts->complete=true;
		return NT_STATUS_OK;
	}

	size=MIN(size, parts->remaining);

	fragment=talloc_zero(parts->fragments, struct async_read_fragment);
	NT_STATUS_HAVE_NO_MEMORY(fragment);

	io_frag = talloc_zero(fragment, union smb_read);
	NT_STATUS_HAVE_NO_MEMORY(io_frag);

	io_frag->generic.out.data = talloc_size(io_frag, size);
	NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);

	io_frag->generic.level = RAW_READ_GENERIC;
	io_frag->generic.in.file.fnum = parts->r->in.fnum;
	io_frag->generic.in.offset = parts->offset;
	io_frag->generic.in.mincnt = size;
	io_frag->generic.in.maxcnt = size;
	io_frag->generic.in.remaining = 0;
#warning maybe true is more permissive?
	io_frag->generic.in.read_for_execute = false;

	//c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
	c_req = smb_raw_read_send(private->tree, io_frag);
	NT_STATUS_HAVE_NO_MEMORY(c_req);

	parts->offset+=size;
	parts->remaining-=size;
	fragment->c_req = c_req;
	fragment->io_frag = io_frag;
	fragment->fragments=parts->fragments;
	DLIST_ADD(parts->fragments->fragments, fragment);

	{ void* req=NULL;
		ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
		ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
	}

	DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));

	return NT_STATUS_OK;
}
1496 read from a file
1498 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
1499 struct ntvfs_request *req, union smb_read *io)
1501 struct proxy_private *private = ntvfs->private_data;
1502 struct smbcli_request *c_req;
1503 struct proxy_file *f;
1504 struct async_read_fragments *fragments=NULL;
1505 /* how much of read-from-cache is certainly valid */
1506 ssize_t valid=0;
1507 off_t offset=io->generic.in.offset+valid;
1508 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
1510 SETUP_PID;
1512 if (io->generic.level != RAW_READ_GENERIC &&
1513 private->map_generic) {
1514 return ntvfs_map_read(ntvfs, req, io);
1517 SETUP_FILE_HERE(f);
1519 DEBUG(3,("%s offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
1520 io->generic.in.offset,
1521 io->generic.in.mincnt,
1522 io->generic.in.maxcnt));
1523 io->generic.out.nread=0;
1524 /* attempt to read from cache. if nread becomes non-zero then we
1525 have cache to validate. Instead of returning "valid" value, cache_read
1526 should probably return an async_read_fragment structure */
1528 if (private->cache_enabled) {
1529 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
1531 if (NT_STATUS_IS_OK(status)) {
1532 /* if we read enough valid data, return it */
1533 if (valid > 0 && valid>=io->generic.in.mincnt) {
1534 /* valid will not be bigger than maxcnt */
1535 io->generic.out.nread=valid;
1536 DEBUG(1,("Read from cache offset=%d size=%d\n",
1537 (int)(io->generic.in.offset),
1538 (int)(io->generic.out.nread)) );
1539 return status;
1544 fragments=talloc_zero(req, struct async_read_fragments);
1545 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
1546 /* See if there are pending reads that would satisfy this request
1547 We have a validated read up to io->generic.out.nread. Anything between
1548 this and mincnt MUST be read, but we could first try and attach to
1549 any pending read-ahead on the same file.
1550 If those read-aheads fail we will re-issue a regular read from the
1551 callback handler and hope it hasn't taken too long. */
1553 /* offset is the extent of the file from which we still need to find
1554 matching read-requests. */
1555 offset=io->generic.in.offset+valid;
1556 /* limit is the byte beyond the last byte for which we need a request.
1557 This used to be mincnt, but is now maxcnt to cope with validate reads.
1558 Maybe we can switch back to mincnt when proxy_read struct is used
1559 instead of smb_read.
1561 limit=io->generic.in.offset+io->generic.in.maxcnt;
1563 while (offset < limit) {
1564 /* Should look for the read-ahead with offset <= in.offset+out.nread
1565 with the longest span, but there is only likely to be one anyway so
1566 just take the first */
1567 struct async_info* pending=private->pending;
1568 union smb_read *readahead_io=NULL;
1569 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
1570 while(pending) {
1571 if (pending->c_req->async.fn == async_read_handler) {
1572 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
1573 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
1575 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
1576 readahead_io->generic.in.offset <= offset &&
1577 readahead_io->generic.in.offset +
1578 readahead_io->generic.in.mincnt > offset) break;
1580 readahead_io=NULL;
1581 pending=pending->next;
1583 /* ASSERT(readahead_io == pending->c_req->async.params) */
1584 if (pending && readahead_io) {
1585 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1586 fragment->fragments=fragments;
1587 fragment->io_frag=readahead_io;
1588 fragment->c_req = pending->c_req;
1589 /* we found one, so attach to it. We DO need a talloc_reference
1590 because the original send_fn might be called before ALL chained
1591 handlers, and our handler will call its own send_fn first. ugh.
1592 Maybe we need to separate reverse-mapping callbacks from data users? */
1593 /* Note: the read-ahead io is passed as io, and our req io is
1594 in io_frag->io */
1595 //talloc_reference(req, pending->req);
1596 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
1597 readahead_io->generic.in.offset,
1598 readahead_io->generic.in.mincnt));
1599 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
1600 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1601 DEBUG(5,("Attached OK\n"));
1602 #warning we don't want to return if we fail to attach, just break
1603 DLIST_ADD(fragments->fragments, fragment);
1604 /* updated offset for which we have reads */
1605 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
1606 } else {
1607 /* there are no pending reads to fill this so issue one up to
1608 the maximum supported read size. We could see when the next
1609 pending read is (if any) and only read up till there... later...
1610 Issue a fragment request for what is left, clone io.
1611 In the case that there were no fragments this will be the original read
1612 but with a cloned io struct */
1613 off_t next_offset;
1614 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
1615 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1616 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
1617 ssize_t offset_inc=offset-io_frag->generic.in.offset;
1618 /* 250 is a guess at ndr rpc overheads */
1619 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
1620 private->tree->session->transport->negotiate.max_xmit) \
1621 - (MIN_SMB_SIZE+32);
1622 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
1623 readsize=MIN(limit-offset, readsize);
1625 DEBUG(5,("Issuing direct read\n"));
1626 /* reduce the cached read (if any). nread is unsigned */
1627 if (io_frag->generic.out.nread > offset_inc) {
1628 io_frag->generic.out.nread-=offset_inc;
1629 /* don't make nread buffer look too big */
1630 if (io_frag->generic.out.nread > readsize)
1631 io_frag->generic.out.nread = readsize;
1632 } else {
1633 io_frag->generic.out.nread=0;
1635 /* adjust the data pointer so we read to the right place */
1636 io_frag->generic.out.data+=offset_inc;
1637 io_frag->generic.in.offset=offset;
1638 io_frag->generic.in.maxcnt=readsize;
1639 /* we don't mind mincnt being smaller if this is the last frag,
1640 but then we can already handle it being bigger but not reached...
1641 The spell would be:
1642 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
1644 io_frag->generic.in.mincnt=readsize;
1645 fragment->fragments=fragments;
1646 fragment->io_frag=io_frag;
1647 #warning attach to send_fn handler
1648 /* what if someone attaches to us? Our send_fn is called from our
1649 chained handler which will be before their handler and io will
1650 already be freed. We need to keep a reference to the io and the data
1651 but we don't know where it came from in order to take a reference.
1652 We need therefore to tackle calling of send_fn AFTER all other handlers */
1654 /* Calculate next offset (in advance) */
1655 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
1657 /* if we are (going to be) the last fragment and we are in VALIDATE
1658 mode, see if we can do a bulk validate now.
1659 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
1660 don't do a validate on a receive validate read
1662 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
1663 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
1664 ssize_t length=private->cache_validatesize;
1665 declare_checksum(digest);
1667 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
1668 length, (unsigned long long) offset));
1669 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
1670 /* no point in doing it if md5'd length < current out.nread
1671 remember: out.data contains this request's cached response
1672 if validate succeeds */
1673 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
1674 /* upgrade the read, allocate the proxy_read struct here
1675 and fill in the extras, no more out-of-band stuff */
1676 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
1677 dump_data (5, digest, sizeof(digest));
1679 r=talloc_zero(io_frag, struct proxy_Read);
1680 memcpy(r->in.digest.digest, digest, sizeof(digest));
1681 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
1682 io_frag->generic.in.maxcnt = length;
1683 /* the proxy send function will calculate the checksum based on *data */
1684 } else {
1685 /* not enough in cache to make it worthwhile anymore */
1686 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x len=%lld\n",
1687 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0,
1688 (unsigned long long)length));
1689 cache_handle_novalidate(f);
1690 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
1691 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
1693 } else {
1694 if (f->cache && f->cache->status & CACHE_VALIDATE) {
1695 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
1696 (long long) next_offset,
1697 (long long) limit));
1701 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
1702 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
1703 io_frag->generic.in.maxcnt));
1704 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
1705 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
1706 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
1707 fragment->c_req=c_req;
1708 DLIST_ADD(fragments->fragments, fragment);
1709 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
1710 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1711 DEBUG(5,("Frag response chained\n"));
1712 /* normally we would only install the chain_handler if we wanted async
1713 response, but as it is the async_read_fragment handler that calls send_fn
1714 based on fragments->async, instead of async_chain_handler, we don't
1715 need to worry about this call completing async'ly while we are
1716 waiting on the other attached calls. Otherwise we would not attach
1717 the async_chain_handler (via async_read_handler) because of the wait
1718 below */
1719 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
1720 void* req=NULL;
1721 /* call async_chain_hander not read handler so that folk can't
1722 attach to it, till we solve the problem above */
1723 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1725 offset = next_offset;
1727 DEBUG(5,("Next fragment\n"));
1730 /* do we still need a final fragment? Issue a read */
1732 DEBUG(5,("No frags left to read\n"));
1735 /* issue new round of read-aheads */
1736 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
1737 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
1738 DEBUG(5,("== Done Read aheads\n"));
1740 /* If we have fragments but we are not called async, we must sync-wait on them */
1741 /* did we map the entire request to pending reads? */
1742 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1743 struct async_read_fragment *fragment;
1744 DEBUG(5,("Sync waiting\n"));
1745 /* fragment gets free'd during the chain_handler so we start at
1746 the top each time */
1747 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
1748 /* Any fragments async handled while we sync-wait on one
1749 will remove themselves from the list and not get sync waited */
1750 sync_chain_handler(fragment->c_req);
1751 /* if we have a non-ok result AND we know we have all the responses
1752 up to extent, then we could quit the loop early and change the
1753 fragments->async to true so the final irrelevant responses would
1754 come async and we could send our response now - but we don't
1755 track that detail until we have cache-maps that we can use to
1756 track the responded fragments and combine responded linear extents
1757 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
1759 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
1760 return fragments->status;
1763 DEBUG(5,("Async returning\n"));
1764 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1765 return NT_STATUS_OK;
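/* A minimal sketch (fenced off from the build) of the fragment walk above,
   with the read-ahead attach elided: each pass either attaches to a pending
   read covering 'offset' or issues a direct fragment capped at the
   negotiated transport size. */
#if 0
static void example_read_walk(struct proxy_private *private, union smb_read *io)
{
	off_t offset = io->generic.in.offset;
	off_t limit = io->generic.in.offset + io->generic.in.maxcnt;

	while (offset < limit) {
		ssize_t readsize = MIN(PROXY_NTIOCTL_MAXDATA,
			private->tree->session->transport->negotiate.max_xmit)
			- (MIN_SMB_SIZE+32);
		if (readsize > 0xFFFF) readsize = 0xFFFF;
		readsize = MIN(limit - offset, readsize);
		/* proxy_read clones io into an io_frag of 'readsize' bytes
		   here and chains async_read_fragment on the new c_req */
		offset += readsize;
	}
}
#endif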
1769 a handler to de-fragment async write replies back to one request.
1770 Can cope with out-of-order async responses by waiting for all responses
1771 on an NT_STATUS_OK case so that nwritten is properly adjusted
1773 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1775 struct smbcli_request *c_req = async->c_req;
1776 struct ntvfs_request *req = async->req;
1777 struct proxy_file *f=async->f;
1778 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
1779 /* this is the io against which the fragment is to be applied */
1780 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
1781 /* this is the io for the write that issued the callback */
1782 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
1783 struct async_write_fragments* fragments=fragment->fragments;
1784 ssize_t extent=0;
1786 /* if request is not already received by a chained handler, read it */
1787 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
1788 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
1790 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
1791 get_friendly_nt_error_msg(status)));
1793 fragment->status = status;
1795 DLIST_REMOVE(fragments->fragments, fragment);
1797 /* did this one fail? */
1798 if (! NT_STATUS_IS_OK(fragment->status)) {
1799 if (NT_STATUS_IS_OK(fragments->status)) {
1800 fragments->status=fragment->status;
1802 } else {
1803 /* No fragments have yet failed, keep collecting responses */
1804 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
1806 /* we broke up the write so it could all be written. If only some has
1807 been written of this block, and then some of the next block,
1808 it could leave unwritten holes! We will only acknowledge up to the
1809 first partial write, and let the client deal with it.
1810 If server can return NT_STATUS_OK for a partial write so can we */
1811 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
1812 DEBUG(4,("Fragmented write only partially successful\n"));
1814 /* Shrink the master nwritten */
1815 if ( ! fragments->partial ||
1816 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
1817 io->generic.out.nwritten = extent - io->generic.in.offset;
1819 /* stop any further successes from extending the partial write */
1820 fragments->partial=true;
1821 } else {
1822 /* only grow the master nwritten if we haven't logged a partial write */
1823 if (! fragments->partial &&
1824 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
1825 io->generic.out.nwritten = extent - io->generic.in.offset;
1830 /* if this was the last fragment, clean up */
1831 if (! fragments->fragments) {
1832 DEBUG(5,("Async write re-fragmented with %d of %d\n",
1833 io->generic.out.nwritten,
1834 io->generic.in.count));
1835 if (NT_STATUS_IS_OK(fragments->status)) {
1836 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
1837 io->generic.in.offset);
1839 if (fragments->async) {
1840 req->async_states->status=fragments->status;
1841 #warning it's not good freeing early if other pending requests have io allocated against this request which will now be freed
1842 req->async_states->send_fn(req);
1843 DEBUG(5,("Async response sent\n"));
1844 } else {
1845 DEBUG(5,("Fragments SYNC return\n"));
1849 return status;
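/* A worked example (fenced off from the build) of the partial-write
   accounting above: a 64k write split into two 32k fragments where the
   first only manages 20k. The master nwritten must stop at the hole even
   though the second fragment succeeds in full. All values are invented. */
#if 0
static void example_partial_write_extent(void)
{
	off_t base = 0;       /* io->generic.in.offset */
	ssize_t nwritten = 0; /* io->generic.out.nwritten */
	bool partial = false;

	/* fragment 1: offset 0, asked 0x8000, wrote only 0x5000 */
	ssize_t extent = 0 + 0x5000;
	if (!partial || (base + nwritten) > extent)
		nwritten = extent - base; /* 0x5000 */
	partial = true;

	/* fragment 2: offset 0x8000, wrote 0x8000 in full, but must not
	   grow nwritten past the hole left by fragment 1 */
	extent = 0x8000 + 0x8000;
	if (!partial && (base + nwritten) < extent)
		nwritten = extent - base; /* skipped: nwritten stays 0x5000 */
}
#endif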
1853 a handler for async write replies
1855 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1857 struct smbcli_request *c_req = async->c_req;
1858 struct ntvfs_request *req = async->req;
1859 struct proxy_file *f=async->f;
1860 union smb_write *io=async->parms;
1862 if (c_req)
1863 status = smb_raw_write_recv(c_req, async->parms);
1865 cache_handle_save(f, io->generic.in.data,
1866 io->generic.out.nwritten,
1867 io->generic.in.offset);
1869 return status;
1873 write to a file
1875 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
1876 struct ntvfs_request *req, union smb_write *io)
1878 struct proxy_private *private = ntvfs->private_data;
1879 struct smbcli_request *c_req;
1880 struct proxy_file *f;
1882 SETUP_PID;
1884 if (io->generic.level != RAW_WRITE_GENERIC &&
1885 private->map_generic) {
1886 return ntvfs_map_write(ntvfs, req, io);
1888 SETUP_FILE_HERE(f);
1890 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
1891 #warning ERROR get rid of this
1892 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1893 NTSTATUS status;
1894 if (PROXY_REMOTE_SERVER(private)) {
1895 /* Do a proxy write */
1896 status=proxy_smb_raw_write(ntvfs, io, f);
1897 } else if (io->generic.in.count >
1898 private->tree->session->transport->negotiate.max_xmit) {
1900 /* smbcli_write can deal with large writes, which are bigger than
1901 tree->session->transport->negotiate.max_xmit */
1902 ssize_t size=smbcli_write(private->tree,
1903 io->generic.in.file.fnum,
1904 io->generic.in.wmode,
1905 io->generic.in.data,
1906 io->generic.in.offset,
1907 io->generic.in.count);
1909 if (size==io->generic.in.count || size > 0) {
1910 io->generic.out.nwritten=size;
1911 status=NT_STATUS_OK;
1912 } else {
1913 status=NT_STATUS_UNSUCCESSFUL;
1915 } else {
1916 status=smb_raw_write(private->tree, io);
1919 /* Save write in cache */
1920 if (NT_STATUS_IS_OK(status)) {
1921 cache_handle_save(f, io->generic.in.data,
1922 io->generic.out.nwritten,
1923 io->generic.in.offset);
1926 return status;
1929 /* smb_raw_write_send can't deal with large writes, which are bigger than
1930 tree->session->transport->negotiate.max_xmit so we have to break it up
1931 trying to preserve the async nature of the call as much as possible */
1932 if (PROXY_REMOTE_SERVER(private)) {
1933 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
1934 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
1935 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1936 } else if (io->generic.in.count <=
1937 private->tree->session->transport->negotiate.max_xmit) {
1938 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
1939 c_req = smb_raw_write_send(private->tree, io);
1940 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1941 } else {
1942 ssize_t remaining = io->generic.in.count;
1943 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
1944 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
1945 int done = 0;
1946 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
1948 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
1949 __FUNCTION__, io->generic.in.count,
1950 private->tree->session->transport->negotiate.max_xmit));
1952 fragments->io = io;
1953 io->generic.out.nwritten=0;
1954 io->generic.out.remaining=0;
1956 do {
1957 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
1958 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
1959 ssize_t size = MIN(block, remaining);
1961 fragment->fragments = fragments;
1962 fragment->io_frag = io_frag;
1964 io_frag->generic.level = io->generic.level;
1965 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
1966 io_frag->generic.in.wmode = io->generic.in.wmode;
1967 io_frag->generic.in.count = size;
1968 io_frag->generic.in.offset = io->generic.in.offset + done;
1969 io_frag->generic.in.data = io->generic.in.data + done;
1971 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
1972 if (! c_req) {
1973 /* let pending requests clean-up when ready */
1974 fragments->status=NT_STATUS_UNSUCCESSFUL;
1975 talloc_steal(NULL, fragments);
1976 DEBUG(3,("Can't send request fragment\n"));
1977 return NT_STATUS_UNSUCCESSFUL;
1980 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
1981 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
1982 fragment->c_req=c_req;
1983 DLIST_ADD(fragments->fragments, fragment);
1985 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1986 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
1987 DEBUG(5,("Frag response chained\n"));
1989 remaining -= size;
1990 done += size;
1991 } while(remaining > 0);
1993 /* this strategy has the callback chain attached to each c_req, so we
1994 don't use the ASYNC_RECV_TAIL* to install a general one */
1997 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
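/* A condensed sketch (fenced off from the build) of the splitter above: a
   too-big write of 'count' bytes becomes ceil(count/block) fragments that
   all share one async_write_fragments context. */
#if 0
static int example_count_write_frags(ssize_t count, ssize_t max_xmit)
{
	ssize_t block = max_xmit - (MIN_SMB_SIZE+32);
	ssize_t remaining = count;
	int frags = 0;

	do {
		remaining -= MIN(block, remaining);
		frags++;
	} while (remaining > 0);
	return frags;
}
#endif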
2001 a handler for async seek replies
2003 static void async_seek(struct smbcli_request *c_req)
2005 struct async_info *async = c_req->async.private;
2006 struct ntvfs_request *req = async->req;
2007 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
2008 talloc_free(async);
2009 req->async_states->send_fn(req);
2013 seek in a file
2015 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
2016 struct ntvfs_request *req,
2017 union smb_seek *io)
2019 struct proxy_private *private = ntvfs->private_data;
2020 struct smbcli_request *c_req;
2022 SETUP_PID_AND_FILE;
2024 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2025 return smb_raw_seek(private->tree, io);
2028 c_req = smb_raw_seek_send(private->tree, io);
2030 ASYNC_RECV_TAIL(io, async_seek);
2034 flush a file
2036 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
2037 struct ntvfs_request *req,
2038 union smb_flush *io)
2040 struct proxy_private *private = ntvfs->private_data;
2041 struct smbcli_request *c_req;
2043 SETUP_PID;
2044 switch (io->generic.level) {
2045 case RAW_FLUSH_FLUSH:
2046 SETUP_FILE;
2047 break;
2048 case RAW_FLUSH_ALL:
2049 io->generic.in.file.fnum = 0xFFFF;
2050 break;
2051 case RAW_FLUSH_SMB2:
2052 return NT_STATUS_INVALID_LEVEL;
2055 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2056 return smb_raw_flush(private->tree, io);
2059 c_req = smb_raw_flush_send(private->tree, io);
2061 SIMPLE_ASYNC_TAIL;
2065 close a file
2067 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
2068 struct ntvfs_request *req, union smb_close *io)
2070 struct proxy_private *private = ntvfs->private_data;
2071 struct smbcli_request *c_req;
2072 struct proxy_file *f;
2073 union smb_close io2;
2075 SETUP_PID;
2077 if (io->generic.level != RAW_CLOSE_GENERIC &&
2078 private->map_generic) {
2079 return ntvfs_map_close(ntvfs, req, io);
2081 SETUP_FILE_HERE(f);
2082 /* Note, we aren't free-ing f, or its h here. Should we?
2083 even if file-close fails, we'll remove it from the list,
2084 what else would we do? Maybe we should not remove until
2085 after the proxied call completes? */
2086 DLIST_REMOVE(private->files, f);
2088 /* possibly samba's smb_raw_close_send can't do RAW_CLOSE_GENERIC yet */
2089 if (! (c_req = smb_raw_close_send(private->tree, io))) {
2090 if (io->generic.level == RAW_CLOSE_GENERIC) {
2091 ZERO_STRUCT(io2);
2092 io2.close.level = RAW_CLOSE_CLOSE;
2093 io2.close.in.file = io->generic.in.file;
2094 io2.close.in.write_time = io->generic.in.write_time;
2095 io = &io2;
2097 c_req = smb_raw_close_send(private->tree, io);
2100 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2101 return smbcli_request_simple_recv(c_req);
2104 SIMPLE_ASYNC_TAIL;
2108 exit - closing files open by the pid
2110 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
2111 struct ntvfs_request *req)
2113 struct proxy_private *private = ntvfs->private_data;
2114 struct smbcli_request *c_req;
2116 SETUP_PID;
2118 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2119 return smb_raw_exit(private->tree->session);
2122 c_req = smb_raw_exit_send(private->tree->session);
2124 SIMPLE_ASYNC_TAIL;
2128 logoff - closing files open by the user
2130 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
2131 struct ntvfs_request *req)
2133 /* we can't do this right in the proxy backend .... */
2134 return NT_STATUS_OK;
2138 setup for an async call - nothing to do yet
2140 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
2141 struct ntvfs_request *req,
2142 void *private)
2144 return NT_STATUS_OK;
2148 cancel an async call
2150 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
2151 struct ntvfs_request *req)
2153 struct proxy_private *private = ntvfs->private_data;
2154 struct async_info *a;
2156 /* find the matching request */
2157 for (a=private->pending;a;a=a->next) {
2158 if (a->req == req) {
2159 break;
2163 if (a == NULL) {
2164 return NT_STATUS_INVALID_PARAMETER;
2167 return smb_raw_ntcancel(a->c_req);
2171 lock a byte range
2173 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
2174 struct ntvfs_request *req, union smb_lock *io)
2176 struct proxy_private *private = ntvfs->private_data;
2177 struct smbcli_request *c_req;
2179 SETUP_PID;
2181 if (io->generic.level != RAW_LOCK_GENERIC &&
2182 private->map_generic) {
2183 return ntvfs_map_lock(ntvfs, req, io);
2185 SETUP_FILE;
2187 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2188 return smb_raw_lock(private->tree, io);
2191 c_req = smb_raw_lock_send(private->tree, io);
2192 SIMPLE_ASYNC_TAIL;
2196 set info on an open file
2198 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
2199 struct ntvfs_request *req,
2200 union smb_setfileinfo *io)
2202 struct proxy_private *private = ntvfs->private_data;
2203 struct smbcli_request *c_req;
2205 SETUP_PID_AND_FILE;
2207 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2208 return smb_raw_setfileinfo(private->tree, io);
2210 c_req = smb_raw_setfileinfo_send(private->tree, io);
2212 SIMPLE_ASYNC_TAIL;
2217 a handler for async fsinfo replies
2219 static void async_fsinfo(struct smbcli_request *c_req)
2221 struct async_info *async = c_req->async.private;
2222 struct ntvfs_request *req = async->req;
2223 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, async->parms);
2224 talloc_free(async);
2225 req->async_states->send_fn(req);
2229 return filesystem space info
2231 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
2232 struct ntvfs_request *req, union smb_fsinfo *fs)
2234 struct proxy_private *private = ntvfs->private_data;
2235 struct smbcli_request *c_req;
2237 SETUP_PID;
2239 /* QFS Proxy */
2240 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
2241 fs->proxy_info.out.major_version=1;
2242 fs->proxy_info.out.minor_version=0;
2243 fs->proxy_info.out.capability=0;
2244 return NT_STATUS_OK;
2247 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2248 return smb_raw_fsinfo(private->tree, req, fs);
2251 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
2253 ASYNC_RECV_TAIL(fs, async_fsinfo);
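/* A probe sketch (fenced off from the build): a downstream proxy could
   detect a proxy peer by issuing RAW_QFS_PROXY_INFO and checking the
   version fields filled in above. example_is_proxy_peer is hypothetical. */
#if 0
static bool example_is_proxy_peer(struct smbcli_tree *tree, TALLOC_CTX *mem_ctx)
{
	union smb_fsinfo fs;
	ZERO_STRUCT(fs);
	fs.generic.level = RAW_QFS_PROXY_INFO;
	if (!NT_STATUS_IS_OK(smb_raw_fsinfo(tree, mem_ctx, &fs))) return false;
	return fs.proxy_info.out.major_version >= 1;
}
#endif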
2257 return print queue info
2259 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
2260 struct ntvfs_request *req, union smb_lpq *lpq)
2262 return NT_STATUS_NOT_SUPPORTED;
2266 list files in a directory matching a wildcard pattern
2268 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
2269 struct ntvfs_request *req, union smb_search_first *io,
2270 void *search_private,
2271 bool (*callback)(void *, const union smb_search_data *))
2273 struct proxy_private *private = ntvfs->private_data;
2275 SETUP_PID;
2277 return smb_raw_search_first(private->tree, req, io, search_private, callback);
2280 /* continue a search */
2281 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
2282 struct ntvfs_request *req, union smb_search_next *io,
2283 void *search_private,
2284 bool (*callback)(void *, const union smb_search_data *))
2286 struct proxy_private *private = ntvfs->private_data;
2288 SETUP_PID;
2290 return smb_raw_search_next(private->tree, req, io, search_private, callback);
2293 /* close a search */
2294 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
2295 struct ntvfs_request *req, union smb_search_close *io)
2297 struct proxy_private *private = ntvfs->private_data;
2299 SETUP_PID;
2301 return smb_raw_search_close(private->tree, io);
2305 a handler for async trans2 replies
2307 static void async_trans2(struct smbcli_request *c_req)
2309 struct async_info *async = c_req->async.private;
2310 struct ntvfs_request *req = async->req;
2311 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
2312 talloc_free(async);
2313 req->async_states->send_fn(req);
2316 /* raw trans2 */
2317 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
2318 struct ntvfs_request *req,
2319 struct smb_trans2 *trans2)
2321 struct proxy_private *private = ntvfs->private_data;
2322 struct smbcli_request *c_req;
2324 if (private->map_trans2) {
2325 return NT_STATUS_NOT_IMPLEMENTED;
2328 SETUP_PID;
2329 #warning we should be mapping file handles here
2331 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2332 return smb_raw_trans2(private->tree, req, trans2);
2335 c_req = smb_raw_trans2_send(private->tree, trans2);
2337 ASYNC_RECV_TAIL(trans2, async_trans2);
2341 /* SMBtrans - not used on file shares */
2342 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
2343 struct ntvfs_request *req,
2344 struct smb_trans2 *trans2)
2346 return NT_STATUS_ACCESS_DENIED;
2350 a handler for async change notify replies
2352 static void async_changenotify(struct smbcli_request *c_req)
2354 struct async_info *async = c_req->async.private;
2355 struct ntvfs_request *req = async->req;
2356 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
2357 talloc_free(async);
2358 req->async_states->send_fn(req);
2361 /* change notify request - always async */
2362 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
2363 struct ntvfs_request *req,
2364 union smb_notify *io)
2366 struct proxy_private *private = ntvfs->private_data;
2367 struct smbcli_request *c_req;
2368 int saved_timeout = private->transport->options.request_timeout;
2369 struct proxy_file *f;
2371 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
2372 return NT_STATUS_NOT_IMPLEMENTED;
2375 SETUP_PID;
2377 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
2378 if (!f) return NT_STATUS_INVALID_HANDLE;
2379 io->nttrans.in.file.fnum = f->fnum;
2381 /* this request doesn't make sense unless it's async */
2382 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2383 return NT_STATUS_INVALID_PARAMETER;
2386 /* we must not timeout on notify requests - they wait
2387 forever */
2388 private->transport->options.request_timeout = 0;
2390 c_req = smb_raw_changenotify_send(private->tree, io);
2392 private->transport->options.request_timeout = saved_timeout;
2394 ASYNC_RECV_TAIL(io, async_changenotify);
2398 * A handler for converting from rpc struct replies to ntioctl
2400 static NTSTATUS proxy_rpclite_map_async_send(
2401 struct ntvfs_module_context *ntvfs,
2402 struct ntvfs_request *req,
2403 void *io1, void *io2, NTSTATUS status)
2405 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
2406 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
2407 void* r=rpclite_send->struct_ptr;
2408 struct ndr_push* push;
2409 const struct ndr_interface_call* call=rpclite_send->call;
2410 enum ndr_err_code ndr_err;
2411 DATA_BLOB ndr;
2413 talloc_free(rpclite_send);
2415 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2416 NT_STATUS_HAVE_NO_MEMORY(push);
2418 if (0) {
2419 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2422 ndr_err = call->ndr_push(push, NDR_OUT, r);
2423 status=ndr_map_error2ntstatus(ndr_err);
2425 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2426 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2427 nt_errstr(status)));
2428 return status;
2431 ndr=ndr_push_blob(push);
2432 //if (ndr.length > io->ntioctl.in.max_data) {
2433 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
2434 io->ntioctl.in.max_data, ndr.data));
2435 io->ntioctl.out.blob=ndr;
2436 return status;
2440 * A handler for sending async rpclite Read replies that were mapped to union smb_read
2442 static NTSTATUS rpclite_proxy_Read_map_async_send(
2443 struct ntvfs_module_context *ntvfs,
2444 struct ntvfs_request *req,
2445 void *io1, void *io2, NTSTATUS status)
2447 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
2448 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
2450 /* status here is a result of proxy_read, it doesn't reflect the status
2451 of the rpc transport or related calls, just the read operation */
2452 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2453 r->out.result=status;
2455 if (! NT_STATUS_IS_OK(status)) {
2456 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
2457 r->out.nread=0;
2458 r->out.flags=0;
2459 } else {
2460 ssize_t size=io->readx.out.nread;
2461 r->out.flags=0;
2462 r->out.nread=io->readx.out.nread;
2464 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
2465 declare_checksum(digest);
2466 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
2468 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
2469 dump_data (5, digest, sizeof(digest));
2470 DEBUG(5,("Cached digest\n"));
2471 dump_data (5, r->in.digest.digest, sizeof(digest));
2473 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
2474 r->out.flags=PROXY_USE_CACHE;
2475 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
2476 (long long)r->out.nread));
2477 if (r->in.flags & PROXY_VALIDATE) {
2478 r->out.flags |= PROXY_VALIDATE;
2479 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
2480 (long long)r->out.nread, (long long) io->readx.out.nread));
2482 goto done;
2484 DEBUG(5,("Cache does not match\n"));
2487 if (r->in.flags & PROXY_VALIDATE) {
2488 /* validate failed, shrink read to mincnt - so we don't fill the link */
2489 r->out.nread=MIN(r->out.nread, r->in.mincnt);
2490 size=r->out.nread;
2491 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
2492 r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
2495 if (r->in.flags & PROXY_USE_ZLIB) {
2496 if (compress_block(io->readx.out.data, &size) ) {
2497 r->out.flags|=PROXY_USE_ZLIB;
2498 r->out.response.compress.count=size;
2499 r->out.response.compress.data=io->readx.out.data;
2500 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2501 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
2502 goto done;
2506 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
2507 r->out.response.generic.count=io->readx.out.nread;
2508 r->out.response.generic.data=io->readx.out.data;
2511 done:
2513 /* Or should we return NT_STATUS_OK ?*/
2514 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
2516 /* the rpc transport succeeded even if the operation did not */
2517 return NT_STATUS_OK;
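/* A sketch (fenced off from the build) of the cache handshake the handler
   above implements: the calling proxy sends a digest of its cached bytes,
   and we only ship data back if the digest of the freshly read data
   differs. checksum_block() and declare_checksum() are the helpers already
   used above; the reply flag mirrors the real code. */
#if 0
static bool example_cache_handshake(struct proxy_Read *r,
				    const uint8_t *fresh, ssize_t nread)
{
	declare_checksum(digest);
	checksum_block(digest, fresh, nread); /* digest of the live data */

	if (memcmp(digest, r->in.digest.digest, sizeof(digest)) == 0) {
		/* caller's cache matches: reply with flags only, no data */
		r->out.flags = PROXY_USE_CACHE;
		return true;
	}
	return false; /* cache is stale: fall back to (compressed) data */
}
#endif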
2521 * RPC implementation of Read
2523 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
2524 struct ntvfs_request *req, struct proxy_Read *r,
2525 union smb_handle file)
2527 struct proxy_private *private = ntvfs->private_data;
2528 union smb_read* io=talloc(req, union smb_read);
2529 NTSTATUS status;
2531 NT_STATUS_HAVE_NO_MEMORY(io);
2532 /* if the next hop is a proxy just repeat this call; also handle the VALIDATE
2533 check, which means having our own callback handlers too... */
2534 SETUP_PID;
2536 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
2537 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
2538 DEBUG(5,("Anticipated digest\n"));
2539 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
2541 /* If the remote end is a proxy, just fixup the file handle and pass through,
2542 but update cache on the way back
2543 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2546 /* prepare for response */
2547 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
2548 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
2550 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2551 return proxy_validate(ntvfs, req, r, &file);
2554 /* pack up an smb_read request and dispatch here */
2555 io->readx.level=RAW_READ_READX;
2556 io->readx.in.file=file;
2557 io->readx.in.mincnt=r->in.mincnt;
2558 io->readx.in.maxcnt=r->in.maxcnt;
2559 io->readx.in.offset=r->in.offset;
2560 io->readx.in.remaining=r->in.remaining;
2561 /* and something to hold the answer */
2562 io->readx.out.data=r->out.response.generic.data;
2564 /* so we get to pack the io->*.out response */
2565 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
2566 NT_STATUS_NOT_OK_RETURN(status);
2568 /* so the read will get processed normally */
2569 return proxy_read(ntvfs, req, io);
2573 * A handler for sending async rpclite Write replies
2575 static NTSTATUS rpclite_proxy_Write_map_async_send(
2576 struct ntvfs_module_context *ntvfs,
2577 struct ntvfs_request *req,
2578 void *io1, void *io2, NTSTATUS status)
2580 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
2581 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
2583 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2584 r->out.result=status;
2586 r->out.nwritten=io->writex.out.nwritten;
2587 r->out.remaining=io->writex.out.remaining;
2589 /* the rpc transport succeeded even if the operation did not */
2590 return NT_STATUS_OK;
2594 * RPC implementation of write
2596 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
2597 struct ntvfs_request *req, struct proxy_Write *r,
2598 union smb_handle file)
2600 struct proxy_private *private = ntvfs->private_data;
2601 union smb_write* io=talloc(req, union smb_write);
2602 NTSTATUS status;
2604 SETUP_PID;
2606 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
2607 r->in.count, r->in.offset, r->in.fnum));
2609 /* pack up an smb_write request and dispatch here */
2610 io->writex.level=RAW_WRITE_WRITEX;
2611 io->writex.in.file=file;
2612 io->writex.in.offset=r->in.offset;
2613 io->writex.in.wmode=r->in.mode;
2614 io->writex.in.count=r->in.count;
2616 /* and the data */
2617 if (PROXY_USE_ZLIB & r->in.flags) {
2618 ssize_t count=r->in.data.generic.count;
2619 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
2620 &count, r->in.count);
2621 if (count != r->in.count || !io->writex.in.data) {
2622 /* Didn't uncompress properly, but the RPC layer worked */
2623 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
2624 return NT_STATUS_OK;
2626 } else {
2627 io->writex.in.data=r->in.data.generic.data;
2630 /* so we get to pack the io->*.out response */
2631 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
2632 NT_STATUS_NOT_OK_RETURN(status);
2634 /* so the write will get processed normally */
2635 return proxy_write(ntvfs, req, io);
2638 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
2639 back from rpc struct to ntioctl */
2640 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
2641 struct ntvfs_request *req, union smb_ioctl *io)
2643 struct proxy_private *private = ntvfs->private_data;
2644 DATA_BLOB *request;
2645 struct ndr_syntax_id* syntax_id;
2646 uint32_t opnum;
2647 const struct ndr_interface_table *table;
2648 struct ndr_pull* pull;
2649 void* r;
2650 NTSTATUS status;
2651 struct async_rpclite_send *rpclite_send;
2652 enum ndr_err_code ndr_err;
2654 SETUP_PID;
2656 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,io->ntioctl.in.blob.length));
2657 /* unpack the NDR */
2658 request=&io->ntioctl.in.blob;
2660 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2661 NT_STATUS_HAVE_NO_MEMORY(pull);
2662 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2663 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
2665 /* the blob is 4-aligned because it was memcpy'd */
2666 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
2667 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
2669 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
2670 status=ndr_map_error2ntstatus(ndr_err);
2671 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2672 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
2673 return status;
2676 /* now find the struct ndr_interface_table * for this syntax_id */
2677 table=ndr_table_by_uuid(&syntax_id->uuid);
2678 if (! table) {
2679 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
2680 return NT_STATUS_NO_GUID_TRANSLATION;
2683 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
2684 status=ndr_map_error2ntstatus(ndr_err);
2685 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2686 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
2687 return status;
2689 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
2691 DEBUG(10,("rpc request data:\n"));
2692 dump_data(10, pull->data, pull->data_size);
2694 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
2695 table->calls[opnum].name);
2696 NT_STATUS_HAVE_NO_MEMORY(r);
2698 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
2699 status=ndr_map_error2ntstatus(ndr_err);
2700 DEBUG(5,("%s opnum %d pulled status %s\n",__FUNCTION__,opnum,get_friendly_nt_error_msg (status)));
2701 NT_STATUS_NOT_OK_RETURN(status);
2703 rpclite_send=talloc(req, struct async_rpclite_send);
2704 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
2705 rpclite_send->call=&table->calls[opnum];
2706 rpclite_send->struct_ptr=r;
2707 /* need to push conversion function to convert from r to io */
2708 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
2710 /* Magically despatch the call based on syntax_id, table and opnum.
2711 But there is no table of handlers yet... so until then: */
2712 if (0==strcasecmp(table->name,"rpcproxy")) {
2713 switch(opnum) {
2714 case(NDR_PROXY_READ):
2715 status=rpclite_proxy_Read(ntvfs, req, r, io->generic.in.file);
2716 break;
2717 case(NDR_PROXY_WRITE):
2718 status=rpclite_proxy_Write(ntvfs, req, r, io->generic.in.file);
2719 break;
2720 default:
2721 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
2722 return NT_STATUS_PROCEDURE_NOT_FOUND;
2724 } else {
2725 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
2726 GUID_string(debug_ctx(),&syntax_id->uuid)));
2727 return NT_STATUS_NO_GUID_TRANSLATION;
2730 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
2731 the handler status is in r->out.result */
2732 return ntvfs_map_async_finish(req, status);
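/* The rpclite wire format implied by the pulls above is simply

	[ ndr_syntax_id ][ uint32 opnum ][ NDR_IN payload ]

   in the ntioctl request blob, with the reply blob carrying just the
   NDR_OUT payload. One possible shape for the missing table of handlers
   mentioned above - purely hypothetical, nothing below exists yet: */
#if 0
struct rpclite_handler {
	const char *table_name;
	uint32_t opnum;
	NTSTATUS (*fn)(struct ntvfs_module_context *ntvfs,
		       struct ntvfs_request *req, void *r,
		       union smb_handle file);
};

static const struct rpclite_handler rpclite_handlers[] = {
	/* thin casting wrappers around rpclite_proxy_Read/Write would
	   slot in here in place of the strcasecmp/switch dispatch */
	{ "rpcproxy", NDR_PROXY_READ, NULL },
	{ "rpcproxy", NDR_PROXY_WRITE, NULL },
	{ NULL, 0, NULL }
};
#endif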
2735 /* unpack the ntioctl to make some rpc_struct */
2736 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2738 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2739 struct proxy_private *proxy=async->proxy;
2740 struct smbcli_request *c_req = async->c_req;
2741 void* r=io1;
2742 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
2743 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
2744 const struct ndr_interface_call *calls=info->calls;
2745 enum ndr_err_code ndr_err;
2746 DATA_BLOB *response;
2747 struct ndr_pull* pull;
2749 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
2750 DEBUG(5,("%s op %s ntioctl: %s\n",
2751 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2752 NT_STATUS_NOT_OK_RETURN(status);
2754 if (c_req) {
2755 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
2756 status = smb_raw_ioctl_recv(c_req, io, io);
2757 #define SESSION_INFO proxy->remote_server, proxy->remote_share
2758 /* This status is the ntioctl wrapper status */
2759 if (! NT_STATUS_IS_OK(status)) {
2760 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
2761 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2762 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
2763 return NT_STATUS_UNSUCCESSFUL;
2767 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
2769 response=&io->ntioctl.out.blob;
2770 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2771 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2773 NT_STATUS_HAVE_NO_MEMORY(pull);
2775 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
2776 #warning can we free pull here?
2777 status=ndr_map_error2ntstatus(ndr_err);
2779 DEBUG(5,("END %s op status %s\n",
2780 __FUNCTION__, get_friendly_nt_error_msg(status)));
2781 return status;
2785 send an ntioctl request based on an NDR encoding.
2787 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
2788 struct smbcli_tree *tree,
2789 struct ntvfs_module_context *ntvfs,
2790 uint16_t fnum,
2791 const struct ndr_interface_table *table,
2792 uint32_t opnum,
2793 void *r)
2795 struct proxy_private *private = ntvfs->private_data;
2796 struct smbcli_request * c_req;
2797 struct ndr_push *push;
2798 NTSTATUS status;
2799 DATA_BLOB request;
2800 enum ndr_err_code ndr_err;
2801 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
2804 /* set up for an ndr_push_* call; we can't free push until the message
2805 actually hits the wire */
2806 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2807 if (!push) return NULL;
2809 /* first push interface table identifiers */
2810 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
2811 status=ndr_map_error2ntstatus(ndr_err);
2813 if (! NT_STATUS_IS_OK(status)) return NULL;
2815 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
2816 status=ndr_map_error2ntstatus(ndr_err);
2817 if (! NT_STATUS_IS_OK(status)) return NULL;
2819 if (0) {
2820 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2823 /* push the structure into a blob */
2824 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
2825 status=ndr_map_error2ntstatus(ndr_err);
2826 if (!NT_STATUS_IS_OK(status)) {
2827 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2828 nt_errstr(status)));
2829 return NULL;
2832 /* retrieve the blob */
2833 request = ndr_push_blob(push);
2835 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
2836 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
2837 io->ntioctl.in.file.fnum=fnum;
2838 io->ntioctl.in.fsctl=false;
2839 io->ntioctl.in.filter=0;
2840 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
2841 io->ntioctl.in.blob=request;
2843 DEBUG(10,("smbcli_request packet:\n"));
2844 dump_data(10, request.data, request.length);
2846 c_req = smb_raw_ioctl_send(tree, io);
2848 if (! c_req) {
2849 return NULL;
2852 dump_data(10, c_req->out.data, c_req->out.data_size);
2854 { void* req=NULL;
2855 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
2856 info->io=io;
2857 info->table=table;
2858 info->opnum=opnum;
2859 info->calls=&table->calls[opnum];
2860 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
2863 return c_req;
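/* A usage sketch (fenced off from the build): issuing an rpclite call
   synchronously. 'r' would be a talloc'd proxy_Read/proxy_Write-style
   in/out struct; sync_chain_handler() below drains the chained
   ntioctl_rpc_unmap handler, so on return the out members of r are filled
   in. example_rpclite_roundtrip is hypothetical. */
#if 0
static NTSTATUS example_rpclite_roundtrip(struct ntvfs_module_context *ntvfs,
					  struct proxy_private *private,
					  uint16_t fnum, void *r)
{
	struct smbcli_request *c_req =
		smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, fnum,
						&ndr_table_rpcproxy,
						NDR_PROXY_READ, r);
	if (! c_req) return NT_STATUS_UNSUCCESSFUL;
	return sync_chain_handler(c_req);
}
#endif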
2867 client helpers, mapping between proxy RPC calls and smbcli_* calls.
2871 * If the sync_chain_handler is called directly it unplugs the async handler
2872 which (as well as preventing loops) will also avoid req->send_fn being
2873 called - which is also nice! */
2874 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
2876 struct async_info *async=NULL;
2877 /* the first callback which will actually receive the c_req response */
2878 struct async_info_map *async_map;
2879 NTSTATUS status=NT_STATUS_OK;
2880 struct async_info_map** chain;
2882 DEBUG(5,("%s\n",__FUNCTION__));
2883 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
2885 /* If there is a handler installed, it is using async_info to chain */
2886 if (c_req->async.fn) {
2887 /* not safe to talloc_free async if send_fn has been called for the request
2888 against which async was allocated, so steal it (and free below) or neither */
2889 async = talloc_get_type_abort(c_req->async.private, struct async_info);
2890 talloc_steal(NULL, async);
2891 chain=&async->chain;
2892 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2893 } else {
2894 chain=(struct async_info_map**)&c_req->async.private;
2895 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2898 /* unplug c_req->async.fn as if a callback handler calls smb_*_recv
2899 in order to receive the response, smbcli_transport_finish_recv will
2900 call us again and then call the c-req->async.fn
2901 Perhaps we should merely call smbcli_request_receive() IF
2902 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
2903 help multi-part replies... except all parts are received before
2904 callback if a handler WAS set */
2905 c_req->async.fn=NULL;
2907 /* Should we raise an error? Should we simple_recv? */
2908 while(async_map) {
2909 /* remove this one from the list before we call. We do this in case
2910 some callbacks free their async_map but also so that callbacks
2911 can navigate the async_map chain to add additional callbacks to
2912 the end - e.g. so that tag-along reads can call send_fn after
2913 the send_fn of the request they tagged along to, thus preserving
2914 the async response order - which may be a waste of time? */
2915 DLIST_REMOVE(*chain, async_map);
2917 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
2918 if (async_map->fn) {
2919 status=async_map->fn(async_map->async,
2920 async_map->parms1, async_map->parms2, status);
2922 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
2923 /* Note: the callback may have added to the chain */
2924 #warning Async_maps have a null talloc_context, it is unclear who should own them
2925 /* it can't be c_req as it stops us chaining more than one, maybe it
2926 should be req but there isn't always a req. However sync_chain_handler
2927 will always free it if called */
2928 DEBUG(6,("Will free async map %p\n",async_map));
2929 #warning put me back
2930 talloc_free(async_map);
2931 DEBUG(6,("Free'd async_map\n"));
2932 if (*chain)
2933 async_map=talloc_get_type_abort(*chain, struct async_info_map);
2934 else
2935 async_map=NULL;
2936 DEBUG(6,("Switch to async_map %p\n",async_map));
2938 /* The first callback will have read c_req, thus talloc_free'ing it,
2939 so we don't let the other callbacks get hurt playing with it */
2940 if (async_map && async_map->async)
2941 async_map->async->c_req=NULL;
2944 talloc_free(async);
2946 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2947 return status;
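/* An ordering sketch (fenced off from the build): handlers chained on one
   c_req with ADD_ASYNC_RECV_TAIL run in the order they were added, each
   seeing the NTSTATUS left by the previous one, and each free to append
   more handlers while the chain drains - exactly what the
   DLIST_REMOVE-before-call above permits. first_handler and second_handler
   are hypothetical. */
#if 0
{ void* req=NULL;
	ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, first_handler, NT_STATUS_INTERNAL_ERROR);
	ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, second_handler, NT_STATUS_INTERNAL_ERROR);
	/* sync_chain_handler(c_req) now runs first_handler then
	   second_handler, threading the status through both */
}
#endif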
2950 /* If the async handler is called, then the send_fn is called */
2951 static void async_chain_handler(struct smbcli_request *c_req)
2953 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
2954 struct ntvfs_request *req = async->req;
2955 NTSTATUS status;
2957 if (c_req->state <= SMBCLI_REQUEST_RECV) {
2958 /* Looks like the async handler has been called sync'ly */
2959 smb_panic("async_chain_handler called synchronously\n");
2962 status=sync_chain_handler(c_req);
2964 /* Should we insist that a chain'd handler does this?
2965 Which makes it hard to intercept the data by adding handlers
2966 before the send_fn handler sends it... */
2967 if (req) {
2968 req->async_states->status=status;
2969 req->async_states->send_fn(req);
2973 /* unpack the rpc struct to make some smb_write */
2974 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
2975 void* io1, void* io2, NTSTATUS status)
2977 union smb_write* io =talloc_get_type(io1, union smb_write);
2978 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
2980 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
2981 get_friendly_nt_error_msg (status)));
2982 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
2983 NT_STATUS_NOT_OK_RETURN(status);
2985 status=r->out.result;
2986 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2987 NT_STATUS_NOT_OK_RETURN(status);
2989 io->generic.out.remaining = r->out.remaining;
2990 io->generic.out.nwritten = r->out.nwritten;
2992 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
2993 get_friendly_nt_error_msg (status)));
2994 return status;
2997 /* upgrade from smb to NDR and then send.
2998 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response */
2999 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
3000 union smb_write *io,
3001 struct proxy_file *f)
3003 struct proxy_private *private = ntvfs->private_data;
3004 struct smbcli_tree *tree=private->tree;
3006 if (PROXY_REMOTE_SERVER(private)) {
3007 struct smbcli_request *c_req;
3008 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
3009 ssize_t size;
3011 if (! r) return NULL;
3013 size=io->generic.in.count;
3014 /* upgrade the write */
3015 r->in.fnum = io->generic.in.file.fnum;
3016 r->in.offset = io->generic.in.offset;
3017 r->in.count = io->generic.in.count;
3018 r->in.mode = io->generic.in.wmode;
3019 // r->in.remaining = io->generic.in.remaining;
3020 #warning remove this
3021 /* prepare to lie */
3022 r->out.nwritten=r->in.count;
3023 r->out.remaining=0;
3025 /* try to compress */
3026 #warning compress!
3027 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
3028 if (r->in.data.compress.data) {
3029 r->in.data.compress.count=size;
3030 r->in.flags = PROXY_USE_ZLIB;
3031 } else {
3032 r->in.flags = 0;
3033 /* we'll honour const, honest gov */
3034 r->in.data.generic.data=discard_const(io->generic.in.data);
3035 r->in.data.generic.count=io->generic.in.count;
3038 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
3039 ntvfs,
3040 io->generic.in.file.fnum,
3041 &ndr_table_rpcproxy,
3042 NDR_PROXY_WRITE, r);
3043 if (! c_req) return NULL;
3045 /* yeah, filthy abuse of f */
3046 { void* req=NULL;
3047 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
3050 return c_req;
3051 } else {
3052 return smb_raw_write_send(tree, io);
3056 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
3057 union smb_write *io,
3058 struct proxy_file *f)
3060 struct proxy_private *proxy = ntvfs->private_data;
3061 struct smbcli_tree *tree=proxy->tree;
3063 if (PROXY_REMOTE_SERVER(proxy)) {
3064 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3065 return sync_chain_handler(c_req);
3066 } else {
3067 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
3068 return smb_raw_write_recv(c_req, io);
3072 /* unpack the rpc struct to make some smb_read response */
3073 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
3074 void* io1, void* io2, NTSTATUS status)
3076 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
3077 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
3079 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
3080 get_friendly_nt_error_msg(status)));
3081 NT_STATUS_NOT_OK_RETURN(status);
3083 status=r->out.result;
3084 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
3085 get_friendly_nt_error_msg(status)));
3086 NT_STATUS_NOT_OK_RETURN(status);
3088 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
3089 io->generic.out.compaction_mode = 0;
3091 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
3092 /* Use the io we already setup!
3093 if out.flags & PROXY_VALIDATE, we may need to validate more in
3094 cache than r->out.nread would suggest, see io->generic.out.nread */
3095 if (r->out.flags & PROXY_VALIDATE)
3096 io->generic.out.nread=io->generic.in.maxcnt;
3097 DEBUG(5,("Using cached data: size=%lld\n",
3098 (long long) io->generic.out.nread));
3099 return status;
3102 if (r->in.flags & PROXY_VALIDATE) {
3103 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
3104 /* turn off validate on this file */
3105 //cache_handle_novalidate(f);
3106 #warning turn off validate on this file - do an nread<maxcnt later
3109 if (r->in.flags & PROXY_USE_CACHE) {
3110 DEBUG(5,("Cached data did not match\n"));
3113 io->generic.out.nread = r->out.nread;
3115 /* we may need to uncompress */
3116 if (r->out.flags & PROXY_USE_ZLIB) {
3117 ssize_t size=r->out.response.compress.count;
3118 if (! uncompress_block_to(io->generic.out.data,
3119 r->out.response.compress.data, &size,
3120 io->generic.in.maxcnt) ||
3121 size != r->out.nread) {
3122 io->generic.out.nread=size;
3123 status=NT_STATUS_INVALID_USER_BUFFER;
3125 } else if (io->generic.out.data != r->out.response.generic.data) {
3126 //Assert(r->out.nread == r->out.generic.out.count);
3127 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
3130 return status;
3133 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
3134 data has been pre-read into io->generic.out.data and can be used for
3135 proxy<->proxy optimized reads */
3136 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
3137 union smb_read *io,
3138 struct proxy_file *f,
3139 struct proxy_Read *r)
3141 struct proxy_private *private = ntvfs->private_data;
3142 #warning we are using out.nread as an out-of-band parameter
3143 if (PROXY_REMOTE_SERVER(private)) {
3145 struct smbcli_request *c_req;
3146 if (! r) {
3147 r=talloc_zero(io, struct proxy_Read);
3150 if (! r) return NULL;
3152 r->in.fnum = io->generic.in.file.fnum;
3153 r->in.read_for_execute=io->generic.in.read_for_execute;
3154 r->in.offset = io->generic.in.offset;
3155 r->in.mincnt = io->generic.in.mincnt;
3156 r->in.maxcnt = io->generic.in.maxcnt;
3157 r->in.remaining = io->generic.in.remaining;
3158 r->in.flags |= PROXY_USE_ZLIB;
3159 if (! (r->in.flags & PROXY_VALIDATE) &&
3160 io->generic.out.data && io->generic.out.nread > 0) {
3161 /* maybe we should limit digest size to MIN(nread, maxcnt) to
3162 permit the caller to provide a larger nread as part of
3163 a split read */
3164 checksum_block(r->in.digest.digest, io->generic.out.data,
3165 io->generic.out.nread);
3167 if (io->generic.out.nread > r->in.maxcnt) {
3168 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
3169 } else {
3170 r->in.mincnt = io->generic.out.nread;
3171 r->in.maxcnt = io->generic.out.nread;
3172 r->in.flags |= PROXY_USE_CACHE;
3173 /* PROXY_VALIDATE will have been set by caller */
3177 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
3178 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
3179 dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
3182 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
3183 ntvfs,
3184 io->generic.in.file.fnum,
3185 &ndr_table_rpcproxy,
3186 NDR_PROXY_READ, r);
3187 if (! c_req) return NULL;
3189 { void* req=NULL;
3190 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
3193 return c_req;
3194 } else {
3195 return smb_raw_read_send(private->tree, io);
3199 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
3200 union smb_read *io,
3201 struct proxy_file *f)
3203 struct proxy_private *proxy = ntvfs->private_data;
3204 struct smbcli_tree *tree=proxy->tree;
3206 if (PROXY_REMOTE_SERVER(proxy)) {
3207 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
3208 return sync_chain_handler(c_req);
3209 } else {
3210 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
3211 return smb_raw_read_recv(c_req, io);
3217 initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
3219 NTSTATUS ntvfs_proxy_init(void)
3221 NTSTATUS ret;
3222 struct ntvfs_ops ops;
3223 NTVFS_CURRENT_CRITICAL_SIZES(vers);
3225 ZERO_STRUCT(ops);
3227 /* fill in the name and type */
3228 ops.name = "proxy";
3229 ops.type = NTVFS_DISK;
3231 /* fill in all the operations */
3232 ops.connect = proxy_connect;
3233 ops.disconnect = proxy_disconnect;
3234 ops.unlink = proxy_unlink;
3235 ops.chkpath = proxy_chkpath;
3236 ops.qpathinfo = proxy_qpathinfo;
3237 ops.setpathinfo = proxy_setpathinfo;
3238 ops.open = proxy_open;
3239 ops.mkdir = proxy_mkdir;
3240 ops.rmdir = proxy_rmdir;
3241 ops.rename = proxy_rename;
3242 ops.copy = proxy_copy;
3243 ops.ioctl = proxy_ioctl;
3244 ops.read = proxy_read;
3245 ops.write = proxy_write;
3246 ops.seek = proxy_seek;
3247 ops.flush = proxy_flush;
3248 ops.close = proxy_close;
3249 ops.exit = proxy_exit;
3250 ops.lock = proxy_lock;
3251 ops.setfileinfo = proxy_setfileinfo;
3252 ops.qfileinfo = proxy_qfileinfo;
3253 ops.fsinfo = proxy_fsinfo;
3254 ops.lpq = proxy_lpq;
3255 ops.search_first = proxy_search_first;
3256 ops.search_next = proxy_search_next;
3257 ops.search_close = proxy_search_close;
3258 ops.trans = proxy_trans;
3259 ops.logoff = proxy_logoff;
3260 ops.async_setup = proxy_async_setup;
3261 ops.cancel = proxy_cancel;
3262 ops.notify = proxy_notify;
3263 ops.trans2 = proxy_trans2;
3265 /* register ourselves with the NTVFS subsystem. We register
3266 under the name 'proxy'. */
3267 ret = ntvfs_register(&ops, &vers);
3269 if (!NT_STATUS_IS_OK(ret)) {
3270 DEBUG(0,("Failed to register PROXY backend!\n"));
3273 return ret;
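/* With the module registered under the name "proxy", a share selects this
   backend from smb.conf with "ntvfs handler = proxy". The share-level
   option names for the remote end below are assumptions for illustration;
   the authoritative names are whatever proxy_connect reads.

   [cached]
	ntvfs handler = proxy
	;proxy:server = fileserver.example.com   (hypothetical)
	;proxy:share  = data                     (hypothetical)
*/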