Stop using specific file handle for rpclite ntioctl transport
[Samba/vfs_proxy.git] / source4 / ntvfs / proxy / vfs_proxy.c
blob b6095c5ecb466ef6b392105114f8a6d254090102
/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.
*/

#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx

#define PROXY_NTIOCTL_MAXDATA 0x20000
#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"
struct proxy_file {
	struct proxy_file *prev, *next;
	uint16_t fnum;
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	int readahead_pending;
};
/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead; /* default read-ahead window size */
	int cache_readaheadblock; /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	int readahead_spare; /* amount of pending non-user generated requests */
	bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
	uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
};
struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev;
	struct proxy_private *proxy;
	struct ntvfs_request *req;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	struct async_info_map *chain;
	void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
	struct async_info_map *next, *prev;
	NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
	void *parms1;
	void *parms2;
	struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};
#define SETUP_PID private->tree->session->pid = req->smbpid

#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
	if ((h = ntvfs_find_handle(private->ntvfs, req, r->in.fnum)) && \
	    (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
		r->in.fnum = f->fnum; \
	} else { \
		r->out.result = NT_STATUS_INVALID_HANDLE; \
		return NT_STATUS_OK; \
	} \
} while (0)

#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)
/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)

#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in KB; scaled by 1024 at connect time, so 256KB */
#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true

/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	 && (strcmp("A:",private->tree->device)==0))
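/* For illustration (a hedged sketch, not normative): code paths that depend
   on proxy-specific extensions branch on this test; e.g. the open paths below
   only trust the cache without revalidation when the peer is itself a proxy:

	if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
*/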
/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
	struct smbcli_tree *tree,
	struct ntvfs_module_context *ntvfs,
	const struct ndr_interface_table *table,
	uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
						union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
			     union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);
/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
	struct proxy_private *private = p_private;
	NTSTATUS status;
	struct ntvfs_handle *h = NULL;
	struct proxy_file *f;

	for (f=private->files; f; f=f->next) {
		if (f->fnum != fnum) continue;
		h = f->h;
		break;
	}

	if (!h) {
		DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
		return true;
	}

	/* If we don't have an oplock, then we can't rely on the cache */
	cache_handle_stale(f);

	DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
	status = ntvfs_send_oplock_break(private->ntvfs, h, level);
	if (!NT_STATUS_IS_OK(status)) return false;
	return true;
}
/*
  get file handle from the client's fnum (from ntvfs/ipc/vfs_ipc.c at metze's suggestion)
*/
static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
					      struct ntvfs_request *req,
					      uint16_t fnum)
{
	DATA_BLOB key;
	uint16_t _fnum;

	/*
	 * the fnum is already in host byteorder
	 * but ntvfs_handle_search_by_wire_key() expects
	 * network byteorder
	 */
	SSVAL(&_fnum, 0, fnum);
	key = data_blob_const(&_fnum, 2);

	return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
}
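/* Worked example (illustration only): a client fnum of 0x1234 sits in host
   byteorder in a uint16_t, but the wire key the front end stored is the
   little-endian byte pair { 0x34, 0x12 }; SSVAL() performs that conversion
   regardless of host endianness, which a plain memcpy would not. */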
/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, const char *sharename)
{
	NTSTATUS status;
	struct proxy_private *private;
	const char *host, *user, *pass, *domain, *remote_share;
	struct smb_composite_connect io;
	struct composite_context *creq;
	struct share_config *scfg = ntvfs->ctx->config;
	int nttrans_fnum;

	struct cli_credentials *credentials;
	bool machine_account;

	/* Here we need to determine which server to connect to.
	 * For now we use parametric options, type proxy.
	 * Later we will use security=server and auth_server.c.
	 */
	host = share_string_option(scfg, PROXY_SERVER, NULL);
	user = share_string_option(scfg, PROXY_USER, NULL);
	pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
	domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
	remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
	if (!remote_share) {
		remote_share = sharename;
	}

	machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);
	private = talloc_zero(ntvfs, struct proxy_private);
	if (!private) {
		return NT_STATUS_NO_MEMORY;
	}

	ntvfs->private_data = private;

	if (!host) {
		DEBUG(1,("PROXY backend: You must supply server\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (user && pass) {
		DEBUG(5, ("PROXY backend: Using specified password\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		DEBUG(5, ("PROXY backend: Using machine account\n"));
		credentials = cli_credentials_init(private);
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (req->session_info->credentials) {
		DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
		credentials = req->session_info->credentials;
	} else {
		DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}
	/* connect to the server, using the smbd event context */
	io.in.dest_host = host;
	io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
	io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
	io.in.called_name = host;
	io.in.credentials = credentials;
	io.in.fallback_to_anonymous = false;
	io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
	io.in.service = remote_share;
	io.in.service_type = "?????";
	io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
	io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
	lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
	lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

	creq = smb_composite_connect_send(&io, private,
					  lp_resolve_context(ntvfs->ctx->lp_ctx),
					  ntvfs->ctx->event_ctx);
	status = smb_composite_connect_recv(creq, private);
	NT_STATUS_NOT_OK_RETURN(status);

	private->tree = io.out.tree;

	private->transport = private->tree->session->transport;
	SETUP_PID;
	private->ntvfs = ntvfs;

	ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
	ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

	/* we need to receive oplock break requests from the server */
	smbcli_oplock_handler(private->transport, oplock_handler, private);
	private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

	private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

	private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

	if (strcmp("A:",private->tree->device)==0) {
		private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
		private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
		private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
								 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
		private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
		private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
		private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
		DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
			 remote_share, private->tree->device, private->tree->fs_type,
			 (private->cache_enabled)?"enabled":"disabled",
			 private->cache_readahead));
	} else {
		private->cache_enabled = false;
		DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
			 remote_share, private->tree->device, private->tree->fs_type));
	}

	private->remote_server = strlower_talloc(private, host);
	private->remote_share = strlower_talloc(private, remote_share);
	/* some proxy operations will not be performed on files, so open a handle
	   now that we can use for such things. We won't bother to close it on
	   shutdown, as the remote server ought to be able to close it for us
	   and we might be shutting down because the remote server went away and
	   so we don't want to delay further */
	nttrans_fnum = smbcli_nt_create_full(private->tree, "\\",
					     NTCREATEX_FLAGS_OPEN_DIRECTORY,
					     SEC_FILE_READ_DATA,
					     FILE_ATTRIBUTE_NORMAL,
					     NTCREATEX_SHARE_ACCESS_MASK,
					     NTCREATEX_DISP_OPEN,
					     NTCREATEX_OPTIONS_DIRECTORY,
					     NTCREATEX_IMPERSONATION_IMPERSONATION);
	if (nttrans_fnum < 0) {
		DEBUG(5,("Could not open handle for ntioctl %d\n", nttrans_fnum));
		return NT_STATUS_UNSUCCESSFUL;
	}
	private->nttrans_fnum = nttrans_fnum;
	DEBUG(5,("Got nttrans handle %d\n", private->nttrans_fnum));

	return NT_STATUS_OK;
}
/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct async_info *a, *an;

	/* first cleanup pending requests */
	for (a=private->pending; a; a = an) {
		an = a->next;
		smbcli_request_destroy(a->c_req);
		talloc_free(a);
	}

	talloc_free(private);
	ntvfs->private_data = NULL;

	return NT_STATUS_OK;
}
/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
	DLIST_REMOVE(async->proxy->pending, async);
	return 0;
}
/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smbcli_request_simple_recv(c_req);
	talloc_free(async);
	req->async_states->send_fn(req);
}
/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)
/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return (error); \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		async->chain = achain; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
} while (0)
#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
	if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return NT_STATUS_NO_MEMORY; \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
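/* The resulting pattern for a simple backend call (a sketch of the shape used
   by proxy_unlink and friends below; illustration only):

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}
	c_req = smb_raw_unlink_send(private->tree, unl);
	SIMPLE_ASYNC_TAIL;
*/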
/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   and if async->c_req is NULL then an earlier handler in the chain has already
   received the request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record:
     static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed,
   and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *)
   chained async_info_map will be in c_req->async.private */
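/* For example (a sketch based on how read_ahead below queues its chain;
   illustration only):

	c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
	ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
	ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
	ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

   Each chained fn is called with (async, parms1, parms2, status) and must only
   receive the wire response itself if async->c_req is still non-NULL. */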
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)
/*
 DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
	creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL, \
	io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
	io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
	file, file?"file":"null", file?"file":"null", #async_fn));
*/
#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
	if (! creq) return (error); \
	{ \
		struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
		if (! async_map) return (error); \
		async_map->async=talloc(async_map, struct async_info); \
		if (! async_map->async) return (error); \
		async_map->parms1=io1; \
		async_map->parms2=io2; \
		async_map->fn=async_fn; \
		async_map->async->parms = io1; \
		async_map->async->req = req; \
		async_map->async->f = file; \
		async_map->async->proxy = private; \
		async_map->async->c_req = creq; \
		/* If async_chain_handler is installed, get the list from param */ \
		if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
			struct async_info *i=creq->async.private; \
			DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
		} else if (creq->async.fn) { \
			/* incompatible handler installed */ \
			return (error); \
		} else { \
			DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
		} \
	} \
} while(0)
/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	(io->generic.level == RAW_OPEN_NTCREATEX && \
	 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) \
	?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window)) \
	:(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
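/* Typical call site (mirrors async_open/proxy_open below; illustration only):

	bool oplock = (io->generic.out.oplock_level != OPLOCK_NONE) || private->fake_oplock;
	f->cache = cache_open(private->cache, f, io, oplock, private->cache_readahead);

   i.e. an NTCREATEX open-by-file-id carries the 64-bit file id in the fname
   slot, so it is routed to cache_fileid_open; everything else is keyed by
   filename. */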
/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}

	c_req = smb_raw_unlink_send(private->tree, unl);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
	    && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
		return proxy_rpclite(ntvfs, req, io);
	}

	SETUP_PID_AND_FILE;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_ioctl(private->tree, req, io);
	}

	c_req = smb_raw_ioctl_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_ioctl);
}
/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_chkpath *cp)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_chkpath(private->tree, cp);
	}

	c_req = smb_raw_chkpath_send(private->tree, cp);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async qpathinfo replies
*/
static void async_qpathinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  return info on a pathname
*/
static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *info)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_pathinfo(private->tree, req, info);
	}

	c_req = smb_raw_pathinfo_send(private->tree, info);

	ASYNC_RECV_TAIL(info, async_qpathinfo);
}
/*
  a handler for async qfileinfo replies
*/
static void async_qfileinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  query info on an open file
*/
static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID_AND_FILE;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_fileinfo(private->tree, req, io);
	}

	c_req = smb_raw_fileinfo_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_qfileinfo);
}
/*
  set info on a pathname
*/
static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req, union smb_setfileinfo *st)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_setpathinfo(private->tree, st);
	}

	c_req = smb_raw_setpathinfo_send(private->tree, st);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async open replies
*/
static void async_open(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct proxy_private *proxy = async->proxy;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	union smb_open *io = async->parms;
	union smb_handle *file;

	talloc_free(async);
	req->async_states->status = smb_raw_open_recv(c_req, req, io);
	SMB_OPEN_OUT_FILE(io, file);
	f->fnum = file->fnum;
	file->ntvfs = NULL;
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	file->ntvfs = f->h;
	DLIST_ADD(proxy->files, f);

	if (proxy->cache_enabled) {
		bool oplock = (io->generic.out.oplock_level != OPLOCK_NONE) || proxy->fake_oplock;
		f->cache = cache_open(proxy->cache, f, io, oplock, proxy->cache_readahead);
		if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
	}

failed:
	req->async_states->send_fn(req);
}
/*
  open a file
*/
static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, union smb_open *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	struct ntvfs_handle *h;
	struct proxy_file *f;
	NTSTATUS status;

	SETUP_PID;

	if (io->generic.level != RAW_OPEN_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_open(ntvfs, req, io);
	}

	status = ntvfs_handle_new(ntvfs, req, &h);
	NT_STATUS_NOT_OK_RETURN(status);

	f = talloc_zero(h, struct proxy_file);
	NT_STATUS_HAVE_NO_MEMORY(f);
	f->h = h;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		union smb_handle *file;

		status = smb_raw_open(private->tree, req, io);
		NT_STATUS_NOT_OK_RETURN(status);

		SMB_OPEN_OUT_FILE(io, file);
		f->fnum = file->fnum;
		file->ntvfs = NULL;
		status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
		NT_STATUS_NOT_OK_RETURN(status);
		file->ntvfs = f->h;
		DLIST_ADD(private->files, f);

		if (private->cache_enabled) {
			bool oplock = (io->generic.out.oplock_level != OPLOCK_NONE) || private->fake_oplock;

			f->cache = cache_open(private->cache, f, io, oplock, private->cache_readahead);
			if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
		}

		return NT_STATUS_OK;
	}

	c_req = smb_raw_open_send(private->tree, io);

	ASYNC_RECV_TAIL_F(io, async_open, f);
}
/*
  create a directory
*/
static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_mkdir *md)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_mkdir(private->tree, md);
	}

	c_req = smb_raw_mkdir_send(private->tree, md);

	SIMPLE_ASYNC_TAIL;
}
/*
  remove a directory
*/
static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, struct smb_rmdir *rd)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rmdir(private->tree, rd);
	}
	c_req = smb_raw_rmdir_send(private->tree, rd);

	SIMPLE_ASYNC_TAIL;
}
/*
  rename a set of files
*/
static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_rename *ren)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rename(private->tree, ren);
	}

	c_req = smb_raw_rename_send(private->tree, ren);

	SIMPLE_ASYNC_TAIL;
}
/*
  copy a set of files
*/
static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, struct smb_copy *cp)
{
	return NT_STATUS_NOT_SUPPORTED;
}

/* we only define this separately so we can easily spot read calls in
   pending based on ( c_req->async.fn == async_read_handler ) */
static void async_read_handler(struct smbcli_request *c_req)
{
	async_chain_handler(c_req);
}
NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct proxy_private *private = async->proxy;
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	f->readahead_pending--;
	private->readahead_spare++;

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	return status;
}
/*
  a handler for async read replies - speculative read-aheads.
  It merely saves in the cache. The async chain handler will call send_fn if
  there is one, or if sync_chain_handler is used the send_fn is called by
  the ntvfs back end.
*/
NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	NT_STATUS_NOT_OK_RETURN(status);

	/* if it was a validate read we don't need to save anything unless it failed.
	   Until we use proxy_Read structs we can't tell, so guess */
	if (io->generic.out.nread == io->generic.in.maxcnt &&
	    io->generic.in.mincnt < io->generic.in.maxcnt) {
		/* looks like a validate read, just move the validate pointer, the
		   original read-request has already been satisfied from cache */
		DEBUG(3,("%s megavalidate succeeded, validate to %lld\n",__FUNCTION__,
			 io->generic.in.offset + io->generic.out.nread));
		cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
	} else {
		DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
		cache_handle_save(f, io->generic.out.data,
				  io->generic.out.nread,
				  io->generic.in.offset);
	}

	DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}
/* handler for fragmented reads */
NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	union smb_read *io = talloc_get_type_abort(io1, union smb_read);
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag; /* async->parms; */
	struct async_read_fragments* fragments=fragment->fragments;

	/* if request is not already received by a chained handler, read it */
#warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);

	DEBUG(3,("\n\n%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	fragment->status = status;

	/* remove fragment from fragments */
	DLIST_REMOVE(fragments->fragments, fragment);
#warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
	/* in which case we will want to collate all responses and return a valid read
	   for the leading NT_STATUS_OK fragments */
	/* did this one fail, inducing a general fragments failure? */
	if (!NT_STATUS_IS_OK(fragment->status)) {
		/* preserve the status of the fragment with the smallest offset
		   when we can work out how */
		if (NT_STATUS_IS_OK(fragments->status)) {
			fragments->status=fragment->status;
		}

		cache_handle_novalidate(f);
		DEBUG(5,("** Devalidated proxy due to read failure\n"));
	} else {
		/* No fragments have yet failed, keep collecting responses */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
		/* used to use mincnt */
		off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
		off_t end_offset=MIN(io_extent, extent);
		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (start_offset >= io_extent) {
			DEBUG(3,("useless read-ahead tagged on to: %s",__location__));
		} else {
			uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
		}

		/* There should be a better way to detect, but it needs the proxy rpc struct
		   not the smb_read struct */
		if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
			DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
				 (long long) io_frag->generic.out.nread,
				 (long long) io_frag->generic.in.mincnt,
				 (long long) io_frag->generic.in.maxcnt));
			cache_handle_novalidate(f);
		}
		/* We broke up the original read. If not enough of this sub-read has
		   been read, and then some of the next block, it could leave holes!
		   We will only acknowledge up to the first partial read, and treat
		   it as a small read. If the server can return NT_STATUS_OK for a
		   partial read so can we, so we preserve the response.
		   "enough" is all of it (maxcnt), except on the last block, when it has to
		   be enough to fill io->generic.in.mincnt. We know it is the last block
		   if nread is small but we could fill io->generic.in.mincnt */
		if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
		    end_offset < io->generic.in.offset + io->generic.in.mincnt) {
			DEBUG(4,("Fragmented read only partially successful\n"));

			/* Shrink the master nread (or grow to this size if we are the first partial) */
			if (! fragments->partial ||
			    (io->generic.in.offset + io->generic.out.nread) > extent) {
				io->generic.out.nread = extent - io->generic.in.offset;
			}

			/* stop any further successes from extending the partial read */
			fragments->partial=true;
		} else {
			/* only grow the master nwritten if we haven't logged a partial write */
			if (! fragments->partial &&
			    (io->generic.in.offset + io->generic.out.nread) < extent ) {
				io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
			}
		}
	}
	/* Was it the last fragment, or do we know enough to send a response? */
	if (! fragments->fragments) {
		DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
			 io->generic.out.nread, io->generic.in.mincnt,
			 get_friendly_nt_error_msg(fragments->status)));
		if (fragments->async) {
			req->async_states->status=fragments->status;
			DEBUG(5,("Fragments async response sending\n"));
#warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
			/* esp. as they may be attached to by other reads. Maybe attachees should be taking a reference,
			   but how will they know the top level they need to take a reference to?.. */
#warning should really queue a sender here, not call it
			req->async_states->send_fn(req);
			DEBUG(5,("Async response sent\n"));
		} else {
			DEBUG(5,("Fragments SYNC return\n"));
		}
	}

	/* because a c_req may be shared by many req, chained handlers must return
	   a status pertaining to the general validity of this specific c_req, not
	   to their own private processing of the c_req for the benefit of their req,
	   which is returned in fragments->status
	*/
	return status;
}
/* Issue read-ahead X bytes where X is the window size calculation based on
   server_latency * server_session_bandwidth,
   where latency is the idle (link) latency and bandwidth is less than or
   equal to the actual bandwidth available to the server.
   Read-ahead should honour locked areas in whatever way is necessary (who knows?).
   read_ahead is defined here and not in the cache engine because it requires too
   much knowledge of private structures
*/
/* The concept is buggy unless we can tell the next proxy that these are
   read-aheads, otherwise chained proxy setups will each read-ahead of the
   read-ahead which can put a larger load on the final server.
   Also we probably need to distinguish between
   * cache-less read-ahead
   * cache-revalidating read-ahead
*/
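/* Rough arithmetic for the window (illustrative numbers only, not measured):
   with ~20ms idle link latency and ~10MB/s of usable server bandwidth the
   link holds about 0.020s * 10485760 bytes/s ~= 200KB in flight, so a
   read-ahead window much smaller than that leaves the link idle between
   replies. */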
NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
		    union smb_read *io, ssize_t as_read)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_tree *tree = private->tree;
	struct cache_file_entry *cache;
	off_t next_position; /* this read offset+length+window */
	off_t end_position; /* position we read-ahead to */
	off_t cache_populated;
	off_t read_position, new_extent;

	if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("A\n"));
	if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("B\n"));
	cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
	DEBUG(5,("C\n"));
	/* don't read-ahead if we are in bulk validate mode */
	if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("D\n"));
	/* if we can't trust what we read-ahead anyway then don't bother although
	 * if delta-reads are enabled we can do so in order to get something to
	 * delta against */
	DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
				 (long long int)(cache_len(cache)),
				 (long long int)(cache->readahead_extent),
				 (long long int)(as_read),
				 cache->readahead_window,private->cache_readahead));
	if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
		DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
					 cache->status));
		return NT_STATUS_UNSUCCESSFUL;
	}
	/* as_read is the mincnt bytes of a request being made or the
	   out.nread of completed sync requests.
	   Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
	   then this may often NOT be the case if readahead_window < requestsize; so we will
	   get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
	   all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
	   this and have failed sparse writes adjust the cache->readahead_extent back to actual size */

	/* predict the file pointer's next position */
	next_position=io->generic.in.offset + as_read;
	DEBUG(5,("Next position: %lld (%lld + %lld)\n",
		 (long long int)next_position,
		 (long long int)io->generic.in.offset,
		 (long long int)as_read));
	/* calculate the limit of the validated or requested cache */
	cache_populated=MAX(cache->validated_extent, cache->readahead_extent);

	/* will the new read take us beyond the current extent without gaps? */
	if (cache_populated < io->generic.in.offset) {
		/* this read-ahead is a read-behind-pointer */
		new_extent=cache_populated;
	} else {
		new_extent=MAX(next_position, cache_populated);
	}

	/* as far as we can tell new_extent is the smallest offset that doesn't
	   have a pending read request on it. Of course if we got a short read then
	   we will have a cache-gap which we can't handle and need to read from
	   a shrunk readahead_extent, which we don't currently handle */
	read_position=new_extent;

	/* of course if we know how big the remote file is we should limit at that */
	/* we should also mark-out which read-ahead requests are pending so that we
	 * don't repeat them while they are in-transit. */
	/* we can't really use next_position until we can have caches with holes
	   UNLESS next_position < new_extent, because a next_position well before
	   new_extent is no reason to extend it further; we only want to extend
	   with read-aheads if we have cause to suppose the read-ahead data will
	   be wanted, i.e. the next_position is near new_extent.
	   So we can't justify reading beyond window+next_position, but if
	   next_position is leaving gaps, we use new_extent instead */
	end_position=MIN(new_extent, next_position) + cache->readahead_window;
	DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
		 (long long int)read_position,
		 (long long int)(next_position + cache->readahead_window),
		 cache->readahead_window,
		 (long long int)end_position,
		 private->readahead_spare));
	/* do we even need to read? */
	if (! (read_position < end_position)) return NT_STATUS_OK;
	/* readahead_spare is for the whole session (mid/tid?) and may need sharing
	   out over files and other tree-connects or something */
	while (read_position < end_position &&
	       private->readahead_spare > 0) {
		struct smbcli_request *c_req = NULL;
		ssize_t read_remaining = end_position - read_position;
		ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
					 MIN(read_remaining, private->cache_readaheadblock));
		void *req = NULL; /* for the ASYNC_RECV_TAIL_F_ORPHAN macro */
		uint8_t* data;
		union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);

		if (! io_copy)
			return NT_STATUS_NO_MEMORY;

#warning we are ignoring read_for_execute as far as the cache goes
		io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
		io_copy->generic.in.offset=read_position;
		io_copy->generic.in.mincnt=read_block;
		io_copy->generic.in.maxcnt=read_block;
		/* what is generic.in.remaining for? */
		io_copy->generic.in.remaining = MIN(65535,read_remaining);
		io_copy->generic.out.nread=0;

#warning someone must own io_copy, tree, maybe?
		data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
		DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
		if (! data) {
			talloc_free(io_copy);
			return NT_STATUS_NO_MEMORY;
		}
		io_copy->generic.out.data=data;
		/* are we able to pull anything from the cache to validate this read-ahead?
		   NOTE: there is no point in reading ahead merely to re-validate the
		   cache if we don't have oplocks and can't save it....
		   ... or maybe there is if we think a read will come that can be matched
		   up to this response while it is still on the wire */
#warning so we need to distinguish between pipe-line read-ahead and revalidation
		if (/*(cache->status & CACHE_READ)!=0 && */
		    cache_len(cache) >
		    (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
		    cache->validated_extent <
		    (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
			ssize_t pre_fill;

			pre_fill = cache_raw_read(cache, data,
						  io_copy->generic.in.offset,
						  io_copy->generic.in.maxcnt);
			DEBUG(5,("Data read into %p %d\n",data, pre_fill));
			if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
				io_copy->generic.out.nread=pre_fill;
				read_block=pre_fill;
			}
		}
		c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);

		if (c_req) {
			private->readahead_spare--;
			f->readahead_pending++;
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
			if (cache->readahead_extent < read_position+read_block)
				cache->readahead_extent=read_position+read_block;
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
			/* so we can decrease read-ahead counter for this session */
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
			ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

			/* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
			talloc_steal(c_req->async.private, c_req);
			talloc_steal(c_req->async.private, io_copy);
			read_position+=read_block;
		} else {
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
			talloc_free(io_copy);
			break;
		}
	}

	DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
	return NT_STATUS_OK;
}
struct proxy_validate_parts_parts {
	struct proxy_Read* r;
	struct ntvfs_request *req;
	struct proxy_file *f;
	struct async_read_fragments *fragments;
	off_t offset;
	ssize_t remaining;
	bool complete;
	declare_checksum(digest);
	struct MD5Context context;
};

NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
				     struct proxy_validate_parts_parts *parts);
/* this will be the new struct proxy_Read based read function, for now
   it just deals with non-cached based validate to a regular server */
static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
			       struct ntvfs_request *req,
			       struct proxy_Read *r,
			       struct proxy_file *f)
{
	struct proxy_private *private = ntvfs->private_data;
	struct proxy_validate_parts_parts *parts;
	struct async_read_fragments *fragments;
	NTSTATUS status;

	if (!f) return NT_STATUS_INVALID_HANDLE;

	DEBUG(5,("%s: fnum=%d\n",__FUNCTION__,f->fnum));

	parts = talloc_zero(req, struct proxy_validate_parts_parts);
	DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
	NT_STATUS_HAVE_NO_MEMORY(parts);

	fragments = talloc_zero(parts, struct async_read_fragments);
	NT_STATUS_HAVE_NO_MEMORY(fragments);

	parts->fragments=fragments;

	parts->r=r;
	parts->f=f;
	parts->req=req;
	/* processed offset */
	parts->offset=r->in.offset;
	parts->remaining=r->in.maxcnt;
	fragments->async=true;

	MD5Init(&parts->context);

	/* start a read-loop which will continue in the callback until it is
	   all done */
	status=proxy_validate_parts(ntvfs, parts);
	if (parts->complete) {
		/* Make sure we are not async */
		DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
		return proxy_validate_complete(parts);
	}

	/* Assert if status!=NT_STATUS_OK then parts->complete==true */
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
	DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
	return status;
}
NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
{
	NTSTATUS status;
	struct proxy_Read* r=parts->r;
	MD5Final(parts->digest, &parts->context);

	status = parts->fragments->status;
	r->out.result = status;
	r->out.response.generic.count=r->out.nread;

	DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg(status),
		 r->out.response.generic.count));

	DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
	dump_data(5, r->in.digest.digest, sizeof(parts->digest));
	DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
	dump_data(5, parts->digest, sizeof(parts->digest));

	if (NT_STATUS_IS_OK(status) &&
	    (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
		r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
		DEBUG(5,("======= VALIDATED FINE \n\n\n"));
	} else if (r->in.flags & PROXY_USE_ZLIB) {
		ssize_t size = r->out.response.generic.count;
		DEBUG(5,("======= VALIDATED WRONG \n\n\n"));
		if (compress_block(r->out.response.generic.data, &size) ) {
			r->out.flags|=PROXY_USE_ZLIB;
			r->out.response.compress.count=size;
			r->out.response.compress.data=r->out.response.generic.data;
			DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
				 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
		}
	}

	/* assert: this must only be true if we are in a callback */
	if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
		/* we are async complete, we need to call the sendfn */
		parts->req->async_states->status=status;
		DEBUG(5,("Fragments async response sending\n"));

		parts->req->async_states->send_fn(parts->req);
		return NT_STATUS_OK;
	}
	return status;
}
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
	struct proxy_Read* r=parts->r;
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag;
	struct async_read_fragments* fragments=fragment->fragments;

	DEBUG(5,("%s: parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));
	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);
	DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));

	fragment->status=status;

	if (NT_STATUS_IS_OK(status)) {
		/* TODO: If we are not sequentially "next", queue until we can do it */
		/* log this data in r->out.generic.data */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
		/* Don't want to go past mincnt */
		off_t io_extent=r->in.offset + r->in.mincnt;
		off_t end_offset=MIN(io_extent, extent);

		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (! (start_offset >= io_extent)) {
			uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
			r->out.nread=end_offset - r->in.offset;
		}

		MD5Update(&parts->context, io_frag->generic.out.data,
			  io_frag->generic.out.nread);

		parts->fragments->status=status;
		status=proxy_validate_parts(ntvfs, parts);
	} else {
		parts->fragments->status=status;
	}

	DLIST_REMOVE(fragments->fragments, fragment);
	/* this will free the io_frag too */
	talloc_free(fragment);

	if (parts->complete || NT_STATUS_IS_ERR(status)) {
		/* this will call sendfn, the chain handler won't know... but
		   should have no more handlers queued */
		return proxy_validate_complete(parts);
	}

	return NT_STATUS_OK;
}
/* continue a read loop, possibly from a callback */
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
				     struct proxy_validate_parts_parts *parts)
{
	struct proxy_private *private = ntvfs->private_data;
	union smb_read *io_frag;
	struct async_read_fragment *fragment;
	struct smbcli_request *c_req = NULL;
	ssize_t size=private->tree->session->transport->negotiate.max_xmit \
			- (MIN_SMB_SIZE+32);

	/* Have we already read enough? */
	if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
		parts->complete=true;
		return NT_STATUS_OK;
	}

	size=MIN(size, parts->remaining);

	fragment=talloc_zero(parts->fragments, struct async_read_fragment);
	NT_STATUS_HAVE_NO_MEMORY(fragment);

	io_frag = talloc_zero(fragment, union smb_read);
	NT_STATUS_HAVE_NO_MEMORY(io_frag);

	io_frag->generic.out.data = talloc_size(io_frag, size);
	NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);

	io_frag->generic.level = RAW_READ_GENERIC;
	io_frag->generic.in.file.fnum = parts->r->in.fnum;
	io_frag->generic.in.offset = parts->offset;
	io_frag->generic.in.mincnt = size;
	io_frag->generic.in.maxcnt = size;
	io_frag->generic.in.remaining = 0;
#warning maybe true is more permissive?
	io_frag->generic.in.read_for_execute = false;

	//c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
	c_req = smb_raw_read_send(private->tree, io_frag);
	NT_STATUS_HAVE_NO_MEMORY(c_req);

	parts->offset+=size;
	parts->remaining-=size;
	fragment->c_req = c_req;
	fragment->io_frag = io_frag;
	fragment->fragments=parts->fragments;
	DLIST_ADD(parts->fragments->fragments, fragment);

	{ void* req=NULL;
		ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
		ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
	}

	DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));

	return NT_STATUS_OK;
}
1524 read from a file
1526 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
1527 struct ntvfs_request *req, union smb_read *io)
1529 struct proxy_private *private = ntvfs->private_data;
1530 struct smbcli_request *c_req;
1531 struct proxy_file *f;
1532 struct async_read_fragments *fragments=NULL;
1533 /* how much of read-from-cache is certainly valid */
1534 ssize_t valid=0;
1535 off_t offset=io->generic.in.offset+valid;
1536 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
1538 SETUP_PID;
1540 if (io->generic.level != RAW_READ_GENERIC &&
1541 private->map_generic) {
1542 return ntvfs_map_read(ntvfs, req, io);
1545 SETUP_FILE_HERE(f);
1547 DEBUG(3,("%s offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
1548 io->generic.in.offset,
1549 io->generic.in.mincnt,
1550 io->generic.in.maxcnt));
1551 io->generic.out.nread=0;
1552 /* attempt to read from cache. If nread becomes non-zero then we
1553 have cache to validate. Instead of returning a "valid" value, cache_read
1554 should probably return an async_read_fragment structure */
1556 if (private->cache_enabled) {
1557 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
1559 if (NT_STATUS_IS_OK(status)) {
1560 /* if we read enough valid data, return it */
1561 if (valid > 0 && valid>=io->generic.in.mincnt) {
1562 /* valid will not be bigger than maxcnt */
1563 io->generic.out.nread=valid;
1564 DEBUG(1,("Read from cache offset=%d size=%d\n",
1565 (int)(io->generic.in.offset),
1566 (int)(io->generic.out.nread)) );
1567 return status;
1572 fragments=talloc_zero(req, struct async_read_fragments);
1573 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
1574 /* See if there are pending reads that would satisfy this request
1575 We have a validated read up to io->generic.out.nread. Anything between
1576 this and mincnt MUST be read, but we could first try and attach to
1577 any pending read-ahead on the same file.
1578 If those read-aheads fail we will re-issue a regular read from the
1579 callback handler and hope it hasn't taken too long. */
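/* e.g. a client streaming sequentially will often find one of our own
read-aheads (issued near the bottom of this function) still in flight
over the region it now wants; attaching a fragment to that pending
request turns it into free data instead of a second wire round trip. */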
1581 /* offset is the extent of the file from which we still need to find
1582 matching read-requests. */
1583 offset=io->generic.in.offset+valid;
1584 /* limit is the byte beyond the last byte for which we need a request.
1585 This used to be mincnt, but is now maxcnt to cope with validate reads.
1586 Maybe we can switch back to mincnt when proxy_read struct is used
1587 instead of smb_read.
1589 limit=io->generic.in.offset+io->generic.in.maxcnt;
1591 while (offset < limit) {
1592 /* Should look for the read-ahead with offset <= in.offset+out.nread
1593 with the longest span, but there is only likely to be one anyway so
1594 just take the first */
1595 struct async_info* pending=private->pending;
1596 union smb_read *readahead_io=NULL;
1597 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
1598 while(pending) {
1599 if (pending->c_req->async.fn == async_read_handler) {
1600 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
1601 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
1603 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
1604 readahead_io->generic.in.offset <= offset &&
1605 readahead_io->generic.in.offset +
1606 readahead_io->generic.in.mincnt > offset) break;
1608 readahead_io=NULL;
1609 pending=pending->next;
1611 /* ASSERT(readahead_io == pending->c_req->async.params) */
1612 if (pending && readahead_io) {
1613 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1614 fragment->fragments=fragments;
1615 fragment->io_frag=readahead_io;
1616 fragment->c_req = pending->c_req;
1617 /* we found one, so attach to it. We DO need a talloc_reference
1618 because the original send_fn might be called before ALL chained
1619 handlers, and our handler will call its own send_fn first. ugh.
1620 Maybe we need to separate reverse-mapping callbacks from data users? */
1621 /* Note: the read-ahead io is passed as io, and our req io is
1622 in io_frag->io */
1623 //talloc_reference(req, pending->req);
1624 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
1625 readahead_io->generic.in.offset,
1626 readahead_io->generic.in.mincnt));
1627 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
1628 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1629 DEBUG(5,("Attached OK\n"));
1630 #warning we don't want to return if we fail to attach, just break
1631 DLIST_ADD(fragments->fragments, fragment);
1632 /* updated offset for which we have reads */
1633 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
1634 } else {
1635 /* there are no pending reads to fill this so issue one up to
1636 the maximum supported read size. We could see when the next
1637 pending read is (if any) and only read up till there... later...
1638 Issue a fragment request for what is left, clone io.
1639 In the case that there were no fragments this will be the original read
1640 but with a cloned io struct */
1641 off_t next_offset;
1642 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
1643 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1644 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
1645 ssize_t offset_inc=offset-io_frag->generic.in.offset;
1646 /* 250 is a guess at ndr rpc overheads */
1647 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
1648 private->tree->session->transport->negotiate.max_xmit) \
1649 - (MIN_SMB_SIZE+32);
1650 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
1651 readsize=MIN(limit-offset, readsize);
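/* worked example, assuming max_xmit is 61440 and MIN_SMB_SIZE is 35:
readsize = MIN(0x20000, 61440) - 67 = 61373, which is already under the
0xFFFF readX ceiling, and is finally trimmed to limit-offset so the
last fragment doesn't read past the request. */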
1653 DEBUG(5,("Issuing direct read\n"));
1654 /* reduce the cached read (if any). nread is unsigned */
1655 if (io_frag->generic.out.nread > offset_inc) {
1656 io_frag->generic.out.nread-=offset_inc;
1657 /* don't make nread buffer look too big */
1658 if (io_frag->generic.out.nread > readsize)
1659 io_frag->generic.out.nread = readsize;
1660 } else {
1661 io_frag->generic.out.nread=0;
1663 /* adjust the data pointer so we read to the right place */
1664 io_frag->generic.out.data+=offset_inc;
1665 io_frag->generic.in.offset=offset;
1666 io_frag->generic.in.maxcnt=readsize;
1667 /* we wouldn't mind mincnt being smaller if this is the last frag,
1668 but we already have to handle it being bigger and not reached...
1669 The spell would be:
1670 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
1672 io_frag->generic.in.mincnt=readsize;
1673 fragment->fragments=fragments;
1674 fragment->io_frag=io_frag;
1675 #warning attach to send_fn handler
1676 /* what if someone attaches to us? Our send_fn is called from our
1677 chained handler which will be before their handler and io will
1678 already be freed. We need to keep a reference to the io and the data
1679 but we don't know where it came from in order to take a reference.
1680 We therefore need to tackle calling send_fn AFTER all other handlers */
1682 /* Calculate next offset (in advance) */
1683 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
1685 /* if we are (going to be) the last fragment and we are in VALIDATE
1686 mode, see if we can do a bulk validate now.
1687 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
1688 don't do a validate on a receive validate read
1690 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
1691 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
1692 ssize_t length=private->cache_validatesize;
1693 declare_checksum(digest);
1695 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
1696 length, (unsigned long long) offset));
1697 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
1698 /* no point in doing it if the md5'd length < current out.nread
1699 remember: out.data contains this request's cached response
1700 if validate succeeds */
1701 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
1702 /* upgrade the read, allocate the proxy_read struct here
1703 and fill in the extras, no more out-of-band stuff */
1704 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
1705 dump_data (5, digest, sizeof(digest));
1707 r=talloc_zero(io_frag, struct proxy_Read);
1708 memcpy(r->in.digest.digest, digest, sizeof(digest));
1709 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
1710 io_frag->generic.in.maxcnt = length;
1711 /* the proxy send function will calculate the checksum based on *data */
1712 } else {
1713 /* not enough in cache to make it worthwhile anymore */
1714 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x len=%lld\n",
1715 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0,
1716 (unsigned long long)length));
1717 cache_handle_novalidate(f);
1718 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
1719 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
1721 } else {
1722 if (f->cache && f->cache->status & CACHE_VALIDATE) {
1723 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
1724 (long long) next_offset,
1725 (long long) limit));
1729 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
1730 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
1731 io_frag->generic.in.maxcnt));
1732 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
1733 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
1734 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
1735 fragment->c_req=c_req;
1736 DLIST_ADD(fragments->fragments, fragment);
1737 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
1738 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1739 DEBUG(5,("Frag response chained\n"));
1740 /* normally we would only install the chain_handler if we wanted async
1741 response, but as it is the async_read_fragment handler that calls send_fn
1742 based on fragments->async, instead of async_chain_handler, we don't
1743 need to worry about this call completing async'ly while we are
1744 waiting on the other attached calls. Otherwise we would not attach
1745 the async_chain_handler (via async_read_handler) because of the wait
1746 below */
1747 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
1748 void* req=NULL;
1749 /* call async_chain_handler not the read handler so that folk can't
1750 attach to it, till we solve the problem above */
1751 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1753 offset = next_offset;
1755 DEBUG(5,("Next fragment\n"));
1758 /* do we still need a final fragment? Issue a read */
1760 DEBUG(5,("No frags left to read\n"));
1763 /* issue new round of read-aheads */
1764 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
1765 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
1766 DEBUG(5,("== Done Read aheads\n"));
1768 /* If we have fragments but we are not called async, we must sync-wait on them */
1769 /* did we map the entire request to pending reads? */
1770 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1771 struct async_read_fragment *fragment;
1772 DEBUG(5,("Sync waiting\n"));
1773 /* fragment gets freed during the chain_handler so we start at
1774 the top each time */
1775 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
1776 /* Any fragments async handled while we sync-wait on one
1777 will remove themselves from the list and not get sync waited */
1778 sync_chain_handler(fragment->c_req);
1779 /* if we have a non-ok result AND we know we have all the responses
1780 up to extent, then we could quit the loop early and change the
1781 fragments->async to true so the final irrelevant responses would
1782 come async and we could send our response now - but we don't
1783 track that detail until we have cache-maps that we can use to
1784 track the responded fragments and combine responded linear extents
1785 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
1787 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
1788 return fragments->status;
1791 DEBUG(5,("Async returning\n"));
1792 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1793 return NT_STATUS_OK;
1797 a handler to de-fragment async write replies back to one request.
1798 Can cope with out-of-order async responses by waiting for all responses
1799 on an NT_STATUS_OK case so that nwritten is properly adjusted
1801 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1803 struct smbcli_request *c_req = async->c_req;
1804 struct ntvfs_request *req = async->req;
1805 struct proxy_file *f=async->f;
1806 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
1807 /* this is the io against which the fragment is to be applied */
1808 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
1809 /* this is the io for the write that issued the callback */
1810 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
1811 struct async_write_fragments* fragments=fragment->fragments;
1812 ssize_t extent=0;
1814 /* if request is not already received by a chained handler, read it */
1815 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
1816 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
1818 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
1819 get_friendly_nt_error_msg(status)));
1821 fragment->status = status;
1823 DLIST_REMOVE(fragments->fragments, fragment);
1825 /* did this one fail? */
1826 if (! NT_STATUS_IS_OK(fragment->status)) {
1827 if (NT_STATUS_IS_OK(fragments->status)) {
1828 fragments->status=fragment->status;
1830 } else {
1831 /* No fragments have yet failed, keep collecting responses */
1832 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
1834 /* we broke up the write so it could all be written. If only some has
1835 been written of this block, and then some of the next block,
1836 it could leave unwritten holes! We will only acknowledge up to the
1837 first partial write, and let the client deal with it.
1838 If the server can return NT_STATUS_OK for a partial write so can we */
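/* e.g. three 60K fragments at offsets 0, 60K and 120K: if the middle
one writes only 30K, extent stops at 90K, the master nwritten is
clamped to 90K and fragments->partial is set so a later success at
120K cannot extend nwritten over the hole. */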
1839 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
1840 DEBUG(4,("Fragmented write only partially successful\n"));
1842 /* Shrink the master nwritten */
1843 if ( ! fragments->partial ||
1844 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
1845 io->generic.out.nwritten = extent - io->generic.in.offset;
1847 /* stop any further successes from extending the partial write */
1848 fragments->partial=true;
1849 } else {
1850 /* only grow the master nwritten if we haven't logged a partial write */
1851 if (! fragments->partial &&
1852 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
1853 io->generic.out.nwritten = extent - io->generic.in.offset;
1858 /* if this was the last fragment, clean up */
1859 if (! fragments->fragments) {
1860 DEBUG(5,("Async write re-fragmented with %d of %d\n",
1861 io->generic.out.nwritten,
1862 io->generic.in.count));
1863 if (NT_STATUS_IS_OK(fragments->status)) {
1864 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
1865 io->generic.in.offset);
1867 if (fragments->async) {
1868 req->async_states->status=fragments->status;
1869 #warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
1870 req->async_states->send_fn(req);
1871 DEBUG(5,("Async response sent\n"));
1872 } else {
1873 DEBUG(5,("Fragments SYNC return\n"));
1877 return status;
1881 a handler for async write replies
1883 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1885 struct smbcli_request *c_req = async->c_req;
1886 struct ntvfs_request *req = async->req;
1887 struct proxy_file *f=async->f;
1888 union smb_write *io=async->parms;
1890 if (c_req)
1891 status = smb_raw_write_recv(c_req, async->parms);
1893 cache_handle_save(f, io->generic.in.data,
1894 io->generic.out.nwritten,
1895 io->generic.in.offset);
1897 return status;
1901 write to a file
1903 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
1904 struct ntvfs_request *req, union smb_write *io)
1906 struct proxy_private *private = ntvfs->private_data;
1907 struct smbcli_request *c_req;
1908 struct proxy_file *f;
1910 SETUP_PID;
1912 if (io->generic.level != RAW_WRITE_GENERIC &&
1913 private->map_generic) {
1914 return ntvfs_map_write(ntvfs, req, io);
1916 SETUP_FILE_HERE(f);
1918 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
1919 #warning ERROR get rid of this
1920 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1921 NTSTATUS status;
1922 if (PROXY_REMOTE_SERVER(private)) {
1923 /* Do a proxy write */
1924 status=proxy_smb_raw_write(ntvfs, io, f);
1925 } else if (io->generic.in.count >
1926 private->tree->session->transport->negotiate.max_xmit) {
1928 /* smbcli_write can deal with large writes, which are bigger than
1929 tree->session->transport->negotiate.max_xmit */
1930 ssize_t size=smbcli_write(private->tree,
1931 io->generic.in.file.fnum,
1932 io->generic.in.wmode,
1933 io->generic.in.data,
1934 io->generic.in.offset,
1935 io->generic.in.count);
1937 if (size==io->generic.in.count || size > 0) {
1938 io->generic.out.nwritten=size;
1939 status=NT_STATUS_OK;
1940 } else {
1941 status=NT_STATUS_UNSUCCESSFUL;
1943 } else {
1944 status=smb_raw_write(private->tree, io);
1947 /* Save write in cache */
1948 if (NT_STATUS_IS_OK(status)) {
1949 cache_handle_save(f, io->generic.in.data,
1950 io->generic.out.nwritten,
1951 io->generic.in.offset);
1954 return status;
1957 /* smb_raw_write_send can't deal with large writes, which are bigger than
1958 tree->session->transport->negotiate.max_xmit so we have to break it up
1959 trying to preserve the async nature of the call as much as possible */
1960 if (PROXY_REMOTE_SERVER(private)) {
1961 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
1962 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
1963 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1964 } else if (io->generic.in.count <=
1965 private->tree->session->transport->negotiate.max_xmit) {
1966 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
1967 c_req = smb_raw_write_send(private->tree, io);
1968 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1969 } else {
1970 ssize_t remaining = io->generic.in.count;
1971 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
1972 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
1973 int done = 0;
1974 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
1976 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
1977 __FUNCTION__, io->generic.in.count,
1978 private->tree->session->transport->negotiate.max_xmit));
1980 fragments->io = io;
1981 io->generic.out.nwritten=0;
1982 io->generic.out.remaining=0;
1984 do {
1985 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
1986 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
1987 ssize_t size = MIN(block, remaining);
1989 fragment->fragments = fragments;
1990 fragment->io_frag = io_frag;
1992 io_frag->generic.level = io->generic.level;
1993 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
1994 io_frag->generic.in.wmode = io->generic.in.wmode;
1995 io_frag->generic.in.count = size;
1996 io_frag->generic.in.offset = io->generic.in.offset + done;
1997 io_frag->generic.in.data = io->generic.in.data + done;
1999 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
2000 if (! c_req) {
2001 /* let pending requests clean-up when ready */
2002 fragments->status=NT_STATUS_UNSUCCESSFUL;
2003 talloc_steal(NULL, fragments);
2004 DEBUG(3,("Can't send request fragment\n"));
2005 return NT_STATUS_UNSUCCESSFUL;
2008 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
2009 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
2010 fragment->c_req=c_req;
2011 DLIST_ADD(fragments->fragments, fragment);
2013 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
2014 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
2015 DEBUG(5,("Frag response chained\n"));
2017 remaining -= size;
2018 done += size;
2019 } while(remaining > 0);
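/* e.g. (assuming max_xmit 61440, MIN_SMB_SIZE 35) a 128KiB client
write gives block = 61373, so this loop sends fragments of 61373,
61373 and 8326 bytes; each completion lands in async_write_fragment
and the last one to complete re-assembles nwritten for the client. */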
2021 /* this strategy has the callback chain attached to each c_req, so we
2022 don't use the ASYNC_RECV_TAIL* to install a general one */
2025 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
2029 a handler for async seek replies
2031 static void async_seek(struct smbcli_request *c_req)
2033 struct async_info *async = c_req->async.private;
2034 struct ntvfs_request *req = async->req;
2035 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
2036 talloc_free(async);
2037 req->async_states->send_fn(req);
2041 seek in a file
2043 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
2044 struct ntvfs_request *req,
2045 union smb_seek *io)
2047 struct proxy_private *private = ntvfs->private_data;
2048 struct smbcli_request *c_req;
2050 SETUP_PID_AND_FILE;
2052 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2053 return smb_raw_seek(private->tree, io);
2056 c_req = smb_raw_seek_send(private->tree, io);
2058 ASYNC_RECV_TAIL(io, async_seek);
2062 flush a file
2064 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
2065 struct ntvfs_request *req,
2066 union smb_flush *io)
2068 struct proxy_private *private = ntvfs->private_data;
2069 struct smbcli_request *c_req;
2071 SETUP_PID;
2072 switch (io->generic.level) {
2073 case RAW_FLUSH_FLUSH:
2074 SETUP_FILE;
2075 break;
2076 case RAW_FLUSH_ALL:
2077 io->generic.in.file.fnum = 0xFFFF;
2078 break;
2079 case RAW_FLUSH_SMB2:
2080 return NT_STATUS_INVALID_LEVEL;
2083 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2084 return smb_raw_flush(private->tree, io);
2087 c_req = smb_raw_flush_send(private->tree, io);
2089 SIMPLE_ASYNC_TAIL;
2093 close a file
2095 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
2096 struct ntvfs_request *req, union smb_close *io)
2098 struct proxy_private *private = ntvfs->private_data;
2099 struct smbcli_request *c_req;
2100 struct proxy_file *f;
2101 union smb_close io2;
2103 SETUP_PID;
2105 if (io->generic.level != RAW_CLOSE_GENERIC &&
2106 private->map_generic) {
2107 return ntvfs_map_close(ntvfs, req, io);
2109 SETUP_FILE_HERE(f);
2110 /* Note, we aren't freeing f, or its h, here. Should we?
2111 even if file-close fails, we'll remove it from the list,
2112 what else would we do? Maybe we should not remove it until
2113 after the proxied call completes? */
2114 DLIST_REMOVE(private->files, f);
2116 /* possibly samba can't do RAW_CLOSE_SEND yet */
2117 if (! (c_req = smb_raw_close_send(private->tree, io))) {
2118 if (io->generic.level == RAW_CLOSE_GENERIC) {
2119 ZERO_STRUCT(io2);
2120 io2.close.level = RAW_CLOSE_CLOSE;
2121 io2.close.in.file = io->generic.in.file;
2122 io2.close.in.write_time = io->generic.in.write_time;
2123 io = &io2;
2125 c_req = smb_raw_close_send(private->tree, io);
2128 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2129 return smbcli_request_simple_recv(c_req);
2132 SIMPLE_ASYNC_TAIL;
2136 exit - closing files open by the pid
2138 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
2139 struct ntvfs_request *req)
2141 struct proxy_private *private = ntvfs->private_data;
2142 struct smbcli_request *c_req;
2144 SETUP_PID;
2146 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2147 return smb_raw_exit(private->tree->session);
2150 c_req = smb_raw_exit_send(private->tree->session);
2152 SIMPLE_ASYNC_TAIL;
2156 logoff - closing files open by the user
2158 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
2159 struct ntvfs_request *req)
2161 /* we can't do this right in the proxy backend .... */
2162 return NT_STATUS_OK;
2166 setup for an async call - nothing to do yet
2168 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
2169 struct ntvfs_request *req,
2170 void *private)
2172 return NT_STATUS_OK;
2176 cancel an async call
2178 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
2179 struct ntvfs_request *req)
2181 struct proxy_private *private = ntvfs->private_data;
2182 struct async_info *a;
2184 /* find the matching request */
2185 for (a=private->pending;a;a=a->next) {
2186 if (a->req == req) {
2187 break;
2191 if (a == NULL) {
2192 return NT_STATUS_INVALID_PARAMETER;
2195 return smb_raw_ntcancel(a->c_req);
2199 lock a byte range
2201 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
2202 struct ntvfs_request *req, union smb_lock *io)
2204 struct proxy_private *private = ntvfs->private_data;
2205 struct smbcli_request *c_req;
2207 SETUP_PID;
2209 if (io->generic.level != RAW_LOCK_GENERIC &&
2210 private->map_generic) {
2211 return ntvfs_map_lock(ntvfs, req, io);
2213 SETUP_FILE;
2215 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2216 return smb_raw_lock(private->tree, io);
2219 c_req = smb_raw_lock_send(private->tree, io);
2220 SIMPLE_ASYNC_TAIL;
2224 set info on a open file
2226 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
2227 struct ntvfs_request *req,
2228 union smb_setfileinfo *io)
2230 struct proxy_private *private = ntvfs->private_data;
2231 struct smbcli_request *c_req;
2233 SETUP_PID_AND_FILE;
2235 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2236 return smb_raw_setfileinfo(private->tree, io);
2238 c_req = smb_raw_setfileinfo_send(private->tree, io);
2240 SIMPLE_ASYNC_TAIL;
2245 a handler for async fsinfo replies
2247 static void async_fsinfo(struct smbcli_request *c_req)
2249 struct async_info *async = c_req->async.private;
2250 struct ntvfs_request *req = async->req;
2251 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, async->parms);
2252 talloc_free(async);
2253 req->async_states->send_fn(req);
2257 return filesystem space info
2259 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
2260 struct ntvfs_request *req, union smb_fsinfo *fs)
2262 struct proxy_private *private = ntvfs->private_data;
2263 struct smbcli_request *c_req;
2265 SETUP_PID;
2267 /* QFS Proxy */
2268 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
2269 fs->proxy_info.out.major_version=1;
2270 fs->proxy_info.out.minor_version=0;
2271 fs->proxy_info.out.capability=0;
2272 return NT_STATUS_OK;
2275 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2276 return smb_raw_fsinfo(private->tree, req, fs);
2279 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
2281 ASYNC_RECV_TAIL(fs, async_fsinfo);
2285 return print queue info
2287 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
2288 struct ntvfs_request *req, union smb_lpq *lpq)
2290 return NT_STATUS_NOT_SUPPORTED;
2294 list files in a directory matching a wildcard pattern
2296 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
2297 struct ntvfs_request *req, union smb_search_first *io,
2298 void *search_private,
2299 bool (*callback)(void *, const union smb_search_data *))
2301 struct proxy_private *private = ntvfs->private_data;
2303 SETUP_PID;
2305 return smb_raw_search_first(private->tree, req, io, search_private, callback);
2308 /* continue a search */
2309 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
2310 struct ntvfs_request *req, union smb_search_next *io,
2311 void *search_private,
2312 bool (*callback)(void *, const union smb_search_data *))
2314 struct proxy_private *private = ntvfs->private_data;
2316 SETUP_PID;
2318 return smb_raw_search_next(private->tree, req, io, search_private, callback);
2321 /* close a search */
2322 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
2323 struct ntvfs_request *req, union smb_search_close *io)
2325 struct proxy_private *private = ntvfs->private_data;
2327 SETUP_PID;
2329 return smb_raw_search_close(private->tree, io);
2333 a handler for async trans2 replies
2335 static void async_trans2(struct smbcli_request *c_req)
2337 struct async_info *async = c_req->async.private;
2338 struct ntvfs_request *req = async->req;
2339 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
2340 talloc_free(async);
2341 req->async_states->send_fn(req);
2344 /* raw trans2 */
2345 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
2346 struct ntvfs_request *req,
2347 struct smb_trans2 *trans2)
2349 struct proxy_private *private = ntvfs->private_data;
2350 struct smbcli_request *c_req;
2352 if (private->map_trans2) {
2353 return NT_STATUS_NOT_IMPLEMENTED;
2356 SETUP_PID;
2357 #warning we should be mapping file handles here
2359 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2360 return smb_raw_trans2(private->tree, req, trans2);
2363 c_req = smb_raw_trans2_send(private->tree, trans2);
2365 ASYNC_RECV_TAIL(trans2, async_trans2);
2369 /* SMBtrans - not used on file shares */
2370 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
2371 struct ntvfs_request *req,
2372 struct smb_trans2 *trans2)
2374 return NT_STATUS_ACCESS_DENIED;
2378 a handler for async change notify replies
2380 static void async_changenotify(struct smbcli_request *c_req)
2382 struct async_info *async = c_req->async.private;
2383 struct ntvfs_request *req = async->req;
2384 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
2385 talloc_free(async);
2386 req->async_states->send_fn(req);
2389 /* change notify request - always async */
2390 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
2391 struct ntvfs_request *req,
2392 union smb_notify *io)
2394 struct proxy_private *private = ntvfs->private_data;
2395 struct smbcli_request *c_req;
2396 int saved_timeout = private->transport->options.request_timeout;
2397 struct proxy_file *f;
2399 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
2400 return NT_STATUS_NOT_IMPLEMENTED;
2403 SETUP_PID;
2405 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
2406 if (!f) return NT_STATUS_INVALID_HANDLE;
2407 io->nttrans.in.file.fnum = f->fnum;
2409 /* this request doesn't make sense unless it's async */
2410 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2411 return NT_STATUS_INVALID_PARAMETER;
2414 /* we must not timeout on notify requests - they wait
2415 forever */
2416 private->transport->options.request_timeout = 0;
2418 c_req = smb_raw_changenotify_send(private->tree, io);
2420 private->transport->options.request_timeout = saved_timeout;
2422 ASYNC_RECV_TAIL(io, async_changenotify);
2426 * A handler for converting rpc struct replies back to ntioctl
2428 static NTSTATUS proxy_rpclite_map_async_send(
2429 struct ntvfs_module_context *ntvfs,
2430 struct ntvfs_request *req,
2431 void *io1, void *io2, NTSTATUS status)
2433 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
2434 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
2435 void* r=rpclite_send->struct_ptr;
2436 struct ndr_push* push;
2437 const struct ndr_interface_call* call=rpclite_send->call;
2438 enum ndr_err_code ndr_err;
2439 DATA_BLOB ndr;
2441 talloc_free(rpclite_send);
2443 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2444 NT_STATUS_HAVE_NO_MEMORY(push);
2446 if (0) {
2447 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2450 ndr_err = call->ndr_push(push, NDR_OUT, r);
2451 status=ndr_map_error2ntstatus(ndr_err);
2453 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2454 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2455 nt_errstr(status)));
2456 return status;
2459 ndr=ndr_push_blob(push);
2460 //if (ndr.length > io->ntioctl.in.max_data) {
2461 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
2462 io->ntioctl.in.max_data, ndr.data));
2463 io->ntioctl.out.blob=ndr;
2464 return status;
2468 * A handler for sending async rpclite Read replies that were mapped to union smb_read
2470 static NTSTATUS rpclite_proxy_Read_map_async_send(
2471 struct ntvfs_module_context *ntvfs,
2472 struct ntvfs_request *req,
2473 void *io1, void *io2, NTSTATUS status)
2475 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
2476 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
2478 /* status here is a result of proxy_read; it doesn't reflect the status
2479 of the rpc transport or related calls, just the read operation */
2480 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2481 r->out.result=status;
2483 if (! NT_STATUS_IS_OK(status)) {
2484 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
2485 r->out.nread=0;
2486 r->out.flags=0;
2487 } else {
2488 ssize_t size=io->readx.out.nread;
2489 r->out.flags=0;
2490 r->out.nread=io->readx.out.nread;
2492 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
2493 declare_checksum(digest);
2494 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
2496 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
2497 dump_data (5, digest, sizeof(digest));
2498 DEBUG(5,("Cached digest\n"));
2499 dump_data (5, r->in.digest.digest, sizeof(digest));
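/* sketch of the saving in the common PROXY_USE_CACHE case: when the
client's digest of its cached copy matches our digest of the data just
read, we answer with flags alone and the payload never crosses the
link. */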
2501 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
2502 r->out.flags=PROXY_USE_CACHE;
2503 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
2504 (long long)r->out.nread));
2505 if (r->in.flags & PROXY_VALIDATE) {
2506 r->out.flags |= PROXY_VALIDATE;
2507 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
2508 (long long)r->out.nread, (long long) io->readx.out.nread));
2510 goto done;
2512 DEBUG(5,("Cache does not match\n"));
2515 if (r->in.flags & PROXY_VALIDATE) {
2516 /* validate failed, shrink read to mincnt - so we don't fill the link */
2517 r->out.nread=MIN(r->out.nread, r->in.mincnt);
2518 size=r->out.nread;
2519 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
2520 r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
2523 if (r->in.flags & PROXY_USE_ZLIB) {
2524 if (compress_block(io->readx.out.data, &size) ) {
2525 r->out.flags|=PROXY_USE_ZLIB;
2526 r->out.response.compress.count=size;
2527 r->out.response.compress.data=io->readx.out.data;
2528 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2529 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
2530 goto done;
2534 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
2535 r->out.response.generic.count=io->readx.out.nread;
2536 r->out.response.generic.data=io->readx.out.data;
2539 done:
2541 /* Or should we return NT_STATUS_OK ?*/
2542 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
2544 /* the rpc transport succeeded even if the operation did not */
2545 return NT_STATUS_OK;
2549 * RPC implementation of Read
2551 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
2552 struct ntvfs_request *req, struct proxy_Read *r)
2554 struct proxy_private *private = ntvfs->private_data;
2555 union smb_read* io=talloc(req, union smb_read);
2556 NTSTATUS status;
2557 struct proxy_file *f;
2558 struct ntvfs_handle *h;
2560 NT_STATUS_HAVE_NO_MEMORY(io);
2562 /* if the next hop is a proxy just repeat this call, also handling the VALIDATE
2563 check - which means having our own callback handlers too... */
2564 SETUP_PID;
2566 RPCLITE_SETUP_FILE_HERE(f, h);
2568 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
2569 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
2570 DEBUG(5,("Anticipated digest\n"));
2571 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
2573 /* If the remote end is a proxy, just fix up the file handle and pass through,
2574 but update the cache on the way back
2575 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2578 /* prepare for response */
2579 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
2580 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
2582 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2583 return proxy_validate(ntvfs, req, r, f);
2586 /* pack up an smb_read request and dispatch here */
2587 io->readx.level=RAW_READ_READX;
2588 io->readx.in.file.ntvfs=h;
2589 io->readx.in.mincnt=r->in.mincnt;
2590 io->readx.in.maxcnt=r->in.maxcnt;
2591 io->readx.in.offset=r->in.offset;
2592 io->readx.in.remaining=r->in.remaining;
2593 /* and something to hold the answer */
2594 io->readx.out.data=r->out.response.generic.data;
2596 /* so we get to pack the io->*.out response */
2597 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
2598 NT_STATUS_NOT_OK_RETURN(status);
2600 /* so the read will get processed normally */
2601 return proxy_read(ntvfs, req, io);
2605 * A handler for sending async rpclite Write replies
2607 static NTSTATUS rpclite_proxy_Write_map_async_send(
2608 struct ntvfs_module_context *ntvfs,
2609 struct ntvfs_request *req,
2610 void *io1, void *io2, NTSTATUS status)
2612 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
2613 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
2615 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2616 r->out.result=status;
2618 r->out.nwritten=io->writex.out.nwritten;
2619 r->out.remaining=io->writex.out.remaining;
2621 /* the rpc transport succeeded even if the operation did not */
2622 return NT_STATUS_OK;
2626 * RPC implementation of write
2628 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
2629 struct ntvfs_request *req, struct proxy_Write *r)
2631 struct proxy_private *private = ntvfs->private_data;
2632 union smb_write* io=talloc(req, union smb_write);
2633 NTSTATUS status;
2634 struct proxy_file* f;
2635 struct ntvfs_handle *h;
2637 SETUP_PID;
2639 RPCLITE_SETUP_FILE_HERE(f,h);
2641 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
2642 r->in.count, r->in.offset, r->in.fnum));
2644 /* pack up an smb_write request and dispatch here */
2645 io->writex.level=RAW_WRITE_WRITEX;
2646 io->writex.in.file.ntvfs=h;
2647 io->writex.in.offset=r->in.offset;
2648 io->writex.in.wmode=r->in.mode;
2649 io->writex.in.count=r->in.count;
2651 /* and the data */
2652 if (PROXY_USE_ZLIB & r->in.flags) {
2653 ssize_t count=r->in.data.generic.count;
2654 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
2655 &count, r->in.count);
2656 if (count != r->in.count || !io->writex.in.data) {
2657 /* Didn't uncompress properly, but the RPC layer worked */
2658 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
2659 return NT_STATUS_OK;
2661 } else {
2662 io->writex.in.data=r->in.data.generic.data;
2665 /* so we get to pack the io->*.out response */
2666 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
2667 NT_STATUS_NOT_OK_RETURN(status);
2669 /* so the write will get processed normally */
2670 return proxy_write(ntvfs, req, io);
2673 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
2674 back from rpc struct to ntioctl */
2675 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
2676 struct ntvfs_request *req, union smb_ioctl *io)
2678 struct proxy_private *private = ntvfs->private_data;
2679 DATA_BLOB *request;
2680 struct ndr_syntax_id* syntax_id;
2681 uint32_t opnum;
2682 const struct ndr_interface_table *table;
2683 struct ndr_pull* pull;
2684 void* r;
2685 NTSTATUS status;
2686 struct async_rpclite_send *rpclite_send;
2687 enum ndr_err_code ndr_err;
2689 SETUP_PID;
2691 /* We don't care about io->generic.in.file, the ntvfs layer already proved it was valid,
2692 our operations will have the fnum embedded in them anyway */
2693 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,io->ntioctl.in.blob.length));
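/* the blob is expected to carry, in order: an ndr_syntax_id naming the
interface (used to look up the ndr_interface_table), a uint32 opnum
selecting the call within that table, and the call's NDR_IN-encoded
arguments; each is pulled in turn below. */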
2694 /* unpack the NDR */
2695 request=&io->ntioctl.in.blob;
2697 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2698 NT_STATUS_HAVE_NO_MEMORY(pull);
2699 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2700 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
2702 /* the blob is 4-aligned because it was memcpy'd */
2703 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
2704 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
2706 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
2707 status=ndr_map_error2ntstatus(ndr_err);
2708 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2709 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
2710 return status;
2713 /* now find the struct ndr_interface_table * for this syntax_id */
2714 table=ndr_table_by_uuid(&syntax_id->uuid);
2715 if (! table) {
2716 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
2717 return NT_STATUS_NO_GUID_TRANSLATION;
2720 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
2721 status=ndr_map_error2ntstatus(ndr_err);
2722 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2723 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
2724 return status;
2726 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
2728 DEBUG(10,("rpc request data:\n"));
2729 dump_data(10, pull->data, pull->data_size);
2731 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
2732 table->calls[opnum].name);
2733 NT_STATUS_HAVE_NO_MEMORY(r);
2735 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
2736 status=ndr_map_error2ntstatus(ndr_err);
2737 DEBUG(5,("%s opnum %d pulled status %s\n",__FUNCTION__,opnum,get_friendly_nt_error_msg (status)));
2738 NT_STATUS_NOT_OK_RETURN(status);
2740 rpclite_send=talloc(req, struct async_rpclite_send);
2741 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
2742 rpclite_send->call=&table->calls[opnum];
2743 rpclite_send->struct_ptr=r;
2744 /* need to push conversion function to convert from r to io */
2745 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
2747 /* Magically despatch the call based on syntax_id, table and opnum.
2748 But there is no table of handlers.... so until then */
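/* one possible shape for that table, should rpclite ever grow beyond
rpcproxy (hypothetical - not implemented anywhere yet):
struct rpclite_handler {
const struct ndr_interface_table *table;
uint32_t opnum;
NTSTATUS (*fn)(struct ntvfs_module_context *ntvfs,
struct ntvfs_request *req, void *r);
};
keyed on syntax_id uuid + opnum and consulted here in place of the
strcasecmp/switch below. */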
2749 if (0==strcasecmp(table->name,"rpcproxy")) {
2750 switch(opnum) {
2751 case(NDR_PROXY_READ):
2752 status=rpclite_proxy_Read(ntvfs, req, r);
2753 break;
2754 case(NDR_PROXY_WRITE):
2755 status=rpclite_proxy_Write(ntvfs, req, r);
2756 break;
2757 default:
2758 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
2759 return NT_STATUS_PROCEDURE_NOT_FOUND;
2761 } else {
2762 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
2763 GUID_string(debug_ctx(),&syntax_id->uuid)));
2764 return NT_STATUS_NO_GUID_TRANSLATION;
2767 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
2768 the handler status is in r->out.result */
2769 return ntvfs_map_async_finish(req, status);
2772 /* unpack the ntioctl to make some rpc_struct */
2773 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2775 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2776 struct proxy_private *proxy=async->proxy;
2777 struct smbcli_request *c_req = async->c_req;
2778 void* r=io1;
2779 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
2780 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
2781 const struct ndr_interface_call *calls=info->calls;
2782 enum ndr_err_code ndr_err;
2783 DATA_BLOB *response;
2784 struct ndr_pull* pull;
2786 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
2787 DEBUG(5,("%s op %s ntioctl: %s\n",
2788 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2789 NT_STATUS_NOT_OK_RETURN(status);
2791 if (c_req) {
2792 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
2793 status = smb_raw_ioctl_recv(c_req, io, io);
2794 #define SESSION_INFO proxy->remote_server, proxy->remote_share
2795 /* This status is the ntioctl wrapper status */
2796 if (! NT_STATUS_IS_OK(status)) {
2797 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
2798 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2799 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
2800 return NT_STATUS_UNSUCCESSFUL;
2804 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
2806 response=&io->ntioctl.out.blob;
2807 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2808 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2810 NT_STATUS_HAVE_NO_MEMORY(pull);
2812 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
2813 #warning can we free pull here?
2814 status=ndr_map_error2ntstatus(ndr_err);
2816 DEBUG(5,("END %s op status %s\n",
2817 __FUNCTION__, get_friendly_nt_error_msg(status)));
2818 return status;
2822 send an ntioctl request based on an NDR encoding.
2824 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
2825 struct smbcli_tree *tree,
2826 struct ntvfs_module_context *ntvfs,
2827 const struct ndr_interface_table *table,
2828 uint32_t opnum,
2829 void *r)
2831 struct proxy_private *private = ntvfs->private_data;
2832 struct smbcli_request * c_req;
2833 struct ndr_push *push;
2834 NTSTATUS status;
2835 DATA_BLOB request;
2836 enum ndr_err_code ndr_err;
2837 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
2840 /* setup for an ndr_push_* call, we can't free push until the message
2841 actually hits the wire */
2842 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2843 if (!push) return NULL;
2845 /* first push interface table identifiers */
2846 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
2847 status=ndr_map_error2ntstatus(ndr_err);
2849 if (! NT_STATUS_IS_OK(status)) return NULL;
2851 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
2852 status=ndr_map_error2ntstatus(ndr_err);
2853 if (! NT_STATUS_IS_OK(status)) return NULL;
2855 if (0) {
2856 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2859 /* push the structure into a blob */
2860 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
2861 status=ndr_map_error2ntstatus(ndr_err);
2862 if (!NT_STATUS_IS_OK(status)) {
2863 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2864 nt_errstr(status)));
2865 return NULL;
2868 /* retrieve the blob */
2869 request = ndr_push_blob(push);
2871 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
2872 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
2873 io->ntioctl.in.file.fnum=private->nttrans_fnum;
2874 io->ntioctl.in.fsctl=false;
2875 io->ntioctl.in.filter=0;
2876 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
2877 io->ntioctl.in.blob=request;
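/* so the rpclite request on the wire is simply
[ndr_syntax_id][uint32 opnum][NDR_IN-pushed call struct]
carried in an ntioctl blob with function FSCTL_UFOPROXY_RPCLITE;
proxy_rpclite at the far end pulls the same three pieces back off in
the same order. */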
2879 DEBUG(10,("smbcli_request packet:\n"));
2880 dump_data(10, request.data, request.length);
2882 c_req = smb_raw_ioctl_send(tree, io);
2884 if (! c_req) {
2885 return NULL;
2888 dump_data(10, c_req->out.data, c_req->out.data_size);
2890 { void* req=NULL;
2891 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
2892 info->io=io;
2893 info->table=table;
2894 info->opnum=opnum;
2895 info->calls=&table->calls[opnum];
2896 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
2899 return c_req;
2903 client helpers, mapping between proxy RPC calls and smbcli_* calls.
2907 * If the sync_chain_handler is called directly it unplugs the async handler
2908 which (as well as preventing loops) will also avoid req->send_fn being
2909 called - which is also nice! */
2910 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
2912 struct async_info *async=NULL;
2913 /* the first callback which will actually receive the c_req response */
2914 struct async_info_map *async_map;
2915 NTSTATUS status=NT_STATUS_OK;
2916 struct async_info_map** chain;
2918 DEBUG(5,("%s\n",__FUNCTION__));
2919 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
2921 /* If there is a handler installed, it is using async_info to chain */
2922 if (c_req->async.fn) {
2923 /* not safe to talloc_free async if send_fn has been called for the request
2924 against which async was allocated, so either steal it (and free it below) or do neither */
2925 async = talloc_get_type_abort(c_req->async.private, struct async_info);
2926 talloc_steal(NULL, async);
2927 chain=&async->chain;
2928 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2929 } else {
2930 chain=(struct async_info_map**)&c_req->async.private;
2931 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2934 /* unplug c_req->async.fn as if a callback handler calls smb_*_recv
2935 in order to receive the response, smbcli_transport_finish_recv will
2936 call us again and then call the c-req->async.fn
2937 Perhaps we should merely call smbcli_request_receive() IF
2938 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
2939 help multi-part replies... except all parts are received before
2940 the callback if a handler WAS set */
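/* in short: each c_req carries a chain of async_info_map entries added
by ADD_ASYNC_RECV_TAIL; we walk the chain head-first, the first
callback does the real smb_*_recv, and each later callback is handed
the status its predecessor returned. */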
2941 c_req->async.fn=NULL;
2943 /* Should we raise an error? Should we simple_recv? */
2944 while(async_map) {
2945 /* remove this one from the list before we call. We do this in case
2946 some callbacks free their async_map but also so that callbacks
2947 can navigate the async_map chain to add additional callbacks to
2948 the end - e.g. so that tag-along reads can call send_fn after
2949 the send_fn of the request they tagged along to, thus preserving
2950 the async response order - which may be a waste of time? */
2951 DLIST_REMOVE(*chain, async_map);
2953 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
2954 if (async_map->fn) {
2955 status=async_map->fn(async_map->async,
2956 async_map->parms1, async_map->parms2, status);
2958 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
2959 /* Note: the callback may have added to the chain */
2960 #warning Async_maps have a null talloc_context, it is unclear who should own them
2961 /* it can't be c_req as it stops us chaining more than one, maybe it
2962 should be req but there isn't always a req. However sync_chain_handler
2963 will always free it if called */
2964 DEBUG(6,("Will free async map %p\n",async_map));
2965 #warning put me back
2966 talloc_free(async_map);
2967 DEBUG(6,("Free'd async_map\n"));
2968 if (*chain)
2969 async_map=talloc_get_type_abort(*chain, struct async_info_map);
2970 else
2971 async_map=NULL;
2972 DEBUG(6,("Switch to async_map %p\n",async_map));
2974 /* The first callback will have read c_req, thus talloc_free'ing it,
2975 so we don't let the other callbacks get hurt playing with it */
2976 if (async_map && async_map->async)
2977 async_map->async->c_req=NULL;
2980 talloc_free(async);
2982 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2983 return status;
2986 /* If the async handler is called, then the send_fn is called */
2987 static void async_chain_handler(struct smbcli_request *c_req)
2989 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
2990 struct ntvfs_request *req = async->req;
2991 NTSTATUS status;
2993 if (c_req->state <= SMBCLI_REQUEST_RECV) {
2994 /* Looks like the async handler has been called sync'ly */
2995 smb_panic("async_chain_handler called sync'ly on req\n");
2998 status=sync_chain_handler(c_req);
3000 /* Should we insist that a chain'd handler does this?
3001 Which makes it hard to intercept the data by adding handlers
3002 before the send_fn handler sends it... */
3003 if (req) {
3004 req->async_states->status=status;
3005 req->async_states->send_fn(req);
3009 /* unpack the rpc struct to make some smb_write */
3010 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
3011 void* io1, void* io2, NTSTATUS status)
3013 union smb_write* io =talloc_get_type(io1, union smb_write);
3014 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
3016 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
3017 get_friendly_nt_error_msg (status)));
3018 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
3019 NT_STATUS_NOT_OK_RETURN(status);
3021 status=r->out.result;
3022 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
3023 NT_STATUS_NOT_OK_RETURN(status);
3025 io->generic.out.remaining = r->out.remaining;
3026 io->generic.out.nwritten = r->out.nwritten;
3028 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
3029 get_friendly_nt_error_msg (status)));
3030 return status;
3033 /* upgrade from smb to NDR and then send.
3034 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
3035 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
3036 union smb_write *io,
3037 struct proxy_file *f)
3039 struct proxy_private *private = ntvfs->private_data;
3040 struct smbcli_tree *tree=private->tree;
3042 if (PROXY_REMOTE_SERVER(private)) {
3043 struct smbcli_request *c_req;
3044 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
3045 ssize_t size;
3047 if (! r) return NULL;
3049 size=io->generic.in.count;
3050 /* upgrade the write */
3051 r->in.fnum = io->generic.in.file.fnum;
3052 r->in.offset = io->generic.in.offset;
3053 r->in.count = io->generic.in.count;
3054 r->in.mode = io->generic.in.wmode;
3055 // r->in.remaining = io->generic.in.remaining;
3056 #warning remove this
3057 /* prepare to lie */
3058 r->out.nwritten=r->in.count;
3059 r->out.remaining=0;
3061 /* try to compress */
3062 #warning compress!
3063 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
3064 if (r->in.data.compress.data) {
3065 r->in.data.compress.count=size;
3066 r->in.flags = PROXY_USE_ZLIB;
3067 } else {
3068 r->in.flags = 0;
3069 /* we'll honour const, honest gov */
3070 r->in.data.generic.data=discard_const(io->generic.in.data);
3071 r->in.data.generic.count=io->generic.in.count;
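/* either way the payload travels in r->in.data, and PROXY_USE_ZLIB in
r->in.flags tells the far side which arm to decode - mirroring the
uncompress_block_talloc branch in rpclite_proxy_Write. */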
3074 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
3075 ntvfs,
3076 &ndr_table_rpcproxy,
3077 NDR_PROXY_WRITE, r);
3078 if (! c_req) return NULL;
3080 /* yeah, filthy abuse of f */
3081 { void* req=NULL;
3082 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
3085 return c_req;
3086 } else {
3087 return smb_raw_write_send(tree, io);
3091 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
3092 union smb_write *io,
3093 struct proxy_file *f)
3095 struct proxy_private *proxy = ntvfs->private_data;
3096 struct smbcli_tree *tree=proxy->tree;
3098 if (PROXY_REMOTE_SERVER(proxy)) {
3099 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3100 return sync_chain_handler(c_req);
3101 } else {
3102 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
3103 return smb_raw_write_recv(c_req, io);
3107 /* unpack the rpc struct to make some smb_read response */
3108 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
3109 void* io1, void* io2, NTSTATUS status)
3111 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
3112 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
3114 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
3115 get_friendly_nt_error_msg(status)));
3116 NT_STATUS_NOT_OK_RETURN(status);
3118 status=r->out.result;
3119 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
3120 get_friendly_nt_error_msg(status)));
3121 NT_STATUS_NOT_OK_RETURN(status);
3123 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
3124 io->generic.out.compaction_mode = 0;
3126 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
3127 /* Use the io we already setup!
3128 if out.flags & PROXY_VALIDATE, we may need to validate more in
3129 cache than r->out.nread would suggest, see io->generic.out.nread */
3130 if (r->out.flags & PROXY_VALIDATE)
3131 io->generic.out.nread=io->generic.in.maxcnt;
3132 DEBUG(5,("Using cached data: size=%lld\n",
3133 (long long) io->generic.out.nread));
3134 return status;
3137 if (r->in.flags & PROXY_VALIDATE) {
3138 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
3139 /* turn off validate on this file */
3140 //cache_handle_novalidate(f);
3141 #warning turn off validate on this file - do an nread<maxcnt later
3144 if (r->in.flags & PROXY_USE_CACHE) {
3145 DEBUG(5,("Cached data did not match\n"));
3148 io->generic.out.nread = r->out.nread;
3150 /* we may need to uncompress */
3151 if (r->out.flags & PROXY_USE_ZLIB) {
3152 ssize_t size=r->out.response.compress.count;
3153 if (! uncompress_block_to(io->generic.out.data,
3154 r->out.response.compress.data, &size,
3155 io->generic.in.maxcnt) ||
3156 size != r->out.nread) {
3157 io->generic.out.nread=size;
3158 status=NT_STATUS_INVALID_USER_BUFFER;
3160 } else if (io->generic.out.data != r->out.response.generic.data) {
3161 //Assert(r->out.nread == r->out.generic.out.count);
3162 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
3165 return status;
3168 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
3169 data has been pre-read into io->generic.out.data and can be used for
3170 proxy<->proxy optimized reads */
3171 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
3172 union smb_read *io,
3173 struct proxy_file *f,
3174 struct proxy_Read *r)
3175 {
3176 struct proxy_private *private = ntvfs->private_data;
3177 #warning we are using out.nread as an out-of-band parameter
3178 if (PROXY_REMOTE_SERVER(private)) {
3180 struct smbcli_request *c_req;
3181 if (! r) {
3182 r=talloc_zero(io, struct proxy_Read);
3183 }
3185 if (! r) return NULL;
3187 r->in.fnum = io->generic.in.file.fnum;
3188 r->in.read_for_execute=io->generic.in.read_for_execute;
3189 r->in.offset = io->generic.in.offset;
3190 r->in.mincnt = io->generic.in.mincnt;
3191 r->in.maxcnt = io->generic.in.maxcnt;
3192 r->in.remaining = io->generic.in.remaining;
3193 r->in.flags |= PROXY_USE_ZLIB;
3194 if (! (r->in.flags & PROXY_VALIDATE) &&
3195 io->generic.out.data && io->generic.out.nread > 0) {
3196 /* maybe we should limit digest size to MIN(nread, maxcnt) to
3197 permit the caller to provide a larger nread as part of
3198 a split read */
3199 checksum_block(r->in.digest.digest, io->generic.out.data,
3200 io->generic.out.nread);
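/* Editor's sketch, not part of the original file: checksum_block() computes
   the digest the remote proxy compares against its own data to decide
   whether our cached copy is still valid. Its real implementation lives
   elsewhere in this tree; assuming an MD5 digest (both the algorithm and
   the 16-byte digest field are assumptions here), it could be:

   static void checksum_block(uint8_t digest[16], const void *data, size_t len)
   {
           struct MD5Context md5;
           MD5Init(&md5);
           MD5Update(&md5, (const uint8_t *)data, len);
           MD5Final(digest, &md5);     // 16-byte digest of the block
   }
*/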
3202 if (io->generic.out.nread > r->in.maxcnt) {
3203 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
3204 } else {
3205 r->in.mincnt = io->generic.out.nread;
3206 r->in.maxcnt = io->generic.out.nread;
3207 r->in.flags |= PROXY_USE_CACHE;
3208 /* PROXY_VALIDATE will have been set by caller */
3209 }
3210 }
3212 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
3213 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
3214 dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
3215 }
3217 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
3218 ntvfs,
3219 &ndr_table_rpcproxy,
3220 NDR_PROXY_READ, r);
3221 if (! c_req) return NULL;
3223 { void* req=NULL;
3224 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
3225 }
3227 return c_req;
3228 } else {
3229 return smb_raw_read_send(private->tree, io);
3230 }
3231 }
3233 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
3234 union smb_read *io,
3235 struct proxy_file *f)
3236 {
3237 struct proxy_private *proxy = ntvfs->private_data;
3238 struct smbcli_tree *tree=proxy->tree;
3240 if (PROXY_REMOTE_SERVER(proxy)) {
3241 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
3242 return sync_chain_handler(c_req);
3243 } else {
3244 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
3245 return smb_raw_read_recv(c_req, io);
3246 }
3247 }
3250 /*
3251 initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
3252 */
3253 NTSTATUS ntvfs_proxy_init(void)
3254 {
3255 NTSTATUS ret;
3256 struct ntvfs_ops ops;
3257 NTVFS_CURRENT_CRITICAL_SIZES(vers);
3259 ZERO_STRUCT(ops);
3261 /* fill in the name and type */
3262 ops.name = "proxy";
3263 ops.type = NTVFS_DISK;
3265 /* fill in all the operations */
3266 ops.connect = proxy_connect;
3267 ops.disconnect = proxy_disconnect;
3268 ops.unlink = proxy_unlink;
3269 ops.chkpath = proxy_chkpath;
3270 ops.qpathinfo = proxy_qpathinfo;
3271 ops.setpathinfo = proxy_setpathinfo;
3272 ops.open = proxy_open;
3273 ops.mkdir = proxy_mkdir;
3274 ops.rmdir = proxy_rmdir;
3275 ops.rename = proxy_rename;
3276 ops.copy = proxy_copy;
3277 ops.ioctl = proxy_ioctl;
3278 ops.read = proxy_read;
3279 ops.write = proxy_write;
3280 ops.seek = proxy_seek;
3281 ops.flush = proxy_flush;
3282 ops.close = proxy_close;
3283 ops.exit = proxy_exit;
3284 ops.lock = proxy_lock;
3285 ops.setfileinfo = proxy_setfileinfo;
3286 ops.qfileinfo = proxy_qfileinfo;
3287 ops.fsinfo = proxy_fsinfo;
3288 ops.lpq = proxy_lpq;
3289 ops.search_first = proxy_search_first;
3290 ops.search_next = proxy_search_next;
3291 ops.search_close = proxy_search_close;
3292 ops.trans = proxy_trans;
3293 ops.logoff = proxy_logoff;
3294 ops.async_setup = proxy_async_setup;
3295 ops.cancel = proxy_cancel;
3296 ops.notify = proxy_notify;
3297 ops.trans2 = proxy_trans2;
3299 /* register ourselves with the NTVFS subsystem. We register
3300 under the name 'proxy'. */
3301 ret = ntvfs_register(&ops, &vers);
3303 if (!NT_STATUS_IS_OK(ret)) {
3304 DEBUG(0,("Failed to register PROXY backend!\n"));
3305 }
3307 return ret;
3308 }
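/* Editor's note, not part of the original file: once ntvfs_register()
   succeeds, a Samba4 share selects this backend through the "ntvfs handler"
   smb.conf option using the name registered above, e.g. (the share name is
   illustrative only):

   [proxied]
           ntvfs handler = proxy

   The proxy-specific settings (remote server and share, cache and
   read-ahead tuning) would then be picked up at connect time via
   proxy_connect(). */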