Fix broken oplock handling
source4/ntvfs/proxy/vfs_proxy.c

/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
   this implements a CIFS->CIFS NTVFS filesystem caching proxy.
*/

#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx

#define PROXY_NTIOCTL_MAXDATA 0x20000

#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"
struct proxy_file {
	struct proxy_file *prev, *next;
	uint16_t fnum;
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	int readahead_pending;
	int oplock;
};
/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead; /* default read-ahead window size */
	int cache_readaheadblock; /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	int readahead_spare; /* amount of pending non-user generated requests */
	bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
	bool fake_valid; /* useful for testing, treat the cache as already validated */
	uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
};
struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev;
	struct proxy_private *proxy;
	struct ntvfs_request *req;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	struct async_info_map *chain;
	void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
	struct async_info_map *next, *prev;
	NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
	void *parms1;
	void *parms2;
	struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};
#define SETUP_PID private->tree->session->pid = req->smbpid

#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
	if ((h = ntvfs_find_handle(private->ntvfs, req, r->in.fnum)) && \
	    (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
		r->in.fnum = f->fnum; \
	} else { \
		r->out.result = NT_STATUS_INVALID_HANDLE; \
		return NT_STATUS_OK; \
	} \
} while (0)

#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)
/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)

#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in KiB; scaled by 1024 at connect time, i.e. 256 KiB */
#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

#define PROXY_FAKE_VALID "proxy:fake-valid"
#define PROXY_FAKE_VALID_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true
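
/* Illustrative smb.conf share using the options above (hypothetical values;
   the handler name "proxy" is assumed from this module, not confirmed here):

   [proxied]
	ntvfs handler = proxy
	proxy:server = fileserver
	proxy:share = data
	proxy:cache-enabled = yes
	proxy:cache-readahead = 32768
*/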
/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	 && (strcmp("A:",private->tree->device)==0))
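
/* Illustrative note (not in the original source): PROXY_REMOTE_SERVER gates
   the proxy-to-proxy extensions; e.g. the open paths below downgrade the
   cache on plain servers with:

	if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
*/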
/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
	struct smbcli_tree *tree,
	struct ntvfs_module_context *ntvfs,
	const struct ndr_interface_table *table,
	uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
						union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
			     union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);
/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
	struct proxy_private *private = p_private;
	NTSTATUS status;
	struct ntvfs_handle *h = NULL;
	struct proxy_file *f;

	for (f=private->files; f; f=f->next) {
		if (f->fnum != fnum) continue;
		h = f->h;
		break;
	}

	if (!h) {
		DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
		return true;
	}

	if (level==OPLOCK_BREAK_TO_LEVEL_II) {
		f->oplock=LEVEL_II_OPLOCK_RETURN;
	} else {
		/* If we don't have an oplock, then we can't rely on the cache */
		cache_handle_stale(f);
		f->oplock=NO_OPLOCK_RETURN;
	}

	DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
	status = ntvfs_send_oplock_break(private->ntvfs, h, level);
	if (!NT_STATUS_IS_OK(status)) return false;
	return true;
}
/*
  get the file handle from the client's fnum
  (from ntvfs/ipc/vfs_ipc.c at metze's suggestion)
*/
static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
					      struct ntvfs_request *req,
					      uint16_t fnum)
{
	DATA_BLOB key;
	uint16_t _fnum;

	/*
	 * the fnum is already in host byte order
	 * but ntvfs_handle_search_by_wire_key() expects
	 * wire byte order
	 */
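	/* For illustration (not in the original source): a client fnum of
	   0x1234 is laid out by SSVAL below as the byte pair 0x34,0x12 -
	   the same little-endian order the fnum had on the wire. */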
	SSVAL(&_fnum, 0, fnum);
	key = data_blob_const(&_fnum, 2);

	return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
}
/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, const char *sharename)
{
	NTSTATUS status;
	struct proxy_private *private;
	const char *host, *user, *pass, *domain, *remote_share;
	struct smb_composite_connect io;
	struct composite_context *creq;
	struct share_config *scfg = ntvfs->ctx->config;
	int nttrans_fnum;

	struct cli_credentials *credentials;
	bool machine_account;

	/* Here we need to determine which server to connect to.
	 * For now we use parametric options, type proxy.
	 * Later we will use security=server and auth_server.c.
	 */
	host = share_string_option(scfg, PROXY_SERVER, NULL);
	user = share_string_option(scfg, PROXY_USER, NULL);
	pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
	domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
	remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
	if (!remote_share) {
		remote_share = sharename;
	}

	machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

	private = talloc_zero(ntvfs, struct proxy_private);
	if (!private) {
		return NT_STATUS_NO_MEMORY;
	}

	ntvfs->private_data = private;

	if (!host) {
		DEBUG(1,("PROXY backend: You must supply server\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}
	if (user && pass) {
		DEBUG(5, ("PROXY backend: Using specified password\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		DEBUG(5, ("PROXY backend: Using machine account\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (req->session_info->credentials) {
		DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
		credentials = req->session_info->credentials;
	} else {
		DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}
	/* connect to the server, using the smbd event context */
	io.in.dest_host = host;
	io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
	io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
	io.in.called_name = host;
	io.in.credentials = credentials;
	io.in.fallback_to_anonymous = false;
	io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
	io.in.service = remote_share;
	io.in.service_type = "?????";
	io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
	io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
	lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
	lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

	creq = smb_composite_connect_send(&io, private,
					  lp_resolve_context(ntvfs->ctx->lp_ctx),
					  ntvfs->ctx->event_ctx);
	status = smb_composite_connect_recv(creq, private);
	NT_STATUS_NOT_OK_RETURN(status);

	private->tree = io.out.tree;

	private->transport = private->tree->session->transport;
	SETUP_PID;
	private->ntvfs = ntvfs;

	ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
	ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

	/* we need to receive oplock break requests from the server */
	smbcli_oplock_handler(private->transport, oplock_handler, private);
	private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

	private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

	private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

	if (strcmp("A:",private->tree->device)==0) {
		private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
		private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
		private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
								 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
		private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
		private->fake_valid = share_bool_option(scfg, PROXY_FAKE_VALID, PROXY_FAKE_VALID_DEFAULT);
		private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
		private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
		DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
			 remote_share, private->tree->device,private->tree->fs_type,
			 (private->cache_enabled)?"enabled":"disabled",
			 private->cache_readahead));
	} else {
		private->cache_enabled = false;
		DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
			 remote_share, private->tree->device,private->tree->fs_type));
	}

	private->remote_server = strlower_talloc(private, host);
	private->remote_share = strlower_talloc(private, remote_share);
	/* some proxy operations will not be performed on files, so open a handle
	   now that we can use for such things. We won't bother to close it on
	   shutdown, as the remote server ought to be able to close it for us
	   and we might be shutting down because the remote server went away and
	   so we don't want to delay further */
	nttrans_fnum=smbcli_nt_create_full(private->tree, "\\",
					   NTCREATEX_FLAGS_OPEN_DIRECTORY,
					   SEC_FILE_READ_DATA,
					   FILE_ATTRIBUTE_NORMAL,
					   NTCREATEX_SHARE_ACCESS_MASK,
					   NTCREATEX_DISP_OPEN,
					   NTCREATEX_OPTIONS_DIRECTORY,
					   NTCREATEX_IMPERSONATION_IMPERSONATION);
	if (nttrans_fnum < 0) {
		DEBUG(5,("Could not open handle for ntioctl %d\n",nttrans_fnum));
		return NT_STATUS_UNSUCCESSFUL;
	}
	private->nttrans_fnum=nttrans_fnum;
	DEBUG(5,("Got nttrans handle %d\n",private->nttrans_fnum));

	return NT_STATUS_OK;
}
/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct async_info *a, *an;

	/* first cleanup pending requests */
	for (a=private->pending; a; a = an) {
		an = a->next;
		smbcli_request_destroy(a->c_req);
		talloc_free(a);
	}

	talloc_free(private);
	ntvfs->private_data = NULL;

	return NT_STATUS_OK;
}
/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
	DLIST_REMOVE(async->proxy->pending, async);
	return 0;
}

/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smbcli_request_simple_recv(c_req);
	talloc_free(async);
	req->async_states->send_fn(req);
}
/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)

/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return (error); \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		async->chain = achain; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
} while (0)

#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
	if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return NT_STATUS_NO_MEMORY; \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
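
/* Illustrative sketch, not part of the original file: the pattern a simple
   backend call follows with these macros (this mirrors proxy_unlink below;
   proxy_example is a hypothetical name):

   static NTSTATUS proxy_example(struct ntvfs_module_context *ntvfs,
				 struct ntvfs_request *req, union smb_unlink *unl)
   {
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);  (sync path)
	}
	c_req = smb_raw_unlink_send(private->tree, unl);
	SIMPLE_ASYNC_TAIL;  (queues async_simple and returns NT_STATUS_OK)
   }
*/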
/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   If async->c_req is NULL then an earlier handler in the chain has already
   received the request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record:
     static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed
   and often desirable.
*/
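
/* Illustrative sketch, not part of the original file: how a caller chains
   two handlers onto one wire request and then installs the chain manager
   (this mirrors the read-ahead path later in this file):

	c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
	ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
	ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
	ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);
*/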
/* async_chain_handler has an async_info struct so that it can be safely
   inserted into pending, but the io struct will point to (struct async_info_map *);
   chained async_info_map will be in c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)
/* (debugging fragment for ADD_ASYNC_RECV_TAIL, kept out of the macro)
	DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
		creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL, \
		io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
		io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
		file, file?"file":"null", file?"file":"null", #async_fn)); \
*/
#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
	if (! creq) return (error); \
	{ \
		struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
		if (! async_map) return (error); \
		async_map->async=talloc(async_map, struct async_info); \
		if (! async_map->async) return (error); \
		async_map->parms1=io1; \
		async_map->parms2=io2; \
		async_map->fn=async_fn; \
		async_map->async->parms = io1; \
		async_map->async->req = req; \
		async_map->async->f = file; \
		async_map->async->proxy = private; \
		async_map->async->c_req = creq; \
		/* If async_chain_handler is installed, get the list from param */ \
		if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
			struct async_info *i=creq->async.private; \
			DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
		} else if (creq->async.fn) { \
			/* incompatible handler installed */ \
			return (error); \
		} else { \
			DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
		} \
	} \
} while(0)
/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	(io->generic.level == RAW_OPEN_NTCREATEX && \
	 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) \
	?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window)) \
	:(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
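
/* Illustrative usage, not part of the original file - both open paths below
   call it the same way:

	f->cache = cache_open(private->cache, f, io, f->oplock, private->cache_readahead);
*/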
/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}

	c_req = smb_raw_unlink_send(private->tree, unl);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
	    && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
		return proxy_rpclite(ntvfs, req, io);
	}

	SETUP_PID_AND_FILE;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_ioctl(private->tree, req, io);
	}

	c_req = smb_raw_ioctl_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_ioctl);
}
/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_chkpath *cp)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_chkpath(private->tree, cp);
	}

	c_req = smb_raw_chkpath_send(private->tree, cp);

	SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async qpathinfo replies
*/
static void async_qpathinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  return info on a pathname
*/
static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *info)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_pathinfo(private->tree, req, info);
	}

	c_req = smb_raw_pathinfo_send(private->tree, info);

	ASYNC_RECV_TAIL(info, async_qpathinfo);
}

/*
  a handler for async qfileinfo replies
*/
static void async_qfileinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  query info on an open file
*/
static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID_AND_FILE;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_fileinfo(private->tree, req, io);
	}

	c_req = smb_raw_fileinfo_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_qfileinfo);
}

/*
  set info on a pathname
*/
static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req, union smb_setfileinfo *st)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_setpathinfo(private->tree, st);
	}

	c_req = smb_raw_setpathinfo_send(private->tree, st);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async open replies
*/
static void async_open(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct proxy_private *proxy = async->proxy;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	union smb_open *io = async->parms;
	union smb_handle *file;

	talloc_free(async);
	req->async_states->status = smb_raw_open_recv(c_req, req, io);
	SMB_OPEN_OUT_FILE(io, file);
	f->fnum = file->fnum;
	file->ntvfs = NULL;
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	file->ntvfs = f->h;
	DLIST_ADD(proxy->files, f);

	f->oplock=io->generic.out.oplock_level;

	if (proxy->cache_enabled) {
		f->cache=cache_open(proxy->cache, f, io, f->oplock, proxy->cache_readahead);
		if (proxy->fake_valid) {
			cache_handle_validated(f, cache_handle_len(f));
		}
		if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
	}

failed:
	req->async_states->send_fn(req);
}
/*
  open a file
*/
static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, union smb_open *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	struct ntvfs_handle *h;
	struct proxy_file *f;
	NTSTATUS status;

	SETUP_PID;

	if (io->generic.level != RAW_OPEN_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_open(ntvfs, req, io);
	}

	status = ntvfs_handle_new(ntvfs, req, &h);
#warning should we free this handle if the open fails?
	NT_STATUS_NOT_OK_RETURN(status);

	f = talloc_zero(h, struct proxy_file);
	NT_STATUS_HAVE_NO_MEMORY(f);
	f->h = h;

	/* if oplocks aren't requested, optionally override and request them */
	if (! (io->generic.in.flags & (OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK))
	    && private->fake_oplock) {
		io->generic.in.flags |= OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK;
	}

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		union smb_handle *file;

		status = smb_raw_open(private->tree, req, io);
		NT_STATUS_NOT_OK_RETURN(status);

		SMB_OPEN_OUT_FILE(io, file);
		f->fnum = file->fnum;
		file->ntvfs = NULL;
		status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
		NT_STATUS_NOT_OK_RETURN(status);
		file->ntvfs = f->h;
		DLIST_ADD(private->files, f);

		f->oplock=io->generic.out.oplock_level;

		if (private->cache_enabled) {
			f->cache=cache_open(private->cache, f, io, f->oplock, private->cache_readahead);
			if (private->fake_valid) {
				cache_handle_validated(f, cache_handle_len(f));
			}
			if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
		}

		return NT_STATUS_OK;
	}

	c_req = smb_raw_open_send(private->tree, io);

	ASYNC_RECV_TAIL_F(io, async_open, f);
}
/*
  create a directory
*/
static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_mkdir *md)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_mkdir(private->tree, md);
	}

	c_req = smb_raw_mkdir_send(private->tree, md);

	SIMPLE_ASYNC_TAIL;
}

/*
  remove a directory
*/
static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, struct smb_rmdir *rd)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rmdir(private->tree, rd);
	}
	c_req = smb_raw_rmdir_send(private->tree, rd);

	SIMPLE_ASYNC_TAIL;
}

/*
  rename a set of files
*/
static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_rename *ren)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rename(private->tree, ren);
	}

	c_req = smb_raw_rename_send(private->tree, ren);

	SIMPLE_ASYNC_TAIL;
}

/*
  copy a set of files
*/
static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, struct smb_copy *cp)
{
	return NT_STATUS_NOT_SUPPORTED;
}
/* we only define this separately so we can easily spot read calls in
   pending based on ( c_req->private.fn == async_read_handler ) */
static void async_read_handler(struct smbcli_request *c_req)
{
	async_chain_handler(c_req);
}

NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct proxy_private *private = async->proxy;
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	f->readahead_pending--;
	private->readahead_spare++;

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	return status;
}
/*
  a handler for async read replies - speculative read-aheads.
  It merely saves in the cache. The async chain handler will call send_fn if
  there is one, or if sync_chain_handler is used the send_fn is called by
  the ntvfs back end.
*/
NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	NT_STATUS_NOT_OK_RETURN(status);

	/* if it was a validate read we don't want to save anything unless it failed.
	   Until we use proxy_Read structs we can't tell, so guess */
	if (io->generic.out.nread == io->generic.in.maxcnt &&
	    io->generic.in.mincnt < io->generic.in.maxcnt) {
		/* looks like a validate read, just move the validate pointer, the
		   original read-request has already been satisfied from cache */
		DEBUG(3,("%s megavalidate succeeded, validate to %lld\n",__FUNCTION__,
			 io->generic.in.offset + io->generic.out.nread));
		cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
	} else {
		DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
		cache_handle_save(f, io->generic.out.data,
				  io->generic.out.nread,
				  io->generic.in.offset);
	}

	DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}
/* handler for fragmented reads */
NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	union smb_read *io = talloc_get_type_abort(io1, union smb_read);
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag; /* async->parms; */
	struct async_read_fragments* fragments=fragment->fragments;

	/* if request is not already received by a chained handler, read it */
#warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);

	DEBUG(3,("\n\n%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	fragment->status = status;

	/* remove fragment from fragments */
	DLIST_REMOVE(fragments->fragments, fragment);

#warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
	/* in which case we will want to collate all responses and return a valid read
	   for the leading NT_STATUS_OK fragments */

	/* did this one fail, inducing a general fragments failure? */
	if (!NT_STATUS_IS_OK(fragment->status)) {
		/* preserve the status of the fragment with the smallest offset
		   when we can work out how */
		if (NT_STATUS_IS_OK(fragments->status)) {
			fragments->status=fragment->status;
		}

		cache_handle_novalidate(f);
		DEBUG(5,("** Devalidated proxy due to read failure\n"));
	} else {
		/* No fragments have yet failed, keep collecting responses */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
		/* used to use mincnt */
		off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
		off_t end_offset=MIN(io_extent, extent);
		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (start_offset >= io_extent) {
			DEBUG(3,("useless read-ahead tagged on to: %s",__location__));
		} else {
			uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
		}

		/* There should be a better way to detect, but it needs the proxy rpc struct
		   not the smb_read struct */
		if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
			DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
				 (long long) io_frag->generic.out.nread,
				 (long long) io_frag->generic.in.mincnt,
				 (long long) io_frag->generic.in.maxcnt));
			cache_handle_novalidate(f);
		}

		/* We broke up the original read. If not enough of this sub-read has
		   been read, and then some of the next block, it could leave holes!
		   We will only acknowledge up to the first partial read, and treat
		   it as a small read. If the server can return NT_STATUS_OK for a partial
		   read so can we, so we preserve the response.
		   "enough" is all of it (maxcnt), except on the last block, when it has to
		   be enough to fill io->generic.in.mincnt. We know it is the last block
		   if nread is small but we could fill io->generic.in.mincnt */
		if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
		    end_offset < io->generic.in.offset + io->generic.in.mincnt) {
			DEBUG(4,("Fragmented read only partially successful\n"));

			/* Shrink the master nread (or grow to this size if we are the first partial) */
			if (! fragments->partial ||
			    (io->generic.in.offset + io->generic.out.nread) > extent) {
				io->generic.out.nread = extent - io->generic.in.offset;
			}

			/* stop any further successes from extending the partial read */
			fragments->partial=true;
		} else {
			/* only grow the master nwritten if we haven't logged a partial write */
			if (! fragments->partial &&
			    (io->generic.in.offset + io->generic.out.nread) < extent ) {
				io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
			}
		}
	}

	/* Was it the last fragment, or do we know enough to send a response? */
	if (! fragments->fragments) {
		DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
			 io->generic.out.nread, io->generic.in.mincnt,
			 get_friendly_nt_error_msg(fragments->status)));
		if (fragments->async) {
			req->async_states->status=fragments->status;
			DEBUG(5,("Fragments async response sending\n"));
#warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
			/* esp. as they may be attached to by other reads. Maybe attachees should be taking a reference,
			   but how will they know the top level they need to take a reference to? */
#warning should really queue a sender here, not call it
			req->async_states->send_fn(req);
			DEBUG(5,("Async response sent\n"));
		} else {
			DEBUG(5,("Fragments SYNC return\n"));
		}
	}

	/* because a c_req may be shared by many req, chained handlers must return
	   a status pertaining to the general validity of this specific c_req, not
	   to their own private processing of the c_req for the benefit of their req,
	   which is returned in fragments->status
	*/
	return status;
}
/* Issue read-ahead X bytes where X is the window size calculation based on
   server_latency * server_session_bandwidth,
   where latency is the idle (link) latency and bandwidth is less than or equal
   to the actual bandwidth available to the server.
   Read-ahead should honour locked areas in whatever way is necessary (who knows?)
   read_ahead is defined here and not in the cache engine because it requires too
   much knowledge of private structures
*/
/* The concept is buggy unless we can tell the next proxy that these are
   read-aheads, otherwise chained proxy setups will each read-ahead of the
   read-ahead which can put a larger load on the final server.
   Also we probably need to distinguish between
   * cache-less read-ahead
   * cache-revalidating read-ahead
*/
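/* Illustrative sizing only (figures are assumptions, not measurements): with
   20ms idle link latency and 10 MiB/s of usable bandwidth, the window above
   works out as 0.020 * 10485760 ~= 200 KiB of read-ahead needed to keep the
   link busy between replies. */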
NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
		    union smb_read *io, ssize_t as_read)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_tree *tree = private->tree;
	struct cache_file_entry *cache;
	off_t next_position; /* this read offset+length+window */
	off_t end_position; /* position we read-ahead to */
	off_t cache_populated;
	off_t read_position, new_extent;

	if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("A\n"));
	if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("B\n"));
	cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
	DEBUG(5,("C\n"));
	/* don't read-ahead if we are in bulk validate mode */
	if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("D\n"));
	/* if we can't trust what we read-ahead anyway then don't bother although
	 * if delta-reads are enabled we can do so in order to get something to
	 * delta against */
	DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
				 (long long int)(cache_len(cache)),
				 (long long int)(cache->readahead_extent),
				 (long long int)(as_read),
				 cache->readahead_window,private->cache_readahead));
	if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
		DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
					 cache->status));
		return NT_STATUS_UNSUCCESSFUL;
	}

	/* as_read is the mincnt bytes of a request being made, or the
	   out.nread of completed sync requests.
	   Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
	   then this may often NOT be the case if readahead_window < requestsize; so we will
	   get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
	   all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
	   this and have failed sparse writes adjust the cache->readahead_extent back to actual size */

	/* predict the file pointer's next position */
	next_position=io->generic.in.offset + as_read;
	DEBUG(5,("Next position: %lld (%lld + %lld)\n",
		 (long long int)next_position,
		 (long long int)io->generic.in.offset,
		 (long long int)as_read));
	/* calculate the limit of the validated or requested cache */
	cache_populated=MAX(cache->validated_extent, cache->readahead_extent);

	/* will the new read take us beyond the current extent without gaps? */
	if (cache_populated < io->generic.in.offset) {
		/* this read-ahead is a read-behind-pointer */
		new_extent=cache_populated;
	} else {
		new_extent=MAX(next_position, cache_populated);
	}

	/* as far as we can tell new_extent is the smallest offset that doesn't
	   have a pending read request on it. Of course if we got a short read then
	   we will have a cache-gap which we can't handle and need to read from
	   a shrunk readahead_extent, which we don't currently handle */
	read_position=new_extent;

	/* of course if we know how big the remote file is we should limit at that */
	/* we should also mark-out which read-ahead requests are pending so that we
	 * don't repeat them while they are in-transit. */
	/* we can't really use next_position until we can have caches with holes
	   UNLESS next_position < new_extent, because a next_position well before
	   new_extent is no reason to extend it further; we only want to extend
	   with read-aheads if we have cause to suppose the read-ahead data will
	   be wanted, i.e. the next_position is near new_extent.
	   So we can't justify reading beyond window+next_position, but if
	   next_position is leaving gaps, we use new_extent instead */
	end_position=MIN(new_extent, next_position) + cache->readahead_window;
	DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
		 (long long int)read_position,
		 (long long int)(next_position + cache->readahead_window),
		 cache->readahead_window,
		 (long long int)end_position,
		 private->readahead_spare));
	/* do we even need to read? */
	if (! (read_position < end_position)) return NT_STATUS_OK;

	/* readahead_spare is for the whole session (mid/tid?) and may need sharing
	   out over files and other tree-connects or something */
	while (read_position < end_position &&
	       private->readahead_spare > 0) {
		struct smbcli_request *c_req = NULL;
		ssize_t read_remaining = end_position - read_position;
		ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
					 MIN(read_remaining, private->cache_readaheadblock));
		void *req = NULL; /* for the ASYNC_RECV_TAIL_F_ORPHAN macro */
		uint8_t* data;
		union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);

		if (! io_copy)
			return NT_STATUS_NO_MEMORY;

#warning we are ignoring read_for_execute as far as the cache goes
		io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
		io_copy->generic.in.offset=read_position;
		io_copy->generic.in.mincnt=read_block;
		io_copy->generic.in.maxcnt=read_block;
		/* what is generic.in.remaining for? */
		io_copy->generic.in.remaining = MIN(65535,read_remaining);
		io_copy->generic.out.nread=0;

#warning someone must own io_copy, tree, maybe?
		data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
		DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
		if (! data) {
			talloc_free(io_copy);
			return NT_STATUS_NO_MEMORY;
		}
		io_copy->generic.out.data=data;

		/* are we able to pull anything from the cache to validate this read-ahead?
		   NOTE: there is no point in reading ahead merely to re-validate the
		   cache if we don't have oplocks and can't save it....
		   ... or maybe there is if we think a read will come that can be matched
		   up to this response while it is still on the wire */
#warning so we need to distinguish between pipe-line read-ahead and revalidation
		if (/*(cache->status & CACHE_READ)!=0 && */
		    cache_len(cache) >
		    (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
		    cache->validated_extent <
		    (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
			ssize_t pre_fill;

			pre_fill = cache_raw_read(cache, data,
						  io_copy->generic.in.offset,
						  io_copy->generic.in.maxcnt);
			DEBUG(5,("Data read into %p %d\n",data, pre_fill));
			if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
				io_copy->generic.out.nread=pre_fill;
				read_block=pre_fill;
			}
		}

		c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);

		if (c_req) {
			private->readahead_spare--;
			f->readahead_pending++;
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
			if (cache->readahead_extent < read_position+read_block)
				cache->readahead_extent=read_position+read_block;
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
			/* so we can decrease read-ahead counter for this session */
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
			ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

			/* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
			talloc_steal(c_req->async.private, c_req);
			talloc_steal(c_req->async.private, io_copy);
			read_position+=read_block;
		} else {
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
			talloc_free(io_copy);
			break;
		}
	}

	DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
	return NT_STATUS_OK;
}
struct proxy_validate_parts_parts {
	struct proxy_Read* r;
	struct ntvfs_request *req;
	struct proxy_file *f;
	struct async_read_fragments *fragments;
	off_t offset;
	ssize_t remaining;
	bool complete;
	declare_checksum(digest);
	struct MD5Context context;
};

NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
				     struct proxy_validate_parts_parts *parts);
/* this will be the new struct proxy_Read based read function; for now
   it just deals with non-cached based validate to a regular server */
static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
			       struct ntvfs_request *req,
			       struct proxy_Read *r,
			       struct proxy_file *f)
{
	struct proxy_private *private = ntvfs->private_data;
	struct proxy_validate_parts_parts *parts;
	struct async_read_fragments *fragments;
	NTSTATUS status;

	if (!f) return NT_STATUS_INVALID_HANDLE;

	DEBUG(5,("%s: fnum=%d\n",__FUNCTION__,f->fnum));

	parts = talloc_zero(req, struct proxy_validate_parts_parts);
	DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
	NT_STATUS_HAVE_NO_MEMORY(parts);

	fragments = talloc_zero(parts, struct async_read_fragments);
	NT_STATUS_HAVE_NO_MEMORY(fragments);

	parts->fragments=fragments;

	parts->r=r;
	parts->f=f;
	parts->req=req;
	/* processed offset */
	parts->offset=r->in.offset;
	parts->remaining=r->in.maxcnt;
	fragments->async=true;

	MD5Init(&parts->context);

	/* start a read-loop which will continue in the callback until it is
	   all done */
	status=proxy_validate_parts(ntvfs, parts);
	if (parts->complete) {
		/* Make sure we are not async */
		DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
		return proxy_validate_complete(parts);
	}

	/* Assert if status!=NT_STATUS_OK then parts->complete==true */
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
	DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
	return status;
}
NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
{
	NTSTATUS status;
	struct proxy_Read* r=parts->r;
	MD5Final(parts->digest, &parts->context);

	status = parts->fragments->status;
	r->out.result = status;
	r->out.response.generic.count=r->out.nread;

	DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg(status),
		 r->out.response.generic.count));

	DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
	dump_data(5, r->in.digest.digest, sizeof(parts->digest));
	DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
	dump_data(5, parts->digest, sizeof(parts->digest));

	if (NT_STATUS_IS_OK(status) &&
	    (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
		r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
		DEBUG(5,("======= VALIDATED FINE \n\n\n"));
	} else if (r->in.flags & PROXY_USE_ZLIB) {
		ssize_t size = r->out.response.generic.count;
		DEBUG(5,("======= VALIDATED WRONG \n\n\n"));
		if (compress_block(r->out.response.generic.data, &size) ) {
			r->out.flags|=PROXY_USE_ZLIB;
			r->out.response.compress.count=size;
			r->out.response.compress.data=r->out.response.generic.data;
			DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
				 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
		}
	}

	/* assert: this must only be true if we are in a callback */
	if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
		/* we are async complete, we need to call the sendfn */
		parts->req->async_states->status=status;
		DEBUG(5,("Fragments async response sending\n"));

		parts->req->async_states->send_fn(parts->req);
		return NT_STATUS_OK;
	}
	return status;
}
NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
	struct proxy_Read* r=parts->r;
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag;
	struct async_read_fragments* fragments=fragment->fragments;

	DEBUG(5,("%s: parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));
	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);
	DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));

	fragment->status=status;

	if (NT_STATUS_IS_OK(status)) {
		/* TODO: if we are not sequentially "next", queue until we can do it in order */
		/* log this data in r->out.generic.data */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
		/* Don't want to go past mincnt */
		off_t io_extent=r->in.offset + r->in.mincnt;
		off_t end_offset=MIN(io_extent, extent);

		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (! (start_offset >= io_extent)) {
			uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
			r->out.nread=end_offset - r->in.offset;
		}

		MD5Update(&parts->context, io_frag->generic.out.data,
			  io_frag->generic.out.nread);

		parts->fragments->status=status;
		status=proxy_validate_parts(ntvfs, parts);
	} else {
		parts->fragments->status=status;
	}

	DLIST_REMOVE(fragments->fragments, fragment);
	/* this will free the io_frag too */
	talloc_free(fragment);

	if (parts->complete || NT_STATUS_IS_ERR(status)) {
		/* this will call sendfn, the chain handler won't know... but
		   should have no more handlers queued */
		return proxy_validate_complete(parts);
	}

	return NT_STATUS_OK;
}
/* continue a read loop, possibly from a callback */
static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
				     struct proxy_validate_parts_parts *parts)
{
	struct proxy_private *private = ntvfs->private_data;
	union smb_read *io_frag;
	struct async_read_fragment *fragment;
	struct smbcli_request *c_req = NULL;
	ssize_t size=private->tree->session->transport->negotiate.max_xmit
		- (MIN_SMB_SIZE+32);

	/* Have we already read enough? */
	if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
		parts->complete=true;
		return NT_STATUS_OK;
	}

	size=MIN(size, parts->remaining);

	fragment=talloc_zero(parts->fragments, struct async_read_fragment);
	NT_STATUS_HAVE_NO_MEMORY(fragment);

	io_frag = talloc_zero(fragment, union smb_read);
	NT_STATUS_HAVE_NO_MEMORY(io_frag);

	io_frag->generic.out.data = talloc_size(io_frag, size);
	NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);

	io_frag->generic.level = RAW_READ_GENERIC;
	io_frag->generic.in.file.fnum = parts->r->in.fnum;
	io_frag->generic.in.offset = parts->offset;
	io_frag->generic.in.mincnt = size;
	io_frag->generic.in.maxcnt = size;
	io_frag->generic.in.remaining = 0;
#warning maybe true is more permissive?
	io_frag->generic.in.read_for_execute = false;

	/* c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r); */
	c_req = smb_raw_read_send(private->tree, io_frag);
	NT_STATUS_HAVE_NO_MEMORY(c_req);

	parts->offset+=size;
	parts->remaining-=size;
	fragment->c_req = c_req;
	fragment->io_frag = io_frag;
	fragment->fragments=parts->fragments;
	DLIST_ADD(parts->fragments->fragments, fragment);

	{ void* req=NULL;
		ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
		ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
	}

	DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__FUNCTION__,parts, c_req, io_frag));

	return NT_STATUS_OK;
}
1549 read from a file
1551 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
1552 struct ntvfs_request *req, union smb_read *io)
1554 struct proxy_private *private = ntvfs->private_data;
1555 struct smbcli_request *c_req;
1556 struct proxy_file *f;
1557 struct async_read_fragments *fragments=NULL;
1558 /* how much of read-from-cache is certainly valid */
1559 ssize_t valid=0;
1560 off_t offset=io->generic.in.offset+valid;
1561 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
1563 SETUP_PID;
1565 if (io->generic.level != RAW_READ_GENERIC &&
1566 private->map_generic) {
1567 return ntvfs_map_read(ntvfs, req, io);
1570 SETUP_FILE_HERE(f);
1572 DEBUG(3,("%s offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
1573 io->generic.in.offset,
1574 io->generic.in.mincnt,
1575 io->generic.in.maxcnt));
1576 io->generic.out.nread=0;
1577 /* attempt to read from cache. if nread becomes non-zero then we
1578 have cache to validate. Instead of returning "valid" value, cache_read
1579 should probably return an async_read_fragment structure */
1581 if (private->cache_enabled) {
1582 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
1584 if (NT_STATUS_IS_OK(status)) {
1585 /* if we read enough valid data, return it */
1586 if (valid > 0 && valid>=io->generic.in.mincnt) {
1587 /* valid will not be bigger than maxcnt */
1588 io->generic.out.nread=valid;
1589 DEBUG(1,("Read from cache offset=%d size=%d\n",
1590 (int)(io->generic.in.offset),
1591 (int)(io->generic.out.nread)) );
1592 return status;
1597 fragments=talloc_zero(req, struct async_read_fragments);
1598 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
1599 /* See if there are pending reads that would satisfy this request.
1600 We have a validated read up to io->generic.out.nread. Anything between
1601 this and mincnt MUST be read, but we could first try and attach to
1602 any pending read-ahead on the same file.
1603 If those read-aheads fail we will re-issue a regular read from the
1604 callback handler and hope it hasn't taken too long. */
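/* A worked example of the matching test below (illustrative numbers only):
   a pending read-ahead with in.offset=4096 and in.mincnt=8192 covers bytes
   [4096, 12288), so it can satisfy a needed offset of, say, 10000:
       4096 <= 10000 && 4096 + 8192 > 10000
   whereas an offset at or beyond 12288 still needs a fragment of its own. */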
1606 /* offset is the extent of the file from which we still need to find
1607 matching read-requests. */
1608 offset=io->generic.in.offset+valid;
1609 /* limit is the byte beyond the last byte for which we need a request.
1610 This used to be mincnt, but is now maxcnt to cope with validate reads.
1611 Maybe we can switch back to mincnt when proxy_read struct is used
1612 instead of smb_read.
1614 limit=io->generic.in.offset+io->generic.in.maxcnt;
1616 while (offset < limit) {
1617 /* Should look for the read-ahead with offset <= in.offset+out.nread
1618 with the longest span, but there is only likely to be one anyway so
1619 just take the first */
1620 struct async_info* pending=private->pending;
1621 union smb_read *readahead_io=NULL;
1622 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
1623 while(pending) {
1624 if (pending->c_req->async.fn == async_read_handler) {
1625 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
1626 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
1628 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
1629 readahead_io->generic.in.offset <= offset &&
1630 readahead_io->generic.in.offset +
1631 readahead_io->generic.in.mincnt > offset) break;
1633 readahead_io=NULL;
1634 pending=pending->next;
1636 /* ASSERT(readahead_io == pending->c_req->async.params) */
1637 if (pending && readahead_io) {
1638 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1639 fragment->fragments=fragments;
1640 fragment->io_frag=readahead_io;
1641 fragment->c_req = pending->c_req;
1642 /* we found one, so attach to it. We DO need a talloc_reference
1643 because the original send_fn might be called before ALL chained
1644 handlers, and our handler will call its own send_fn first. ugh.
1645 Maybe we need to separate reverse-mapping callbacks from data users? */
1646 /* Note: the read-ahead io is passed as io, and our req io is
1647 in io_frag->io */
1648 //talloc_reference(req, pending->req);
1649 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
1650 readahead_io->generic.in.offset,
1651 readahead_io->generic.in.mincnt));
1652 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
1653 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1654 DEBUG(5,("Attached OK\n"));
1655 #warning we don't want to return if we fail to attach, just break
1656 DLIST_ADD(fragments->fragments, fragment);
1657 /* updated offset for which we have reads */
1658 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
1659 } else {
1660 /* there are no pending reads to fill this so issue one up to
1661 the maximum supported read size. We could see when the next
1662 pending read is (if any) and only read up till there... later...
1663 Issue a fragment request for what is left, clone io.
1664 In the case that there were no fragments this will be the original read
1665 but with a cloned io struct */
1666 off_t next_offset;
1667 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
1668 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1669 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
1670 ssize_t offset_inc=offset-io_frag->generic.in.offset;
1671 /* 250 is a guess at ndr rpc overheads */
1672 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
1673 private->tree->session->transport->negotiate.max_xmit) \
1674 - (MIN_SMB_SIZE+32);
1675 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
1676 readsize=MIN(limit-offset, readsize);
1678 DEBUG(5,("Issuing direct read\n"));
1679 /* reduce the cached read (if any). nread is unsigned */
1680 if (io_frag->generic.out.nread > offset_inc) {
1681 io_frag->generic.out.nread-=offset_inc;
1682 /* don't make nread buffer look too big */
1683 if (io_frag->generic.out.nread > readsize)
1684 io_frag->generic.out.nread = readsize;
1685 } else {
1686 io_frag->generic.out.nread=0;
1688 /* adjust the data pointer so we read to the right place */
1689 io_frag->generic.out.data+=offset_inc;
1690 io_frag->generic.in.offset=offset;
1691 io_frag->generic.in.maxcnt=readsize;
1692 /* we don't mind mincnt being smaller if this is the last frag,
1693 but then we can already handle it being bigger but not reached...
1694 The spell would be:
1695 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
1697 io_frag->generic.in.mincnt=readsize;
1698 fragment->fragments=fragments;
1699 fragment->io_frag=io_frag;
1700 #warning attach to send_fn handler
1701 /* what if someone attaches to us? Our send_fn is called from our
1702 chained handler which will be before their handler and io will
1703 already be freed. We need to keep a reference to the io and the data
1704 but we don't know where it came from in order to take a reference.
1705 We need therefore to tackle calling of send_fn AFTER all other handlers */
1707 /* Calculate next offset (in advance) */
1708 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
1710 /* if we are (going to be) the last fragment and we are in VALIDATE
1711 mode, see if we can do a bulk validate now.
1712 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
1713 don't do a validate on a receive validate read
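/* A sketch of the bulk-validate exchange, as assumed from the code below
   and the rpclite handlers later in this file:
   1. MD5 the locally cached bytes at [offset, offset+length) via
      cache_smb_raw_checksum(), yielding "digest";
   2. promote this fragment to a proxy_Read carrying the digest, with
      PROXY_VALIDATE|PROXY_USE_CACHE set and in.maxcnt widened to length;
   3. the remote end recomputes the digest over its own data and, on a
      match, replies with PROXY_USE_CACHE set and no bulk payload; the
      reply side of this is handled in async_proxy_smb_raw_read_rpc(). */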
1715 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
1716 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
1717 ssize_t length=private->cache_validatesize;
1718 declare_checksum(digest);
1720 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
1721 length, (unsigned long long) offset));
1722 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
1723 /* no point in doing it if md5'd length < current out.nread
1724 remember: out.data contains this request's cached response
1725 if validate succeeds */
1726 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
1727 /* upgrade the read, allocate the proxy_read struct here
1728 and fill in the extras, no more out-of-band stuff */
1729 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
1730 dump_data (5, digest, sizeof(digest));
1732 r=talloc_zero(io_frag, struct proxy_Read);
1733 memcpy(r->in.digest.digest, digest, sizeof(digest));
1734 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
1735 io_frag->generic.in.maxcnt = length;
1736 /* the proxy send function will calculate the checksum based on *data */
1737 } else {
1738 /* not enough in cache to make it worthwhile anymore */
1739 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x len=%llu\n",
1740 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0,
1741 (unsigned long long)length));
1742 cache_handle_novalidate(f);
1743 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
1744 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
1746 } else {
1747 if (f->cache && f->cache->status & CACHE_VALIDATE) {
1748 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
1749 (long long) next_offset,
1750 (long long) limit));
1754 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
1755 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
1756 io_frag->generic.in.maxcnt));
1757 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
1758 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
1759 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
1760 fragment->c_req=c_req;
1761 DLIST_ADD(fragments->fragments, fragment);
1762 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
1763 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1764 DEBUG(5,("Frag response chained\n"));
1765 /* normally we would only install the chain_handler if we wanted async
1766 response, but as it is the async_read_fragment handler that calls send_fn
1767 based on fragments->async, instead of async_chain_handler, we don't
1768 need to worry about this call completing async'ly while we are
1769 waiting on the other attached calls. Otherwise we would not attach
1770 the async_chain_handler (via async_read_handler) because of the wait
1771 below */
1772 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
1773 void* req=NULL;
1774 /* call async_chain_handler not read handler so that folk can't
1775 attach to it, till we solve the problem above */
1776 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1778 offset = next_offset;
1780 DEBUG(5,("Next fragment\n"));
1783 /* do we still need a final fragment? Issue a read */
1785 DEBUG(5,("No frags left to read\n"));
1788 /* issue new round of read-aheads */
1789 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
1790 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
1791 DEBUG(5,("== Done Read aheads\n"));
1793 /* If we have fragments but we are not called async, we must sync-wait on them */
1794 /* did we map the entire request to pending reads? */
1795 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1796 struct async_read_fragment *fragment;
1797 DEBUG(5,("Sync waiting\n"));
1798 /* each fragment gets free'd during the chain_handler so we start at
1799 the top each time */
1800 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
1801 /* Any fragments async handled while we sync-wait on one
1802 will remove themselves from the list and not get sync waited */
1803 sync_chain_handler(fragment->c_req);
1804 /* if we have a non-ok result AND we know we have all the responses
1805 up to extent, then we could quit the loop early and change the
1806 fragments->async to true so the final irrelevant responses would
1807 come async and we could send our response now - but we don't
1808 track that detail until we have cache-maps that we can use to
1809 track the responded fragments and combine responded linear extents
1810 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
1812 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
1813 return fragments->status;
1816 DEBUG(5,("Async returning\n"));
1817 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1818 return NT_STATUS_OK;
1822 a handler to de-fragment async write replies back to one request.
1823 Can cope with out-of-order async responses by waiting for all responses
1824 on an NT_STATUS_OK case so that nwritten is properly adjusted
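/* Worked example (illustrative numbers): a 128k write split into two 64k
   fragments at offsets 0 and 65536. If the first fragment reports
   nwritten=60000 while the second completes fully, acknowledging 128k
   would hide a hole at [60000, 65536); instead the master nwritten is
   clamped to 60000 and fragments->partial is set so later successes
   cannot extend it. */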
1826 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1828 struct smbcli_request *c_req = async->c_req;
1829 struct ntvfs_request *req = async->req;
1830 struct proxy_file *f=async->f;
1831 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
1832 /* this is the io against which the fragment is to be applied */
1833 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
1834 /* this is the io for the write that issued the callback */
1835 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
1836 struct async_write_fragments* fragments=fragment->fragments;
1837 ssize_t extent=0;
1839 /* if request is not already received by a chained handler, read it */
1840 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
1841 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
1843 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
1844 get_friendly_nt_error_msg(status)));
1846 fragment->status = status;
1848 DLIST_REMOVE(fragments->fragments, fragment);
1850 /* did this one fail? */
1851 if (! NT_STATUS_IS_OK(fragment->status)) {
1852 if (NT_STATUS_IS_OK(fragments->status)) {
1853 fragments->status=fragment->status;
1855 } else {
1856 /* No fragments have yet failed, keep collecting responses */
1857 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
1859 /* we broke up the write so it could all be written. If only some has
1860 been written of this block, and then some of the next block,
1861 it could leave unwritten holes! We will only acknowledge up to the
1862 first partial write, and let the client deal with it.
1863 If the server can return NT_STATUS_OK for a partial write, so can we */
1864 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
1865 DEBUG(4,("Fragmented write only partially successful\n"));
1867 /* Shrink the master nwritten */
1868 if ( ! fragments->partial ||
1869 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
1870 io->generic.out.nwritten = extent - io->generic.in.offset;
1872 /* stop any further successes from extending the partial write */
1873 fragments->partial=true;
1874 } else {
1875 /* only grow the master nwritten if we haven't logged a partial write */
1876 if (! fragments->partial &&
1877 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
1878 io->generic.out.nwritten = extent - io->generic.in.offset;
1883 /* if this was the last fragment, clean up */
1884 if (! fragments->fragments) {
1885 DEBUG(5,("Async write re-fragmented with %d of %d\n",
1886 io->generic.out.nwritten,
1887 io->generic.in.count));
1888 if (NT_STATUS_IS_OK(fragments->status)) {
1889 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
1890 io->generic.in.offset);
1892 if (fragments->async) {
1893 req->async_states->status=fragments->status;
1894 #warning it's not good freeing early if other pending requests have io allocated against this request which will now be freed
1895 req->async_states->send_fn(req);
1896 DEBUG(5,("Async response sent\n"));
1897 } else {
1898 DEBUG(5,("Fragments SYNC return\n"));
1902 return status;
1906 a handler for async write replies
1908 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1910 struct smbcli_request *c_req = async->c_req;
1911 struct ntvfs_request *req = async->req;
1912 struct proxy_file *f=async->f;
1913 union smb_write *io=async->parms;
1915 if (c_req)
1916 status = smb_raw_write_recv(c_req, async->parms);
1918 cache_handle_save(f, io->generic.in.data,
1919 io->generic.out.nwritten,
1920 io->generic.in.offset);
1922 return status;
1926 write to a file
1928 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
1929 struct ntvfs_request *req, union smb_write *io)
1931 struct proxy_private *private = ntvfs->private_data;
1932 struct smbcli_request *c_req;
1933 struct proxy_file *f;
1935 SETUP_PID;
1937 if (io->generic.level != RAW_WRITE_GENERIC &&
1938 private->map_generic) {
1939 return ntvfs_map_write(ntvfs, req, io);
1941 SETUP_FILE_HERE(f);
1943 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
1944 #warning ERROR get rid of this
1945 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1946 NTSTATUS status;
1947 if (PROXY_REMOTE_SERVER(private)) {
1948 /* Do a proxy write */
1949 status=proxy_smb_raw_write(ntvfs, io, f);
1950 } else if (io->generic.in.count >
1951 private->tree->session->transport->negotiate.max_xmit) {
1953 /* smbcli_write can deal with large writes, which are bigger than
1954 tree->session->transport->negotiate.max_xmit */
1955 ssize_t size=smbcli_write(private->tree,
1956 io->generic.in.file.fnum,
1957 io->generic.in.wmode,
1958 io->generic.in.data,
1959 io->generic.in.offset,
1960 io->generic.in.count);
1962 if (size==io->generic.in.count || size > 0) {
1963 io->generic.out.nwritten=size;
1964 status=NT_STATUS_OK;
1965 } else {
1966 status=NT_STATUS_UNSUCCESSFUL;
1968 } else {
1969 status=smb_raw_write(private->tree, io);
1972 /* Save write in cache */
1973 if (NT_STATUS_IS_OK(status)) {
1974 cache_handle_save(f, io->generic.in.data,
1975 io->generic.out.nwritten,
1976 io->generic.in.offset);
1979 return status;
1982 /* smb_raw_write_send can't deal with large writes, which are bigger than
1983 tree->session->transport->negotiate.max_xmit so we have to break it up
1984 trying to preserve the async nature of the call as much as possible */
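/* Illustrative arithmetic for the fragmenting branch below: with
   block = max_xmit - (MIN_SMB_SIZE+32), a write of count bytes becomes
   ceil(count/block) fragments; fragment n starts at in.offset + n*block
   and carries MIN(block, count - n*block) bytes, each with its own c_req,
   re-assembled by async_write_fragment(). */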
1985 if (PROXY_REMOTE_SERVER(private)) {
1986 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
1987 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
1988 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1989 } else if (io->generic.in.count <=
1990 private->tree->session->transport->negotiate.max_xmit) {
1991 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
1992 c_req = smb_raw_write_send(private->tree, io);
1993 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1994 } else {
1995 ssize_t remaining = io->generic.in.count;
1996 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
1997 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
1998 int done = 0;
1999 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
2001 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
2002 __FUNCTION__, io->generic.in.count,
2003 private->tree->session->transport->negotiate.max_xmit));
2005 fragments->io = io;
2006 io->generic.out.nwritten=0;
2007 io->generic.out.remaining=0;
2009 do {
2010 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
2011 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
2012 ssize_t size = MIN(block, remaining);
2014 fragment->fragments = fragments;
2015 fragment->io_frag = io_frag;
2017 io_frag->generic.level = io->generic.level;
2018 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
2019 io_frag->generic.in.wmode = io->generic.in.wmode;
2020 io_frag->generic.in.count = size;
2021 io_frag->generic.in.offset = io->generic.in.offset + done;
2022 io_frag->generic.in.data = io->generic.in.data + done;
2024 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
2025 if (! c_req) {
2026 /* let pending requests clean-up when ready */
2027 fragments->status=NT_STATUS_UNSUCCESSFUL;
2028 talloc_steal(NULL, fragments);
2029 DEBUG(3,("Can't send request fragment\n"));
2030 return NT_STATUS_UNSUCCESSFUL;
2033 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
2034 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
2035 fragment->c_req=c_req;
2036 DLIST_ADD(fragments->fragments, fragment);
2038 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
2039 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
2040 DEBUG(5,("Frag response chained\n"));
2042 remaining -= size;
2043 done += size;
2044 } while(remaining > 0);
2046 /* this strategy has the callback chain attached to each c_req, so we
2047 don't use the ASYNC_RECV_TAIL* to install a general one */
2050 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
2054 a handler for async seek replies
2056 static void async_seek(struct smbcli_request *c_req)
2058 struct async_info *async = c_req->async.private;
2059 struct ntvfs_request *req = async->req;
2060 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
2061 talloc_free(async);
2062 req->async_states->send_fn(req);
2066 seek in a file
2068 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
2069 struct ntvfs_request *req,
2070 union smb_seek *io)
2072 struct proxy_private *private = ntvfs->private_data;
2073 struct smbcli_request *c_req;
2075 SETUP_PID_AND_FILE;
2077 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2078 return smb_raw_seek(private->tree, io);
2081 c_req = smb_raw_seek_send(private->tree, io);
2083 ASYNC_RECV_TAIL(io, async_seek);
2087 flush a file
2089 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
2090 struct ntvfs_request *req,
2091 union smb_flush *io)
2093 struct proxy_private *private = ntvfs->private_data;
2094 struct smbcli_request *c_req;
2096 SETUP_PID;
2097 switch (io->generic.level) {
2098 case RAW_FLUSH_FLUSH:
2099 SETUP_FILE;
2100 break;
2101 case RAW_FLUSH_ALL:
2102 io->generic.in.file.fnum = 0xFFFF;
2103 break;
2104 case RAW_FLUSH_SMB2:
2105 return NT_STATUS_INVALID_LEVEL;
2108 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2109 return smb_raw_flush(private->tree, io);
2112 c_req = smb_raw_flush_send(private->tree, io);
2114 SIMPLE_ASYNC_TAIL;
2118 close a file
2120 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
2121 struct ntvfs_request *req, union smb_close *io)
2123 struct proxy_private *private = ntvfs->private_data;
2124 struct smbcli_request *c_req;
2125 struct proxy_file *f;
2126 union smb_close io2;
2128 SETUP_PID;
2130 if (io->generic.level != RAW_CLOSE_GENERIC &&
2131 private->map_generic) {
2132 return ntvfs_map_close(ntvfs, req, io);
2134 SETUP_FILE_HERE(f);
2135 /* Note, we aren't free-ing f, or its h here. Should we?
2136 even if file-close fails, we'll remove it from the list,
2137 what else would we do? Maybe we should not remove until
2138 after the proxied call completes? */
2139 DLIST_REMOVE(private->files, f);
2141 /* possibly samba can't do RAW_CLOSE_SEND yet */
2142 if (! (c_req = smb_raw_close_send(private->tree, io))) {
2143 if (io->generic.level == RAW_CLOSE_GENERIC) {
2144 ZERO_STRUCT(io2);
2145 io2.close.level = RAW_CLOSE_CLOSE;
2146 io2.close.in.file = io->generic.in.file;
2147 io2.close.in.write_time = io->generic.in.write_time;
2148 io = &io2;
2150 c_req = smb_raw_close_send(private->tree, io);
2153 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2154 return smbcli_request_simple_recv(c_req);
2157 SIMPLE_ASYNC_TAIL;
2161 exit - closing files open by the pid
2163 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
2164 struct ntvfs_request *req)
2166 struct proxy_private *private = ntvfs->private_data;
2167 struct smbcli_request *c_req;
2169 SETUP_PID;
2171 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2172 return smb_raw_exit(private->tree->session);
2175 c_req = smb_raw_exit_send(private->tree->session);
2177 SIMPLE_ASYNC_TAIL;
2181 logoff - closing files open by the user
2183 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
2184 struct ntvfs_request *req)
2186 /* we can't do this right in the proxy backend .... */
2187 return NT_STATUS_OK;
2191 setup for an async call - nothing to do yet
2193 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
2194 struct ntvfs_request *req,
2195 void *private)
2197 return NT_STATUS_OK;
2201 cancel an async call
2203 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
2204 struct ntvfs_request *req)
2206 struct proxy_private *private = ntvfs->private_data;
2207 struct async_info *a;
2209 /* find the matching request */
2210 for (a=private->pending;a;a=a->next) {
2211 if (a->req == req) {
2212 break;
2216 if (a == NULL) {
2217 return NT_STATUS_INVALID_PARAMETER;
2220 return smb_raw_ntcancel(a->c_req);
2224 lock a byte range
2226 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
2227 struct ntvfs_request *req, union smb_lock *io)
2229 struct proxy_private *private = ntvfs->private_data;
2230 struct smbcli_request *c_req;
2232 SETUP_PID;
2234 if (io->generic.level != RAW_LOCK_GENERIC &&
2235 private->map_generic) {
2236 return ntvfs_map_lock(ntvfs, req, io);
2238 SETUP_FILE;
2240 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2241 return smb_raw_lock(private->tree, io);
2244 c_req = smb_raw_lock_send(private->tree, io);
2245 SIMPLE_ASYNC_TAIL;
2249 set info on a open file
2251 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
2252 struct ntvfs_request *req,
2253 union smb_setfileinfo *io)
2255 struct proxy_private *private = ntvfs->private_data;
2256 struct smbcli_request *c_req;
2258 SETUP_PID_AND_FILE;
2260 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2261 return smb_raw_setfileinfo(private->tree, io);
2263 c_req = smb_raw_setfileinfo_send(private->tree, io);
2265 SIMPLE_ASYNC_TAIL;
2270 a handler for async fsinfo replies
2272 static void async_fsinfo(struct smbcli_request *c_req)
2274 struct async_info *async = c_req->async.private;
2275 struct ntvfs_request *req = async->req;
2276 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, async->parms);
2277 talloc_free(async);
2278 req->async_states->send_fn(req);
2282 return filesystem space info
2284 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
2285 struct ntvfs_request *req, union smb_fsinfo *fs)
2287 struct proxy_private *private = ntvfs->private_data;
2288 struct smbcli_request *c_req;
2290 SETUP_PID;
2292 /* QFS Proxy */
2293 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
2294 fs->proxy_info.out.major_version=1;
2295 fs->proxy_info.out.minor_version=0;
2296 fs->proxy_info.out.capability=0;
2297 return NT_STATUS_OK;
2300 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2301 return smb_raw_fsinfo(private->tree, req, fs);
2304 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
2306 ASYNC_RECV_TAIL(fs, async_fsinfo);
2310 return print queue info
2312 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
2313 struct ntvfs_request *req, union smb_lpq *lpq)
2315 return NT_STATUS_NOT_SUPPORTED;
2319 list files in a directory matching a wildcard pattern
2321 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
2322 struct ntvfs_request *req, union smb_search_first *io,
2323 void *search_private,
2324 bool (*callback)(void *, const union smb_search_data *))
2326 struct proxy_private *private = ntvfs->private_data;
2328 SETUP_PID;
2330 return smb_raw_search_first(private->tree, req, io, search_private, callback);
2333 /* continue a search */
2334 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
2335 struct ntvfs_request *req, union smb_search_next *io,
2336 void *search_private,
2337 bool (*callback)(void *, const union smb_search_data *))
2339 struct proxy_private *private = ntvfs->private_data;
2341 SETUP_PID;
2343 return smb_raw_search_next(private->tree, req, io, search_private, callback);
2346 /* close a search */
2347 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
2348 struct ntvfs_request *req, union smb_search_close *io)
2350 struct proxy_private *private = ntvfs->private_data;
2352 SETUP_PID;
2354 return smb_raw_search_close(private->tree, io);
2358 a handler for async trans2 replies
2360 static void async_trans2(struct smbcli_request *c_req)
2362 struct async_info *async = c_req->async.private;
2363 struct ntvfs_request *req = async->req;
2364 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
2365 talloc_free(async);
2366 req->async_states->send_fn(req);
2369 /* raw trans2 */
2370 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
2371 struct ntvfs_request *req,
2372 struct smb_trans2 *trans2)
2374 struct proxy_private *private = ntvfs->private_data;
2375 struct smbcli_request *c_req;
2377 if (private->map_trans2) {
2378 return NT_STATUS_NOT_IMPLEMENTED;
2381 SETUP_PID;
2382 #warning we should be mapping file handles here
2384 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2385 return smb_raw_trans2(private->tree, req, trans2);
2388 c_req = smb_raw_trans2_send(private->tree, trans2);
2390 ASYNC_RECV_TAIL(trans2, async_trans2);
2394 /* SMBtrans - not used on file shares */
2395 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
2396 struct ntvfs_request *req,
2397 struct smb_trans2 *trans2)
2399 return NT_STATUS_ACCESS_DENIED;
2403 a handler for async change notify replies
2405 static void async_changenotify(struct smbcli_request *c_req)
2407 struct async_info *async = c_req->async.private;
2408 struct ntvfs_request *req = async->req;
2409 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
2410 talloc_free(async);
2411 req->async_states->send_fn(req);
2414 /* change notify request - always async */
2415 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
2416 struct ntvfs_request *req,
2417 union smb_notify *io)
2419 struct proxy_private *private = ntvfs->private_data;
2420 struct smbcli_request *c_req;
2421 int saved_timeout = private->transport->options.request_timeout;
2422 struct proxy_file *f;
2424 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
2425 return NT_STATUS_NOT_IMPLEMENTED;
2428 SETUP_PID;
2430 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
2431 if (!f) return NT_STATUS_INVALID_HANDLE;
2432 io->nttrans.in.file.fnum = f->fnum;
2434 /* this request doesn't make sense unless it's async */
2435 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2436 return NT_STATUS_INVALID_PARAMETER;
2439 /* we must not timeout on notify requests - they wait
2440 forever */
2441 private->transport->options.request_timeout = 0;
2443 c_req = smb_raw_changenotify_send(private->tree, io);
2445 private->transport->options.request_timeout = saved_timeout;
2447 ASYNC_RECV_TAIL(io, async_changenotify);
2451 * A handler for converting from rpc struct replies to ntioctl
2453 static NTSTATUS proxy_rpclite_map_async_send(
2454 struct ntvfs_module_context *ntvfs,
2455 struct ntvfs_request *req,
2456 void *io1, void *io2, NTSTATUS status)
2458 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
2459 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
2460 void* r=rpclite_send->struct_ptr;
2461 struct ndr_push* push;
2462 const struct ndr_interface_call* call=rpclite_send->call;
2463 enum ndr_err_code ndr_err;
2464 DATA_BLOB ndr;
2466 talloc_free(rpclite_send);
2468 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2469 NT_STATUS_HAVE_NO_MEMORY(push);
2471 if (0) {
2472 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2475 ndr_err = call->ndr_push(push, NDR_OUT, r);
2476 status=ndr_map_error2ntstatus(ndr_err);
2478 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2479 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2480 nt_errstr(status)));
2481 return status;
2484 ndr=ndr_push_blob(push);
2485 //if (ndr.length > io->ntioctl.in.max_data) {
2486 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
2487 io->ntioctl.in.max_data, ndr.data));
2488 io->ntioctl.out.blob=ndr;
2489 return status;
2493 * A handler for sending async rpclite Read replies that were mapped to union smb_read
2495 static NTSTATUS rpclite_proxy_Read_map_async_send(
2496 struct ntvfs_module_context *ntvfs,
2497 struct ntvfs_request *req,
2498 void *io1, void *io2, NTSTATUS status)
2500 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
2501 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
2503 /* status here is a result of proxy_read; it doesn't reflect the status
2504 of the rpc transport or related calls, just the read operation */
2505 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2506 r->out.result=status;
2508 if (! NT_STATUS_IS_OK(status)) {
2509 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
2510 r->out.nread=0;
2511 r->out.flags=0;
2512 } else {
2513 ssize_t size=io->readx.out.nread;
2514 r->out.flags=0;
2515 r->out.nread=io->readx.out.nread;
2517 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
2518 declare_checksum(digest);
2519 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
2521 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
2522 dump_data (5, digest, sizeof(digest));
2523 DEBUG(5,("Cached digest\n"));
2524 dump_data (5, r->in.digest.digest, sizeof(digest));
2526 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
2527 r->out.flags=PROXY_USE_CACHE;
2528 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
2529 (long long)r->out.nread));
2530 if (r->in.flags & PROXY_VALIDATE) {
2531 r->out.flags |= PROXY_VALIDATE;
2532 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
2533 (long long)r->out.nread, (long long) io->readx.out.nread));
2535 goto done;
2537 DEBUG(5,("Cache does not match\n"));
2540 if (r->in.flags & PROXY_VALIDATE) {
2541 /* validate failed, shrink read to mincnt - so we don't fill the link */
2542 r->out.nread=MIN(r->out.nread, r->in.mincnt);
2543 size=r->out.nread;
2544 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
2545 r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
2548 if (r->in.flags & PROXY_USE_ZLIB) {
2549 if (compress_block(io->readx.out.data, &size) ) {
2550 r->out.flags|=PROXY_USE_ZLIB;
2551 r->out.response.compress.count=size;
2552 r->out.response.compress.data=io->readx.out.data;
2553 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2554 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
2555 goto done;
2559 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
2560 r->out.response.generic.count=io->readx.out.nread;
2561 r->out.response.generic.data=io->readx.out.data;
2564 done:
2566 /* Or should we return NT_STATUS_OK ?*/
2567 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
2569 /* the rpc transport succeeded even if the operation did not */
2570 return NT_STATUS_OK;
2574 * RPC implementation of Read
2576 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
2577 struct ntvfs_request *req, struct proxy_Read *r)
2579 struct proxy_private *private = ntvfs->private_data;
2580 union smb_read* io=talloc(req, union smb_read);
2581 NTSTATUS status;
2582 struct proxy_file *f;
2583 struct ntvfs_handle *h;
2585 NT_STATUS_HAVE_NO_MEMORY(io);
2587 /* if the next hop is a proxy just repeat this call; also handle the VALIDATE
2588 check, which means having our own callback handlers too... */
2589 SETUP_PID;
2591 RPCLITE_SETUP_FILE_HERE(f, h);
2593 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
2594 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
2595 DEBUG(5,("Anticipated digest\n"));
2596 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
2598 /* If the remote end is a proxy, just fixup the file handle and passthrough,
2599 but update cache on the way back
2600 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2603 /* prepare for response */
2604 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
2605 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
2607 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
2608 return proxy_validate(ntvfs, req, r, f);
2611 /* pack up an smb_read request and dispatch here */
2612 io->readx.level=RAW_READ_READX;
2613 io->readx.in.file.ntvfs=h;
2614 io->readx.in.mincnt=r->in.mincnt;
2615 io->readx.in.maxcnt=r->in.maxcnt;
2616 io->readx.in.offset=r->in.offset;
2617 io->readx.in.remaining=r->in.remaining;
2618 /* and something to hold the answer */
2619 io->readx.out.data=r->out.response.generic.data;
2621 /* so we get to pack the io->*.out response */
2622 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
2623 NT_STATUS_NOT_OK_RETURN(status);
2625 /* so the read will get processed normally */
2626 return proxy_read(ntvfs, req, io);
2630 * A handler for sending async rpclite Write replies
2632 static NTSTATUS rpclite_proxy_Write_map_async_send(
2633 struct ntvfs_module_context *ntvfs,
2634 struct ntvfs_request *req,
2635 void *io1, void *io2, NTSTATUS status)
2637 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
2638 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
2640 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2641 r->out.result=status;
2643 r->out.nwritten=io->writex.out.nwritten;
2644 r->out.remaining=io->writex.out.remaining;
2646 /* the rpc transport succeeded even if the operation did not */
2647 return NT_STATUS_OK;
2651 * RPC implementation of write
2653 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
2654 struct ntvfs_request *req, struct proxy_Write *r)
2656 struct proxy_private *private = ntvfs->private_data;
2657 union smb_write* io=talloc(req, union smb_write);
2658 NTSTATUS status;
2659 struct proxy_file* f;
2660 struct ntvfs_handle *h;
2662 SETUP_PID;
2664 RPCLITE_SETUP_FILE_HERE(f,h);
2666 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
2667 r->in.count, r->in.offset, r->in.fnum));
2669 /* pack up an smb_write request and dispatch here */
2670 io->writex.level=RAW_WRITE_WRITEX;
2671 io->writex.in.file.ntvfs=h;
2672 io->writex.in.offset=r->in.offset;
2673 io->writex.in.wmode=r->in.mode;
2674 io->writex.in.count=r->in.count;
2676 /* and the data */
2677 if (PROXY_USE_ZLIB & r->in.flags) {
2678 ssize_t count=r->in.data.generic.count;
2679 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
2680 &count, r->in.count);
2681 if (count != r->in.count || !io->writex.in.data) {
2682 /* Didn't uncompress properly, but the RPC layer worked */
2683 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
2684 return NT_STATUS_OK;
2686 } else {
2687 io->writex.in.data=r->in.data.generic.data;
2690 /* so we get to pack the io->*.out response */
2691 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
2692 NT_STATUS_NOT_OK_RETURN(status);
2694 /* so the write will get processed normally */
2695 return proxy_write(ntvfs, req, io);
2698 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
2699 back from rpc struct to ntioctl */
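/* The rpclite request blob, as unpicked below, is assumed to be laid out:
       [ndr_syntax_id]  identifies the interface table
       [uint32 opnum]   selects table->calls[opnum]
       [NDR_IN body]    the pushed call arguments
   mirroring what smbcli_ndr_request_ntioctl_send() pushes on the way out. */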
2700 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
2701 struct ntvfs_request *req, union smb_ioctl *io)
2703 struct proxy_private *private = ntvfs->private_data;
2704 DATA_BLOB *request;
2705 struct ndr_syntax_id* syntax_id;
2706 uint32_t opnum;
2707 const struct ndr_interface_table *table;
2708 struct ndr_pull* pull;
2709 void* r;
2710 NTSTATUS status;
2711 struct async_rpclite_send *rpclite_send;
2712 enum ndr_err_code ndr_err;
2714 SETUP_PID;
2716 /* We don't care about io->generic.in.file, ntvfs layer already proved it was valid,
2717 our operations will have the fnum embedded in them anyway */
2718 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,io->ntioctl.in.blob.length));
2719 /* unpack the NDR */
2720 request=&io->ntioctl.in.blob;
2722 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2723 NT_STATUS_HAVE_NO_MEMORY(pull);
2724 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2725 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
2727 /* the blob is 4-aligned because it was memcpy'd */
2728 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
2729 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
2731 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
2732 status=ndr_map_error2ntstatus(ndr_err);
2733 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2734 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
2735 return status;
2738 /* now find the struct ndr_interface_table * for this syntax_id */
2739 table=ndr_table_by_uuid(&syntax_id->uuid);
2740 if (! table) {
2741 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
2742 return NT_STATUS_NO_GUID_TRANSLATION;
2745 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
2746 status=ndr_map_error2ntstatus(ndr_err);
2747 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2748 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
2749 return status;
2751 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
2753 DEBUG(10,("rpc request data:\n"));
2754 dump_data(10, pull->data, pull->data_size);
2756 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
2757 table->calls[opnum].name);
2758 NT_STATUS_HAVE_NO_MEMORY(r);
2760 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
2761 status=ndr_map_error2ntstatus(ndr_err);
2762 DEBUG(5,("%s opnum %d pulled status %s\n",__FUNCTION__,opnum,get_friendly_nt_error_msg (status)));
2763 NT_STATUS_NOT_OK_RETURN(status);
2765 rpclite_send=talloc(req, struct async_rpclite_send);
2766 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
2767 rpclite_send->call=&table->calls[opnum];
2768 rpclite_send->struct_ptr=r;
2769 /* need to push conversion function to convert from r to io */
2770 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
2772 /* Magically despatch the call based on syntax_id, table and opnum.
2773 But there is no table of handlers.... so until then*/
2774 if (0==strcasecmp(table->name,"rpcproxy")) {
2775 switch(opnum) {
2776 case(NDR_PROXY_READ):
2777 status=rpclite_proxy_Read(ntvfs, req, r);
2778 break;
2779 case(NDR_PROXY_WRITE):
2780 status=rpclite_proxy_Write(ntvfs, req, r);
2781 break;
2782 default:
2783 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
2784 return NT_STATUS_PROCEDURE_NOT_FOUND;
2786 } else {
2787 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
2788 GUID_string(debug_ctx(),&syntax_id->uuid)));
2789 return NT_STATUS_NO_GUID_TRANSLATION;
2792 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
2793 the handler status is in r->out.result */
2794 return ntvfs_map_async_finish(req, status);
2797 /* unpack the ntioctl to make some rpc_struct */
2798 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2800 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2801 struct proxy_private *proxy=async->proxy;
2802 struct smbcli_request *c_req = async->c_req;
2803 void* r=io1;
2804 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
2805 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
2806 const struct ndr_interface_call *calls=info->calls;
2807 enum ndr_err_code ndr_err;
2808 DATA_BLOB *response;
2809 struct ndr_pull* pull;
2811 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
2812 DEBUG(5,("%s op %s ntioctl: %s\n",
2813 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2814 NT_STATUS_NOT_OK_RETURN(status);
2816 if (c_req) {
2817 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
2818 status = smb_raw_ioctl_recv(c_req, io, io);
2819 #define SESSION_INFO proxy->remote_server, proxy->remote_share
2820 /* This status is the ntioctl wrapper status */
2821 if (! NT_STATUS_IS_OK(status)) {
2822 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
2823 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2824 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
2825 return NT_STATUS_UNSUCCESSFUL;
2829 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
2831 response=&io->ntioctl.out.blob;
2832 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2833 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2835 NT_STATUS_HAVE_NO_MEMORY(pull);
2837 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
2838 #warning can we free pull here?
2839 status=ndr_map_error2ntstatus(ndr_err);
2841 DEBUG(5,("END %s op status %s\n",
2842 __FUNCTION__, get_friendly_nt_error_msg(status)));
2843 return status;
2847 send an ntioctl request based on a NDR encoding.
2849 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
2850 struct smbcli_tree *tree,
2851 struct ntvfs_module_context *ntvfs,
2852 const struct ndr_interface_table *table,
2853 uint32_t opnum,
2854 void *r)
2856 struct proxy_private *private = ntvfs->private_data;
2857 struct smbcli_request * c_req;
2858 struct ndr_push *push;
2859 NTSTATUS status;
2860 DATA_BLOB request;
2861 enum ndr_err_code ndr_err;
2862 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
2865 /* setup for an ndr_push_* call; we can't free push until the message
2866 actually hits the wire */
2867 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2868 if (!push) return NULL;
2870 /* first push interface table identifiers */
2871 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
2872 status=ndr_map_error2ntstatus(ndr_err);
2874 if (! NT_STATUS_IS_OK(status)) return NULL;
2876 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
2877 status=ndr_map_error2ntstatus(ndr_err);
2878 if (! NT_STATUS_IS_OK(status)) return NULL;
2880 if (0) {
2881 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2884 /* push the structure into a blob */
2885 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
2886 status=ndr_map_error2ntstatus(ndr_err);
2887 if (!NT_STATUS_IS_OK(status)) {
2888 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
2889 nt_errstr(status)));
2890 return NULL;
2893 /* retrieve the blob */
2894 request = ndr_push_blob(push);
2896 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
2897 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
2898 io->ntioctl.in.file.fnum=private->nttrans_fnum;
2899 io->ntioctl.in.fsctl=false;
2900 io->ntioctl.in.filter=0;
2901 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
2902 io->ntioctl.in.blob=request;
2904 DEBUG(10,("smbcli_request packet:\n"));
2905 dump_data(10, request.data, request.length);
2907 c_req = smb_raw_ioctl_send(tree, io);
2909 if (! c_req) {
2910 return NULL;
2913 dump_data(10, c_req->out.data, c_req->out.data_size);
2915 { void* req=NULL;
2916 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
2917 info->io=io;
2918 info->table=table;
2919 info->opnum=opnum;
2920 info->calls=&table->calls[opnum];
2921 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
2924 return c_req;
2928 client helpers, mapping between proxy RPC calls and smbcli_* calls.
2932 * If the sync_chain_handler is called directly it unplugs the async handler
2933 which (as well as preventing loops) will also avoid req->send_fn being
2934 called - which is also nice! */
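/* A minimal usage sketch (not a call-site from this file): a caller that
   must block can drive the whole chain itself,

       struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
       NTSTATUS status = sync_chain_handler(c_req);

   as proxy_smb_raw_write() does below; on the async path the transport
   fires async_chain_handler() instead, which runs the same chain and then
   calls req->async_states->send_fn(). */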
2935 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
2937 struct async_info *async=NULL;
2938 /* the first callback which will actually receive the c_req response */
2939 struct async_info_map *async_map;
2940 NTSTATUS status=NT_STATUS_OK;
2941 struct async_info_map** chain;
2943 DEBUG(5,("%s\n",__FUNCTION__));
2944 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
2946 /* If there is a handler installed, it is using async_info to chain */
2947 if (c_req->async.fn) {
2948 /* not safe to talloc_free async if send_fn has been called for the request
2949 against which async was allocated, so steal it (and free below) or neither */
2950 async = talloc_get_type_abort(c_req->async.private, struct async_info);
2951 talloc_steal(NULL, async);
2952 chain=&async->chain;
2953 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2954 } else {
2955 chain=(struct async_info_map**)&c_req->async.private;
2956 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2959 /* unplug c_req->async.fn because if a callback handler calls smb_*_recv
2960 in order to receive the response, smbcli_transport_finish_recv will
2961 call us again and then call the c_req->async.fn.
2962 Perhaps we should merely call smbcli_request_receive() IF
2963 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
2964 help multi-part replies... except all parts are received before the
2965 callback if a handler WAS set */
2966 c_req->async.fn=NULL;
2968 /* Should we raise an error? Should we simple_recv? */
2969 while(async_map) {
2970 /* remove this one from the list before we call. We do this in case
2971 some callbacks free their async_map but also so that callbacks
2972 can navigate the async_map chain to add additional callbacks to
2973 the end - e.g. so that tag-along reads can call send_fn after
2974 the send_fn of the request they tagged along to, thus preserving
2975 the async response order - which may be a waste of time? */
2976 DLIST_REMOVE(*chain, async_map);
2978 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
2979 if (async_map->fn) {
2980 status=async_map->fn(async_map->async,
2981 async_map->parms1, async_map->parms2, status);
2983 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
2984 /* Note: the callback may have added to the chain */
2985 #warning Async_maps have a null talloc_context, it is unclear who should own them
2986 /* it can't be c_req as it stops us chaining more than one, maybe it
2987 should be req but there isn't always a req. However sync_chain_handler
2988 will always free it if called */
2989 DEBUG(6,("Will free async map %p\n",async_map));
2990 #warning put me back
2991 talloc_free(async_map);
2992 DEBUG(6,("Free'd async_map\n"));
2993 if (*chain)
2994 async_map=talloc_get_type_abort(*chain, struct async_info_map);
2995 else
2996 async_map=NULL;
2997 DEBUG(6,("Switch to async_map %p\n",async_map));
2999 /* The first callback will have read c_req, thus talloc_free'ing it,
3000 so we don't let the other callbacks get hurt playing with it */
3001 if (async_map && async_map->async)
3002 async_map->async->c_req=NULL;
3005 talloc_free(async);
3007 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
3008 return status;
3011 /* If the async handler is called, then the send_fn is called */
3012 static void async_chain_handler(struct smbcli_request *c_req)
3014 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
3015 struct ntvfs_request *req = async->req;
3016 NTSTATUS status;
3018 if (c_req->state <= SMBCLI_REQUEST_RECV) {
3019 /* Looks like the async handler has been called sync'ly */
3020 smb_panic("async_chain_handler called sync'ly");
3023 status=sync_chain_handler(c_req);
3025 /* Should we insist that a chain'd handler does this?
3026 Which makes it hard to intercept the data by adding handlers
3027 before the send_fn handler sends it... */
3028 if (req) {
3029 req->async_states->status=status;
3030 req->async_states->send_fn(req);
3034 /* unpack the rpc struct to make some smb_write */
3035 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
3036 void* io1, void* io2, NTSTATUS status)
3038 union smb_write* io =talloc_get_type(io1, union smb_write);
3039 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
3041 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
3042 get_friendly_nt_error_msg (status)));
3043 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
3044 NT_STATUS_NOT_OK_RETURN(status);
3046 status=r->out.result;
3047 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
3048 NT_STATUS_NOT_OK_RETURN(status);
3050 io->generic.out.remaining = r->out.remaining;
3051 io->generic.out.nwritten = r->out.nwritten;
3053 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
3054 get_friendly_nt_error_msg (status)));
3055 return status;
3058 /* upgrade from smb to NDR and then send.
3059 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
3060 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
3061 union smb_write *io,
3062 struct proxy_file *f)
3064 struct proxy_private *private = ntvfs->private_data;
3065 struct smbcli_tree *tree=private->tree;
3067 if (PROXY_REMOTE_SERVER(private)) {
3068 struct smbcli_request *c_req;
3069 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
3070 ssize_t size;
3072 if (! r) return NULL;
3074 size=io->generic.in.count;
3075 /* upgrade the write */
3076 r->in.fnum = io->generic.in.file.fnum;
3077 r->in.offset = io->generic.in.offset;
3078 r->in.count = io->generic.in.count;
3079 r->in.mode = io->generic.in.wmode;
3080 // r->in.remaining = io->generic.in.remaining;
3081 #warning remove this
3082 /* prepare to lie */
3083 r->out.nwritten=r->in.count;
3084 r->out.remaining=0;
3086 /* try to compress */
3087 #warning compress!
3088 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
3089 if (r->in.data.compress.data) {
3090 r->in.data.compress.count=size;
3091 r->in.flags = PROXY_USE_ZLIB;
3092 } else {
3093 r->in.flags = 0;
3094 /* we'll honour const, honest gov */
3095 r->in.data.generic.data=discard_const(io->generic.in.data);
3096 r->in.data.generic.count=io->generic.in.count;
3099 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
3100 ntvfs,
3101 &ndr_table_rpcproxy,
3102 NDR_PROXY_WRITE, r);
3103 if (! c_req) return NULL;
3105 /* yeah, filthy abuse of f */
3106 { void* req=NULL;
3107 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
3110 return c_req;
3111 } else {
3112 return smb_raw_write_send(tree, io);
3116 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
3117 union smb_write *io,
3118 struct proxy_file *f)
3120 struct proxy_private *proxy = ntvfs->private_data;
3121 struct smbcli_tree *tree=proxy->tree;
3123 if (PROXY_REMOTE_SERVER(proxy)) {
3124 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3125 return sync_chain_handler(c_req);
3126 } else {
3127 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
3128 return smb_raw_write_recv(c_req, io);
3132 /* unpack the rpc struct to make some smb_read response */
3133 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
3134 void* io1, void* io2, NTSTATUS status)
3136 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
3137 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
3139 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
3140 get_friendly_nt_error_msg(status)));
3141 NT_STATUS_NOT_OK_RETURN(status);
3143 status=r->out.result;
3144 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
3145 get_friendly_nt_error_msg(status)));
3146 NT_STATUS_NOT_OK_RETURN(status);
3148 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
3149 io->generic.out.compaction_mode = 0;
3151 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
3152 /* Use the io we already set up!
3153 if out.flags & PROXY_VALIDATE, we may need to validate more in
3154 cache than r->out.nread would suggest, see io->generic.out.nread */
3155 if (r->out.flags & PROXY_VALIDATE)
3156 io->generic.out.nread=io->generic.in.maxcnt;
3157 DEBUG(5,("Using cached data: size=%lld\n",
3158 (long long) io->generic.out.nread));
3159 return status;
3162 if (r->in.flags & PROXY_VALIDATE) {
3163 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
3164 /* turn off validate on this file */
3165 //cache_handle_novalidate(f);
3166 #warning turn off validate on this file - do an nread<maxcnt later
3169 if (r->in.flags & PROXY_USE_CACHE) {
3170 DEBUG(5,("Cached data did not match\n"));
3173 io->generic.out.nread = r->out.nread;
3175 /* we may need to uncompress */
3176 if (r->out.flags & PROXY_USE_ZLIB) {
3177 ssize_t size=r->out.response.compress.count;
3178 if (! uncompress_block_to(io->generic.out.data,
3179 r->out.response.compress.data, &size,
3180 io->generic.in.maxcnt) ||
3181 size != r->out.nread) {
3182 io->generic.out.nread=size;
3183 status=NT_STATUS_INVALID_USER_BUFFER;
3185 } else if (io->generic.out.data != r->out.response.generic.data) {
3186 //Assert(r->out.nread == r->out.generic.out.count);
3187 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
3190 return status;

/* Warning: Assumes that if io->generic.out.nread is not zero, then some
   data has been pre-read into io->generic.out.data and can be used for
   proxy<->proxy optimized reads */
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
                                               union smb_read *io,
                                               struct proxy_file *f,
                                               struct proxy_Read *r)
{
        struct proxy_private *private = ntvfs->private_data;
#warning we are using out.nread as an out-of-band parameter
        if (PROXY_REMOTE_SERVER(private)) {

                struct smbcli_request *c_req;
                if (! r) {
                        r = talloc_zero(io, struct proxy_Read);
                }
                if (! r) return NULL;

                r->in.fnum = io->generic.in.file.fnum;
                r->in.read_for_execute = io->generic.in.read_for_execute;
                r->in.offset = io->generic.in.offset;
                r->in.mincnt = io->generic.in.mincnt;
                r->in.maxcnt = io->generic.in.maxcnt;
                r->in.remaining = io->generic.in.remaining;
                r->in.flags |= PROXY_USE_ZLIB;
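
                /* Note (added): requesting PROXY_USE_ZLIB here invites the
                   server to compress its reply; whether it actually did is
                   reported back in r->out.flags and handled in
                   async_proxy_smb_raw_read_rpc() above. */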
                if (! (r->in.flags & PROXY_VALIDATE) &&
                    io->generic.out.data && io->generic.out.nread > 0) {
                        /* maybe we should limit digest size to MIN(nread, maxcnt) to
                           permit the caller to provide a larger nread as part of
                           a split read */
                        checksum_block(r->in.digest.digest, io->generic.out.data,
                                       io->generic.out.nread);

                        if (io->generic.out.nread > r->in.maxcnt) {
                                DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
                        } else {
                                r->in.mincnt = io->generic.out.nread;
                                r->in.maxcnt = io->generic.out.nread;
                                r->in.flags |= PROXY_USE_CACHE;
                                /* PROXY_VALIDATE will have been set by caller */
                        }
                }

                if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
                        DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
                        dump_data(3, r->in.digest.digest, sizeof(r->in.digest.digest));
                }

                c_req = smbcli_ndr_request_ntioctl_send(private->tree,
                                                        ntvfs,
                                                        &ndr_table_rpcproxy,
                                                        NDR_PROXY_READ, r);
                if (! c_req) return NULL;

                { void* req = NULL;
                        ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
                }

                return c_req;
        } else {
                return smb_raw_read_send(private->tree, io);
        }
}
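
/* Example (added sketch; cached_buf/cached_len are hypothetical): a caller of
   proxy_smb_raw_read_send() that already holds bytes for this range hands
   them over via the out-of-band out fields, and only a digest of them then
   crosses the wire so the server can confirm the cache instead of resending
   the data:

        io->generic.out.data  = cached_buf;
        io->generic.out.nread = cached_len;
        c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);

   If the reply carries PROXY_USE_CACHE, the unpacker above leaves
   io->generic.out.data untouched and the cached bytes stand. */
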
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
                            union smb_read *io,
                            struct proxy_file *f)
{
        struct proxy_private *proxy = ntvfs->private_data;
        struct smbcli_tree *tree = proxy->tree;

        if (PROXY_REMOTE_SERVER(proxy)) {
                struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
                return sync_chain_handler(c_req);
        } else {
                struct smbcli_request *c_req = smb_raw_read_send(tree, io);
                return smb_raw_read_recv(c_req, io);
        }
}

/*
  initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
 */
NTSTATUS ntvfs_proxy_init(void)
{
        NTSTATUS ret;
        struct ntvfs_ops ops;
        NTVFS_CURRENT_CRITICAL_SIZES(vers);

        ZERO_STRUCT(ops);

        /* fill in the name and type */
        ops.name = "proxy";
        ops.type = NTVFS_DISK;

        /* fill in all the operations */
        ops.connect = proxy_connect;
        ops.disconnect = proxy_disconnect;
        ops.unlink = proxy_unlink;
        ops.chkpath = proxy_chkpath;
        ops.qpathinfo = proxy_qpathinfo;
        ops.setpathinfo = proxy_setpathinfo;
        ops.open = proxy_open;
        ops.mkdir = proxy_mkdir;
        ops.rmdir = proxy_rmdir;
        ops.rename = proxy_rename;
        ops.copy = proxy_copy;
        ops.ioctl = proxy_ioctl;
        ops.read = proxy_read;
        ops.write = proxy_write;
        ops.seek = proxy_seek;
        ops.flush = proxy_flush;
        ops.close = proxy_close;
        ops.exit = proxy_exit;
        ops.lock = proxy_lock;
        ops.setfileinfo = proxy_setfileinfo;
        ops.qfileinfo = proxy_qfileinfo;
        ops.fsinfo = proxy_fsinfo;
        ops.lpq = proxy_lpq;
        ops.search_first = proxy_search_first;
        ops.search_next = proxy_search_next;
        ops.search_close = proxy_search_close;
        ops.trans = proxy_trans;
        ops.logoff = proxy_logoff;
        ops.async_setup = proxy_async_setup;
        ops.cancel = proxy_cancel;
        ops.notify = proxy_notify;
        ops.trans2 = proxy_trans2;

        /* register ourselves with the NTVFS subsystem. We register
           under the name 'proxy'. */
        ret = ntvfs_register(&ops, &vers);

        if (!NT_STATUS_IS_OK(ret)) {
                DEBUG(0,("Failed to register PROXY backend!\n"));
        }

        return ret;
}
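
/* Usage (added sketch; the parameter names below are assumptions, not taken
   from this file): once registered, the backend is selected per share in
   smb.conf via the ntvfs handler option, with the remote server and share
   supplied as module parameters, along the lines of:

        [proxied]
                ntvfs handler = proxy
                proxy:server  = fileserver.example.com
                proxy:share   = data

   See proxy_connect() earlier in this file for the options it actually
   parses. */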