/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.
*/

#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx

#define PROXY_NTIOCTL_MAXDATA 0x20000

#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"

struct proxy_file {
	struct proxy_file *prev, *next;
	uint16_t fnum;
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	int readahead_pending;
};

/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead; /* default read-ahead window size */
	int cache_readaheadblock; /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	int readahead_spare; /* amount of pending non-user generated requests */
	bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
};

struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev;
	struct proxy_private *proxy;
	struct ntvfs_request *req;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	struct async_info_map *chain;
	void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
	struct async_info_map *next, *prev;
	NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
	void *parms1;
	void *parms2;
	struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};

#define SETUP_PID private->tree->session->pid = req->smbpid

#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)

#define PROXY_SERVER		"proxy:server"
#define PROXY_USER		"proxy:user"
#define PROXY_PASSWORD		"proxy:password"
#define PROXY_DOMAIN		"proxy:domain"
#define PROXY_SHARE		"proxy:share"
#define PROXY_USE_MACHINE_ACCT	"proxy:use-machine-account"
#define PROXY_MAP_GENERIC	"proxy:map-generic"
#define PROXY_MAP_TRANS2	"proxy:map-trans2"

#define PROXY_CACHE_ENABLED	"proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD	"proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in kbytes; scaled by 1024 at connect time */

#define PROXY_FAKE_OPLOCK	"proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT	"proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT	false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT	true
#define PROXY_MAP_TRANS2_DEFAULT	true
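
/* An illustrative (hypothetical) share definition exercising the options
   above; the option names come from the defines, but the values and the
   surrounding smb.conf syntax are assumptions, not taken from this file:

	[proxied]
		ntvfs handler = proxy
		proxy:server = fileserver1
		proxy:share = data
		proxy:cache-enabled = yes
		proxy:cache-readahead = 32768
		proxy:cache-readaheadblock = 4096
*/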

/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	 && (strcmp("A:",private->tree->device)==0))

/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
	struct smbcli_tree *tree,
	struct ntvfs_module_context *ntvfs,
	uint16_t fnum, const struct ndr_interface_table *table,
	uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
	union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
	union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
	union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
	union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);

/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
	struct proxy_private *private = p_private;
	NTSTATUS status;
	struct ntvfs_handle *h = NULL;
	struct proxy_file *f;

	for (f=private->files; f; f=f->next) {
		if (f->fnum != fnum) continue;
		h = f->h;
		break;
	}

	if (!h) {
		DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
		return true;
	}

	/* If we don't have an oplock, then we can't rely on the cache */
	cache_handle_stale(f);

	DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
	status = ntvfs_send_oplock_break(private->ntvfs, h, level);
	if (!NT_STATUS_IS_OK(status)) return false;
	return true;
}

/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, const char *sharename)
{
	NTSTATUS status;
	struct proxy_private *private;
	const char *host, *user, *pass, *domain, *remote_share;
	struct smb_composite_connect io;
	struct composite_context *creq;
	struct share_config *scfg = ntvfs->ctx->config;

	struct cli_credentials *credentials;
	bool machine_account;

	/* Here we need to determine which server to connect to.
	 * For now we use parametric options, type proxy.
	 * Later we will use security=server and auth_server.c.
	 */
	host = share_string_option(scfg, PROXY_SERVER, NULL);
	user = share_string_option(scfg, PROXY_USER, NULL);
	pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
	domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
	remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
	if (!remote_share) {
		remote_share = sharename;
	}

	machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

	private = talloc_zero(ntvfs, struct proxy_private);
	if (!private) {
		return NT_STATUS_NO_MEMORY;
	}

	ntvfs->private_data = private;

	if (!host) {
		DEBUG(1,("PROXY backend: You must supply server\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (user && pass) {
		DEBUG(5, ("PROXY backend: Using specified password\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		DEBUG(5, ("PROXY backend: Using machine account\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (req->session_info->credentials) {
		DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
		credentials = req->session_info->credentials;
	} else {
		DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* connect to the server, using the smbd event context */
	io.in.dest_host = host;
	io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
	io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
	io.in.called_name = host;
	io.in.credentials = credentials;
	io.in.fallback_to_anonymous = false;
	io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
	io.in.service = remote_share;
	io.in.service_type = "?????";
	io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
	io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
	lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
	lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

	creq = smb_composite_connect_send(&io, private,
					  lp_resolve_context(ntvfs->ctx->lp_ctx),
					  ntvfs->ctx->event_ctx);
	status = smb_composite_connect_recv(creq, private);
	NT_STATUS_NOT_OK_RETURN(status);

	private->tree = io.out.tree;

	private->transport = private->tree->session->transport;
	SETUP_PID;
	private->ntvfs = ntvfs;

	ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
	ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

	/* we need to receive oplock break requests from the server */
	smbcli_oplock_handler(private->transport, oplock_handler, private);

	private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

	private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

	private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

	if (strcmp("A:",private->tree->device)==0) {
		private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
		private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
		private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
								 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
		private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
		private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
		private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
		DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
			 remote_share, private->tree->device, private->tree->fs_type,
			 (private->cache_enabled)?"enabled":"disabled",
			 private->cache_readahead));
	} else {
		private->cache_enabled = false;
		DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
			 remote_share, private->tree->device, private->tree->fs_type));
	}

	private->remote_server = strlower_talloc(private, host);
	private->remote_share = strlower_talloc(private, remote_share);

	return NT_STATUS_OK;
}

/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct async_info *a, *an;

	/* first cleanup pending requests */
	for (a=private->pending; a; a = an) {
		an = a->next;
		smbcli_request_destroy(a->c_req);
		talloc_free(a);
	}

	talloc_free(private);
	ntvfs->private_data = NULL;

	return NT_STATUS_OK;
}

/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
	DLIST_REMOVE(async->proxy->pending, async);
	return 0;
}

/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smbcli_request_simple_recv(c_req);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)
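
/* For example, TYPE_CHECK(void (*t)(struct smbcli_request *), async_fn)
   expands to:

	void (*t)(struct smbcli_request *)=async_fn; t=t;

   so passing an async_fn with the wrong signature fails at compile time,
   while the declaration itself generates no code. */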

/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return (error); \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		async->chain = achain; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
} while (0)

#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
	if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return NT_STATUS_NO_MEMORY; \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		async->chain = NULL; /* plain talloc doesn't zero; don't leave this dangling */ \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)

/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   If async->c_req is NULL then an earlier handler in the chain has already
   received the request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record:
     static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed,
   and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *);
   chained async_info_map will be in c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)
491 DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
492 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL,\
493 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
494 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
495 file, file?"file":"null", file?"file":"null", #async_fn)); \

#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
	if (! creq) return (error); \
	{ \
		struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
		if (! async_map) return (error); \
		async_map->async=talloc(async_map, struct async_info); \
		if (! async_map->async) return (error); \
		async_map->parms1=io1; \
		async_map->parms2=io2; \
		async_map->fn=async_fn; \
		async_map->async->parms = io1; \
		async_map->async->req = req; \
		async_map->async->f = file; \
		async_map->async->proxy = private; \
		async_map->async->c_req = creq; \
		/* If async_chain_handler is installed, get the list from param */ \
		if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
			struct async_info *i=creq->async.private; \
			DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
		} else if (creq->async.fn) { \
			/* incompatible handler installed */ \
			return (error); \
		} else { \
			DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
		} \
	} \
} while(0)
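
/* Sketch of the intended chaining pattern (it mirrors the real use in
   read_ahead() below); nothing here is new API, only the order of calls:

	c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
	ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
	ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
	ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

   the ADD_ASYNC_RECV_TAIL calls queue chained receivers, and the final
   ASYNC_RECV_TAIL_HANDLER_ORPHAN installs the chain manager as the actual
   smbcli callback. */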

/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	(io->generic.level == RAW_OPEN_NTCREATEX && \
	 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) \
	?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window)) \
	:(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
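
/* Typical call, as made from the open paths below: opens by file-id map to
   cache_fileid_open(), everything else to cache_filename_open():

	f->cache = cache_open(private->cache, f, io, oplock, private->cache_readahead);
*/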

/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}

	c_req = smb_raw_unlink_send(private->tree, unl);

	SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
	    && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
		return proxy_rpclite(ntvfs, req, io);
	}

	SETUP_PID_AND_FILE;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_ioctl(private->tree, req, io);
	}

	c_req = smb_raw_ioctl_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_ioctl);
}

/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_chkpath *cp)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_chkpath(private->tree, cp);
	}

	c_req = smb_raw_chkpath_send(private->tree, cp);

	SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async qpathinfo replies
*/
static void async_qpathinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  return info on a pathname
*/
static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *info)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_pathinfo(private->tree, req, info);
	}

	c_req = smb_raw_pathinfo_send(private->tree, info);

	ASYNC_RECV_TAIL(info, async_qpathinfo);
}

/*
  a handler for async qfileinfo replies
*/
static void async_qfileinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  query info on an open file
*/
static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_fileinfo *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID_AND_FILE;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_fileinfo(private->tree, req, io);
	}

	c_req = smb_raw_fileinfo_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_qfileinfo);
}

/*
  set info on a pathname
*/
static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req, union smb_setfileinfo *st)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_setpathinfo(private->tree, st);
	}

	c_req = smb_raw_setpathinfo_send(private->tree, st);

	SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async open replies
*/
static void async_open(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct proxy_private *proxy = async->proxy;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	union smb_open *io = async->parms;
	union smb_handle *file;

	talloc_free(async);
	req->async_states->status = smb_raw_open_recv(c_req, req, io);
	SMB_OPEN_OUT_FILE(io, file);
	f->fnum = file->fnum;
	file->ntvfs = NULL;
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	file->ntvfs = f->h;
	DLIST_ADD(proxy->files, f);

	if (proxy->cache_enabled) {
		bool oplock=(io->generic.out.oplock_level != OPLOCK_NONE) || proxy->fake_oplock;
		f->cache=cache_open(proxy->cache, f, io, oplock, proxy->cache_readahead);
		if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
	}

failed:
	req->async_states->send_fn(req);
}

/*
  open a file
*/
static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, union smb_open *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	struct ntvfs_handle *h;
	struct proxy_file *f;
	NTSTATUS status;

	SETUP_PID;

	if (io->generic.level != RAW_OPEN_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_open(ntvfs, req, io);
	}

	status = ntvfs_handle_new(ntvfs, req, &h);
	NT_STATUS_NOT_OK_RETURN(status);

	f = talloc_zero(h, struct proxy_file);
	NT_STATUS_HAVE_NO_MEMORY(f);
	f->h = h;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		union smb_handle *file;

		status = smb_raw_open(private->tree, req, io);
		NT_STATUS_NOT_OK_RETURN(status);

		SMB_OPEN_OUT_FILE(io, file);
		f->fnum = file->fnum;
		file->ntvfs = NULL;
		status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
		NT_STATUS_NOT_OK_RETURN(status);
		file->ntvfs = f->h;
		DLIST_ADD(private->files, f);

		if (private->cache_enabled) {
			bool oplock=(io->generic.out.oplock_level != OPLOCK_NONE) || private->fake_oplock;

			f->cache=cache_open(private->cache, f, io, oplock, private->cache_readahead);
			if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
		}

		return NT_STATUS_OK;
	}

	c_req = smb_raw_open_send(private->tree, io);

	ASYNC_RECV_TAIL_F(io, async_open, f);
}

/*
  create a directory
*/
static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_mkdir *md)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_mkdir(private->tree, md);
	}

	c_req = smb_raw_mkdir_send(private->tree, md);

	SIMPLE_ASYNC_TAIL;
}

/*
  remove a directory
*/
static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, struct smb_rmdir *rd)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rmdir(private->tree, rd);
	}
	c_req = smb_raw_rmdir_send(private->tree, rd);

	SIMPLE_ASYNC_TAIL;
}

/*
  rename a set of files
*/
static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_rename *ren)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_rename(private->tree, ren);
	}

	c_req = smb_raw_rename_send(private->tree, ren);

	SIMPLE_ASYNC_TAIL;
}

/*
  copy a set of files
*/
static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, struct smb_copy *cp)
{
	return NT_STATUS_NOT_SUPPORTED;
}

/* we only define this separately so we can easily spot read calls in
   pending based on ( c_req->async.fn == async_read_handler ) */
static void async_read_handler(struct smbcli_request *c_req)
{
	async_chain_handler(c_req);
}

NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct proxy_private *private = async->proxy;
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	f->readahead_pending--;
	private->readahead_spare++;

	DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
		 f->readahead_pending, private->readahead_spare));

	return status;
}

/*
  a handler for async read replies - speculative read-aheads.
  It merely saves in the cache. The async chain handler will call send_fn if
  there is one, or if sync_chain_handler is used the send_fn is called by
  the ntvfs back end.
*/
NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	NT_STATUS_NOT_OK_RETURN(status);

	/* if it was a validate read we don't need to save anything unless it
	   failed. Until we use proxy_Read structs we can't tell, so guess */
	if (io->generic.out.nread == io->generic.in.maxcnt &&
	    io->generic.in.mincnt < io->generic.in.maxcnt) {
		/* looks like a validate read, just move the validate pointer, the
		   original read-request has already been satisfied from cache */
		DEBUG(3,("%s megavalidate succeeded, validate to %lld\n",__FUNCTION__,
			 io->generic.in.offset + io->generic.out.nread));
		cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
	} else cache_handle_save(f, io->generic.out.data,
				 io->generic.out.nread,
				 io->generic.in.offset);

	DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}

/* handler for fragmented reads */
NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	union smb_read *io = talloc_get_type_abort(io1, union smb_read);
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag; /* async->parms; */
	struct async_read_fragments* fragments=fragment->fragments;

	/* if request is not already received by a chained handler, read it */
#warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);

	DEBUG(3,("\n\n%s async_read status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));

	fragment->status = status;

	/* remove fragment from fragments */
	DLIST_REMOVE(fragments->fragments, fragment);

#warning maybe read requests beyond the short read will not return NT_STATUS_OK with nread=0
	/* in which case we will want to collate all responses and return a valid read
	   for the leading NT_STATUS_OK fragments */

	/* did this one fail, inducing a general fragments failure? */
	if (!NT_STATUS_IS_OK(fragment->status)) {
		/* preserve the status of the fragment with the smallest offset
		   when we can work out how */
		if (NT_STATUS_IS_OK(fragments->status)) {
			fragments->status=fragment->status;
		}

		cache_handle_novalidate(f);
		DEBUG(5,("** Devalidated proxy due to read failure\n"));
	} else {
		/* No fragments have yet failed, keep collecting responses */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
		/* used to use mincnt */
		off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
		off_t end_offset=MIN(io_extent, extent);
		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (start_offset >= io_extent) {
			DEBUG(3,("useless read-ahead tagged on to: %s",__location__));
		} else {
			uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone else's
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
		}

		/* There should be a better way to detect, but it needs the proxy rpc struct
		   not the smb_read struct */
		if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
			DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
				 (long long) io_frag->generic.out.nread,
				 (long long) io_frag->generic.in.mincnt,
				 (long long) io_frag->generic.in.maxcnt));
			cache_handle_novalidate(f);
		}

		/* We broke up the original read. If not enough of this sub-read has
		   been read, and then some of the next block, it could leave holes!
		   We will only acknowledge up to the first partial read, and treat
		   it as a small read. If the server can return NT_STATUS_OK for a partial
		   read so can we, so we preserve the response.
		   "enough" is all of it (maxcnt), except on the last block, when it has to
		   be enough to fill io->generic.in.mincnt. We know it is the last block
		   if nread is small but we could fill io->generic.in.mincnt */
		if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
		    end_offset < io->generic.in.offset + io->generic.in.mincnt) {
			DEBUG(4,("Fragmented read only partially successful\n"));

			/* Shrink the master nread (or grow to this size if we are the first partial) */
			if (! fragments->partial ||
			    (io->generic.in.offset + io->generic.out.nread) > extent) {
				io->generic.out.nread = extent - io->generic.in.offset;
			}

			/* stop any further successes from extending the partial read */
			fragments->partial=true;
		} else {
			/* only grow the master nread if we haven't logged a partial read */
			if (! fragments->partial &&
			    (io->generic.in.offset + io->generic.out.nread) < extent ) {
				io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
			}
		}
	}

	/* Was it the last fragment, or do we know enough to send a response? */
	if (! fragments->fragments) {
		DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
			 io->generic.out.nread, io->generic.in.mincnt,
			 get_friendly_nt_error_msg(fragments->status)));
		if (fragments->async) {
			req->async_states->status=fragments->status;
			DEBUG(5,("Fragments async response sending\n"));
#warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
			/* esp. as they may be attached to by other reads. Maybe attachees should be taking a reference, but how will they
			   know the top level they need to take a reference to.. */
#warning should really queue a sender here, not call it
			req->async_states->send_fn(req);
			DEBUG(5,("Async response sent\n"));
		} else {
			DEBUG(5,("Fragments SYNC return\n"));
		}
	}

	/* because a c_req may be shared by many req, chained handlers must return
	   a status pertaining to the general validity of this specific c_req, not
	   to their own private processing of the c_req for the benefit of their req
	   which is returned in fragments->status
	*/
	return status;
}

/* Issue read-ahead X bytes where X is the window size calculation based on
   server_latency * server_session_bandwidth
   where latency is the idle (link) latency and bandwidth is less than or
   equal to the actual bandwidth available to the server.
   Read-ahead should honour locked areas in whatever way is necessary (who knows?)
   read_ahead is defined here and not in the cache engine because it requires too
   much knowledge of private structures
*/
/* The concept is buggy unless we can tell the next proxy that these are
   read-aheads, otherwise chained proxy setups will each read-ahead of the
   read-ahead which can put a larger load on the final server.
   Also we probably need to distinguish between
   * cache-less read-ahead
   * cache-revalidating read-ahead
*/
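
/* A minimal sketch of the latency*bandwidth rule described above, kept as a
   comment because neither quantity is measured anywhere in this module yet;
   the names and units are assumptions:

	static ssize_t readahead_window_hint(unsigned latency_ms, ssize_t bandwidth_bps)
	{
		return (ssize_t)(((uint64_t)latency_ms * bandwidth_bps) / 1000);
	}

   i.e. keep enough bytes in flight to cover one round trip at full rate. */
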
NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
		    union smb_read *io, ssize_t as_read)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_tree *tree = private->tree;
	struct cache_file_entry *cache;
	off_t next_position; /* this read offset+length+window */
	off_t end_position; /* position we read-ahead to */
	off_t cache_populated;
	off_t read_position, new_extent;

	if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("A\n"));
	if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("B\n"));
	cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
	DEBUG(5,("C\n"));
	/* don't read-ahead if we are in bulk validate mode */
	if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("D\n"));
	/* if we can't trust what we read-ahead anyway then don't bother although
	 * if delta-reads are enabled we can do so in order to get something to
	 * delta against */
	DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
		(long long int)(cache_len(cache)),
		(long long int)(cache->readahead_extent),
		(long long int)(as_read),
		cache->readahead_window,private->cache_readahead));
	if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
		DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
			cache->status));
		return NT_STATUS_UNSUCCESSFUL;
	}

	/* as_read is the mincnt bytes of a request being made or the
	   out.nread of completed sync requests
	   Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
	   then this may often NOT be the case if readahead_window < requestsize; so we will
	   get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
	   all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
	   this and have failed sparse writes adjust the cache->readahead_extent back to actual size */

	/* predict the file pointer's next position */
	next_position=io->generic.in.offset + as_read;
	DEBUG(5,("Next position: %lld (%lld + %lld)\n",
		 (long long int)next_position,
		 (long long int)io->generic.in.offset,
		 (long long int)as_read));
	/* calculate the limit of the validated or requested cache */
	cache_populated=MAX(cache->validated_extent, cache->readahead_extent);

	/* will the new read take us beyond the current extent without gaps? */
	if (cache_populated < io->generic.in.offset) {
		/* this read-ahead is a read-behind-pointer */
		new_extent=cache_populated;
	} else {
		new_extent=MAX(next_position, cache_populated);
	}

	/* as far as we can tell new_extent is the smallest offset that doesn't
	   have a pending read request on it. Of course if we got a short read then
	   we will have a cache-gap which we can't handle and need to read from
	   a shrunk readahead_extent, which we don't currently handle */
	read_position=new_extent;

	/* of course if we know how big the remote file is we should limit at that */
	/* we should also mark-out which read-ahead requests are pending so that we
	 * don't repeat them while they are in-transit. */
	/* we can't really use next_position until we can have caches with holes
	   UNLESS next_position < new_extent, because a next_position well before
	   new_extent is no reason to extend it further; we only want to extend
	   with read-aheads if we have cause to suppose the read-ahead data will
	   be wanted, i.e. the next_position is near new_extent.
	   So we can't justify reading beyond window+next_position, but if
	   next_position is leaving gaps, we use new_extent instead */
	end_position=MIN(new_extent, next_position) + cache->readahead_window;
	DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
		 (long long int)read_position,
		 (long long int)(next_position + cache->readahead_window),
		 cache->readahead_window,
		 (long long int)end_position,
		 private->readahead_spare));
	/* do we even need to read? */
	if (! (read_position < end_position)) return NT_STATUS_OK;

	/* readahead_spare is for the whole session (mid/tid?) and may need sharing
	   out over files and other tree-connects or something */
	while (read_position < end_position &&
	       private->readahead_spare > 0) {
		struct smbcli_request *c_req = NULL;
		ssize_t read_remaining = end_position - read_position;
		ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
					 MIN(read_remaining, private->cache_readaheadblock));
		void *req = NULL; /* for the ASYNC_RECV_TAIL_F_ORPHAN macro */
		uint8_t* data;
		union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);

		if (! io_copy)
			return NT_STATUS_NO_MEMORY;

#warning we are ignoring read_for_execute as far as the cache goes
		io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
		io_copy->generic.in.offset=read_position;
		io_copy->generic.in.mincnt=read_block;
		io_copy->generic.in.maxcnt=read_block;
		/* what is generic.in.remaining for? */
		io_copy->generic.in.remaining = MIN(65535,read_remaining);
		io_copy->generic.out.nread=0;

#warning someone must own io_copy, tree, maybe?
		data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
		DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
		if (! data) {
			talloc_free(io_copy);
			return NT_STATUS_NO_MEMORY;
		}
		io_copy->generic.out.data=data;

		/* are we able to pull anything from the cache to validate this read-ahead?
		   NOTE: there is no point in reading ahead merely to re-validate the
		   cache if we don't have oplocks and can't save it....
		   ... or maybe there is if we think a read will come that can be matched
		   up to this response while it is still on the wire */
#warning so we need to distinguish between pipe-line read-ahead and revalidation
		if (/*(cache->status & CACHE_READ)!=0 && */
		    cache_len(cache) >
		    (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
		    cache->validated_extent <
		    (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
			ssize_t pre_fill;

			pre_fill = cache_raw_read(cache, data,
						  io_copy->generic.in.offset,
						  io_copy->generic.in.maxcnt);
			DEBUG(5,("Data read into %p %d\n",data, pre_fill));
			if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
				io_copy->generic.out.nread=pre_fill;
				read_block=pre_fill;
			}
		}

		c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);

		if (c_req) {
			private->readahead_spare--;
			f->readahead_pending++;
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
			if (cache->readahead_extent < read_position+read_block)
				cache->readahead_extent=read_position+read_block;
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
			/* so we can decrease read-ahead counter for this session */
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
			ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

			/* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
			talloc_steal(c_req->async.private, c_req);
			talloc_steal(c_req->async.private, io_copy);
			read_position+=read_block;
		} else {
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
			talloc_free(io_copy);
			break;
		}
	}

	DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
	return NT_STATUS_OK;
}
1232 read from a file
1234 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
1235 struct ntvfs_request *req, union smb_read *io)
1237 struct proxy_private *private = ntvfs->private_data;
1238 struct smbcli_request *c_req;
1239 struct proxy_file *f;
1240 struct async_read_fragments *fragments=NULL;
1241 /* how much of read-from-cache is certainly valid */
1242 ssize_t valid=0;
1243 off_t offset=io->generic.in.offset+valid;
1244 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
1246 SETUP_PID;
1248 if (io->generic.level != RAW_READ_GENERIC &&
1249 private->map_generic) {
1250 return ntvfs_map_read(ntvfs, req, io);
1253 SETUP_FILE_HERE(f);
1255 DEBUG(3,("%s offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
1256 io->generic.in.offset,
1257 io->generic.in.mincnt,
1258 io->generic.in.maxcnt));
1259 io->generic.out.nread=0;
1260 /* attempt to read from cache. if nread becomes non-zero then we
1261 have cache to validate. Instead of returning "valid" value, cache_read
1262 should probably return an async_read_fragment structure */
1264 if (private->cache_enabled) {
1265 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
1267 if (NT_STATUS_IS_OK(status)) {
1268 /* if we read enough valid data, return it */
1269 if (valid > 0 && valid>=io->generic.in.mincnt) {
1270 /* valid will not be bigger than maxcnt */
1271 io->generic.out.nread=valid;
1272 DEBUG(1,("Read from cache offset=%d size=%d\n",
1273 (int)(io->generic.in.offset),
1274 (int)(io->generic.out.nread)) );
1275 return status;
1280 fragments=talloc_zero(req, struct async_read_fragments);
1281 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
1282 /* See if there are pending reads that would satisfy this request
1283 We have a validated read up to io->generic.out.nread. Anything between
1284 this and mincnt MUST be read, but we could first try and attach to
1285 any pending read-ahead on the same file.
1286 If those read-aheads fail we will re-issue a regular read from the
1287 callback handler and hope it hasn't taken too long. */
1289 /* offset is the extentof the file from which we still need to find
1290 matching read-requests. */
1291 offset=io->generic.in.offset+valid;
1292 /* limit is the byte beyond the last byte for which we need a request.
1293 This used to be mincnt, but is now maxcnt to cope with validate reads.
1294 Maybe we can switch back to mincnt when proxy_read struct is used
1295 instead of smb_read.
1297 limit=io->generic.in.offset+io->generic.in.maxcnt;
1299 while (offset < limit) {
1300 /* Should look for the read-ahead with offset <= in.offset+out.nread
1301 with the longest span, but there is only likely to be one anyway so
1302 just take the first */
1303 struct async_info* pending=private->pending;
1304 union smb_read *readahead_io=NULL;
1305 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
1306 while(pending) {
1307 if (pending->c_req->async.fn == async_read_handler) {
1308 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
1309 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
1311 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
1312 readahead_io->generic.in.offset <= offset &&
1313 readahead_io->generic.in.offset +
1314 readahead_io->generic.in.mincnt > offset) break;
1316 readahead_io=NULL;
1317 pending=pending->next;
1319 /* ASSERT(readahead_io == pending->c_req->async.params) */
1320 if (pending && readahead_io) {
1321 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1322 fragment->fragments=fragments;
1323 fragment->io_frag=readahead_io;
1324 fragment->c_req = pending->c_req;
1325 /* we found one, so attach to it. We DO need a talloc_reference
1326 because the original send_fn might be called before ALL chained
1327 handlers, and our handler will call its own send_fn first. ugh.
1328 Maybe we need to seperate reverse-mapping callbacks with data users? */
1329 /* Note: the read-ahead io is passed as io, and our req io is
1330 in io_frag->io */
1331 //talloc_reference(req, pending->req);
1332 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
1333 readahead_io->generic.in.offset,
1334 readahead_io->generic.in.mincnt));
1335 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
1336 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1337 DEBUG(5,("Attached OK\n"));
1338 #warning we don't want to return if we fail to attach, just break
1339 DLIST_ADD(fragments->fragments, fragment);
1340 /* updated offset for which we have reads */
1341 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
1342 } else {
1343 /* there are no pending reads to fill this so issue one up to
1344 the maximum supported read size. We could see when the next
1345 pending read is (if any) and only read up till there... later...
1346 Issue a fragment request for what is left, clone io.
1347 In the case that there were no fragments this will be the orginal read
1348 but with a cloned io struct */
1349 off_t next_offset;
1350 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
1351 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
1352 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
1353 ssize_t offset_inc=offset-io_frag->generic.in.offset;
1354 /* 250 is a guess at ndr rpc overheads */
1355 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
1356 private->tree->session->transport->negotiate.max_xmit) \
1357 - (MIN_SMB_SIZE+32);
1358 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
1359 readsize=MIN(limit-offset, readsize);
1361 DEBUG(5,("Issuing direct read\n"));
1362 /* reduce the cached read (if any). nread is unsigned */
1363 if (io_frag->generic.out.nread > offset_inc) {
1364 io_frag->generic.out.nread-=offset_inc;
1365 /* don't make nread buffer look too big */
1366 if (io_frag->generic.out.nread > readsize)
1367 io_frag->generic.out.nread = readsize;
1368 } else {
1369 io_frag->generic.out.nread=0;
1371 /* adjust the data pointer so we read to the right place */
1372 io_frag->generic.out.data+=offset_inc;
1373 io_frag->generic.in.offset=offset;
1374 io_frag->generic.in.maxcnt=readsize;
1375 /* we don't mind mincnt being smaller if this is the last frag,
1376 but then we can already handle it being bigger but not reached...
1377 The spell would be:
1378 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
1380 io_frag->generic.in.mincnt=readsize;
1381 fragment->fragments=fragments;
1382 fragment->io_frag=io_frag;
1383 #warning attach to send_fn handler
1384 /* what if someone attaches to us? Our send_fn is called from our
1385 chained handler which will be before their handler and io will
1386 already be freed. We need to keep a reference to the io and the data
1387 but we don't know where it came from in order to take a reference.
1388 We need therefore to tackle calling of send_fn AFTER all other handlers */
1390 /* Calculate next offset (in advance) */
1391 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
1393 /* if we are (going to be) the last fragment and we are in VALIDATE
1394 mode, see if we can do a bulk validate now.
1395 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
1396 don't do a validate on a receive validate read
1398 if (PROXY_REMOTE_SERVER(private) &&
1399 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
1400 ssize_t length=private->cache_validatesize;
1401 declare_checksum(digest);
1403 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
1404 length, (unsigned long long) offset));
1405 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
1406 /* no point in doing it if md5'd length < current out.nread
1407 remember: out.data contains this requests cached response
1408 if validate succeeds */
1409 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
1410 /* upgrade the read, allocate the proxy_read struct here
1411 and fill in the extras, no more out-of-band stuff */
1412 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
1413 r=talloc_zero(io_frag, struct proxy_Read);
1414 memcpy(r->in.digest.digest, digest, sizeof(digest));
1415 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
1416 io_frag->generic.in.maxcnt = length;
1417 /* the proxy send function will calculate the checksum based on *data */
1418 } else {
1419 /* not enough in cache to make it worthwhile anymore */
1420 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x len=%lld\n",
1421 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0,
1422 (unsigned long long)length));
1423 cache_handle_novalidate(f);
1424 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
1425 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
1427 } else {
1428 if (f->cache && f->cache->status & CACHE_VALIDATE) {
1429 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
1430 (long long) next_offset,
1431 (long long) limit));
1435 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
1436 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
1437 io_frag->generic.in.maxcnt));
1438 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
1439 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
1440 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
1441 fragment->c_req=c_req;
1442 DLIST_ADD(fragments->fragments, fragment);
1443 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
1444 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
1445 DEBUG(5,("Frag response chained\n"));
1446 /* normally we would only install the chain_handler if we wanted async
1447 response, but as it is the async_read_fragment handler that calls send_fn
1448 based on fragments->async, instead of async_chain_handler, we don't
1449 need to worry about this call completing async'ly while we are
1450 waiting on the other attached calls. Otherwise we would not attach
1451 the async_chain_handler (via async_read_handler) because of the wait
1452 below */
1453 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
1454 void* req=NULL;
1455 /* call async_chain_hander not read handler so that folk can't
1456 attach to it, till we solve the problem above */
1457 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1459 offset = next_offset;
1461 DEBUG(5,("Next fragment\n"));
1464 /* do we still need a final fragment? Issue a read */
1466 DEBUG(5,("No frags left to read\n"));
1469 /* issue new round of read-aheads */
1470 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
1471 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
1472 DEBUG(5,("== Done Read aheads\n"));
1474 /* If we have fragments but we are not called async, we must sync-wait on them */
1475 /* did we map the entire request to pending reads? */
1476 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1477 struct async_read_fragment *fragment;
1478 DEBUG(5,("Sync waiting\n"));
1479 /* fragment get's free'd during the chain_handler so we start at
1480 the top each time */
1481 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
1482 /* Any fragments async handled while we sync-wait on one
1483 will remove themselves from the list and not get sync waited */
1484 sync_chain_handler(fragment->c_req);
1485 /* if we have a non-ok result AND we know we have all the responses
1486 up to extent, then we could quit the loop early and change
1487 fragments->async to true so the final irrelevant responses would
1488 come async and we could send our response now - but we don't
1489 track that detail until we have cache-maps that we can use to
1490 track the responded fragments and combine responded linear extents
1491 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
1493 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
1494 return fragments->status;
1497 DEBUG(5,("Async returning\n"));
1498 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1499 return NT_STATUS_OK;
1503 a handler to de-fragment async write replies back to one request.
1504 Can cope with out-of-order async responses by waiting for all responses
1505 in the NT_STATUS_OK case so that nwritten is properly adjusted
1507 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1509 struct smbcli_request *c_req = async->c_req;
1510 struct ntvfs_request *req = async->req;
1511 struct proxy_file *f=async->f;
1512 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
1513 /* this is the io against which the fragment is to be applied */
1514 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
1515 /* this is the io for the write that issued the callback */
1516 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
1517 struct async_write_fragments* fragments=fragment->fragments;
1518 ssize_t extent=0;
1520 /* if request is not already received by a chained handler, read it */
1521 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
1522 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
1524 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
1525 get_friendly_nt_error_msg(status)));
1527 fragment->status = status;
1529 DLIST_REMOVE(fragments->fragments, fragment);
1531 /* did this one fail? */
1532 if (! NT_STATUS_IS_OK(fragment->status)) {
1533 if (NT_STATUS_IS_OK(fragments->status)) {
1534 fragments->status=fragment->status;
1536 } else {
1537 /* No fragments have yet failed, keep collecting responses */
1538 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
1540 /* we broke up the write so it could all be written. If only some has
1541 been written of this block, and then some of the next block,
1542 it could leave unwritten holes! We will only acknowledge up to the
1543 first partial write, and let the client deal with it.
1544 If the server can return NT_STATUS_OK for a partial write, so can we */
1545 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
1546 DEBUG(4,("Fragmented write only partially successful\n"));
1548 /* Shrink the master nwritten */
1549 if ( ! fragments->partial ||
1550 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
1551 io->generic.out.nwritten = extent - io->generic.in.offset;
1553 /* stop any further successes from extending the partial write */
1554 fragments->partial=true;
1555 } else {
1556 /* only grow the master nwritten if we haven't logged a partial write */
1557 if (! fragments->partial &&
1558 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
1559 io->generic.out.nwritten = extent - io->generic.in.offset;
1564 /* if this was the last fragment, clean up */
1565 if (! fragments->fragments) {
1566 DEBUG(5,("Async write re-fragmented with %d of %d\n",
1567 io->generic.out.nwritten,
1568 io->generic.in.count));
1569 if (NT_STATUS_IS_OK(fragments->status)) {
1570 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
1571 io->generic.in.offset);
1573 if (fragments->async) {
1574 req->async_states->status=fragments->status;
1575 #warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
1576 req->async_states->send_fn(req);
1577 DEBUG(5,("Async response sent\n"));
1578 } else {
1579 DEBUG(5,("Fragments SYNC return\n"));
1583 return status;
1587 a handler for async write replies
1589 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1591 struct smbcli_request *c_req = async->c_req;
1592 struct ntvfs_request *req = async->req;
1593 struct proxy_file *f=async->f;
1594 union smb_write *io=async->parms;
1596 if (c_req)
1597 status = smb_raw_write_recv(c_req, async->parms);
1599 cache_handle_save(f, io->generic.in.data,
1600 io->generic.out.nwritten,
1601 io->generic.in.offset);
1603 return status;
1607 write to a file
1609 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
1610 struct ntvfs_request *req, union smb_write *io)
1612 struct proxy_private *private = ntvfs->private_data;
1613 struct smbcli_request *c_req;
1614 struct proxy_file *f;
1616 SETUP_PID;
1618 if (io->generic.level != RAW_WRITE_GENERIC &&
1619 private->map_generic) {
1620 return ntvfs_map_write(ntvfs, req, io);
1622 SETUP_FILE_HERE(f);
1624 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
1625 #warning ERROR get rid of this
1626 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1627 NTSTATUS status;
1628 if (PROXY_REMOTE_SERVER(private)) {
1629 /* Do a proxy write */
1630 status=proxy_smb_raw_write(ntvfs, io, f);
1631 } else if (io->generic.in.count >
1632 private->tree->session->transport->negotiate.max_xmit) {
1634 /* smbcli_write can deal with large writes, which are bigger than
1635 tree->session->transport->negotiate.max_xmit */
1636 ssize_t size=smbcli_write(private->tree,
1637 io->generic.in.file.fnum,
1638 io->generic.in.wmode,
1639 io->generic.in.data,
1640 io->generic.in.offset,
1641 io->generic.in.count);
1643 if (size==io->generic.in.count || size > 0) {
1644 io->generic.out.nwritten=size;
1645 status=NT_STATUS_OK;
1646 } else {
1647 status=NT_STATUS_UNSUCCESSFUL;
1649 } else {
1650 status=smb_raw_write(private->tree, io);
1653 /* Save write in cache */
1654 if (NT_STATUS_IS_OK(status)) {
1655 cache_handle_save(f, io->generic.in.data,
1656 io->generic.out.nwritten,
1657 io->generic.in.offset);
1660 return status;
1663 /* smb_raw_write_send can't deal with large writes, which are bigger than
1664 tree->session->transport->negotiate.max_xmit so we have to break it up
1665 trying to preserve the async nature of the call as much as possible */
1666 if (PROXY_REMOTE_SERVER(private)) {
1667 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
1668 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
1669 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1670 } else if (io->generic.in.count <=
1671 private->tree->session->transport->negotiate.max_xmit) {
1672 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
1673 c_req = smb_raw_write_send(private->tree, io);
1674 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1675 } else {
1676 ssize_t remaining = io->generic.in.count;
1677 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
1678 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
1679 int done = 0;
1680 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
1682 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
1683 __FUNCTION__, io->generic.in.count,
1684 private->tree->session->transport->negotiate.max_xmit));
1686 fragments->io = io;
1687 io->generic.out.nwritten=0;
1688 io->generic.out.remaining=0;
1690 do {
1691 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
1692 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
1693 ssize_t size = MIN(block, remaining);
1695 fragment->fragments = fragments;
1696 fragment->io_frag = io_frag;
1698 io_frag->generic.level = io->generic.level;
1699 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
1700 io_frag->generic.in.wmode = io->generic.in.wmode;
1701 io_frag->generic.in.count = size;
1702 io_frag->generic.in.offset = io->generic.in.offset + done;
1703 io_frag->generic.in.data = io->generic.in.data + done;
1705 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
1706 if (! c_req) {
1707 /* let pending requests clean-up when ready */
1708 fragments->status=NT_STATUS_UNSUCCESSFUL;
1709 talloc_steal(NULL, fragments);
1710 DEBUG(3,("Can't send request fragment\n"));
1711 return NT_STATUS_UNSUCCESSFUL;
1714 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
1715 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
1716 fragment->c_req=c_req;
1717 DLIST_ADD(fragments->fragments, fragment);
1719 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
1720 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
1721 DEBUG(5,("Frag response chained\n"));
1723 remaining -= size;
1724 done += size;
1725 } while(remaining > 0);
1727 /* this strategy has the callback chain attached to each c_req, so we
1728 don't use the ASYNC_RECV_TAIL* to install a general one */
1731 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
1735 a handler for async seek replies
1737 static void async_seek(struct smbcli_request *c_req)
1739 struct async_info *async = c_req->async.private;
1740 struct ntvfs_request *req = async->req;
1741 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
1742 talloc_free(async);
1743 req->async_states->send_fn(req);
1747 seek in a file
1749 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
1750 struct ntvfs_request *req,
1751 union smb_seek *io)
1753 struct proxy_private *private = ntvfs->private_data;
1754 struct smbcli_request *c_req;
1756 SETUP_PID_AND_FILE;
1758 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1759 return smb_raw_seek(private->tree, io);
1762 c_req = smb_raw_seek_send(private->tree, io);
1764 ASYNC_RECV_TAIL(io, async_seek);
1768 flush a file
1770 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
1771 struct ntvfs_request *req,
1772 union smb_flush *io)
1774 struct proxy_private *private = ntvfs->private_data;
1775 struct smbcli_request *c_req;
1777 SETUP_PID;
1778 switch (io->generic.level) {
1779 case RAW_FLUSH_FLUSH:
1780 SETUP_FILE;
1781 break;
1782 case RAW_FLUSH_ALL:
1783 io->generic.in.file.fnum = 0xFFFF;
1784 break;
1785 case RAW_FLUSH_SMB2:
1786 return NT_STATUS_INVALID_LEVEL;
1789 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1790 return smb_raw_flush(private->tree, io);
1793 c_req = smb_raw_flush_send(private->tree, io);
1795 SIMPLE_ASYNC_TAIL;
1799 close a file
1801 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
1802 struct ntvfs_request *req, union smb_close *io)
1804 struct proxy_private *private = ntvfs->private_data;
1805 struct smbcli_request *c_req;
1806 struct proxy_file *f;
1807 union smb_close io2;
1809 SETUP_PID;
1811 if (io->generic.level != RAW_CLOSE_GENERIC &&
1812 private->map_generic) {
1813 return ntvfs_map_close(ntvfs, req, io);
1815 SETUP_FILE_HERE(f);
1816 /* Note, we aren't freeing f, or its h, here. Should we?
1817 even if the file-close fails, we'll remove it from the list;
1818 what else would we do? Maybe we should not remove it until
1819 after the proxied call completes? */
1820 DLIST_REMOVE(private->files, f);
1822 /* possibly Samba can't send RAW_CLOSE_GENERIC yet, so fall back to RAW_CLOSE_CLOSE */
1823 if (! (c_req = smb_raw_close_send(private->tree, io))) {
1824 if (io->generic.level == RAW_CLOSE_GENERIC) {
1825 ZERO_STRUCT(io2);
1826 io2.close.level = RAW_CLOSE_CLOSE;
1827 io2.close.in.file = io->generic.in.file;
1828 io2.close.in.write_time = io->generic.in.write_time;
1829 io = &io2;
1831 c_req = smb_raw_close_send(private->tree, io);
1834 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1835 return smbcli_request_simple_recv(c_req);
1838 SIMPLE_ASYNC_TAIL;
1842 exit - close all files opened by the pid
1844 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
1845 struct ntvfs_request *req)
1847 struct proxy_private *private = ntvfs->private_data;
1848 struct smbcli_request *c_req;
1850 SETUP_PID;
1852 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1853 return smb_raw_exit(private->tree->session);
1856 c_req = smb_raw_exit_send(private->tree->session);
1858 SIMPLE_ASYNC_TAIL;
1862 logoff - close all files opened by the user
1864 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
1865 struct ntvfs_request *req)
1867 /* we can't implement this properly in the proxy backend yet, so pretend it worked .... */
1868 return NT_STATUS_OK;
1872 setup for an async call - nothing to do yet
1874 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
1875 struct ntvfs_request *req,
1876 void *private)
1878 return NT_STATUS_OK;
1882 cancel an async call
1884 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
1885 struct ntvfs_request *req)
1887 struct proxy_private *private = ntvfs->private_data;
1888 struct async_info *a;
1890 /* find the matching request */
1891 for (a=private->pending;a;a=a->next) {
1892 if (a->req == req) {
1893 break;
1897 if (a == NULL) {
1898 return NT_STATUS_INVALID_PARAMETER;
1901 return smb_raw_ntcancel(a->c_req);
1905 lock a byte range
1907 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
1908 struct ntvfs_request *req, union smb_lock *io)
1910 struct proxy_private *private = ntvfs->private_data;
1911 struct smbcli_request *c_req;
1913 SETUP_PID;
1915 if (io->generic.level != RAW_LOCK_GENERIC &&
1916 private->map_generic) {
1917 return ntvfs_map_lock(ntvfs, req, io);
1919 SETUP_FILE;
1921 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1922 return smb_raw_lock(private->tree, io);
1925 c_req = smb_raw_lock_send(private->tree, io);
1926 SIMPLE_ASYNC_TAIL;
1930 set info on an open file
1932 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
1933 struct ntvfs_request *req,
1934 union smb_setfileinfo *io)
1936 struct proxy_private *private = ntvfs->private_data;
1937 struct smbcli_request *c_req;
1939 SETUP_PID_AND_FILE;
1941 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1942 return smb_raw_setfileinfo(private->tree, io);
1944 c_req = smb_raw_setfileinfo_send(private->tree, io);
1946 SIMPLE_ASYNC_TAIL;
1951 a handler for async fsinfo replies
1953 static void async_fsinfo(struct smbcli_request *c_req)
1955 struct async_info *async = c_req->async.private;
1956 struct ntvfs_request *req = async->req;
1957 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, async->parms);
1958 talloc_free(async);
1959 req->async_states->send_fn(req);
1963 return filesystem space info
1965 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
1966 struct ntvfs_request *req, union smb_fsinfo *fs)
1968 struct proxy_private *private = ntvfs->private_data;
1969 struct smbcli_request *c_req;
1971 SETUP_PID;
1973 /* QFS Proxy */
1974 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
1975 fs->proxy_info.out.major_version=1;
1976 fs->proxy_info.out.minor_version=0;
1977 fs->proxy_info.out.capability=0;
1978 return NT_STATUS_OK;
1981 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1982 return smb_raw_fsinfo(private->tree, req, fs);
1985 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
1987 ASYNC_RECV_TAIL(fs, async_fsinfo);
1991 return print queue info
1993 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
1994 struct ntvfs_request *req, union smb_lpq *lpq)
1996 return NT_STATUS_NOT_SUPPORTED;
2000 list files in a directory matching a wildcard pattern
2002 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
2003 struct ntvfs_request *req, union smb_search_first *io,
2004 void *search_private,
2005 bool (*callback)(void *, const union smb_search_data *))
2007 struct proxy_private *private = ntvfs->private_data;
2009 SETUP_PID;
2011 return smb_raw_search_first(private->tree, req, io, search_private, callback);
2014 /* continue a search */
2015 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
2016 struct ntvfs_request *req, union smb_search_next *io,
2017 void *search_private,
2018 bool (*callback)(void *, const union smb_search_data *))
2020 struct proxy_private *private = ntvfs->private_data;
2022 SETUP_PID;
2024 return smb_raw_search_next(private->tree, req, io, search_private, callback);
2027 /* close a search */
2028 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
2029 struct ntvfs_request *req, union smb_search_close *io)
2031 struct proxy_private *private = ntvfs->private_data;
2033 SETUP_PID;
2035 return smb_raw_search_close(private->tree, io);
2039 a handler for async trans2 replies
2041 static void async_trans2(struct smbcli_request *c_req)
2043 struct async_info *async = c_req->async.private;
2044 struct ntvfs_request *req = async->req;
2045 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
2046 talloc_free(async);
2047 req->async_states->send_fn(req);
2050 /* raw trans2 */
2051 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
2052 struct ntvfs_request *req,
2053 struct smb_trans2 *trans2)
2055 struct proxy_private *private = ntvfs->private_data;
2056 struct smbcli_request *c_req;
2058 if (private->map_trans2) {
2059 return NT_STATUS_NOT_IMPLEMENTED;
2062 SETUP_PID;
2063 #warning we should be mapping file handles here
2065 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2066 return smb_raw_trans2(private->tree, req, trans2);
2069 c_req = smb_raw_trans2_send(private->tree, trans2);
2071 ASYNC_RECV_TAIL(trans2, async_trans2);
2075 /* SMBtrans - not used on file shares */
2076 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
2077 struct ntvfs_request *req,
2078 struct smb_trans2 *trans2)
2080 return NT_STATUS_ACCESS_DENIED;
2084 a handler for async change notify replies
2086 static void async_changenotify(struct smbcli_request *c_req)
2088 struct async_info *async = c_req->async.private;
2089 struct ntvfs_request *req = async->req;
2090 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
2091 talloc_free(async);
2092 req->async_states->send_fn(req);
2095 /* change notify request - always async */
2096 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
2097 struct ntvfs_request *req,
2098 union smb_notify *io)
2100 struct proxy_private *private = ntvfs->private_data;
2101 struct smbcli_request *c_req;
2102 int saved_timeout = private->transport->options.request_timeout;
2103 struct proxy_file *f;
2105 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
2106 return NT_STATUS_NOT_IMPLEMENTED;
2109 SETUP_PID;
2111 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
2112 if (!f) return NT_STATUS_INVALID_HANDLE;
2113 io->nttrans.in.file.fnum = f->fnum;
2115 /* this request doesn't make sense unless it's async */
2116 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2117 return NT_STATUS_INVALID_PARAMETER;
2120 /* we must not timeout on notify requests - they wait
2121 forever */
2122 private->transport->options.request_timeout = 0;
2124 c_req = smb_raw_changenotify_send(private->tree, io);
2126 private->transport->options.request_timeout = saved_timeout;
2128 ASYNC_RECV_TAIL(io, async_changenotify);
2132 * A handler for converting rpc-struct replies back into ntioctl replies
2134 static NTSTATUS proxy_rpclite_map_async_send(
2135 struct ntvfs_module_context *ntvfs,
2136 struct ntvfs_request *req,
2137 void *io1, void *io2, NTSTATUS status)
2139 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
2140 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
2141 void* r=rpclite_send->struct_ptr;
2142 struct ndr_push* push;
2143 const struct ndr_interface_call* call=rpclite_send->call;
2144 enum ndr_err_code ndr_err;
2145 DATA_BLOB ndr;
2147 talloc_free(rpclite_send);
2149 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2150 NT_STATUS_HAVE_NO_MEMORY(push);
2152 if (0) { /* enable to test big-endian marshalling */
2153 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2156 ndr_err = call->ndr_push(push, NDR_OUT, r);
2157 status=ndr_map_error2ntstatus(ndr_err);
2159 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2160 DEBUG(2,("Unable to ndr_push structure in %s - %s\n",
2161 __FUNCTION__, nt_errstr(status)));
2162 return status;
2165 ndr=ndr_push_blob(push);
2166 //if (ndr.length > io->ntioctl.in.max_data) {
2167 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, (int)ndr.length,
2168 io->ntioctl.in.max_data, ndr.data));
2169 io->ntioctl.out.blob=ndr;
2170 return status;
2174 * A handler for sending async rpclite Read replies that were mapped to union smb_read
2176 static NTSTATUS rpclite_proxy_Read_map_async_send(
2177 struct ntvfs_module_context *ntvfs,
2178 struct ntvfs_request *req,
2179 void *io1, void *io2, NTSTATUS status)
2181 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
2182 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
2184 /* status here is the result of proxy_read; it doesn't reflect the status
2185 of the rpc transport or related calls, just the read operation */
2186 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2187 r->out.result=status;
2189 if (! NT_STATUS_IS_OK(status)) {
2190 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
2191 r->out.nread=0;
2192 r->out.flags=0;
2193 } else {
2194 ssize_t size=io->readx.out.nread;
2195 r->out.flags=0;
2196 r->out.nread=io->readx.out.nread;
2198 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
2199 declare_checksum(digest);
2200 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
2202 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
2203 dump_data (5, digest, sizeof(digest));
2204 DEBUG(5,("Cached digest\n"));
2205 dump_data (5, r->in.digest.digest, sizeof(digest));
2207 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
2208 r->out.flags=PROXY_USE_CACHE;
2209 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
2210 (long long)r->out.nread));
2211 if (r->in.flags & PROXY_VALIDATE) {
2212 r->out.flags |= PROXY_VALIDATE;
2213 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
2214 (long long)r->out.nread, (long long) io->readx.out.nread));
2216 goto done;
2218 DEBUG(5,("Cache does not match\n"));
2221 if (r->in.flags & PROXY_VALIDATE) {
2222 /* validate failed, shrink read to mincnt so we don't fill the link */
2223 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
2224 r->in.maxcnt, r->out.nread, MIN(r->out.nread, r->in.mincnt)));
2225 r->out.nread=MIN(r->out.nread, r->in.mincnt);
2226 size=r->out.nread;
2229 if (r->in.flags & PROXY_USE_ZLIB) {
2230 if (compress_block(io->readx.out.data, &size) ) {
2231 r->out.flags|=PROXY_USE_ZLIB;
2232 r->out.response.compress.count=size;
2233 r->out.response.compress.data=io->readx.out.data;
2234 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2235 __FUNCTION__,r->out.nread,(int)size,(int)(size*100/r->out.nread)));
2236 goto done;
2240 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
2241 r->out.response.generic.count=io->readx.out.nread;
2242 r->out.response.generic.data=io->readx.out.data;
2245 done:
2247 /* we do return NT_STATUS_OK below; the operation's own status travels in r->out.result */
2248 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
2250 /* the rpc transport succeeded even if the operation did not */
2251 return NT_STATUS_OK;
2255 * RPC implementation of Read
2257 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
2258 struct ntvfs_request *req, struct proxy_Read *r,
2259 union smb_handle file)
2261 struct proxy_private *private = ntvfs->private_data;
2262 union smb_read* io=talloc(req, union smb_read);
2263 NTSTATUS status;
2264 /* if the next hop is also a proxy we could just repeat this call there, and
2265 handle the VALIDATE check there too - that would need our own callback handlers... */
2266 SETUP_PID;
NT_STATUS_HAVE_NO_MEMORY(io);
2268 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
2269 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
2270 DEBUG(5,("Anticipated digest\n"));
2271 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
2273 /* prepare for response */
2274 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
2275 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
2277 /* pack up an smb_read request and dispatch here */
2278 io->readx.level=RAW_READ_READX;
2279 io->readx.in.file=file;
2280 io->readx.in.mincnt=r->in.mincnt;
2281 io->readx.in.maxcnt=r->in.maxcnt;
2282 io->readx.in.offset=r->in.offset;
2283 io->readx.in.remaining=r->in.remaining;
2284 /* and something to hold the answer */
2285 io->readx.out.data=r->out.response.generic.data;
2287 /* so we get to pack the io->*.out response */
2288 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
2289 NT_STATUS_NOT_OK_RETURN(status);
2291 /* so the read will get processed normally */
2292 return proxy_read(ntvfs, req, io);
2296 * A handler for sending async rpclite Write replies
2298 static NTSTATUS rpclite_proxy_Write_map_async_send(
2299 struct ntvfs_module_context *ntvfs,
2300 struct ntvfs_request *req,
2301 void *io1, void *io2, NTSTATUS status)
2303 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
2304 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
2306 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2307 r->out.result=status;
2309 r->out.nwritten=io->writex.out.nwritten;
2310 r->out.remaining=io->writex.out.remaining;
2312 /* the rpc transport succeeded even if the operation did not */
2313 return NT_STATUS_OK;
2317 * RPC implementation of Write
2319 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
2320 struct ntvfs_request *req, struct proxy_Write *r,
2321 union smb_handle file)
2323 struct proxy_private *private = ntvfs->private_data;
2324 union smb_write* io=talloc(req, union smb_write);
2325 NTSTATUS status;
2327 SETUP_PID;
NT_STATUS_HAVE_NO_MEMORY(io);
2329 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
2330 r->in.count, r->in.offset, r->in.fnum));
2332 /* pack up an smb_write request and dispatch here */
2333 io->writex.level=RAW_WRITE_WRITEX;
2334 io->writex.in.file=file;
2335 io->writex.in.offset=r->in.offset;
2336 io->writex.in.wmode=r->in.mode;
2337 io->writex.in.count=r->in.count;
2339 /* and the data */
2340 if (PROXY_USE_ZLIB & r->in.flags) {
2341 ssize_t count=r->in.data.generic.count;
2342 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
2343 &count, r->in.count);
2344 if (count != r->in.count || !io->writex.in.data) {
2345 /* Didn't uncompress properly, but the RPC layer worked */
2346 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
2347 return NT_STATUS_OK;
2349 } else {
2350 io->writex.in.data=r->in.data.generic.data;
2353 /* so we get to pack the io->*.out response */
2354 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
2355 NT_STATUS_NOT_OK_RETURN(status);
2357 /* so the write will get processed normally */
2358 return proxy_write(ntvfs, req, io);
2361 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
2362 back from rpc struct to ntioctl */
2363 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
2364 struct ntvfs_request *req, union smb_ioctl *io)
2366 struct proxy_private *private = ntvfs->private_data;
2367 DATA_BLOB *request;
2368 struct ndr_syntax_id* syntax_id;
2369 uint32_t opnum;
2370 const struct ndr_interface_table *table;
2371 struct ndr_pull* pull;
2372 void* r;
2373 NTSTATUS status;
2374 struct async_rpclite_send *rpclite_send;
2375 enum ndr_err_code ndr_err;
2377 SETUP_PID;
2379 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,(int)io->ntioctl.in.blob.length));
2380 /* unpack the NDR */
2381 request=&io->ntioctl.in.blob;
2383 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2384 NT_STATUS_HAVE_NO_MEMORY(pull);
2385 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2386 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
2388 /* the blob is 4-aligned because it was memcpy'd */
2389 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
2390 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
2392 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
2393 status=ndr_map_error2ntstatus(ndr_err);
2394 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2395 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
2396 return status;
2399 /* now find the struct ndr_interface_table * for this syntax_id */
2400 table=ndr_table_by_uuid(&syntax_id->uuid);
2401 if (! table) {
2402 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
2403 return NT_STATUS_NO_GUID_TRANSLATION;
2406 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
2407 status=ndr_map_error2ntstatus(ndr_err);
2408 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
2409 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
2410 return status;
2412 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
2414 DEBUG(10,("rpc request data:\n"));
2415 dump_data(10, pull->data, pull->data_size);
if (opnum >= table->num_calls) {
DEBUG(3,("Invalid opnum %d for %s\n", opnum, table->name));
return NT_STATUS_PROCEDURE_NOT_FOUND;
}
2417 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
2418 table->calls[opnum].name);
2419 NT_STATUS_HAVE_NO_MEMORY(r);
2421 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
2422 status=ndr_map_error2ntstatus(ndr_err);
2423 DEBUG(5,("%s opnum %d pulled status %s\n",__FUNCTION__,opnum,get_friendly_nt_error_msg (status)));
2424 NT_STATUS_NOT_OK_RETURN(status);
2426 rpclite_send=talloc(req, struct async_rpclite_send);
2427 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
2428 rpclite_send->call=&table->calls[opnum];
2429 rpclite_send->struct_ptr=r;
2430 /* need to push conversion function to convert from r to io */
2431 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
NT_STATUS_NOT_OK_RETURN(status);
2433 /* Magically despatch the call based on syntax_id, table and opnum.
2434 But there is no table of handlers.... so until then*/
2435 if (0==strcasecmp(table->name,"rpcproxy")) {
2436 switch(opnum) {
2437 case(NDR_PROXY_READ):
2438 status=rpclite_proxy_Read(ntvfs, req, r, io->generic.in.file);
2439 break;
2440 case(NDR_PROXY_WRITE):
2441 status=rpclite_proxy_Write(ntvfs, req, r, io->generic.in.file);
2442 break;
2443 default:
2444 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
2445 return NT_STATUS_PROCEDURE_NOT_FOUND;
2447 } else {
2448 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
2449 GUID_string(debug_ctx(),&syntax_id->uuid)));
2450 return NT_STATUS_NO_GUID_TRANSLATION;
2453 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
2454 the handler status is in r->out.result */
2455 return ntvfs_map_async_finish(req, status);
2458 /* unpack the ntioctl to make some rpc_struct */
2459 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2461 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2462 struct proxy_private *proxy=async->proxy;
2463 struct smbcli_request *c_req = async->c_req;
2464 void* r=io1;
2465 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
2466 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
2467 const struct ndr_interface_call *calls=info->calls;
2468 enum ndr_err_code ndr_err;
2469 DATA_BLOB *response;
2470 struct ndr_pull* pull;
2472 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
2473 DEBUG(5,("%s op %s ntioctl: %s\n",
2474 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2475 NT_STATUS_NOT_OK_RETURN(status);
2477 if (c_req) {
2478 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
2479 status = smb_raw_ioctl_recv(c_req, io, io);
2480 #define SESSION_INFO proxy->remote_server, proxy->remote_share
2481 /* This status is the ntioctl wrapper status */
2482 if (! NT_STATUS_IS_OK(status)) {
2483 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
2484 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
2485 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
2486 return NT_STATUS_UNSUCCESSFUL;
2490 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
2492 response=&io->ntioctl.out.blob;
2493 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2494 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
2496 NT_STATUS_HAVE_NO_MEMORY(pull);
2498 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
2499 #warning can we free pull here?
2500 status=ndr_map_error2ntstatus(ndr_err);
2502 DEBUG(5,("END %s op status %s\n",
2503 __FUNCTION__, get_friendly_nt_error_msg(status)));
2504 return status;
2508 send an ntioctl request based on an NDR encoding.
2510 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
2511 struct smbcli_tree *tree,
2512 struct ntvfs_module_context *ntvfs,
2513 uint16_t fnum,
2514 const struct ndr_interface_table *table,
2515 uint32_t opnum,
2516 void *r)
2518 struct proxy_private *private = ntvfs->private_data;
2519 struct smbcli_request * c_req;
2520 struct ndr_push *push;
2521 NTSTATUS status;
2522 DATA_BLOB request;
2523 enum ndr_err_code ndr_err;
2524 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
if (! io) return NULL;
2527 /* set up for an ndr_push_* call; we can't free push until the message
2528 actually hits the wire */
2529 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
2530 if (!push) return NULL;
2532 /* first push interface table identifiers */
2533 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
2534 status=ndr_map_error2ntstatus(ndr_err);
2536 if (! NT_STATUS_IS_OK(status)) return NULL;
2538 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
2539 status=ndr_map_error2ntstatus(ndr_err);
2540 if (! NT_STATUS_IS_OK(status)) return NULL;
2542 if (0) { /* enable to test big-endian marshalling */
2543 push->flags |= LIBNDR_FLAG_BIGENDIAN;
2546 /* push the structure into a blob */
2547 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
2548 status=ndr_map_error2ntstatus(ndr_err);
2549 if (!NT_STATUS_IS_OK(status)) {
2550 DEBUG(2,("Unable to ndr_push structure in %s - %s\n",
2551 __FUNCTION__, nt_errstr(status)));
2552 return NULL;
2555 /* retrieve the blob */
2556 request = ndr_push_blob(push);
2558 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
2559 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
2560 io->ntioctl.in.file.fnum=fnum;
2561 io->ntioctl.in.fsctl=false;
2562 io->ntioctl.in.filter=0;
2563 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
2564 io->ntioctl.in.blob=request;
2566 DEBUG(10,("smbcli_request packet:\n"));
2567 dump_data(10, request.data, request.length);
2569 c_req = smb_raw_ioctl_send(tree, io);
2571 if (! c_req) {
2572 return NULL;
2575 dump_data(10, c_req->out.data, c_req->out.data_size);
2577 { void* req=NULL; /* dummy for the ADD_ASYNC_RECV_TAIL macro; there is no ntvfs req here */
2578 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
2579 info->io=io;
2580 info->table=table;
2581 info->opnum=opnum;
2582 info->calls=&table->calls[opnum];
2583 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
2586 return c_req;
2590 client helpers, mapping between proxy RPC calls and smbcli_* calls.
2594 * If the sync_chain_handler is called directly it unplugs the async handler
2595 which (as well as preventing loops) will also avoid req->send_fn being
2596 called - which is also nice! */
2597 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
2599 struct async_info *async=NULL;
2600 /* the first callback which will actually receive the c_req response */
2601 struct async_info_map *async_map;
2602 NTSTATUS status=NT_STATUS_OK;
2603 struct async_info_map** chain;
2605 DEBUG(5,("%s\n",__FUNCTION__));
2606 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
2608 /* If there is a handler installed, it is using async_info to chain */
2609 if (c_req->async.fn) {
2610 /* not safe to talloc_free async if send_fn has been called for the request
2611 against which async was allocated, so steal it (and free below) or neither */
2612 async = talloc_get_type_abort(c_req->async.private, struct async_info);
2613 talloc_steal(NULL, async);
2614 chain=&async->chain;
2615 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2616 } else {
2617 chain=(struct async_info_map**)&c_req->async.private;
2618 async_map = talloc_get_type_abort(*chain, struct async_info_map);
2621 /* unplug c_req->async.fn: if a callback handler calls smb_*_recv
2622 in order to receive the response, smbcli_transport_finish_recv will
2623 call us again and then call c_req->async.fn.
2624 Perhaps we should merely call smbcli_request_receive() IF
2625 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
2626 help multi-part replies... except all parts are received before the
2627 callback if a handler WAS set */
2628 c_req->async.fn=NULL;
2630 /* Should we raise an error? Should we simple_recv? */
2631 while(async_map) {
2632 /* remove this one from the list before we call. We do this in case
2633 some callbacks free their async_map but also so that callbacks
2634 can navigate the async_map chain to add additional callbacks to
2635 the end - e.g. so that tag-along reads can call send_fn after
2636 the send_fn of the request they tagged along to, thus preserving
2637 the async response order - which may be a waste of time? */
2638 DLIST_REMOVE(*chain, async_map);
2640 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
2641 if (async_map->fn) {
2642 status=async_map->fn(async_map->async,
2643 async_map->parms1, async_map->parms2, status);
2645 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
2646 /* Note: the callback may have added to the chain */
2647 #warning Async_maps have a null talloc_context, it is unclear who should own them
2648 /* it can't be c_req as it stops us chaining more than one, maybe it
2649 should be req but there isn't always a req. However sync_chain_handler
2650 will always free it if called */
2651 DEBUG(6,("Will free async map %p\n",async_map));
2652 #warning put me back
2653 talloc_free(async_map);
2654 DEBUG(6,("Free'd async_map\n"));
2655 if (*chain)
2656 async_map=talloc_get_type_abort(*chain, struct async_info_map);
2657 else
2658 async_map=NULL;
2659 DEBUG(6,("Switch to async_map %p\n",async_map));
2661 /* The first callback will have read c_req, thus talloc_free'ing it,
2662 so we don't let the other callbacks get hurt playing with it */
2663 if (async_map && async_map->async)
2664 async_map->async->c_req=NULL;
2667 talloc_free(async);
2669 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2670 return status;
2673 /* If the async handler is called, then the send_fn is called */
2674 static void async_chain_handler(struct smbcli_request *c_req)
2676 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
2677 struct ntvfs_request *req = async->req;
2678 NTSTATUS status;
2680 if (c_req->state <= SMBCLI_REQUEST_RECV) {
2681 /* Looks like the async handler has been called sync'ly */
2682 smb_panic("async_chain_handler called sync'ly on a request still in flight");
2685 status=sync_chain_handler(c_req);
2687 /* Should we insist that a chain'd handler does this?
2688 Which makes it hard to intercept the data by adding handlers
2689 before the send_fn handler sends it... */
2690 if (req) {
2691 req->async_states->status=status;
2692 req->async_states->send_fn(req);
2696 /* unpack the rpc struct to make some smb_write */
2697 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
2698 void* io1, void* io2, NTSTATUS status)
2700 union smb_write* io =talloc_get_type(io1, union smb_write);
2701 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
2703 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
2704 get_friendly_nt_error_msg (status)));
2705 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
2706 NT_STATUS_NOT_OK_RETURN(status);
2708 status=r->out.result;
2709 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2710 NT_STATUS_NOT_OK_RETURN(status);
2712 io->generic.out.remaining = r->out.remaining;
2713 io->generic.out.nwritten = r->out.nwritten;
2715 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
2716 get_friendly_nt_error_msg (status)));
2717 return status;
2720 /* upgrade from smb to NDR and then send.
2721 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
2722 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
2723 union smb_write *io,
2724 struct proxy_file *f)
2726 struct proxy_private *private = ntvfs->private_data;
2727 struct smbcli_tree *tree=private->tree;
2729 if (PROXY_REMOTE_SERVER(private)) {
2730 struct smbcli_request *c_req;
2731 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
2732 ssize_t size;
2734 if (! r) return NULL;
2736 size=io->generic.in.count;
2737 /* upgrade the write */
2738 r->in.fnum = io->generic.in.file.fnum;
2739 r->in.offset = io->generic.in.offset;
2740 r->in.count = io->generic.in.count;
2741 r->in.mode = io->generic.in.wmode;
2742 // r->in.remaining = io->generic.in.remaining;
2743 #warning remove this
2744 /* prepare to lie */
2745 r->out.nwritten=r->in.count;
2746 r->out.remaining=0;
2748 /* try to compress */
2749 #warning compress!
2750 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
2751 if (r->in.data.compress.data) {
2752 r->in.data.compress.count=size;
2753 r->in.flags = PROXY_USE_ZLIB;
2754 } else {
2755 r->in.flags = 0;
2756 /* we'll honour const, honest gov */
2757 r->in.data.generic.data=discard_const(io->generic.in.data);
2758 r->in.data.generic.count=io->generic.in.count;
2761 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
2762 ntvfs,
2763 io->generic.in.file.fnum,
2764 &ndr_table_rpcproxy,
2765 NDR_PROXY_WRITE, r);
2766 if (! c_req) return NULL;
2768 /* yeah, filthy abuse of f */
2769 { void* req=NULL;
2770 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
2773 return c_req;
2774 } else {
2775 return smb_raw_write_send(tree, io);
2779 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
2780 union smb_write *io,
2781 struct proxy_file *f)
2783 struct proxy_private *proxy = ntvfs->private_data;
2784 struct smbcli_tree *tree=proxy->tree;
2786 if (PROXY_REMOTE_SERVER(proxy)) {
2787 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
2788 return sync_chain_handler(c_req);
2789 } else {
2790 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
2791 return smb_raw_write_recv(c_req, io);
2795 /* unpack the rpc struct to make some smb_read response */
2796 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
2797 void* io1, void* io2, NTSTATUS status)
2799 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
2800 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
2802 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
2803 get_friendly_nt_error_msg(status)));
2804 NT_STATUS_NOT_OK_RETURN(status);
2806 status=r->out.result;
2807 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
2808 get_friendly_nt_error_msg(status)));
2809 NT_STATUS_NOT_OK_RETURN(status);
2811 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
2812 io->generic.out.compaction_mode = 0;
2814 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
2815 /* Use the io we already set up!
2816 if out.flags & PROXY_VALIDATE, we may need to validate more in the
2817 cache than r->out.nread would suggest, see io->generic.out.nread */
2818 if (r->out.flags & PROXY_VALIDATE)
2819 io->generic.out.nread=io->generic.in.maxcnt;
2820 DEBUG(5,("Using cached data: size=%lld\n",
2821 (long long) io->generic.out.nread));
2822 return status;
2825 if (r->in.flags & PROXY_VALIDATE) {
2826 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
2827 /* turn off validate on this file */
2828 #warning turn off validate on this file - do an nread<maxcnt later
2831 if (r->in.flags & PROXY_USE_CACHE) {
2832 DEBUG(5,("Cached data did not match\n"));
2835 io->generic.out.nread = r->out.nread;
2837 /* we may need to uncompress */
2838 if (r->out.flags & PROXY_USE_ZLIB) {
2839 ssize_t size=r->out.response.compress.count;
2840 if (! uncompress_block_to(io->generic.out.data,
2841 r->out.response.compress.data, &size,
2842 io->generic.in.maxcnt) ||
2843 size != r->out.nread) {
2844 io->generic.out.nread=size;
2845 status=NT_STATUS_INVALID_USER_BUFFER;
2847 } else if (io->generic.out.data != r->out.response.generic.data) {
2848 //Assert(r->out.nread == r->out.generic.out.count);
2849 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
2852 return status;
2855 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
2856 data has been pre-read into io->generic.out.data and can be used for
2857 proxy<->proxy optimized reads */
2858 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
2859 union smb_read *io,
2860 struct proxy_file *f,
2861 struct proxy_Read *r)
2863 struct proxy_private *private = ntvfs->private_data;
2864 #warning we are using out.nread as an out-of-band parameter
2865 if (PROXY_REMOTE_SERVER(private)) {
2867 struct smbcli_request *c_req;
2868 if (! r) {
2869 r=talloc_zero(io, struct proxy_Read);
2872 if (! r) return NULL;
2874 r->in.fnum = io->generic.in.file.fnum;
2875 r->in.read_for_execute=io->generic.in.read_for_execute;
2876 r->in.offset = io->generic.in.offset;
2877 r->in.mincnt = io->generic.in.mincnt;
2878 r->in.maxcnt = io->generic.in.maxcnt;
2879 r->in.remaining = io->generic.in.remaining;
2880 r->in.flags |= PROXY_USE_ZLIB;
2881 if (! (r->in.flags & PROXY_VALIDATE) &&
2882 io->generic.out.data && io->generic.out.nread > 0) {
2883 /* maybe we should limit digest size to MIN(nread, maxcnt) to
2884 permit the caller to provide a larger nread as part of
2885 a split read */
2886 checksum_block(r->in.digest.digest, io->generic.out.data,
2887 io->generic.out.nread);
2889 if (io->generic.out.nread > r->in.maxcnt) {
2890 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
2891 } else {
2892 r->in.mincnt = io->generic.out.nread;
2893 r->in.maxcnt = io->generic.out.nread;
2894 r->in.flags |= PROXY_USE_CACHE;
2895 /* PROXY_VALIDATE will have been set by caller */
2899 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
2900 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
2901 dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
2904 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
2905 ntvfs,
2906 io->generic.in.file.fnum,
2907 &ndr_table_rpcproxy,
2908 NDR_PROXY_READ, r);
2909 if (! c_req) return NULL;
2911 { void* req=NULL;
2912 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
2915 return c_req;
2916 } else {
2917 return smb_raw_read_send(private->tree, io);
2921 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
2922 union smb_read *io,
2923 struct proxy_file *f)
2925 struct proxy_private *proxy = ntvfs->private_data;
2926 struct smbcli_tree *tree=proxy->tree;
2928 if (PROXY_REMOTE_SERVER(proxy)) {
2929 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
2930 return sync_chain_handler(c_req);
2931 } else {
2932 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
2933 return smb_raw_read_recv(c_req, io);
2939 initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
2941 NTSTATUS ntvfs_proxy_init(void)
2943 NTSTATUS ret;
2944 struct ntvfs_ops ops;
2945 NTVFS_CURRENT_CRITICAL_SIZES(vers);
2947 ZERO_STRUCT(ops);
2949 /* fill in the name and type */
2950 ops.name = "proxy";
2951 ops.type = NTVFS_DISK;
2953 /* fill in all the operations */
2954 ops.connect = proxy_connect;
2955 ops.disconnect = proxy_disconnect;
2956 ops.unlink = proxy_unlink;
2957 ops.chkpath = proxy_chkpath;
2958 ops.qpathinfo = proxy_qpathinfo;
2959 ops.setpathinfo = proxy_setpathinfo;
2960 ops.open = proxy_open;
2961 ops.mkdir = proxy_mkdir;
2962 ops.rmdir = proxy_rmdir;
2963 ops.rename = proxy_rename;
2964 ops.copy = proxy_copy;
2965 ops.ioctl = proxy_ioctl;
2966 ops.read = proxy_read;
2967 ops.write = proxy_write;
2968 ops.seek = proxy_seek;
2969 ops.flush = proxy_flush;
2970 ops.close = proxy_close;
2971 ops.exit = proxy_exit;
2972 ops.lock = proxy_lock;
2973 ops.setfileinfo = proxy_setfileinfo;
2974 ops.qfileinfo = proxy_qfileinfo;
2975 ops.fsinfo = proxy_fsinfo;
2976 ops.lpq = proxy_lpq;
2977 ops.search_first = proxy_search_first;
2978 ops.search_next = proxy_search_next;
2979 ops.search_close = proxy_search_close;
2980 ops.trans = proxy_trans;
2981 ops.logoff = proxy_logoff;
2982 ops.async_setup = proxy_async_setup;
2983 ops.cancel = proxy_cancel;
2984 ops.notify = proxy_notify;
2985 ops.trans2 = proxy_trans2;
2987 /* register ourselves with the NTVFS subsystem. We register
2988 under the name 'proxy'. */
2989 ret = ntvfs_register(&ops, &vers);
2991 if (!NT_STATUS_IS_OK(ret)) {
2992 DEBUG(0,("Failed to register PROXY backend!\n"));
2995 return ret;