/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.
*/

#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx
#define __LOCATION__ (talloc_asprintf(debug_ctx(),"%s:%d %s",__FILE__,__LINE__,__FUNCTION__))
#define PROXY_NTIOCTL_MAXDATA 0x2000000
#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"
#include "librpc/gen_ndr/proxy.h"
#include "smb_server/smb_server.h"
#define fstrcmp(a,b) strcasecmp((a),(b))
#define fstrncmp(a,b,len) strncasecmp((a),(b),(len))
#define LOAD_CACHE_FILE_DATA(dest, src) do { \
	dest.create_time=src.create_time; \
	dest.access_time=src.access_time; \
	dest.write_time=src.write_time; \
	dest.change_time=src.change_time; \
	dest.attrib=src.attrib; \
	dest.alloc_size=src.alloc_size; \
	dest.size=src.size; \
	dest.file_type=src.file_type; \
	dest.ipc_state=src.ipc_state; \
	dest.is_directory=src.is_directory; \
	dest.delete_pending=0; \
} while(0)
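
/* LOAD_CACHE_FILE_DATA copies the file attributes returned on the wire (src,
   e.g. the out block of a completed ntcreatex) into a cached
   proxy_file_info_data (dest), deliberately clearing delete_pending so a
   stale pending delete is never served from the cache. A minimal sketch,
   with a hypothetical completed open "io":

	struct proxy_file_info_data data;
	LOAD_CACHE_FILE_DATA(data, io->generic.out);
*/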

/* taken from #include "librpc/gen_ndr/proxy.h" */
struct proxy_file_info_data {
	/* first three are from ntcreatex */
	uint16_t file_type;
	uint16_t ipc_state;
	uint8_t is_directory;
	NTSTATUS status_RAW_FILEINFO_BASIC_INFORMATION;
	uint32_t attrib; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME create_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME access_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME write_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME change_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_ALL_INFO;
	uint32_t ea_size; /* RAW_FILEINFO_ALL_INFO */
	uint64_t alloc_size; /* RAW_FILEINFO_ALL_INFO */
	uint64_t size; /* RAW_FILEINFO_ALL_INFO */
	uint32_t nlink; /* RAW_FILEINFO_ALL_INFO */
	struct sws fname; /* RAW_FILEINFO_ALL_INFO */
	uint8_t delete_pending; /* RAW_FILEINFO_ALL_INFO */
	uint8_t directory; /* RAW_FILEINFO_ALL_INFO */
	NTSTATUS status_RAW_FILEINFO_COMPRESSION_INFO;
	uint64_t compressed_size; /* RAW_FILEINFO_COMPRESSION_INFO */
	uint16_t format; /* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t unit_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t chunk_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t cluster_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
	NTSTATUS status_RAW_FILEINFO_INTERNAL_INFORMATION;
	uint64_t file_id; /* RAW_FILEINFO_INTERNAL_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_ACCESS_INFORMATION;
	uint32_t access_flags; /* RAW_FILEINFO_ACCESS_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_POSITION_INFORMATION;
	uint64_t position; /* RAW_FILEINFO_POSITION_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_MODE_INFORMATION;
	uint32_t mode; /* RAW_FILEINFO_MODE_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	uint32_t alignment_requirement; /* RAW_FILEINFO_ALIGNMENT_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	uint32_t reparse_tag; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
	uint32_t reparse_attrib; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_STREAM_INFO;
	uint32_t num_streams; /* RAW_FILEINFO_STREAM_INFO */
	struct info_stream *streams; /* RAW_FILEINFO_STREAM_INFO */
};

#define valid_RAW_FILEINFO_BASIC_INFORMATION 1
#define valid_RAW_FILEINFO_ALL_INFO 2
#define valid_RAW_FILEINFO_COMPRESSION_INFO 1024
#define valid_RAW_FILEINFO_INTERNAL_INFORMATION 4
#define valid_RAW_FILEINFO_STANDARD_INFO 8
#define valid_RAW_FILEINFO_ACCESS_INFORMATION 16
#define valid_RAW_FILEINFO_POSITION_INFORMATION 32
#define valid_RAW_FILEINFO_MODE_INFORMATION 64
#define valid_RAW_FILEINFO_ALIGNMENT_INFORMATION 128
#define valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION 256
#define valid_RAW_FILEINFO_STREAM_INFO 512
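
/* The valid_* constants are bit flags OR'd into file_metadata.valid as each
   RAW_FILEINFO level is fetched, and tested by SET_VALID() in
   proxy_cache_info() below, so each level needs its own distinct bit.
   E.g. after a successful GetInfo at the ALL_INFO level:

	metadata->valid |= valid_RAW_FILEINFO_ALL_INFO;
	if (metadata->valid & valid_RAW_FILEINFO_ALL_INFO) {
		... serve the request from the cache ...
	}
*/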

struct file_metadata {
	int count;
	int valid;
	struct proxy_file_info_data info_data;
};

struct proxy_file {
	struct proxy_file *prev, *next;
	struct proxy_private* proxy;
	uint16_t fnum;
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	/* filename might not be a char*; if it is, filename_size includes the null */
	void* filename;
	int filename_size;
	int readahead_pending;
	/* *_OPLOCK_RETURN values */
	int oplock;
	/* read-only, shareable normal file open, can be cloned by similar opens */
	bool can_clone;
	/* If we have an oplock, then the file is NOT bigger than size, which lets
	   us optimize reads */
	struct file_metadata *metadata;
};

struct proxy_private;

struct search_handle {
	struct search_handle *prev, *next;
	struct proxy_private *proxy;
	struct ntvfs_handle *h;
	uint16_t handle;
	union {
		struct smb_search_id id;
		uint32_t resume_key;
	} resume_index;
	struct search_cache_item *resume_item;
	enum smb_search_level level;
	enum smb_search_data_level data_level;
	/* search cache (if any) being used */
	struct search_cache *cache;
};

struct search_cache_item {
	struct search_cache_item *prev, *next;
	enum smb_search_data_level data_level;
	struct cache_file_entry *cache;
	union smb_search_data *file;
	struct file_metadata *metadata;
};

enum search_cache_status {
	SEARCH_CACHE_INCOMPLETE,
	SEARCH_CACHE_COMPLETE,
	SEARCH_CACHE_DEAD
};

struct fdirmon;
typedef void*(fdirmon_callback_fn)(void* data, struct fdirmon* fdirmon);
//NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS)

struct fdirmon {
	struct fdirmon *prev, *next;
	struct search_cache_item *items;

	struct proxy_private *proxy;

	union smb_notify *notify_io;
	struct smbcli_request *notify_req;
	uint16_t dir_fnum;
	char* dir;
	struct fdirmon_callback {
		struct fdirmon_callback *prev, *next;
		fdirmon_callback_fn *fn;
		void* data;
	} *callbacks;
};

struct search_cache {
	struct search_cache *prev, *next;
	struct search_cache_item *items;

	struct proxy_private *proxy;
	enum search_cache_status status;

	union smb_notify *notify_io;
	struct smbcli_request *notify_req;
	uint16_t dir_fnum;
	char* dir;

	struct search_cache_key {
		enum smb_search_level level;
		enum smb_search_data_level data_level;
		uint16_t search_attrib;
		const char *pattern;
		/* these only for trans2 */
		uint16_t flags;
		uint32_t storage_type;
	} key;
};

struct search_state {
	struct search_handle *search_handle;
	void* private;
	smbcli_search_callback callback;
	struct search_cache_item *last_item;
	uint16_t count;
};

struct fs_attribute_info {
	uint32_t fs_attr;
	uint32_t max_file_component_length;
	struct smb_wire_string fs_type;
};

/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	struct proxy_file *closed_files;
	struct fdirmon *dirmons;
	struct search_cache *search_caches; /* caches of find-first data */
	struct search_handle *search_handles; /* open search handles */
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead; /* default read-ahead window size */
	int cache_readaheadblock; /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	struct fs_attribute_info *fs_attribute_info;
	int readahead_spare; /* amount of pending non-user generated requests */
	bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
	bool fake_valid; /* useful for testing, treat cached data as valid */
	uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
	bool enabled_cache_info;
	bool enabled_proxy_search;
	bool enabled_open_clone;
	bool enabled_extra_protocol;
	bool enabled_qpathinfo;
};

struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev;
	struct proxy_private *proxy;
	struct ntvfs_request *req;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	struct async_info_map *chain;
	void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
	struct async_info_map *next, *prev;
	NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
	void *parms1;
	void *parms2;
	struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};

#define SETUP_PID private->tree->session->pid = req->smbpid

#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
	RPCLITE_SETUP_THIS_FILE_HERE(r->in.fnum, f, h); \
} while (0)

#define RPCLITE_SETUP_THIS_FILE_HERE(FNUM, f, h) do { \
	if ((h = ntvfs_find_handle(private->ntvfs, req, FNUM)) && \
	    (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
		FNUM = f->fnum; \
	} else { \
		r->out.result = NT_STATUS_INVALID_HANDLE; \
		return NT_STATUS_OK; \
	} \
} while (0)
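
/* RPCLITE_SETUP_THIS_FILE_HERE translates the client's wire fnum into the
   backend fnum used on the proxied connection. Note the error path: a bad
   handle is reported in r->out.result while the macro returns NT_STATUS_OK,
   because the rpclite transport call itself succeeded even though the
   operation it carries did not. */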

#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)

#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in KB, i.e. 256KB */

#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

#define PROXY_FAKE_VALID "proxy:fake-valid"
#define PROXY_FAKE_VALID_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true

/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	&& (strcmp("A:",private->tree->device)==0) \
	&& (private->nttrans_fnum!=0) \
	&& (private->enabled_extra_protocol))
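
/* A remote server is treated as another instance of this proxy only when all
   four conditions hold: it negotiated CAP_COMPRESSION (apparently used here
   as a marker for the extended proxy protocol), the share is a disk share
   ("A:"), we managed to open the nttrans handle used for non-file
   operations, and the extra protocol has not been disabled. Only then are
   rpclite/proxy-specific ntioctls sent to it. */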

/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
	struct smbcli_tree *tree,
	struct ntvfs_module_context *ntvfs,
	const struct ndr_interface_table *table,
	uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
						union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
			     union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);

struct smb_wire_string talloc_smb_wire_string_dup(void* mem_ctx, const struct smb_wire_string* string)
{
	struct smb_wire_string result;
	result.private_length=string->private_length;
	result.s=talloc_strndup(mem_ctx, string->s, string->private_length);
	DEBUG(5,("%s: %s\n",__FUNCTION__, string->s));
	return result;
}

#define sws_dup(mem_ctx, dest, src) (\
	dest=talloc_smb_wire_string_dup(NULL, &(src)), \
	(dest.s==NULL && src.s!=NULL))
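
/* sws_dup evaluates to true (nonzero) on failure: the copy came back NULL
   while the source string was non-NULL, so callers treat a true result as
   out-of-memory. Note the dup is made on the NULL talloc context even though
   a mem_ctx argument is supplied, which looks like an oversight. */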

/* This needs replacing with something more canonical perhaps */
static char* talloc_dirname(void* mem_ctx, const char* path) {
	const char* dir;

	if ((dir=strrchr(path,'\\'))) {
		return talloc_strndup(mem_ctx, path, (dir - path));
	} else {
		return talloc_strdup(mem_ctx,"");
	}
}

/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
	struct proxy_private *private = p_private;
	NTSTATUS status;
	struct ntvfs_handle *h = NULL;
	struct proxy_file *f;
	bool result=true;

	/* because we clone handles, there may be more than one match */
	for (f=private->files; f; f=f->next) {
		if (f->fnum != fnum) continue;
		h = f->h;

		if (level==OPLOCK_BREAK_TO_LEVEL_II) {
			f->oplock=LEVEL_II_OPLOCK_RETURN;
		} else {
			/* If we don't have an oplock, then we can't rely on the cache */
			cache_handle_stale(f);
			f->oplock=NO_OPLOCK_RETURN;
		}

		DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
		status = ntvfs_send_oplock_break(private->ntvfs, h, level);
		if (!NT_STATUS_IS_OK(status)) result=false;
	}
	if (!h) {
		DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
	}
	return result;
}

/*
  get file handle from client's fnum (from ntvfs/ipc/vfs_ipc.c at metze's suggestion)
*/
static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
					      struct ntvfs_request *req,
					      uint16_t fnum)
{
	DATA_BLOB key;
	uint16_t _fnum;

	/*
	 * the fnum is already in host byteorder
	 * but ntvfs_handle_search_by_wire_key() expects
	 * network byteorder
	 */
	SSVAL(&_fnum, 0, fnum);
	key = data_blob_const(&_fnum, 2);

	return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
}

/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, const char *sharename)
{
	NTSTATUS status;
	struct proxy_private *private;
	const char *host, *user, *pass, *domain, *remote_share;
	struct smb_composite_connect io;
	struct composite_context *creq;
	struct share_config *scfg = ntvfs->ctx->config;
	int nttrans_fnum;

	struct cli_credentials *credentials;
	bool machine_account;

	/* Here we need to determine which server to connect to.
	 * For now we use parametric options, type proxy.
	 * Later we will use security=server and auth_server.c.
	 */
	host = share_string_option(scfg, PROXY_SERVER, NULL);
	user = share_string_option(scfg, PROXY_USER, NULL);
	pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
	domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
	remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
	if (!remote_share) {
		remote_share = sharename;
	}

	machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

	private = talloc_zero(ntvfs, struct proxy_private);
	if (!private) {
		return NT_STATUS_NO_MEMORY;
	}

	ntvfs->private_data = private;

	if (!host) {
		DEBUG(1,("PROXY backend: You must supply server\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (user && pass) {
		DEBUG(5, ("PROXY backend: Using specified password\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		DEBUG(5, ("PROXY backend: Using machine account\n"));
		credentials = cli_credentials_init(private);
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (req->session_info->credentials) {
		DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
		credentials = req->session_info->credentials;
	} else {
		DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* connect to the server, using the smbd event context */
	io.in.dest_host = host;
	io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
	io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
	io.in.called_name = host;
	io.in.credentials = credentials;
	io.in.fallback_to_anonymous = false;
	io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
	io.in.service = remote_share;
	io.in.service_type = "?????";
	io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
	io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
	lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
	lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

	creq = smb_composite_connect_send(&io, private,
					  lp_resolve_context(ntvfs->ctx->lp_ctx),
					  ntvfs->ctx->event_ctx);
	status = smb_composite_connect_recv(creq, private);
	NT_STATUS_NOT_OK_RETURN(status);

	private->tree = io.out.tree;

	private->transport = private->tree->session->transport;
	SETUP_PID;
	private->ntvfs = ntvfs;

	ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
	ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

	/* we need to receive oplock break requests from the server */
	smbcli_oplock_handler(private->transport, oplock_handler, private);

	private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

	private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

	private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

	if (strcmp("A:",private->tree->device)==0) {
		private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
		private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
		private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
								 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
		private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
		private->fake_valid = share_bool_option(scfg, PROXY_FAKE_VALID, PROXY_FAKE_VALID_DEFAULT);
		private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
		private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
		private->enabled_cache_info=true;
		private->enabled_proxy_search=true;
		private->enabled_open_clone=true;
		private->enabled_extra_protocol=true;
		private->enabled_qpathinfo=true;

		DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
			 remote_share, private->tree->device,private->tree->fs_type,
			 (private->cache_enabled)?"enabled":"disabled",
			 private->cache_readahead));
	} else {
		private->cache_enabled = false;
		DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
			 remote_share, private->tree->device,private->tree->fs_type));
	}

	private->remote_server = strlower_talloc(private, host);
	private->remote_share = strlower_talloc(private, remote_share);

	/* some proxy operations will not be performed on files, so open a handle
	   now that we can use for such things. We won't bother to close it on
	   shutdown, as the remote server ought to be able to close it for us
	   and we might be shutting down because the remote server went away and
	   so we don't want to delay further */
	nttrans_fnum=smbcli_nt_create_full(private->tree, "\\",
					   0,
					   SEC_FILE_READ_DATA,
					   FILE_ATTRIBUTE_NORMAL,
					   NTCREATEX_SHARE_ACCESS_MASK,
					   NTCREATEX_DISP_OPEN,
					   NTCREATEX_OPTIONS_DIRECTORY,
					   NTCREATEX_IMPERSONATION_IMPERSONATION);
	if (nttrans_fnum < 0) {
		DEBUG(5,("Could not open handle for ntioctl %d\n",nttrans_fnum));
		//return NT_STATUS_UNSUCCESSFUL;
	}
	private->nttrans_fnum=nttrans_fnum;
	DEBUG(5,("Got nttrans handle %d\n",private->nttrans_fnum));

	return NT_STATUS_OK;
}

/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct async_info *a, *an;
	struct search_cache *s;

	/* first clean up the search caches: they have a pending notify request
	   that they would otherwise try to clean up later, failing during talloc_free */
	for (s=private->search_caches; s; s=s->next) {
		s->notify_req=NULL;
		s->dir_fnum=65535;
	}

	/* then clean up the pending requests */
	for (a=private->pending; a; a = an) {
		an = a->next;
		smbcli_request_destroy(a->c_req);
		talloc_free(a);
	}

	talloc_free(private);
	ntvfs->private_data = NULL;

	return NT_STATUS_OK;
}

/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
	DLIST_REMOVE(async->proxy->pending, async);
	return 0;
}

/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smbcli_request_simple_recv(c_req);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)

/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain); \
	if (! c_req->async.private) return (error); \
} while(0)

#define ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain) do { \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (async) { \
			async->parms = io; \
			async->req = req; \
			async->f = file; \
			async->proxy = private; \
			async->c_req = c_req; \
			async->chain = achain; \
			DLIST_ADD(private->pending, async); \
			c_req->async.private = async; \
			talloc_set_destructor(async, async_info_destructor); \
		} \
	} \
	c_req->async.fn = async_fn; \
} while (0)

#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
	if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return NT_STATUS_NO_MEMORY; \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
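
/* The pattern every simple backend op follows with these macros, sketched
   here for reference (proxy_unlink below is a real instance); smb_raw_op and
   smb_raw_op_send stand in for whichever raw call the op wraps:

	static NTSTATUS proxy_op(struct ntvfs_module_context *ntvfs,
				 struct ntvfs_request *req, union smb_xxx *io)
	{
		struct proxy_private *private = ntvfs->private_data;
		struct smbcli_request *c_req;

		SETUP_PID;
		if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
			return smb_raw_op(private->tree, io);
		}
		c_req = smb_raw_op_send(private->tree, io);
		SIMPLE_ASYNC_TAIL;
	}

   SIMPLE_ASYNC_TAIL installs async_simple as the completion handler and
   returns NT_STATUS_OK with the request marked async. */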

/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   And if async->c_req is NULL then an earlier chain has already rec'd the
   request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record:
     static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed
   and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *);
   chained async_info_map will be in c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)
793 DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
794 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL,\
795 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
796 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
797 file, file?"file":"null", file?"file":"null", #async_fn)); \

#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
	if (! creq) { \
		DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no creq\n",__FUNCTION__)); \
		return (error); \
	} else { \
		struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
		if (! async_map) { \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map\n",__FUNCTION__)); \
			return (error); \
		} \
		async_map->async=talloc(async_map, struct async_info); \
		if (! async_map->async) { \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map->async\n",__FUNCTION__)); \
			return (error); \
		} \
		async_map->parms1=io1; \
		async_map->parms2=io2; \
		async_map->fn=async_fn; \
		async_map->async->parms = io1; \
		async_map->async->req = req; \
		async_map->async->f = file; \
		async_map->async->proxy = private; \
		async_map->async->c_req = creq; \
		/* If async_chain_handler is installed, get the list from param */ \
		if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
			struct async_info *i=creq->async.private; \
			DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
		} else if (creq->async.fn) { \
			/* incompatible handler installed */ \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL incompatible handler already installed\n",__FUNCTION__)); \
			return (error); \
		} else { \
			DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
		} \
	} \
} while(0)
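
/* Chaining in practice, as used by proxy_qpathinfo below: queue the handler
   that populates the cache, then install the generic chain manager:

	c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs,
						&ndr_table_rpcproxy,
						NDR_PROXY_GETINFO, r);
	ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qpathinfo, NT_STATUS_INTERNAL_ERROR);
	ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);

   async_chain_handler is then expected to walk the async_info_map list,
   handing each fn its two io pointers and the accumulated NTSTATUS. */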

static void async_dirmon_notify(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	struct fdirmon *dirmon;
	struct fdirmon_callback *callback;
	struct proxy_private *proxy = async->proxy;

	NTSTATUS status;

	DEBUG(5,("%s: dirmon %p invalidated\n",__LOCATION__, (void*)async->f));

	dirmon = talloc_get_type_abort((void*)async->f, struct fdirmon);

	status = smb_raw_changenotify_recv(c_req, req, async->parms);
	DEBUG(5,("%s: update status %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));

	dirmon->notify_req=NULL;
	DLIST_FOR_EACH(dirmon->callbacks, callback, callback->fn(callback->data, dirmon));
	/* So nothing can find it even if there are still in-use references */
	DLIST_REMOVE(proxy->dirmons, dirmon);
	/* free it */
	//talloc_steal(async, search_cache);
	talloc_free(async);
	talloc_free(dirmon);
}

struct fdirmon* get_fdirmon(struct proxy_private *proxy, const char* path, bool dir_only) {
	const char *file;
	int pathlen;

	if ((file=strrchr(path,'\\'))) {
		if (dir_only) {
			pathlen = file - path;
			file++;
		} else {
			pathlen=strlen(path);
		}
	} else {
		file = path;
		pathlen = 0;
	}

	struct fdirmon *dirmon;
	/* see if we have a matching dirmon */
	DLIST_FIND(proxy->dirmons, dirmon, (strlen(dirmon->dir) == pathlen && fstrncmp(path, dirmon->dir, pathlen)==0));
	if (! dirmon) {
		int saved_timeout;

		dirmon=talloc_zero(proxy, struct fdirmon);
		if (! dirmon) {
			goto error;
		}
		if (! (dirmon->dir=talloc_strndup(dirmon, path, pathlen))) {
			goto error;
		}
		if (! (dirmon->notify_io=talloc_zero(dirmon, union smb_notify))) {
			goto error;
		}

		dirmon->dir_fnum=smbcli_nt_create_full(proxy->tree, dirmon->dir,
						       0,
						       SEC_FILE_READ_DATA,
						       FILE_ATTRIBUTE_NORMAL,
						       NTCREATEX_SHARE_ACCESS_MASK,
						       NTCREATEX_DISP_OPEN,
						       NTCREATEX_OPTIONS_DIRECTORY,
						       NTCREATEX_IMPERSONATION_IMPERSONATION);
		if (dirmon->dir_fnum==65535) {
			goto error;
		}

		saved_timeout = proxy->transport->options.request_timeout;
		/* request notify changes on cache before we start to fill it */
		dirmon->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
		dirmon->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
		dirmon->notify_io->nttrans.in.file.fnum=dirmon->dir_fnum;
		dirmon->notify_io->nttrans.in.recursive=false;
		dirmon->notify_io->nttrans.in.buffer_size=1024;
		proxy->transport->options.request_timeout = 0;
		dirmon->notify_req=smb_raw_changenotify_send(proxy->tree, dirmon->notify_io);
		/* Make the request hang around so we can tell if it needs cancelling */
		talloc_reference(dirmon, dirmon->notify_req);
		proxy->transport->options.request_timeout = saved_timeout;

		if (! dirmon->notify_req) {
			goto error;
		} else {
			struct ntvfs_request *req=NULL;
			struct smbcli_request *c_req=dirmon->notify_req;
			union smb_notify *io=dirmon->notify_io;
			struct proxy_private *private=proxy;
			ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_dirmon_notify,
						    (void*) dirmon, c_req->async.private);
			DLIST_ADD(private->dirmons, dirmon);
		}
	}

	return dirmon;
error:
	talloc_free(dirmon);
	return NULL;
}

bool dirmon_add_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data) {
	struct fdirmon_callback *callback=talloc_zero(dirmon, struct fdirmon_callback);
	if (! callback) {
		return false;
	}
	callback->data=data;
	callback->fn=fn;
	DLIST_ADD(dirmon->callbacks, callback);
	return true;
}
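
/* Typical dirmon usage, as in proxy_qpathinfo below: monitor the parent
   directory of a cached path and register a callback that throws the cached
   entry away when anything in that directory changes:

	struct fdirmon *dirmon = get_fdirmon(private, path, true);
	if (dirmon) dirmon_add_callback(dirmon, async_qpathinfo_notify, f);

   One dirmon is shared by all watchers of the same directory; the callbacks
   all fire once when the change notify completes, after which the dirmon
   frees itself. */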

/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	(io->generic.level == RAW_OPEN_NTCREATEX && \
	 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID)\
	?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window))\
	:(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))

struct search_cache* find_partial_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
	struct search_cache* result;
	DLIST_FIND(search_cache, result,
		   (result->key.level == search_cache_key->level) &&
		   (result->key.data_level == search_cache_key->data_level) &&
		   (result->key.search_attrib == search_cache_key->search_attrib) &&
		   (result->key.flags == search_cache_key->flags) &&
		   (result->key.storage_type == search_cache_key->storage_type) &&
		   (fstrcmp(result->key.pattern, search_cache_key->pattern) == 0));
	DEBUG(5,("%s: found %p\n",__LOCATION__,result));
	return result;
}

struct search_cache* find_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
	struct search_cache* result = find_partial_search_cache(search_cache, search_cache_key);
	if (result && result->status == SEARCH_CACHE_COMPLETE) {
		DEBUG(5,("%s: found complete %p\n",__LOCATION__,result));
		return result;
	}
	DEBUG(5,("%s: found INCOMPLETE %p\n",__LOCATION__,result));
	return NULL;
}
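
/* smbsrv_fnum recovers the wire fnum that the client sees for an ntvfs
   handle: smbsrv_push_fnum() writes the fnum in wire (little-endian) order
   and SVAL() reads it back as a host-order value. */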
uint16_t smbsrv_fnum(struct ntvfs_handle *h) {
	uint16_t fnum;
	smbsrv_push_fnum((uint8_t *)&fnum, 0, h);
	return SVAL(&fnum, 0);
}

static void async_search_cache_notify(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	struct search_cache *search_cache;
	NTSTATUS status;

	DEBUG(5,("%s: search cache %p invalidated\n",__LOCATION__, (void*)async->f));

	search_cache = talloc_get_type_abort((void*)async->f, struct search_cache);

	status = smb_raw_changenotify_recv(c_req, req, async->parms);
	DEBUG(5,("%s: update status %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));

	search_cache->notify_req=NULL;
	/* dispose of the search_cache */
	search_cache->status=SEARCH_CACHE_DEAD;
	/* So nothing can find it even if there are still in-use references */
	DLIST_REMOVE(search_cache->proxy->search_caches, search_cache);
	/* free it */
	//talloc_steal(async, search_cache);
	talloc_free(async);
}

/*
  destroy a search handle
*/
static int search_handle_destructor(struct search_handle *s)
{
	DLIST_REMOVE(s->proxy->search_handles, s);
	DEBUG(5,("%s: handle destructor %p\n",__LOCATION__,s));
	return 0;
}

static int search_cache_destructor(struct search_cache *s)
{
	NTSTATUS status;

	DLIST_REMOVE(s->proxy->search_caches, s);
	DEBUG(5,("%s: cache destructor %p\n",__LOCATION__,s));
	if (s->notify_req) {
		status=smb_raw_ntcancel(s->notify_req);
		s->notify_req=NULL;
		DEBUG(5,("%s: Cancel notification %s\n",__LOCATION__,get_friendly_nt_error_msg (status)));
	}
	if (s->dir_fnum!=65535) {
		struct smbcli_request *req;
		union smb_close close_parms;
		close_parms.close.level = RAW_CLOSE_CLOSE;
		close_parms.close.in.file.fnum = s->dir_fnum;
		close_parms.close.in.write_time = 0;

		/* destructor may be called from a notify response and won't be able
		   to wait on this close response, not that we care anyway */
		req=smb_raw_close_send(s->proxy->tree, &close_parms);

		DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, s->dir_fnum, req));
		s->dir_fnum=65535;
	}
	return 0;
}

struct search_cache* new_search_cache(struct proxy_private *private, struct search_cache_key* key) {
	/* need to opendir the folder being searched so we can get a notification */
	uint16_t dir_fnum=65535;
	struct search_cache *search_cache=NULL;

	search_cache=talloc_zero(private, struct search_cache);
	DEBUG(5,("%s: Start new cache %p for %s\n",__LOCATION__, search_cache, key->pattern));
	if (! search_cache) {
		return NULL;
	}
	search_cache->proxy=private;
	if (! (search_cache->dir=talloc_dirname(search_cache, key->pattern))) {
		goto error;
	}
	if (! (search_cache->notify_io=talloc_zero(search_cache, union smb_notify))) {
		goto error;
	}
	search_cache->key=*key;
	/* make private copy of pattern now that we need it AND have something to own it */
	if (! (search_cache->key.pattern=talloc_strdup(search_cache, search_cache->key.pattern))) {
		goto error;
	}
	dir_fnum=smbcli_nt_create_full(private->tree, search_cache->dir,
				       0,
				       SEC_FILE_READ_DATA,
				       FILE_ATTRIBUTE_NORMAL,
				       NTCREATEX_SHARE_ACCESS_MASK,
				       NTCREATEX_DISP_OPEN,
				       NTCREATEX_OPTIONS_DIRECTORY,
				       NTCREATEX_IMPERSONATION_IMPERSONATION);
	DEBUG(5,("%s: %d=opendir on %s\n",__LOCATION__,dir_fnum, search_cache->dir));
	if (dir_fnum==65535) {
		goto error;
	}
	/* The destructor will close the handle */
	talloc_set_destructor(search_cache, search_cache_destructor);
	search_cache->dir_fnum=dir_fnum;
	DEBUG(5,("%s: Start new cache %p, dir_fnum %d\n",__LOCATION__, search_cache, dir_fnum));

	{
		int saved_timeout = private->transport->options.request_timeout;

		/* request notify changes on cache before we start to fill it */
		search_cache->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
		search_cache->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
		search_cache->notify_io->nttrans.in.file.fnum=dir_fnum;
		search_cache->notify_io->nttrans.in.recursive=false;
		search_cache->notify_io->nttrans.in.buffer_size=1024;
		private->transport->options.request_timeout = 0;
		search_cache->notify_req=smb_raw_changenotify_send(private->tree, search_cache->notify_io);
		/* Make the request hang around so we can tell if it needs cancelling */
		talloc_reference(search_cache, search_cache->notify_req);
		private->transport->options.request_timeout = saved_timeout;
	}

	if (! search_cache->notify_req) {
		goto error;
	} else {
		struct ntvfs_request *req=NULL;
		struct smbcli_request *c_req=search_cache->notify_req;
		union smb_notify *io=search_cache->notify_io;
		ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_search_cache_notify,
					    (void*) search_cache, c_req->async.private);
		DLIST_ADD_END(private->search_caches, search_cache, struct search_cache*);
	}

	return search_cache;
error:
	talloc_free(search_cache);
	return NULL;
}
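
/* Search cache lifecycle: created SEARCH_CACHE_INCOMPLETE here while a
   search is in flight, presumably promoted to SEARCH_CACHE_COMPLETE by the
   search code that fills it (not shown in this section), and marked
   SEARCH_CACHE_DEAD (and unlinked) by async_search_cache_notify as soon as
   the directory changes. Only COMPLETE caches are served whole by
   find_search_cache(); partial ones can still satisfy single-file lookups
   via find_search_cache_item() below. */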

/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}

	c_req = smb_raw_unlink_send(private->tree, unl);

	SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
	    && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
		return proxy_rpclite(ntvfs, req, io);
	}

	SETUP_PID_AND_FILE;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_ioctl(private->tree, req, io);
	}

	c_req = smb_raw_ioctl_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_ioctl);
}

/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_chkpath *cp)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_chkpath(private->tree, cp);
	}

	c_req = smb_raw_chkpath_send(private->tree, cp);

	SIMPLE_ASYNC_TAIL;
}

static bool find_search_cache_item(const char* path,
				   struct search_cache **search_cache,
				   struct search_cache_item **item) {
	struct search_cache *s=*search_cache;
	struct search_cache_item *i=*item;
	const char* file;
	int dir_len;

	/* see if we can satisfy from a directory cache */
	DEBUG(5,("%s: Looking for pathinfo: '%s'\n",__LOCATION__,path));
	if ((file=strrchr(path,'\\'))) {
		dir_len = file - path;
		/* point past the \ */
		file++;
	} else {
		file = path;
		dir_len = 0;
	}

	/* convert empty path to . so we can find it in the cache */
	if (! *file) {
		file=".";
	}
	DEBUG(5,("%s: Path='%s' File='%s'\n",__LOCATION__,path, file));

	/* Note we don't care if the cache is partial, as long as it has a hit */
	while(s) {
		/* One day we may support all directory levels */
		DLIST_FIND(s, s, (s->key.data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
				  strlen(s->dir)==dir_len &&
				  fstrncmp(s->dir, path, dir_len)==0));
		if (! s) {
			break;
		}
		DEBUG(5,("%s: found cache %p\n",__LOCATION__,s));
		/* search s for io->generic.in.file.path */
		DLIST_FIND(s->items, i, (i->data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
					 ((i->file->both_directory_info.name.s &&
					   fstrcmp(i->file->both_directory_info.name.s, file) ==0) ||
					  (i->file->both_directory_info.short_name.s &&
					   fstrcmp(i->file->both_directory_info.short_name.s, file)==0)
					 )));
		DEBUG(5,("%s: found cache %p item %p\n",__LOCATION__,s, i));
		if (i) {
			*item=i;
			*search_cache=s;
			return true;
		}
		s=s->next;
		DEBUG(5,("%s: continue search at %p\n",__LOCATION__,s));
	}
	*item=i;
	*search_cache=s;
	return false;
}
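
/* The lookup above matches on both the long and the 8.3 short name, case
   insensitively, so a cache filled by any BOTH_DIRECTORY_INFO search can
   answer pathinfo for either form of the name. On a miss it returns false
   and leaves the out parameters at whatever the walk ended on. */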

static void proxy_set_cache_info(struct file_metadata *metadata, struct proxy_GetInfo *r) {
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION) ||
	    NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)) {
		metadata->info_data.create_time=r->out.info_data[0].create_time;
		metadata->info_data.access_time=r->out.info_data[0].access_time;
		metadata->info_data.write_time=r->out.info_data[0].write_time;
		metadata->info_data.change_time=r->out.info_data[0].change_time;
		metadata->info_data.attrib=r->out.info_data[0].attrib;
		metadata->valid|=valid_RAW_FILEINFO_BASIC_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)) {
		metadata->info_data.ea_size=r->out.info_data[0].ea_size;
		metadata->info_data.alloc_size=r->out.info_data[0].alloc_size;
		metadata->info_data.size=r->out.info_data[0].size;
		metadata->info_data.nlink=r->out.info_data[0].nlink;
		/* Are we duping this right? Would talloc_reference be ok? */
		//f->metadata->info_data.fname=
		metadata->info_data.fname.s=talloc_memdup(metadata, r->out.info_data[0].fname.s, r->out.info_data[0].fname.count);
		metadata->info_data.fname.count=r->out.info_data[0].fname.count;
		metadata->info_data.delete_pending=r->out.info_data[0].delete_pending;
		metadata->info_data.directory=r->out.info_data[0].directory;
		metadata->valid|=valid_RAW_FILEINFO_ALL_INFO | valid_RAW_FILEINFO_STANDARD_INFO;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO)) {
		metadata->info_data.compressed_size=r->out.info_data[0].compressed_size;
		metadata->info_data.format=r->out.info_data[0].format;
		metadata->info_data.unit_shift=r->out.info_data[0].unit_shift;
		metadata->info_data.chunk_shift=r->out.info_data[0].chunk_shift;
		metadata->info_data.cluster_shift=r->out.info_data[0].cluster_shift;
		metadata->valid|=valid_RAW_FILEINFO_COMPRESSION_INFO;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION)) {
		metadata->info_data.file_id=r->out.info_data[0].file_id;
		metadata->valid|=valid_RAW_FILEINFO_INTERNAL_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION)) {
		metadata->info_data.access_flags=r->out.info_data[0].access_flags;
		metadata->valid|=valid_RAW_FILEINFO_ACCESS_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION)) {
		metadata->info_data.position=r->out.info_data[0].position;
		metadata->valid|=valid_RAW_FILEINFO_POSITION_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION)) {
		metadata->info_data.mode=r->out.info_data[0].mode;
		metadata->valid|=valid_RAW_FILEINFO_MODE_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION)) {
		metadata->info_data.alignment_requirement=r->out.info_data[0].alignment_requirement;
		metadata->valid|=valid_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION)) {
		metadata->info_data.reparse_tag=r->out.info_data[0].reparse_tag;
		metadata->info_data.reparse_attrib=r->out.info_data[0].reparse_attrib;
		metadata->valid|=valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO)) {
		metadata->info_data.num_streams=r->out.info_data[0].num_streams;
		talloc_free(metadata->info_data.streams);
		metadata->info_data.streams=talloc_steal(metadata, r->out.info_data[0].streams);
		metadata->valid|=valid_RAW_FILEINFO_STREAM_INFO;
	}
}

/* satisfy a file-info request from cache */
NTSTATUS proxy_cache_info(union smb_fileinfo *io, struct file_metadata *metadata, bool *valid)
{
#define SET_VALID(FLAG) do { \
	if (valid) *valid=!!(metadata->valid & valid_ ## FLAG); \
	DEBUG(5,("%s check %s=%d (%x)\n",__FUNCTION__, #FLAG, !!(metadata->valid & valid_ ## FLAG), metadata->valid)); \
} while(0)
	/* and now serve the request from the cache */
	switch(io->generic.level) {
	case RAW_FILEINFO_BASIC_INFORMATION:
		SET_VALID(RAW_FILEINFO_BASIC_INFORMATION);
		io->basic_info.out.create_time=metadata->info_data.create_time;
		io->basic_info.out.access_time=metadata->info_data.access_time;
		io->basic_info.out.write_time=metadata->info_data.write_time;
		io->basic_info.out.change_time=metadata->info_data.change_time;
		io->basic_info.out.attrib=metadata->info_data.attrib;
		return metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION;
	case RAW_FILEINFO_ALL_INFO:
		SET_VALID(RAW_FILEINFO_ALL_INFO);
		io->all_info.out.create_time=metadata->info_data.create_time;
		io->all_info.out.access_time=metadata->info_data.access_time;
		io->all_info.out.write_time=metadata->info_data.write_time;
		io->all_info.out.change_time=metadata->info_data.change_time;
		io->all_info.out.attrib=metadata->info_data.attrib;
		io->all_info.out.alloc_size=metadata->info_data.alloc_size;
		io->all_info.out.size=metadata->info_data.size;
		io->all_info.out.directory=metadata->info_data.directory;
		io->all_info.out.nlink=metadata->info_data.nlink;
		io->all_info.out.delete_pending=metadata->info_data.delete_pending;
		io->all_info.out.fname.s=metadata->info_data.fname.s;
		io->all_info.out.fname.private_length=metadata->info_data.fname.count;
		return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
	case RAW_FILEINFO_STANDARD_INFO:
	case RAW_FILEINFO_STANDARD_INFORMATION:
		SET_VALID(RAW_FILEINFO_ALL_INFO);
		io->standard_info.out.alloc_size=metadata->info_data.alloc_size;
		io->standard_info.out.size=metadata->info_data.size;
		io->standard_info.out.directory=metadata->info_data.directory;
		io->standard_info.out.nlink=metadata->info_data.nlink; /* may be wrong */
		io->standard_info.out.delete_pending=metadata->info_data.delete_pending;
		return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
	case RAW_FILEINFO_EA_INFO:
	case RAW_FILEINFO_EA_INFORMATION:
		SET_VALID(RAW_FILEINFO_ALL_INFO);
		io->ea_info.out.ea_size=metadata->info_data.ea_size;
		return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
	case RAW_FILEINFO_COMPRESSION_INFO:
		SET_VALID(RAW_FILEINFO_COMPRESSION_INFO);
		io->compression_info.out.compressed_size=metadata->info_data.compressed_size;
		io->compression_info.out.format=metadata->info_data.format;
		io->compression_info.out.unit_shift=metadata->info_data.unit_shift;
		io->compression_info.out.chunk_shift=metadata->info_data.chunk_shift;
		io->compression_info.out.cluster_shift=metadata->info_data.cluster_shift;
		return metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO;
	case RAW_FILEINFO_INTERNAL_INFORMATION:
		SET_VALID(RAW_FILEINFO_INTERNAL_INFORMATION);
		io->internal_information.out.file_id=metadata->info_data.file_id;
		return metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION;
	case RAW_FILEINFO_ACCESS_INFORMATION:
		SET_VALID(RAW_FILEINFO_ACCESS_INFORMATION);
		io->access_information.out.access_flags=metadata->info_data.access_flags;
		return metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION;
	case RAW_FILEINFO_POSITION_INFORMATION:
		SET_VALID(RAW_FILEINFO_POSITION_INFORMATION);
		io->position_information.out.position=metadata->info_data.position;
		return metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION;
	case RAW_FILEINFO_MODE_INFORMATION:
		SET_VALID(RAW_FILEINFO_MODE_INFORMATION);
		io->mode_information.out.mode=metadata->info_data.mode;
		return metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION;
	case RAW_FILEINFO_ALIGNMENT_INFORMATION:
		SET_VALID(RAW_FILEINFO_ALIGNMENT_INFORMATION);
		io->alignment_information.out.alignment_requirement=metadata->info_data.alignment_requirement;
		return metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
		SET_VALID(RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
		io->attribute_tag_information.out.reparse_tag=metadata->info_data.reparse_tag;
		io->attribute_tag_information.out.attrib=metadata->info_data.reparse_attrib;
		return metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	case RAW_FILEINFO_STREAM_INFO:
	case RAW_FILEINFO_STREAM_INFORMATION:
		SET_VALID(RAW_FILEINFO_STREAM_INFO);
		io->stream_info.out.num_streams=metadata->info_data.num_streams;
		if (metadata->info_data.num_streams > 0) {
			int c;
			io->stream_info.out.streams = talloc_zero_array(io, struct stream_struct, metadata->info_data.num_streams);
			if (! io->stream_info.out.streams) {
				if (valid) *valid=false;
				io->stream_info.out.num_streams=0;
				return NT_STATUS_NO_MEMORY;
			}
			for (c=0; c<io->stream_info.out.num_streams; c++) {
				io->stream_info.out.streams[c].size = metadata->info_data.streams[c].size;
				io->stream_info.out.streams[c].alloc_size = metadata->info_data.streams[c].alloc_size;
				io->stream_info.out.streams[c].stream_name.s = talloc_reference(io, metadata->info_data.streams[c].stream_name.s);
				io->stream_info.out.streams[c].stream_name.private_length = metadata->info_data.streams[c].stream_name.count;
			}
		} else {
			io->stream_info.out.streams=NULL;
		}
		return metadata->info_data.status_RAW_FILEINFO_STREAM_INFO;
	default:
		DEBUG(5,("%s: Unknown request\n",__FUNCTION__));
		if (valid) *valid=false;
		return NT_STATUS_INTERNAL_ERROR;
	}
}

/*
  a handler for async qpathinfo replies
*/
static void async_qpathinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

static NTSTATUS async_proxy_qpathinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct proxy_private *private = async->proxy;
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = talloc_get_type_abort(async->f, struct proxy_file);
	union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
	struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);

	if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
	req->async_states->status=status;

	NT_STATUS_NOT_OK_RETURN(status);

	/* populate the cache, and then fill the request from the cache */
	/* Assuming that r->count.in == 1 */
	SMB_ASSERT(r->out.count==1);
	NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);

	DEBUG(5,("%s: will set cache item=%p metadata=%p r=%p\n",__LOCATION__, f, f?f->metadata:NULL, r));
	proxy_set_cache_info(f->metadata, r);

	req->async_states->status=proxy_cache_info(io, f->metadata, NULL);

	return req->async_states->status;
}

static void async_qpathinfo_notify(void* data, struct fdirmon* dirmon) {
	struct proxy_file* file=data;

	DEBUG(5,("%s: qpathinfo cache %s destroyed\n",__LOCATION__,file->filename));
	DLIST_REMOVE(file->proxy->closed_files, file);
	talloc_free(file);
}
1474 return info on a pathname
1476 static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
1477 struct ntvfs_request *req, union smb_fileinfo *io)
1479 struct proxy_private *private = ntvfs->private_data;
1480 struct smbcli_request *c_req;
1481 struct proxy_file *f=NULL;
1482 const char* path;
1484 SETUP_PID;
1486 /* Look for closed files */
1487 if (private->enabled_qpathinfo) {
1488 int len=strlen(io->generic.in.file.path)+1;
1489 DEBUG(5,("%s: Looking for cached metadata for: %s\n",__LOCATION__,io->generic.in.file.path));
1490 DLIST_FIND(private->closed_files, f,
1491 (len==f->filename_size && fstrncmp(io->generic.in.file.path, f->filename, f->filename_size)==0));
1492 if (f) {
1493 /* stop cache going away while we are using it */
1494 talloc_reference(req, f);
1497 /* upgrade the request */
1498 switch(io->generic.level) {
1499 case RAW_FILEINFO_STANDARD_INFO:
1500 case RAW_FILEINFO_STANDARD_INFORMATION:
1501 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1502 case RAW_FILEINFO_ALL_INFO:
1503 case RAW_FILEINFO_COMPRESSION_INFO:
1504 case RAW_FILEINFO_INTERNAL_INFORMATION:
1505 case RAW_FILEINFO_ACCESS_INFORMATION:
1506 case RAW_FILEINFO_POSITION_INFORMATION:
1507 case RAW_FILEINFO_MODE_INFORMATION:
1508 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1509 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1510 case RAW_FILEINFO_STREAM_INFO:
1511 case RAW_FILEINFO_STREAM_INFORMATION:
1512 case RAW_FILEINFO_EA_INFO:
1513 case RAW_FILEINFO_EA_INFORMATION:
1514 DEBUG(5,("%s: item is %p\n",__FUNCTION__, f));
1515 if (f && f->metadata) {
1516 NTSTATUS status;
1517 bool valid;
1518 DEBUG(5,("%s: Using cached metadata %x (item=%p)\n",__FUNCTION__, f->metadata->valid, f));
1519 status=proxy_cache_info(io, f->metadata, &valid);
1520 if (valid) return status;
1521 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1523 /* construct an item to hold the cache if we need to */
1524 if (! f && private->enabled_cache_info && PROXY_REMOTE_SERVER(private) && (f=talloc_zero(private, struct proxy_file))) {
1525 struct fdirmon* dirmon;
1526 dirmon=get_fdirmon(private, io->generic.in.file.path, true);
1527 f->proxy=private;
1528 dirmon_add_callback(dirmon, async_qpathinfo_notify, f);
1530 f->filename=talloc_strdup(f, io->generic.in.file.path);
1531 f->filename_size=strlen(f->filename)+1;
1532 f->metadata=talloc_zero(f, struct file_metadata);
1533 /* should not really add unless we succeeded */
1534 DLIST_ADD(private->closed_files, f);
1537 if (f && f->metadata && private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1538 struct proxy_GetInfo *r;
1539 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1541 r=talloc_zero(req, struct proxy_GetInfo);
1542 NT_STATUS_HAVE_NO_MEMORY(r);
1544 r->in.count=1;
1545 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1546 r->in.info_tags[0].tag_type=TAG_TYPE_PATH_INFO;
1547 /* 1+ to get the null */
1548 r->in.info_tags[0].info_tag.path.count=1+strlen(io->generic.in.file.path);
1549 r->in.info_tags[0].info_tag.path.s=io->generic.in.file.path;
1550 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1551 /* the callback handler will populate the cache and respond from the cache */
1552 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qpathinfo, NT_STATUS_INTERNAL_ERROR);
1554 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1555 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1556 return sync_chain_handler(c_req);
1557 } else {
1558 void* f=NULL;
1559 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1560 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1561 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1562 return NT_STATUS_OK;
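/* summary note (editorial, not in the original): the promoted path above
   lets async_proxy_qpathinfo populate the metadata cache and reply from
   it; requests that reach the code below fall back to a plain
   smb_raw_pathinfo round-trip against the server */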
1567 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1568 return smb_raw_pathinfo(private->tree, req, io);
1571 c_req = smb_raw_pathinfo_send(private->tree, io);
1573 ASYNC_RECV_TAIL(io, async_qpathinfo);
1577 a handler for async qfileinfo replies
1579 static void async_qfileinfo(struct smbcli_request *c_req)
1581 struct async_info *async = c_req->async.private;
1582 struct ntvfs_request *req = async->req;
1583 req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1584 talloc_free(async);
1585 req->async_states->send_fn(req);
1588 static NTSTATUS async_proxy_qfileinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1590 struct proxy_private *private = async->proxy;
1591 struct smbcli_request *c_req = async->c_req;
1592 struct ntvfs_request *req = async->req;
1593 struct proxy_file *f = async->f;
1594 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1595 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1597 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1598 req->async_states->status=status;
1600 NT_STATUS_NOT_OK_RETURN(status);
1602 /* populate the cache, and then fill the request from the cache */
1603 /* Assuming that r->in.count == 1 */
1604 SMB_ASSERT(r->out.count==1);
1605 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1607 proxy_set_cache_info(f->metadata, r);
1609 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1611 return req->async_states->status;
1615 query info on an open file
1617 static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
1618 struct ntvfs_request *req, union smb_fileinfo *io)
1620 struct proxy_private *private = ntvfs->private_data;
1621 struct smbcli_request *c_req;
1622 struct proxy_file *f;
1623 bool valid=false;
1624 NTSTATUS status;
1626 SETUP_PID;
1628 SETUP_FILE_HERE(f);
1630 /* upgrade the request */
1631 switch(io->generic.level) {
1632 case RAW_FILEINFO_STANDARD_INFO:
1633 case RAW_FILEINFO_STANDARD_INFORMATION:
1634 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1635 case RAW_FILEINFO_ALL_INFO:
1636 case RAW_FILEINFO_COMPRESSION_INFO:
1637 case RAW_FILEINFO_INTERNAL_INFORMATION:
1638 case RAW_FILEINFO_ACCESS_INFORMATION:
1639 case RAW_FILEINFO_POSITION_INFORMATION:
1640 case RAW_FILEINFO_MODE_INFORMATION:
1641 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1642 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1643 case RAW_FILEINFO_STREAM_INFO:
1644 case RAW_FILEINFO_STREAM_INFORMATION:
1645 case RAW_FILEINFO_EA_INFO:
1646 case RAW_FILEINFO_EA_INFORMATION:
1647 DEBUG(5,("%s: oplock is %d\n",__FUNCTION__, f->oplock));
1648 if (f->oplock) {
1649 DEBUG(5,("%s: %p Using cached metadata %x (fnum=%d)\n",__FUNCTION__, f, f->metadata->valid, f->fnum));
1650 status=proxy_cache_info(io, f->metadata, &valid);
1651 if (valid) return status;
1652 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1654 if (private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1655 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1656 struct proxy_GetInfo *r=talloc_zero(req, struct proxy_GetInfo);
1657 NT_STATUS_HAVE_NO_MEMORY(r);
1658 r->in.count=1;
1659 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1660 r->in.info_tags[0].tag_type=TAG_TYPE_FILE_INFO;
1661 r->in.info_tags[0].info_tag.fnum=io->generic.in.file.fnum;
1662 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1663 /* the callback handler will populate the cache and respond from the cache */
1664 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qfileinfo, NT_STATUS_INTERNAL_ERROR);
1666 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1667 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1668 return sync_chain_handler(c_req);
1669 } else {
1670 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1671 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1672 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1673 return NT_STATUS_OK;
1678 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1679 return smb_raw_fileinfo(private->tree, req, io);
1682 c_req = smb_raw_fileinfo_send(private->tree, io);
1684 ASYNC_RECV_TAIL(io, async_qfileinfo);
1688 set info on a pathname
1690 static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
1691 struct ntvfs_request *req, union smb_setfileinfo *st)
1693 struct proxy_private *private = ntvfs->private_data;
1694 struct smbcli_request *c_req;
1696 SETUP_PID;
1698 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1699 return smb_raw_setpathinfo(private->tree, st);
1702 c_req = smb_raw_setpathinfo_send(private->tree, st);
1704 SIMPLE_ASYNC_TAIL;
1709 a handler for async open replies
1711 static void async_open(struct smbcli_request *c_req)
1713 struct async_info *async = c_req->async.private;
1714 struct proxy_private *proxy = async->proxy;
1715 struct ntvfs_request *req = async->req;
1716 struct proxy_file *f = async->f;
1717 union smb_open *io = async->parms;
1718 union smb_handle *file;
1720 talloc_free(async);
1721 req->async_states->status = smb_raw_open_recv(c_req, req, io);
1722 SMB_OPEN_OUT_FILE(io, file);
1723 f->fnum = file->fnum;
1724 file->ntvfs = NULL;
1725 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1726 req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
1727 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1728 file->ntvfs = f->h;
1729 DLIST_ADD(proxy->files, f);
1731 f->oplock=io->generic.out.oplock_level;
1733 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1734 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1735 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1737 if (proxy->cache_enabled) {
1738 struct search_cache_item *item=NULL;
1739 struct search_cache *s=proxy->search_caches;
1740 /* If we are still monitoring the file for changes we can
1741 retain the previous cache state */
1742 /* yeah yeah what if there is more than one.... :-( */
1743 if (! (io->generic.level == RAW_OPEN_NTCREATEX &&
1744 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) &&
1745 find_search_cache_item(SMB_OPEN_IN_FILE(io), &s, &item) && item->cache) {
1746 DEBUG(5,("%s: Using cached file cache\n",__LOCATION__));
1747 f->cache=talloc_reference(f, item->cache);
1748 cache_beopen(f->cache);
1749 if (item->metadata) {
1750 *(f->metadata)=*(item->metadata);
1751 f->metadata->info_data.fname.s=talloc_strdup(f, item->metadata->info_data.fname.s);
1752 f->metadata->info_data.fname.count=item->metadata->info_data.fname.count;
1754 f->metadata->info_data.streams=talloc_zero_array(f, struct info_stream, f->metadata->info_data.num_streams);
1755 if (f->metadata->info_data.streams) {
1756 int c;
1757 for(c=0; c < f->metadata->info_data.num_streams; c++) {
1758 f->metadata->info_data.streams[c].size = item->metadata->info_data.streams[c].size;
1759 f->metadata->info_data.streams[c].alloc_size = item->metadata->info_data.streams[c].alloc_size;
1760 f->metadata->info_data.streams[c].stream_name.s= talloc_strdup(f, item->metadata->info_data.streams[c].stream_name.s);
1761 f->metadata->info_data.streams[c].stream_name.count=item->metadata->info_data.streams[c].stream_name.count;
1764 f->metadata->count=1;
1766 } else {
1767 f->cache=cache_open(proxy->cache, f, io, f->oplock, proxy->cache_readahead);
1768 if (proxy->fake_valid) {
1769 cache_handle_validated(f, cache_handle_len(f));
1771 if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
1772 if (item) {
1773 item->cache = talloc_reference(item, f->cache);
1774 item->metadata=talloc_reference(item, f->metadata);
1775 DEBUG(5,("%s: Caching file cache for later\n",__LOCATION__));
1776 } else {
1777 DEBUG(5,("%s: NOT Caching file cache for later\n",__LOCATION__));
1782 failed:
1783 req->async_states->send_fn(req);
1787 open a file
1789 static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
1790 struct ntvfs_request *req, union smb_open *io)
1792 struct proxy_private *private = ntvfs->private_data;
1793 struct smbcli_request *c_req;
1794 struct ntvfs_handle *h;
1795 struct proxy_file *f, *clone;
1796 NTSTATUS status;
1797 void *filename;
1798 int filename_size;
1799 uint16_t fnum;
1801 SETUP_PID;
1803 if (io->generic.level != RAW_OPEN_GENERIC &&
1804 private->map_generic) {
1805 return ntvfs_map_open(ntvfs, req, io);
1808 status = ntvfs_handle_new(ntvfs, req, &h);
1809 #warning should we free this handle if the open fails?
1810 NT_STATUS_NOT_OK_RETURN(status);
1812 f = talloc_zero(h, struct proxy_file);
1813 NT_STATUS_HAVE_NO_MEMORY(f);
1814 f->proxy=private;
1816 /* If the file is being opened read only and we already have a read-only
1817 handle for this file, then just clone and ref-count the handle */
1818 /* First calculate the filename key */
1819 if (io->generic.level == RAW_OPEN_NTCREATEX &&
1820 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) {
1821 filename_size=sizeof(uint64_t);
1822 filename=io->generic.in.fname;
1823 } else {
1824 filename=SMB_OPEN_IN_FILE(io);
1825 filename_size=strlen(filename)+1;
1827 f->filename=talloc_memdup(f, filename, filename_size);
1828 f->filename_size=filename_size;
1829 f->h = h;
1830 f->can_clone= (io->generic.in.access_mask & NTCREATEX_SHARE_ACCESS_MASK) == NTCREATEX_SHARE_ACCESS_READ &&
1831 (io->generic.in.impersonation == NTCREATEX_IMPERSONATION_IMPERSONATION) &&
1832 (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY) == 0 &&
1833 (io->generic.in.open_disposition != NTCREATEX_DISP_CREATE) &&
1834 (io->generic.in.open_disposition != NTCREATEX_DISP_SUPERSEDE);
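/* editorial note: a handle is only clone-able when it cannot change the
   file -- read-only share access, not a directory open, and an open
   disposition that cannot create or supersede -- so two such opens can
   safely share one remote fnum and one metadata cache */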
1835 /* see if we have a matching open file */
1836 clone=NULL;
1837 if (f->can_clone) for (clone=private->files; clone; clone=clone->next) {
1838 if (clone->can_clone && filename_size == clone->filename_size &&
1839 memcmp(filename, clone->filename, filename_size)==0) {
1840 break;
1844 /* if clone is not null, then we found a match */
1845 if (private->enabled_open_clone && clone) {
1846 union smb_handle *file;
1848 DEBUG(5,("%s: clone handle %d\n",__FUNCTION__,clone->fnum));
1849 SMB_OPEN_OUT_FILE(io, file);
1850 f->fnum = clone->fnum;
1851 file->ntvfs = NULL;
1852 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1853 NT_STATUS_NOT_OK_RETURN(status);
1854 file->ntvfs = f->h;
1855 DLIST_ADD(private->files, f);
1856 /* but be sure to share the same metadata cache */
1857 f->metadata=talloc_reference(f, clone->metadata);
1858 f->metadata->count++;
1859 f->oplock=clone->oplock;
1860 f->cache=talloc_reference(f, clone->cache);
1861 /* We don't need to reduce the oplocks for both files if we are read-only */
1862 /* if (clone->oplock==EXCLUSIVE_OPLOCK_RETURN ||
1863 clone->oplock==BATCH_OPLOCK_RETURN) {
1864 DEBUG(5,("%s: Breaking clone oplock from %d\n",__LOCATION__, clone->oplock));
1865 clone->oplock==LEVEL_II_OPLOCK_RETURN;
1866 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_LEVEL_II);
1867 //if (!NT_STATUS_IS_OK(status)) result=false;
1868 } else if (clone->oplock==LEVEL_II_OPLOCK_RETURN) {
1869 DEBUG(5,("%s: Breaking clone oplock from %d, cache no longer valid\n",__LOCATION__, clone->oplock));
1870 cache_handle_stale(f);
1871 clone->oplock=NO_OPLOCK_RETURN;
1872 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_NONE);
1873 //if (!NT_STATUS_IS_OK(status)) result=false;
1876 f->oplock=clone->oplock;
1877 /* and fake the rest of the response struct */
1878 io->generic.out.oplock_level=f->oplock;
1879 io->generic.out.create_action=NTCREATEX_ACTION_EXISTED;
1880 io->generic.out.create_time=f->metadata->info_data.create_time;
1881 io->generic.out.access_time=f->metadata->info_data.access_time;
1882 io->generic.out.write_time=f->metadata->info_data.write_time;
1883 io->generic.out.change_time=f->metadata->info_data.change_time;
1884 io->generic.out.attrib=f->metadata->info_data.attrib;
1885 io->generic.out.alloc_size=f->metadata->info_data.alloc_size;
1886 io->generic.out.size=f->metadata->info_data.size;
1887 io->generic.out.file_type=f->metadata->info_data.file_type;
1888 io->generic.out.ipc_state=f->metadata->info_data.ipc_state;
1889 io->generic.out.is_directory=f->metadata->info_data.is_directory;
1890 /* optional return values matching SMB2 tagged
1891 values in the call */
1892 //io->generic.out.maximal_access;
1893 return NT_STATUS_OK;
1895 f->metadata=talloc_zero(f, struct file_metadata);
1896 NT_STATUS_HAVE_NO_MEMORY(f->metadata);
1897 f->metadata->count=1;
1899 /* if oplocks aren't requested, optionally override and request them */
1900 if (! (io->generic.in.flags & (OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK))
1901 && private->fake_oplock) {
1902 io->generic.in.flags |= OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK;
1905 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1906 union smb_handle *file;
1908 status = smb_raw_open(private->tree, req, io);
1909 NT_STATUS_NOT_OK_RETURN(status);
1911 SMB_OPEN_OUT_FILE(io, file);
1912 f->fnum = file->fnum;
1913 file->ntvfs = NULL;
1914 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1915 NT_STATUS_NOT_OK_RETURN(status);
1916 file->ntvfs = f->h;
1917 DLIST_ADD(private->files, f);
1919 f->oplock=io->generic.out.oplock_level;
1921 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1922 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1923 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1925 if (private->cache_enabled) {
1926 f->cache=cache_open(private->cache, f, io, f->oplock, private->cache_readahead);
1927 if (private->fake_valid) {
1928 cache_handle_validated(f, cache_handle_len(f));
1930 if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
1933 return NT_STATUS_OK;
1936 c_req = smb_raw_open_send(private->tree, io);
1938 ASYNC_RECV_TAIL_F(io, async_open, f);
1942 create a directory
1944 static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
1945 struct ntvfs_request *req, union smb_mkdir *md)
1947 struct proxy_private *private = ntvfs->private_data;
1948 struct smbcli_request *c_req;
1950 SETUP_PID;
1952 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1953 return smb_raw_mkdir(private->tree, md);
1956 c_req = smb_raw_mkdir_send(private->tree, md);
1958 SIMPLE_ASYNC_TAIL;
1962 remove a directory
1964 static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
1965 struct ntvfs_request *req, struct smb_rmdir *rd)
1967 struct proxy_private *private = ntvfs->private_data;
1968 struct smbcli_request *c_req;
1970 SETUP_PID;
1972 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1973 return smb_raw_rmdir(private->tree, rd);
1975 c_req = smb_raw_rmdir_send(private->tree, rd);
1977 SIMPLE_ASYNC_TAIL;
1981 rename a set of files
1983 static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
1984 struct ntvfs_request *req, union smb_rename *ren)
1986 struct proxy_private *private = ntvfs->private_data;
1987 struct smbcli_request *c_req;
1989 SETUP_PID;
1991 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1992 return smb_raw_rename(private->tree, ren);
1995 c_req = smb_raw_rename_send(private->tree, ren);
1997 SIMPLE_ASYNC_TAIL;
2001 copy a set of files
2003 static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
2004 struct ntvfs_request *req, struct smb_copy *cp)
2006 return NT_STATUS_NOT_SUPPORTED;
2009 /* we only define this separately so we can easily spot read calls in
2010 pending based on ( c_req->async.fn == async_read_handler ) */
2011 static void async_read_handler(struct smbcli_request *c_req)
2013 async_chain_handler(c_req);
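/* editorial note: proxy_read() below depends on this marker -- it walks
   private->pending and treats any request whose c_req->async.fn is
   async_read_handler as a read-ahead it may attach a fragment to */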
2016 NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2018 struct proxy_private *private = async->proxy;
2019 struct smbcli_request *c_req = async->c_req;
2020 struct proxy_file *f = async->f;
2021 union smb_read *io = async->parms;
2023 /* if request is not already received by a chained handler, read it */
2024 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2026 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2027 f->readahead_pending, private->readahead_spare));
2029 f->readahead_pending--;
2030 private->readahead_spare++;
2032 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2033 f->readahead_pending, private->readahead_spare));
2035 return status;
2039 a handler for async read replies - speculative read-aheads.
2040 It merely saves in the cache. The async chain handler will call send_fn if
2041 there is one, or if sync_chain_handler is used the send_fn is called by
2042 the ntvfs back end.
2044 NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2046 struct smbcli_request *c_req = async->c_req;
2047 struct proxy_file *f = async->f;
2048 union smb_read *io = async->parms;
2050 /* if request is not already received by a chained handler, read it */
2051 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2053 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2054 get_friendly_nt_error_msg(status)));
2056 NT_STATUS_NOT_OK_RETURN(status);
2058 /* if it was a validate read we don't need to save anything unless it failed.
2059 Until we use Proxy_read structs we can't tell, so guess */
2060 if (io->generic.out.nread == io->generic.in.maxcnt &&
2061 io->generic.in.mincnt < io->generic.in.maxcnt) {
2062 /* looks like a validate read, just move the validate pointer, the
2063 original read-request has already been satisfied from cache */
2064 DEBUG(3,("%s megavalidate succeeded, validate to %lld\n",__FUNCTION__,
2065 io->generic.in.offset + io->generic.out.nread));
2066 cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
2067 } else {
2068 DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
2069 cache_handle_save(f, io->generic.out.data,
2070 io->generic.out.nread,
2071 io->generic.in.offset);
2074 DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2075 return status;
2078 /* handler for fragmented reads */
2079 NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2081 struct proxy_private *private = async->proxy;
2082 struct smbcli_request *c_req = async->c_req;
2083 struct ntvfs_request *req = async->req;
2084 struct proxy_file *f = async->f;
2085 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2086 /* this is the io against which the fragment is to be applied */
2087 union smb_read *io = talloc_get_type_abort(io1, union smb_read);
2088 /* this is the io for the read that issued the callback */
2089 union smb_read *io_frag = fragment->io_frag; /* async->parms; */
2090 struct async_read_fragments* fragments=fragment->fragments;
2092 /* if request is not already received by a chained handler, read it */
2093 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
2094 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2096 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2097 get_friendly_nt_error_msg(status)));
2099 fragment->status = status;
2101 /* remove fragment from fragments */
2102 DLIST_REMOVE(fragments->fragments, fragment);
2104 #warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
2105 /* in which case we will want to collate all responses and return a valid read
2106 for the leading NT_STATUS_OK fragments */
2108 /* did this one fail, inducing a general fragments failure? */
2109 if (!NT_STATUS_IS_OK(fragment->status)) {
2110 /* preserve the status of the fragment with the smallest offset
2111 when we can work out how */
2112 if (NT_STATUS_IS_OK(fragments->status)) {
2113 fragments->status=fragment->status;
2116 cache_handle_novalidate(f);
2117 DEBUG(5,("** Devalidated proxy due to read failure\n"));
2118 } else {
2119 /* No fragments have yet failed, keep collecting responses */
2120 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2121 /* Find memcpy window, copy data from the io_frag to the io */
2122 off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
2123 /* used to use mincnt */
2124 off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
2125 off_t end_offset=MIN(io_extent, extent);
2126 /* ASSERT(start_offset <= end_offset) */
2127 /* ASSERT(start_offset <= io_extent) */
2128 if (start_offset >= io_extent) {
2129 DEBUG(3,("useless read-ahead tagged on to: %s\n",__LOCATION__));
2130 } else {
2131 uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
2132 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2133 /* src == dst in cases where we did not latch onto someone else's
2134 read, but are handling our own */
2135 if (src != dst)
2136 memcpy(dst, src, end_offset - start_offset);
2139 /* There should be a better way to detect, but it needs the proxy rpc struct
2140 not the smb_read struct */
2141 if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
2142 DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
2143 (long long) io_frag->generic.out.nread,
2144 (long long) io_frag->generic.in.mincnt,
2145 (long long) io_frag->generic.in.maxcnt));
2146 cache_handle_novalidate(f);
2149 /* We broke up the original read. If not enough of this sub-read has
2150 been read, and then some of the next block, it could leave holes!
2151 We will only acknowledge up to the first partial read, and treat
2152 it as a small read. If the server can return NT_STATUS_OK for a
2153 partial read, so can we, so we preserve the response.
2154 "enough" is all of it (maxcnt), except on the last block, when it has to
2155 be enough to fill io->generic.in.mincnt. We know it is the last block
2156 if nread is small but we could fill io->generic.in.mincnt */
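/* worked example (editorial): a 64k fragment returning only 10k, while
   the master read still needs later bytes to reach its mincnt, caps the
   master nread at this fragment's extent and latches fragments->partial
   so later successes cannot extend it */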
2157 if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
2158 end_offset < io->generic.in.offset + io->generic.in.mincnt) {
2159 DEBUG(4,("Fragmented read only partially successful\n"));
2161 /* Shrink the master nread (or grow to this size if we are the first partial) */
2162 if (! fragments->partial ||
2163 (io->generic.in.offset + io->generic.out.nread) > extent) {
2164 io->generic.out.nread = extent - io->generic.in.offset;
2167 /* stop any further successes from extending the partial read */
2168 fragments->partial=true;
2169 } else {
2170 /* only grow the master nread if we haven't logged a partial read */
2171 if (! fragments->partial &&
2172 (io->generic.in.offset + io->generic.out.nread) < extent ) {
2173 io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
2178 /* Was it the last fragment, or do we know enough to send a response? */
2179 if (! fragments->fragments) {
2180 DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
2181 io->generic.out.nread, io->generic.in.mincnt,
2182 get_friendly_nt_error_msg(fragments->status)));
2183 if (fragments->async) {
2184 req->async_states->status=fragments->status;
2185 DEBUG(5,("Fragments async response sending\n"));
2186 #warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
2187 /* esp. as they may be attached to by other reads. Maybe attachers should be taking a reference, but how will they
2188 know the top level they need to take a reference to.. */
2189 #warning should really queue a sender here, not call it
2190 req->async_states->send_fn(req);
2191 DEBUG(5,("Async response sent\n"));
2192 } else {
2193 DEBUG(5,("Fragments SYNC return\n"));
2197 /* because a c_req may be shared by many req, chained handlers must return
2198 a status pertaining to the general validity of this specific c_req, not
2199 to their own private processing of the c_req for the benefit of their req
2200 which is returned in fragments->status
2202 return status;
2205 /* Issue read-ahead X bytes where X is the window size calculation based on
2206 server_latency * server_session_bandwidth
2207 where latency is the idle (link) latency and bandwidth is less than or equal
2208 to the actual bandwidth available to the server.
2209 Read-ahead should honour locked areas in whatever way is necessary (who knows?)
2210 read_ahead is defined here and not in the cache engine because it requires too
2211 much knowledge of private structures
2213 /* The concept is buggy unless we can tell the next proxy that these are
2214 read-aheads, otherwise chained proxy setups will each read-ahead of the
2215 read-ahead which can put a larger load on the final server.
2216 Also we probably need to distinguish between
2217 * cache-less read-ahead
2218 * cache-revalidating read-ahead
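/* illustrative sketch only -- not part of this file and not called
   anywhere; one plausible way to derive the read-ahead window from the
   bandwidth-delay product described above. The parameter names are
   hypothetical, not existing configuration options. */
static ssize_t readahead_window_hint(unsigned latency_ms, unsigned bandwidth_kBps)
{
	/* bytes that must be in flight to keep the link busy:
	   latency * bandwidth; ms * kB/s conveniently cancels to plain bytes */
	return (ssize_t)latency_ms * (ssize_t)bandwidth_kBps;
}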
2220 NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
2221 union smb_read *io, ssize_t as_read)
2223 struct proxy_private *private = ntvfs->private_data;
2224 struct smbcli_tree *tree = private->tree;
2225 struct cache_file_entry *cache;
2226 off_t next_position; /* this read offset+length+window */
2227 off_t end_position; /* position we read-ahead to */
2228 off_t cache_populated;
2229 off_t read_position, new_extent;
2231 if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
2232 DEBUG(5,("A\n"));
2233 if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
2234 DEBUG(5,("B\n"));
2235 cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
2236 DEBUG(5,("C\n"));
2237 /* don't read-ahead if we are in bulk validate mode */
2238 if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
2239 DEBUG(5,("D\n"));
2240 /* if we can't trust what we read-ahead anyway then don't bother although
2241 * if delta-reads are enabled we can do so in order to get something to
2242 * delta against */
2243 DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
2244 (long long int)(cache_len(cache)),
2245 (long long int)(cache->readahead_extent),
2246 (long long int)(as_read),
2247 cache->readahead_window,private->cache_readahead));
2248 if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
2249 DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
2250 cache->status));
2251 return NT_STATUS_UNSUCCESSFUL;
2254 /* as_read is the mincnt bytes of a request being made or the
2255 out.nread of completed sync requests
2256 Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
2257 then this may often NOT be the case if readahead_window < requestsize; so we will
2258 get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
2259 all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
2260 this and have failed sparse writes adjust the cache->readahead_extent back to actual size */
2262 /* predict the file pointer's next position */
2263 next_position=io->generic.in.offset + as_read;
2265 /* if we know how big the file is, don't read beyond */
2266 if (f->oplock && next_position > f->metadata->info_data.size) {
2267 next_position = f->metadata->info_data.size;
2269 DEBUG(5,("Next position: %lld (%lld + %lld)\n",
2270 (long long int)next_position,
2271 (long long int)io->generic.in.offset,
2272 (long long int)as_read));
2273 /* calculate the limit of the validated or requested cache */
2274 cache_populated=MAX(cache->validated_extent, cache->readahead_extent);
2276 /* will the new read take us beyond the current extent without gaps? */
2277 if (cache_populated < io->generic.in.offset) {
2278 /* this read-ahead is a read-behind-pointer */
2279 new_extent=cache_populated;
2280 } else {
2281 new_extent=MAX(next_position, cache_populated);
2284 /* as far as we can tell new_extent is the smallest offset that doesn't
2285 have a pending read request on it. Of course if we got a short read then
2286 we will have a cache-gap which we can't handle and need to read from
2287 a shrunk readahead_extent, which we don't currently handle */
2288 read_position=new_extent;
2290 /* of course if we know how big the remote file is we should limit at that */
2291 /* we should also mark-out which read-ahead requests are pending so that we
2292 * don't repeat them while they are in-transit. */
2293 /* we can't really use next_position until we can have caches with holes
2294 UNLESS next_position < new_extent, because a next_position well before
2295 new_extent is no reason to extend it further, we only want to extended
2296 with read-aheads if we have cause to suppose the read-ahead data will
2297 be wanted, i.e. the next_position is near new_extent.
2298 So we can't justify reading beyond window+next_position, but if
2299 next_position is leaving gaps, we use new_extent instead */
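/* worked example (editorial): with offset=0, as_read=64k, an empty cache
   and a 256k window: next_position=64k, new_extent=64k, so
   end_position=64k+256k=320k and read-aheads are issued for the range
   [64k, 320k) */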
2300 end_position=MIN(new_extent, next_position) + cache->readahead_window;
2301 if (f->oplock) {
2302 end_position=MIN(end_position, f->metadata->info_data.size);
2304 DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
2305 (long long int)read_position,
2306 (long long int)(next_position + cache->readahead_window),
2307 cache->readahead_window,
2308 (long long int)end_position,
2309 private->readahead_spare));
2310 /* do we even need to read? */
2311 if (! (read_position < end_position)) return NT_STATUS_OK;
2313 /* readahead_spare is for the whole session (mid/tid?) and may need sharing
2314 out over files and other tree-connects or something */
2315 while (read_position < end_position &&
2316 private->readahead_spare > 0) {
2317 struct smbcli_request *c_req = NULL;
2318 ssize_t read_remaining = end_position - read_position;
2319 ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
2320 MIN(read_remaining, private->cache_readaheadblock));
2321 void *req = NULL; /* for the ASYNC_RECV_TAIL_HANDLER_ORPHAN macro */
2322 uint8_t* data;
2323 union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);
2325 if (! io_copy)
2326 return NT_STATUS_NO_MEMORY;
2328 #warning we are ignoring read_for_execute as far as the cache goes
2329 io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
2330 io_copy->generic.in.offset=read_position;
2331 io_copy->generic.in.mincnt=read_block;
2332 io_copy->generic.in.maxcnt=read_block;
2333 /* what is generic.in.remaining for? */
2334 io_copy->generic.in.remaining = MIN(65535,read_remaining);
2335 io_copy->generic.out.nread=0;
2337 #warning someone must own io_copy, tree, maybe?
2338 data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
2339 DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
2340 if (! data) {
2341 talloc_free(io_copy);
2342 return NT_STATUS_NO_MEMORY;
2344 io_copy->generic.out.data=data;
2346 /* are we able to pull anything from the cache to validate this read-ahead?
2347 NOTE: there is no point in reading ahead merely to re-validate the
2348 cache if we don't have oplocks and can't save it....
2349 ... or maybe there is if we think a read will come that can be matched
2350 up to this response while it is still on the wire */
2351 #warning so we need to distinguish between pipe-line read-ahead and revalidation
2352 if (/*(cache->status & CACHE_READ)!=0 && */
2353 cache_len(cache) >
2354 (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
2355 cache->validated_extent <
2356 (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
2357 ssize_t pre_fill;
2359 pre_fill = cache_raw_read(cache, data,
2360 io_copy->generic.in.offset,
2361 io_copy->generic.in.maxcnt);
2362 DEBUG(5,("Data read into %p %d\n",data, pre_fill));
2363 if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
2364 io_copy->generic.out.nread=pre_fill;
2365 read_block=pre_fill;
2369 c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
2371 if (c_req) {
2372 private->readahead_spare--;
2373 f->readahead_pending++;
2374 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
2375 if (cache->readahead_extent < read_position+read_block)
2376 cache->readahead_extent=read_position+read_block;
2377 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2378 /* so we can decrease read-ahead counter for this session */
2379 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
2380 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);
2382 /* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
2383 talloc_steal(c_req->async.private, c_req);
2384 talloc_steal(c_req->async.private, io_copy);
2385 read_position+=read_block;
2386 } else {
2387 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
2388 talloc_free(io_copy);
2389 break;
2393 DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
2394 return NT_STATUS_OK;
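/* editorial note: read_ahead() is driven from proxy_read() after the
   client-visible request has been dealt with, and is skipped while the
   cache is in bulk-validate mode (see the CACHE_VALIDATE check there) */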
2397 struct proxy_validate_parts_parts {
2398 struct proxy_Read* r;
2399 struct ntvfs_request *req;
2400 struct proxy_file *f;
2401 struct async_read_fragments *fragments;
2402 off_t offset;
2403 ssize_t remaining;
2404 bool complete;
2405 declare_checksum(digest);
2406 struct MD5Context context;
2409 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
2410 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
2411 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2412 struct proxy_validate_parts_parts *parts);
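/* editorial note: validation streams the file in fragments;
   async_proxy_validate_parts feeds each fragment through MD5Update and
   re-enters proxy_validate_parts until offset reaches in.offset+in.maxcnt,
   then proxy_validate_complete finalises the digest and compares it with
   the client-supplied r->in.digest */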
2414 /* this will be the new struct proxy_Read based read function, for now
2415 it just deals with non-cache-based validation against a regular server */
2416 static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
2417 struct ntvfs_request *req,
2418 struct proxy_Read *r,
2419 struct proxy_file *f)
2421 struct proxy_private *private = ntvfs->private_data;
2422 struct proxy_validate_parts_parts *parts;
2423 struct async_read_fragments *fragments;
2424 NTSTATUS status;
2426 if (!f) return NT_STATUS_INVALID_HANDLE;
2428 DEBUG(5,("%s: fnum=%d **** %lld bytes \n\n\n\n",__LOCATION__,f->fnum,(long long int)r->in.maxcnt));
2430 parts = talloc_zero(req, struct proxy_validate_parts_parts);
2431 DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
2432 NT_STATUS_HAVE_NO_MEMORY(parts);
2434 fragments = talloc_zero(parts, struct async_read_fragments);
2435 NT_STATUS_HAVE_NO_MEMORY(fragments);
2437 parts->fragments=fragments;
2439 parts->r=r;
2440 parts->f=f;
2441 parts->req=req;
2442 /* processed offset */
2443 parts->offset=r->in.offset;
2444 parts->remaining=r->in.maxcnt;
2445 fragments->async=true;
2447 MD5Init (&parts->context);
2449 /* start a read-loop which will continue in the callback until it is
2450 all done */
2451 status=proxy_validate_parts(ntvfs, parts);
2452 if (parts->complete) {
2453 /* Make sure we are not async */
2454 DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
2455 return proxy_validate_complete(parts);
2458 /* Assert if status!=NT_STATUS_OK then parts->complete==true */
2459 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
2460 DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
2461 return status;
2464 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
2466 NTSTATUS status;
2467 struct proxy_Read* r=parts->r;
2468 struct proxy_file *f=parts->f;
2470 DEBUG(5,("%s: %d/%d bytes \n\n\n\n",__LOCATION__,r->out.nread,r->in.maxcnt));
2472 MD5Final(parts->digest, &parts->context);
2474 status = parts->fragments->status;
2475 r->out.result = status;
2476 r->out.response.generic.count=r->out.nread;
2478 DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg (status),
2479 r->out.response.generic.count));
2481 DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
2482 dump_data (5, r->in.digest.digest, sizeof(parts->digest));
2483 DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
2484 dump_data (5, parts->digest, sizeof(parts->digest));
2486 if (NT_STATUS_IS_OK(status) &&
2487 (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
2488 r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
2489 DEBUG(5,("======= VALIDATED FINE \n\n\n"));
2490 } else {
2491 if (r->in.flags & PROXY_USE_ZLIB) {
2492 ssize_t size = r->out.response.generic.count;
2493 DEBUG(5,("======= VALIDATED WRONG; compress size %d \n\n\n",size));
2494 if (compress_block(r->out.response.generic.data, &size) ) {
2495 r->out.flags|=PROXY_USE_ZLIB;
2496 r->out.response.compress.count=size;
2497 r->out.response.compress.data=r->out.response.generic.data;
2498 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2499 __LOCATION__,r->out.nread,size,size*100/r->out.nread));
2502 /* return cache filename as a ghastly hack for now */
2503 r->out.cache_name.s=f->cache->cache_name;
2504 r->out.cache_name.count=strlen(r->out.cache_name.s)+1;
2505 DEBUG(5,("%s: writing cache name: %s\n",__LOCATION__, f->cache->cache_name));
2506 /* todo: what about tiny files, buffer too small, don't validate tiny files <1K */
2509 /* assert: this must only be true if we are in a callback */
2510 if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
2511 /* we are async complete, we need to call the sendfn */
2512 parts->req->async_states->status=status;
2513 DEBUG(5,("Fragments async response sending\n"));
2515 parts->req->async_states->send_fn(parts->req);
2516 return NT_STATUS_OK;
2518 return status;
2521 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2523 struct smbcli_request *c_req = async->c_req;
2524 struct ntvfs_request *req = async->req;
2525 struct proxy_file *f = async->f;
2526 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2527 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2528 /* this is the io against which the fragment is to be applied */
2529 struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
2530 struct proxy_Read* r=parts->r;
2531 /* this is the io for the read that issued the callback */
2532 union smb_read *io_frag = fragment->io_frag;
2533 struct async_read_fragments* fragments=fragment->fragments;
2535 /* if request is not already received by a chained handler, read it */
2536 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2537 DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2538 DEBUG(5,("\n\n%s: parts=%p c_req=%p io_frag=%p read %lld\n",__LOCATION__,parts, c_req, io_frag,(long long int)io_frag->generic.out.nread));
2540 fragment->status=status;
2542 if (NT_STATUS_IS_OK(status)) {
2543 /* TODO: If we are not sequentially "next", queue until we can do it */
2544 /* log this data in r->out.generic.data */
2545 /* Find memcpy window, copy data from the io_frag to the io */
2547 /* Also write validate to cache */
2548 if (f && f->cache) {
2549 cache_save(f->cache, io_frag->generic.out.data, io_frag->generic.out.nread, io_frag->generic.in.offset);
2552 /* extent is the last byte we (don't) read for this frag */
2553 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2554 /* start_offset is the file offset we first care about */
2555 off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
2556 /* Don't want to go past mincnt cos we don't have the buffer */
2557 off_t io_extent=r->in.offset + r->in.mincnt;
2558 off_t end_offset=MIN(io_extent, extent);
2560 /* ASSERT(start_offset <= end_offset) */
2561 /* ASSERT(start_offset <= io_extent) */
2562 /* Don't copy beyond buffer */
2563 if (! (start_offset >= io_extent)) {
2564 uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
2565 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2566 /* src == dst in cases where we did not latch onto someone else's
2567 read, but are handling our own */
2568 if (src != dst)
2569 memcpy(dst, src, end_offset - start_offset);
2570 r->out.nread=end_offset - r->in.offset;
2571 DEBUG(5,("%s: nread %lld ++++++++++++++++++\n", __LOCATION__,(long long int)r->out.nread));
2574 MD5Update(&parts->context, io_frag->generic.out.data,
2575 io_frag->generic.out.nread);
2577 parts->fragments->status=status;
2578 status=proxy_validate_parts(ntvfs, parts);
2579 } else {
2580 parts->fragments->status=status;
2583 DLIST_REMOVE(fragments->fragments, fragment);
2584 /* this will free the io_frag too */
2585 talloc_free(fragment);
2587 if (parts->complete || NT_STATUS_IS_ERR(status)) {
2588 /* this will call sendfn, the chain handler won't know... but
2589 should have no more handlers queued */
2590 return proxy_validate_complete(parts);
2593 return NT_STATUS_OK;
2596 /* continue a read loop, possibly from a callback */
2597 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2598 struct proxy_validate_parts_parts *parts)
2600 struct proxy_private *private = ntvfs->private_data;
2601 union smb_read *io_frag;
2602 struct async_read_fragment *fragment;
2603 struct smbcli_request *c_req = NULL;
2604 ssize_t size=private->tree->session->transport->negotiate.max_xmit \
2605 - (MIN_SMB_SIZE+32);
2607 /* Have we already read enough? */
2608 if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
2609 parts->complete=true;
2610 return NT_STATUS_OK;
2613 size=MIN(size, parts->remaining);
2615 fragment=talloc_zero(parts->fragments, struct async_read_fragment);
2616 NT_STATUS_HAVE_NO_MEMORY(fragment);
2618 io_frag = talloc_zero(fragment, union smb_read);
2619 NT_STATUS_HAVE_NO_MEMORY(io_frag);
2621 io_frag->generic.out.data = talloc_size(io_frag, size);
2622 NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);
2624 io_frag->generic.level = RAW_READ_GENERIC;
2625 io_frag->generic.in.file.fnum = parts->r->in.fnum;
2626 io_frag->generic.in.offset = parts->offset;
2627 io_frag->generic.in.mincnt = size;
2628 io_frag->generic.in.maxcnt = size;
2629 io_frag->generic.in.remaining = 0;
2630 #warning maybe true is more permissive?
2631 io_frag->generic.in.read_for_execute = false;
2633 DEBUG(5,("%s: issue part read offset=%lld, size=%lld,%lld\n",__LOCATION__,
2634 (long long int)io_frag->generic.in.offset,
2635 (long long int)io_frag->generic.in.mincnt,
2636 (long long int)io_frag->generic.in.maxcnt));
2638 //c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
2639 c_req = smb_raw_read_send(private->tree, io_frag);
2640 NT_STATUS_HAVE_NO_MEMORY(c_req);
2642 parts->offset+=size;
2643 parts->remaining-=size;
2644 fragment->c_req = c_req;
2645 fragment->io_frag = io_frag;
2646 fragment->fragments=parts->fragments;
2647 DLIST_ADD(parts->fragments->fragments, fragment);
2649 { void* req=NULL;
2650 ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
2651 ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
2654 DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__LOCATION__,parts, c_req, io_frag));
2656 return NT_STATUS_OK;
2660 read from a file
2662 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
2663 struct ntvfs_request *req, union smb_read *io)
2665 struct proxy_private *private = ntvfs->private_data;
2666 struct smbcli_request *c_req;
2667 struct proxy_file *f;
2668 struct async_read_fragments *fragments=NULL;
2669 /* how much of read-from-cache is certainly valid */
2670 ssize_t valid=0;
2671 off_t offset=io->generic.in.offset+valid;
2672 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
2674 SETUP_PID;
2676 if (io->generic.level != RAW_READ_GENERIC &&
2677 private->map_generic) {
2678 return ntvfs_map_read(ntvfs, req, io);
2681 SETUP_FILE_HERE(f);
2683 DEBUG(3,("\n%s() fnum=%d offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
2684 io->generic.in.file.fnum,
2685 io->generic.in.offset,
2686 io->generic.in.mincnt,
2687 io->generic.in.maxcnt));
2689 io->generic.out.nread=0;
2691 /* if we have oplocks and know the file's size, don't even ask the server
2692 for more */
2693 if (f->oplock) {
2694 if (io->generic.in.offset >= f->metadata->info_data.size) {
2695 io->generic.in.mincnt=0;
2696 io->generic.in.maxcnt=0;
2697 io->generic.out.nread=0;
2698 DEBUG(5,("Reading beyond known length %lld; return 0\n",(long long)f->metadata->info_data.size));
2699 return NT_STATUS_OK;
2700 } else {
2701 io->generic.in.mincnt=MIN(io->generic.in.mincnt,
2702 f->metadata->info_data.size - io->generic.in.offset);
2703 io->generic.in.maxcnt=MIN(io->generic.in.maxcnt,
2704 f->metadata->info_data.size - io->generic.in.offset);
2706 DEBUG(5,("Oplock and known size, limiting read to %lld (s=%d)\n",
2707 f->metadata->info_data.size, io->generic.in.mincnt));
2711 /* attempt to read from cache. if nread becomes non-zero then we
2712 have cache to validate. Instead of returning "valid" value, cache_read
2713 should probably return an async_read_fragment structure */
2715 if (private->cache_enabled) {
2716 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
2718 if (NT_STATUS_IS_OK(status)) {
2719 /* if we read enough valid data, return it */
2720 if (valid > 0 && valid>=io->generic.in.mincnt) {
2721 /* valid will not be bigger than maxcnt */
2722 io->generic.out.nread=valid;
2723 DEBUG(1,("Read from cache offset=%d size=%d\n",
2724 (int)(io->generic.in.offset),
2725 (int)(io->generic.out.nread)) );
2726 return status;
2729 DEBUG(5,("Cache read status: %s\n",get_friendly_nt_error_msg (status)));
2732 fragments=talloc_zero(req, struct async_read_fragments);
2733 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
2734 /* See if there are pending reads that would satisfy this request
2735 We have a validated read up to io->generic.out.nread. Anything between
2736 this and mincnt MUST be read, but we could first try and attach to
2737 any pending read-ahead on the same file.
2738 If those read-aheads fail we will re-issue a regular read from the
2739 callback handler and hope it hasn't taken too long. */
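/* editorial note: the scan below attaches to a pending read-ahead only
   when it covers the current offset, i.e.
   readahead.offset <= offset < readahead.offset + readahead.mincnt */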
2741 /* offset is the extent of the file from which we still need to find
2742 matching read-requests. */
2743 offset=io->generic.in.offset+valid;
2744 /* limit is the byte beyond the last byte for which we need a request.
2745 This used to be mincnt, but is now maxcnt to cope with validate reads.
2746 Maybe we can switch back to mincnt when proxy_read struct is used
2747 instead of smb_read.
2749 limit=io->generic.in.offset+io->generic.in.maxcnt;
2751 while (offset < limit) {
2752 /* Should look for the read-ahead with offset <= in.offset+out.nread
2753 with the longest span, but there is only likely to be one anyway so
2754 just take the first */
2755 struct async_info* pending=private->pending;
2756 union smb_read *readahead_io=NULL;
2757 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
2758 while(pending) {
2759 if (pending->c_req->async.fn == async_read_handler) {
2760 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
2761 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
2763 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
2764 readahead_io->generic.in.offset <= offset &&
2765 readahead_io->generic.in.offset +
2766 readahead_io->generic.in.mincnt > offset) break;
2768 readahead_io=NULL;
2769 pending=pending->next;
2771 /* ASSERT(readahead_io == pending->c_req->async.params) */
2772 if (pending && readahead_io) {
2773 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2774 fragment->fragments=fragments;
2775 fragment->io_frag=readahead_io;
2776 fragment->c_req = pending->c_req;
2777 /* we found one, so attach to it. We DO need a talloc_reference
2778 because the original send_fn might be called before ALL chained
2779 handlers, and our handler will call its own send_fn first. ugh.
2780 Maybe we need to separate reverse-mapping callbacks from data users? */
2781 /* Note: our req io is passed as io, and the read-ahead io is
2782 in fragment->io_frag */
2783 //talloc_reference(req, pending->req);
2784 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
2785 readahead_io->generic.in.offset,
2786 readahead_io->generic.in.mincnt));
2787 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
2788 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2789 DEBUG(5,("Attached OK\n"));
2790 #warning we don't want to return if we fail to attach, just break
2791 DLIST_ADD(fragments->fragments, fragment);
2792 /* updated offset for which we have reads */
2793 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
2794 } else {
2795 /* there are no pending reads to fill this so issue one up to
2796 the maximum supported read size. We could see when the next
2797 pending read is (if any) and only read up till there... later...
2798 Issue a fragment request for what is left, clone io.
2799 In the case that there were no fragments this will be the original read
2800 but with a cloned io struct */
2801 off_t next_offset;
2802 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
2803 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2804 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
2805 ssize_t offset_inc=offset-io_frag->generic.in.offset;
2806 /* 250 is a guess at ndr rpc overheads */
2807 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
2808 private->tree->session->transport->negotiate.max_xmit) \
2809 - (MIN_SMB_SIZE+32);
2810 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
2811 readsize=MIN(limit-offset, readsize);
2813 DEBUG(5,("Issuing direct read\n"));
2814 /* reduce the cached read (if any). nread is unsigned */
2815 if (io_frag->generic.out.nread > offset_inc) {
2816 io_frag->generic.out.nread-=offset_inc;
2817 /* don't make nread buffer look too big */
2818 if (io_frag->generic.out.nread > readsize)
2819 io_frag->generic.out.nread = readsize;
2820 } else {
2821 io_frag->generic.out.nread=0;
2823 /* adjust the data pointer so we read to the right place */
2824 io_frag->generic.out.data+=offset_inc;
2825 io_frag->generic.in.offset=offset;
2826 io_frag->generic.in.maxcnt=readsize;
2827 /* we don't mind mincnt being smaller if this is the last frag,
2828 but then we can already handle it being bigger but not reached...
2829 The spell would be:
2830 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
2832 io_frag->generic.in.mincnt=readsize;
2833 fragment->fragments=fragments;
2834 fragment->io_frag=io_frag;
2835 #warning attach to send_fn handler
2836 /* what if someone attaches to us? Our send_fn is called from our
2837 chained handler which will be before their handler and io will
2838 already be freed. We need to keep a reference to the io and the data
2839 but we don't know where it came from in order to take a reference.
2840 We need therefore to tackle calling of send_fn AFTER all other handlers */
2842 /* Calculate next offset (in advance) */
2843 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
2845 /* if we are (going to be) the last fragment and we are in VALIDATE
2846 mode, see if we can do a bulk validate now.
2847 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
2848 don't do a validate on a receive validate read
2850 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
2851 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
2852 ssize_t length=private->cache_validatesize;
2853 declare_checksum(digest);
2855 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
2856 length, (unsigned long long) offset));
2857 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
2858 /* no point in doing it if md5'd length < current out.nread
2859 remember: out.data contains this request's cached response
2860 if validate succeeds */
2861 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
2862 /* upgrade the read, allocate the proxy_read struct here
2863 and fill in the extras, no more out-of-band stuff */
2864 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
2865 dump_data (5, digest, sizeof(digest));
2867 r=talloc_zero(io_frag, struct proxy_Read);
2868 memcpy(r->in.digest.digest, digest, sizeof(digest));
2869 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
2870 io_frag->generic.in.maxcnt = length;
2871 r->in.mincnt=io_frag->generic.in.mincnt;
2872 /* the proxy send function will calculate the checksum based on *data */
2873 } else {
2874 /* try bulk read */
2875 if (f->oplock) {
2876 DEBUG(5,("%s: *** faking bulk read\n\n",__LOCATION__));
2877 r=talloc_zero(io_frag, struct proxy_Read);
2878 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;//| PROXY_USE_ZLIB;
2879 io_frag->generic.in.maxcnt = MIN(f->metadata->info_data.size, private->cache_validatesize);
2880 r->in.mincnt=io_frag->generic.in.maxcnt;
2881 r->in.mincnt=io_frag->generic.in.mincnt;
2883 /* not enough in cache to make it worthwhile anymore */
2884 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
2885 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
2887 //cache_handle_novalidate(f);
2891 } else {
2892 if (f->cache && f->cache->status & CACHE_VALIDATE) {
2893 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
2894 (long long) next_offset,
2895 (long long) limit));
2899 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
2900 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
2901 io_frag->generic.in.maxcnt));
2902 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
2903 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
2904 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
2905 fragment->c_req=c_req;
2906 DLIST_ADD(fragments->fragments, fragment);
2907 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2908 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2909 DEBUG(5,("Frag response chained\n"));
2910 /* normally we would only install the chain_handler if we wanted async
2911 response, but as it is the async_read_fragment handler that calls send_fn
2912 based on fragments->async, instead of async_chain_handler, we don't
2913 need to worry about this call completing async'ly while we are
2914 waiting on the other attached calls. Otherwise we would not attach
2915 the async_chain_handler (via async_read_handler) because of the wait
2916 below */
2917 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
2918 void* req=NULL;
2919 /* call async_chain_hander not read handler so that folk can't
2920 attach to it, till we solve the problem above */
2921 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
2923 offset = next_offset;
2925 DEBUG(5,("Next fragment\n"));
2928 /* do we still need a final fragment? Issue a read */
2930 DEBUG(5,("No frags left to read\n"));
2933 /* issue new round of read-aheads */
2934 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
2935 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
2936 DEBUG(5,("== Done Read aheads\n"));
2938 /* If we have fragments but we are not called async, we must sync-wait on them */
2939 /* did we map the entire request to pending reads? */
2940 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2941 struct async_read_fragment *fragment;
2942 DEBUG(5,("Sync waiting\n"));
2943 /* fragment gets freed during the chain_handler so we start at
2944 the top each time */
2945 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
2946 /* Any fragments async handled while we sync-wait on one
2947 will remove themselves from the list and not get sync waited */
2948 sync_chain_handler(fragment->c_req);
2949 /* if we have a non-ok result AND we know we have all the responses
2950 up to extent, then we could quit the loop early and change the
2951 fragments->async to true so the final irrelevant responses would
2952 come async and we could send our response now - but we don't
2953 track that detail until we have cache-maps that we can use to
2954 track the responded fragments and combine responded linear extents
2955 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
2957 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
2958 return fragments->status;
2961 DEBUG(5,("Async returning\n"));
2962 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
2963 return NT_STATUS_OK;
2967 /* a handler to de-fragment async write replies back to one request.
2968 Can cope with out-of-order async responses by waiting for all responses
2969 on an NT_STATUS_OK case so that nwritten is properly adjusted */
2971 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2973 struct smbcli_request *c_req = async->c_req;
2974 struct ntvfs_request *req = async->req;
2975 struct proxy_file *f=async->f;
2976 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
2977 /* this is the io against which the fragment is to be applied */
2978 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
2979 /* this is the io for the write that issued the callback */
2980 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
2981 struct async_write_fragments* fragments=fragment->fragments;
2982 ssize_t extent=0;
2984 /* if request is not already received by a chained handler, read it */
2985 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
2986 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
2988 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
2989 get_friendly_nt_error_msg(status)));
2991 fragment->status = status;
2993 DLIST_REMOVE(fragments->fragments, fragment);
2995 /* did this one fail? */
2996 if (! NT_STATUS_IS_OK(fragment->status)) {
2997 if (NT_STATUS_IS_OK(fragments->status)) {
2998 fragments->status=fragment->status;
3000 } else {
3001 /* No fragments have yet failed, keep collecting responses */
3002 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
3004 /* we broke up the write so it could all be written. If only some has
3005 been written of this block, and then some of the next block,
3006 it could leave unwritten holes! We will only acknowledge up to the
3007 first partial write, and let the client deal with it.
3008 If server can return NT_STATUS_OK for a partial write so can we */
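/* Worked example (illustrative): a 128k write fragmented at 60k becomes
   A=[0,60k) B=[60k,120k) C=[120k,128k). If B reports nwritten=30k then
   extent = 60k+30k = 90k, the master nwritten is clamped to
   90k - 0 = 90k, and fragments->partial stops a later success from C
   growing it past the unwritten hole at 90k..120k. */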
3009 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
3010 DEBUG(4,("Fragmented write only partially successful\n"));
3012 /* Shrink the master nwritten */
3013 if ( ! fragments->partial ||
3014 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
3015 io->generic.out.nwritten = extent - io->generic.in.offset;
3017 /* stop any further successes from extending the partial write */
3018 fragments->partial=true;
3019 } else {
3020 /* only grow the master nwritten if we haven't logged a partial write */
3021 if (! fragments->partial &&
3022 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
3023 io->generic.out.nwritten = extent - io->generic.in.offset;
3028 /* if this was the last fragment, clean up */
3029 if (! fragments->fragments) {
3030 DEBUG(5,("Async write re-fragmented with %d of %d\n",
3031 io->generic.out.nwritten,
3032 io->generic.in.count));
3033 if (NT_STATUS_IS_OK(fragments->status)) {
3034 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
3035 io->generic.in.offset);
3036 if (f->metadata->info_data.size < io->generic.in.offset+io->generic.in.count) {
3037 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3040 if (fragments->async) {
3041 req->async_states->status=fragments->status;
3042 #warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
3043 req->async_states->send_fn(req);
3044 DEBUG(5,("Async response sent\n"));
3045 } else {
3046 DEBUG(5,("Fragments SYNC return\n"));
3050 return status;
3054 /* a handler for async write replies */
3056 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3058 struct smbcli_request *c_req = async->c_req;
3059 struct ntvfs_request *req = async->req;
3060 struct proxy_file *f=async->f;
3061 union smb_write *io=async->parms;
3063 if (c_req)
3064 status = smb_raw_write_recv(c_req, async->parms);
3066 cache_handle_save(f, io->generic.in.data,
3067 io->generic.out.nwritten,
3068 io->generic.in.offset);
3070 return status;
3074 /* write to a file */
3076 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
3077 struct ntvfs_request *req, union smb_write *io)
3079 struct proxy_private *private = ntvfs->private_data;
3080 struct smbcli_request *c_req;
3081 struct proxy_file *f;
3083 SETUP_PID;
3085 if (io->generic.level != RAW_WRITE_GENERIC &&
3086 private->map_generic) {
3087 return ntvfs_map_write(ntvfs, req, io);
3089 SETUP_FILE_HERE(f);
3091 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
3092 #warning ERROR get rid of this
3093 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3094 NTSTATUS status;
3095 if (PROXY_REMOTE_SERVER(private)) {
3096 /* Do a proxy write */
3097 status=proxy_smb_raw_write(ntvfs, io, f);
3098 } else if (io->generic.in.count >
3099 private->tree->session->transport->negotiate.max_xmit) {
3101 /* smbcli_write can deal with large writes, which are bigger than
3102 tree->session->transport->negotiate.max_xmit */
3103 ssize_t size=smbcli_write(private->tree,
3104 io->generic.in.file.fnum,
3105 io->generic.in.wmode,
3106 io->generic.in.data,
3107 io->generic.in.offset,
3108 io->generic.in.count);
3110 if (size==io->generic.in.count || size > 0) {
3111 io->generic.out.nwritten=size;
3112 status=NT_STATUS_OK;
3113 } else {
3114 status=NT_STATUS_UNSUCCESSFUL;
3116 } else {
3117 status=smb_raw_write(private->tree, io);
3120 /* Save write in cache */
3121 if (NT_STATUS_IS_OK(status)) {
3122 cache_handle_save(f, io->generic.in.data,
3123 io->generic.out.nwritten,
3124 io->generic.in.offset);
3125 if (f->metadata->info_data.size <
3126 io->generic.in.offset+io->generic.in.count) {
3127 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3131 return status;
3134 /* smb_raw_write_send can't deal with large writes, which are bigger than
3135 tree->session->transport->negotiate.max_xmit so we have to break it up
3136 trying to preserve the async nature of the call as much as possible */
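/* Illustrative arithmetic for the fragmenting loop below, assuming a
   negotiated max_xmit of 4356: the usable block is
   4356-(MIN_SMB_SIZE+32), and a 10000 byte write leaves as three
   fragments of sizes block, block and 10000-2*block at offsets 0,
   block and 2*block, each completing via async_write_fragment above. */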
3137 if (PROXY_REMOTE_SERVER(private)) {
3138 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
3139 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3140 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3141 } else if (io->generic.in.count <=
3142 private->tree->session->transport->negotiate.max_xmit) {
3143 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
3144 c_req = smb_raw_write_send(private->tree, io);
3145 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3146 } else {
3147 ssize_t remaining = io->generic.in.count;
3148 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
3149 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
3150 int done = 0;
3151 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
3153 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
3154 __FUNCTION__, io->generic.in.count,
3155 private->tree->session->transport->negotiate.max_xmit));
3157 fragments->io = io;
3158 io->generic.out.nwritten=0;
3159 io->generic.out.remaining=0;
3161 do {
3162 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
3163 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
3164 ssize_t size = MIN(block, remaining);
3166 fragment->fragments = fragments;
3167 fragment->io_frag = io_frag;
3169 io_frag->generic.level = io->generic.level;
3170 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
3171 io_frag->generic.in.wmode = io->generic.in.wmode;
3172 io_frag->generic.in.count = size;
3173 io_frag->generic.in.offset = io->generic.in.offset + done;
3174 io_frag->generic.in.data = io->generic.in.data + done;
3176 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
3177 if (! c_req) {
3178 /* let pending requests clean-up when ready */
3179 fragments->status=NT_STATUS_UNSUCCESSFUL;
3180 talloc_steal(NULL, fragments);
3181 DEBUG(3,("Can't send request fragment\n"));
3182 return NT_STATUS_UNSUCCESSFUL;
3185 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
3186 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
3187 fragment->c_req=c_req;
3188 DLIST_ADD(fragments->fragments, fragment);
3190 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3191 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
3192 DEBUG(5,("Frag response chained\n"));
3194 remaining -= size;
3195 done += size;
3196 } while(remaining > 0);
3198 /* this strategy has the callback chain attached to each c_req, so we
3199 don't use the ASYNC_RECV_TAIL* to install a general one */
3202 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
3206 /* a handler for async seek replies */
3208 static void async_seek(struct smbcli_request *c_req)
3210 struct async_info *async = c_req->async.private;
3211 struct ntvfs_request *req = async->req;
3212 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
3213 talloc_free(async);
3214 req->async_states->send_fn(req);
3218 /* seek in a file */
3220 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
3221 struct ntvfs_request *req,
3222 union smb_seek *io)
3224 struct proxy_private *private = ntvfs->private_data;
3225 struct smbcli_request *c_req;
3227 SETUP_PID_AND_FILE;
3229 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3230 return smb_raw_seek(private->tree, io);
3233 c_req = smb_raw_seek_send(private->tree, io);
3235 ASYNC_RECV_TAIL(io, async_seek);
3239 /* flush a file */
3241 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
3242 struct ntvfs_request *req,
3243 union smb_flush *io)
3245 struct proxy_private *private = ntvfs->private_data;
3246 struct smbcli_request *c_req;
3248 SETUP_PID;
3249 switch (io->generic.level) {
3250 case RAW_FLUSH_FLUSH:
3251 SETUP_FILE;
3252 break;
3253 case RAW_FLUSH_ALL:
3254 io->generic.in.file.fnum = 0xFFFF;
3255 break;
3256 case RAW_FLUSH_SMB2:
3257 return NT_STATUS_INVALID_LEVEL;
3260 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3261 return smb_raw_flush(private->tree, io);
3264 c_req = smb_raw_flush_send(private->tree, io);
3266 SIMPLE_ASYNC_TAIL;
3270 /* close a file */
3272 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
3273 struct ntvfs_request *req, union smb_close *io)
3275 struct proxy_private *private = ntvfs->private_data;
3276 struct smbcli_request *c_req;
3277 struct proxy_file *f;
3278 union smb_close io2;
3280 SETUP_PID;
3282 if (io->generic.level != RAW_CLOSE_GENERIC &&
3283 private->map_generic) {
3284 return ntvfs_map_close(ntvfs, req, io);
3286 SETUP_FILE_HERE(f);
3287 /* Note, we aren't freeing f, or its h, here. Should we?
3288 even if file-close fails, we'll remove it from the list,
3289 what else would we do? Maybe we should not remove until
3290 after the proxied call completes? */
3291 DLIST_REMOVE(private->files, f);
3293 /* Don't send the close on cloned handles unless we are the last one */
3294 if (f->metadata && --(f->metadata->count)) {
3295 DEBUG(5,("%s: Fake close of %d, %d left\n",__FUNCTION__,f->fnum, f->metadata->count));
3296 return NT_STATUS_OK;
3298 DEBUG(5,("%s: Real close of %d\n",__FUNCTION__, f->fnum));
3299 /* only close the cache if we aren't keeping references */
3300 //cache_close(f->cache);
3302 /* possibly samba can't do RAW_CLOSE_SEND yet */
3303 if (! (c_req = smb_raw_close_send(private->tree, io))) {
3304 if (io->generic.level == RAW_CLOSE_GENERIC) {
3305 ZERO_STRUCT(io2);
3306 io2.close.level = RAW_CLOSE_CLOSE;
3307 io2.close.in.file = io->generic.in.file;
3308 io2.close.in.write_time = io->generic.in.write_time;
3309 io = &io2;
3311 c_req = smb_raw_close_send(private->tree, io);
3312 /* destroy handle */
3313 ntvfs_handle_remove_backend_data(f->h, ntvfs);
3316 /* If it is read-only, don't bother waiting for the result */
3317 if (f->can_clone) {
3318 DEBUG(5,("%s: not waiting for close response fnum=%d\n",__FUNCTION__,f->fnum));
3319 return NT_STATUS_OK;
3322 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3323 return smbcli_request_simple_recv(c_req);
3326 SIMPLE_ASYNC_TAIL;
3330 /* exit - closing files opened by the pid */
3332 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
3333 struct ntvfs_request *req)
3335 struct proxy_private *private = ntvfs->private_data;
3336 struct smbcli_request *c_req;
3338 SETUP_PID;
3340 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3341 return smb_raw_exit(private->tree->session);
3344 c_req = smb_raw_exit_send(private->tree->session);
3346 SIMPLE_ASYNC_TAIL;
3350 /* logoff - closing files opened by the user */
3352 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
3353 struct ntvfs_request *req)
3355 /* we can't do this right in the proxy backend .... */
3356 return NT_STATUS_OK;
3360 /* setup for an async call - nothing to do yet */
3362 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
3363 struct ntvfs_request *req,
3364 void *private)
3366 return NT_STATUS_OK;
3370 /* cancel an async call */
3372 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
3373 struct ntvfs_request *req)
3375 struct proxy_private *private = ntvfs->private_data;
3376 struct async_info *a;
3378 /* find the matching request */
3379 for (a=private->pending;a;a=a->next) {
3380 if (a->req == req) {
3381 break;
3385 if (a == NULL) {
3386 return NT_STATUS_INVALID_PARAMETER;
3389 return smb_raw_ntcancel(a->c_req);
3393 /* lock a byte range */
3395 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
3396 struct ntvfs_request *req, union smb_lock *io)
3398 struct proxy_private *private = ntvfs->private_data;
3399 struct smbcli_request *c_req;
3401 SETUP_PID;
3403 if (io->generic.level != RAW_LOCK_GENERIC &&
3404 private->map_generic) {
3405 return ntvfs_map_lock(ntvfs, req, io);
3407 SETUP_FILE;
3409 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3410 return smb_raw_lock(private->tree, io);
3413 c_req = smb_raw_lock_send(private->tree, io);
3414 SIMPLE_ASYNC_TAIL;
3418 /* set info on an open file */
3420 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
3421 struct ntvfs_request *req,
3422 union smb_setfileinfo *io)
3424 struct proxy_private *private = ntvfs->private_data;
3425 struct smbcli_request *c_req;
3427 SETUP_PID_AND_FILE;
3429 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3430 return smb_raw_setfileinfo(private->tree, io);
3432 c_req = smb_raw_setfileinfo_send(private->tree, io);
3434 SIMPLE_ASYNC_TAIL;
3439 /* a handler for async fsinfo replies */
3441 static void async_fsinfo(struct smbcli_request *c_req)
3443 struct async_info *async = c_req->async.private;
3444 struct ntvfs_request *req = async->req;
3445 union smb_fsinfo *fs = async->parms;
3446 struct proxy_private *private = async->proxy;
3448 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, fs);
3450 if (NT_STATUS_IS_OK(req->async_states->status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3451 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3452 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3453 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3454 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3455 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3456 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3460 talloc_free(async);
3461 req->async_states->send_fn(req);
3465 /* return filesystem space info */
3467 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
3468 struct ntvfs_request *req, union smb_fsinfo *fs)
3470 struct proxy_private *private = ntvfs->private_data;
3471 struct smbcli_request *c_req;
3473 SETUP_PID;
3475 DEBUG(5,("%s: level %x\n",__LOCATION__,fs->generic.level));
3476 /* this value is easy to cache */
3477 if ((fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3478 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO) &&
3479 private->fs_attribute_info) {
3480 DEBUG(5,("%s: using cached fsinfo\n",__LOCATION__));
3481 fs->attribute_info.out.fs_attr=private->fs_attribute_info->fs_attr;
3482 fs->attribute_info.out.max_file_component_length=private->fs_attribute_info->max_file_component_length;
3483 fs->attribute_info.out.fs_type=talloc_smb_wire_string_dup(req, &(private->fs_attribute_info->fs_type));
3484 return NT_STATUS_OK;
3487 /* QFS Proxy */
3488 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
3489 fs->proxy_info.out.major_version=1;
3490 fs->proxy_info.out.minor_version=0;
3491 fs->proxy_info.out.capability=0;
3492 return NT_STATUS_OK;
3495 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3496 NTSTATUS status = smb_raw_fsinfo(private->tree, req, fs);
3497 if (NT_STATUS_IS_OK(status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3498 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3499 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3500 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3501 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3502 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3503 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3506 return status;
3508 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
3510 ASYNC_RECV_TAIL(fs, async_fsinfo);
3514 /* return print queue info */
3516 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
3517 struct ntvfs_request *req, union smb_lpq *lpq)
3519 return NT_STATUS_NOT_SUPPORTED;
3523 /* find_first / find_next caching.
3524 For now, cache based on directory, search_attributes, search_pattern, ea stuff
3525 Consider in response:
3526 * search id
3527 * search count
3528 * end of search
3529 * ea stuff */
3532 static union smb_search_data *smb_search_data_dup(void* mem_ctx, const union smb_search_data *file, enum smb_search_data_level data_level) {
3533 union smb_search_data *result;
3534 struct smb_wire_string *name;
3536 result=talloc_zero(mem_ctx, union smb_search_data);
3537 if (! result) {
3538 return result;
3541 *result = *file;
3543 switch(data_level) {
3544 case RAW_SEARCH_DATA_SEARCH:
3545 if (! (result->search.name=talloc_strdup(mem_ctx, file->search.name))) goto error;
3546 break;
3547 case RAW_SEARCH_DATA_STANDARD:
3548 if (sws_dup(result, result->standard.name, file->standard.name)) goto error;
3549 break;
3550 case RAW_SEARCH_DATA_EA_SIZE:
3551 if (sws_dup(result, result->ea_size.name, file->ea_size.name)) goto error;
3552 break;
3553 case RAW_SEARCH_DATA_EA_LIST:
3554 if (sws_dup(result, result->ea_list.name, file->ea_list.name)) goto error;
3555 break;
3556 case RAW_SEARCH_DATA_DIRECTORY_INFO:
3557 if (sws_dup(result, result->directory_info.name, file->directory_info.name)) goto error;
3558 break;
3559 case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
3560 if (sws_dup(result, result->full_directory_info.name, file->full_directory_info.name)) goto error;
3561 break;
3562 case RAW_SEARCH_DATA_NAME_INFO:
3563 if (sws_dup(result, result->name_info.name, file->name_info.name)) goto error;
3564 break;
3565 case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
3566 if (sws_dup(result, result->both_directory_info.name, file->both_directory_info.name)) goto error;
3567 if (sws_dup(result, result->both_directory_info.short_name, file->both_directory_info.short_name)) goto error;
3568 break;
3569 case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
3570 if (sws_dup(result, result->id_full_directory_info.name, file->id_full_directory_info.name)) goto error;
3571 break;
3572 case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
3573 if (sws_dup(result, result->id_both_directory_info.name, file->id_both_directory_info.name)) goto error;
3574 if (sws_dup(result, result->id_both_directory_info.short_name, file->id_both_directory_info.short_name)) goto error;
3575 break;
3576 case RAW_SEARCH_DATA_UNIX_INFO:
3577 if (! (result->unix_info.name=talloc_strdup(mem_ctx, file->unix_info.name))) goto error;
3578 break;
3579 case RAW_SEARCH_DATA_UNIX_INFO2:
3580 if (sws_dup(result, result->unix_info2.name, file->unix_info2.name)) goto error;
3581 break;
3582 default:
3583 DEBUG(5,("%s: Error can't dup an unknown file data type: %x\n", __LOCATION__, data_level));
3584 goto error;
3586 return result;
3587 error:
3588 talloc_free(result);
3589 return NULL;
3592 /* callback function for search first/next */
3593 static bool find_callback(void *private, const union smb_search_data *file)
3595 struct search_state *state = (struct search_state *)private;
3596 struct search_handle *search_handle = state->search_handle;
3597 bool status;
3599 /* if we have a cache, copy this data */
3600 if (search_handle->cache) {
3601 struct search_cache_item *item = talloc_zero(search_handle->cache, struct search_cache_item);
3602 DEBUG(5,("%s: Copy %p to cache %p\n", __LOCATION__, item, search_handle->cache));
3603 if (item) {
3604 item->data_level=search_handle->data_level;
3605 item->file = smb_search_data_dup(item, file, item->data_level);
3606 if (! item->file) {
3607 talloc_free(item);
3608 item=NULL;
3611 if (item) {
3612 /* optimization to save enumerating the entire list each time, to find the end.
3613 the cached last_item is very short lived, it doesn't matter if something has
3614 been added since, as long as it hasn't been removed */
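/* Illustrative effect: with N cached entries, appending via the
   remembered last_item costs O(1) per callback instead of the O(N)
   walk DLIST_ADD_END does from the list head, so filling the cache is
   O(N) overall rather than O(N^2). */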
3615 if (state->last_item) {
3616 DLIST_ADD_END(state->last_item, item, struct search_cache_item*);
3617 } else {
3618 DLIST_ADD_END(search_handle->cache->items, item, struct search_cache_item*);
3620 state->last_item=item;
3621 } else {
3622 DEBUG(5,("%s: Could not add name to search cache %p, invalidating cache\n", __LOCATION__, search_handle->cache));
3623 /* dear me, the whole cache will be invalid if we miss data */
3624 search_handle->cache->status=SEARCH_CACHE_DEAD;
3625 /* remove from the list of caches to use */
3626 DLIST_REMOVE(search_handle->cache->proxy->search_caches, search_handle->cache);
3627 /* Make it feel unwanted */
3628 //if (talloc_unlink(search_handle, search_handle->cache)==0) {
3629 //talloc_free(search_handle->cache);
3631 /* stop us using it for this search too */
3632 search_handle->cache=NULL;
3636 status=state->callback(state->private, file);
3637 if (status) {
3638 state->count++;
3640 return status;
3644 /* list files in a directory matching a wildcard pattern */
3646 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
3647 struct ntvfs_request *req, union smb_search_first *io,
3648 void *search_private,
3649 bool (*callback)(void *, const union smb_search_data *))
3651 struct proxy_private *private = ntvfs->private_data;
3652 struct search_state *state;
3653 struct search_cache *search_cache=NULL;
3654 struct search_cache_key search_cache_key={0};
3655 struct ntvfs_handle *h=NULL;
3656 struct search_handle *s;
3657 uint16_t max_count;
3658 NTSTATUS status;
3660 SETUP_PID;
3662 if (! private->enabled_proxy_search) {
3663 return smb_raw_search_first(private->tree, req, io, search_private, callback);
3665 switch (io->generic.level) {
3666 /* case RAW_SEARCH_DATA_SEARCH:
3667 search_cache_key.search_attrib=io->search_first.in.search_attrib;
3668 search_cache_key.pattern=io->search_first.in.pattern;
3669 max_count = io->search_first.in.max_count;
3670 search_cache = find_search_cache(private->search_cache, &search_cache_key);
3671 break;*/
3672 case RAW_SEARCH_TRANS2:
3673 io->t2ffirst.in.max_count=MIN(io->t2ffirst.in.max_count,100);
3674 max_count = io->t2ffirst.in.max_count;
3676 search_cache_key.level=io->generic.level;
3677 search_cache_key.data_level=io->generic.data_level;
3678 search_cache_key.search_attrib=io->t2ffirst.in.search_attrib;
3679 search_cache_key.pattern=io->t2ffirst.in.pattern;
3680 search_cache_key.flags=io->t2ffirst.in.flags;
3681 search_cache_key.storage_type=io->t2ffirst.in.storage_type;
3682 /* try and find a search cache that is complete */
3683 search_cache = find_search_cache(private->search_caches, &search_cache_key);
3685 /* do handle mapping for TRANS2 */
3686 status = ntvfs_handle_new(ntvfs, req, &h);
3687 NT_STATUS_NOT_OK_RETURN(status);
3689 DEBUG(5,("%s: RAW_SEARCH_TRANS2 %s max count %d, cache=%p level=%x\n",__LOCATION__, search_cache_key.pattern, max_count, search_cache, search_cache_key.data_level));
3690 break;
3691 default: /* won't cache or proxy this */
3692 return smb_raw_search_first(private->tree, req, io, search_private, callback);
3695 /* finish setting up mapped handle */
3696 if (h) {
3697 s = talloc_zero(h, struct search_handle);
3698 NT_STATUS_HAVE_NO_MEMORY(s);
3699 s->proxy=private;
3700 talloc_set_destructor(s, search_handle_destructor);
3701 s->h=h;
3702 s->level=io->generic.level;
3703 s->data_level=io->generic.data_level;
3704 status = ntvfs_handle_set_backend_data(s->h, private->ntvfs, s);
3705 NT_STATUS_NOT_OK_RETURN(status);
3706 DLIST_ADD(private->search_handles, s);
3707 DEBUG(5,("%s: map handle create %d\n",__LOCATION__, smbsrv_fnum(h)));
3710 /* satisfy from cache */
3711 if (search_cache) {
3712 struct search_cache_item* item=search_cache->items;
3713 uint16_t count=0;
3715 /* stop cache going away while we are using it */
3716 s->cache = talloc_reference(s, search_cache);
3717 DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
3718 /* Don't offer over the limit, but only count those that were accepted */
3719 DLIST_FIND(search_cache->items, item, !(count < max_count && callback(search_private, item->file) && ++count) );
3720 io->t2ffirst.out.count=count;
3721 s->resume_item=item;
3722 /* just because callback didn't accept any doesn't mean we are finished */
3723 if (item == NULL) {
3724 /* currently only caching for t2ffirst */
3725 io->t2ffirst.out.end_of_search = true;
3726 DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
3727 } else {
3728 /* count the rest */
3729 io->t2ffirst.out.end_of_search = false;
3730 DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
3731 DLIST_FOR_EACH(item, item, count++);
3732 DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
3735 if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3736 io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE)
3738 /* destroy handle */
3739 ntvfs_handle_remove_backend_data(h, ntvfs);
3740 io->t2ffirst.out.handle=0;
3741 } else {
3742 /* now map handle */
3743 io->t2ffirst.out.handle=smbsrv_fnum(h);
3745 return NT_STATUS_OK;
3748 state = talloc_zero(req, struct search_state);
3749 NT_STATUS_HAVE_NO_MEMORY(state);
3751 /* if there isn't a matching cache already being generated by another search,
3752 start one, unless FLAG_TRANS2_FIND_BACKUP_INTENT which is always live */
3753 if (!(io->t2ffirst.in.flags & FLAG_TRANS2_FIND_BACKUP_INTENT) &&
3754 find_partial_search_cache(private->search_caches, &search_cache_key) == NULL) {
3755 /* need to opendir the folder being searched so we can get a notification */
3756 struct search_cache *search_cache=NULL;
3758 search_cache=new_search_cache(private, &search_cache_key);
3759 /* Stop cache going away while we are using it */
3760 if (search_cache) {
3761 s->cache=talloc_reference(s, search_cache);
3765 /* stop the handle going away while we are using it */
3766 state->search_handle=talloc_reference(state, s);
3767 state->private=search_private;
3768 state->callback=callback;
3770 status=smb_raw_search_first(private->tree, req, io, state, find_callback);
3771 DEBUG(5,("%s: count from %d to %d\n",__LOCATION__,io->t2ffirst.out.count,state->count));
3773 DEBUG(5,("%s: Done %d %s\n",__LOCATION__, io->t2ffirst.out.count, get_friendly_nt_error_msg (status)));
3775 #warning check NT_STATUS_IS_OK ?
3776 if (io->t2ffirst.out.end_of_search) {
3777 /* cache might have gone away if problem filling */
3778 if (s->cache) {
3779 DEBUG(5,("%s: end of search on first pass\n",__LOCATION__));
3780 s->cache->status = SEARCH_CACHE_COMPLETE;
3781 DEBUG(5,("%s: Cache %p filled in first go!\n",__LOCATION__, s->cache));
3784 if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3785 io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) {
3786 DEBUG(5,("%s: Closing search\n",__LOCATION__));
3787 /* destroy partial cache */
3788 if (s->cache && (io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
3789 ! io->t2ffirst.out.end_of_search) {
3790 DEBUG(5,("%s: Destroying cache %p\n",__LOCATION__, s->cache));
3791 /* cache is no good now! */
3792 DLIST_REMOVE(private->search_caches, s->cache);
3793 //if (talloc_unlink(s, s->cache)==0) {
3794 //talloc_free(s->cache);
3796 s->cache=NULL;
3798 if (s->cache) {
3799 s->cache->status=SEARCH_CACHE_COMPLETE;
3801 /* Need to deal with the case when the client would not take them all but we still cache them
3802 if (state->count < io->t2ffirst.out.count && io->t2ffirst.out.end_of_search) {
3803 io->t2ffirst.out.end_of_search = false;
3804 //s->resume_item = state->last_item; } */
3806 /* destroy handle */
3807 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
3808 ntvfs_handle_remove_backend_data(h, ntvfs);
3809 io->t2ffirst.out.handle=0;
3810 } else {
3811 s->handle = io->t2ffirst.out.handle;
3812 io->t2ffirst.out.handle=smbsrv_fnum(h);
3814 io->t2ffirst.out.count=state->count;
3815 return status;
3818 #define DLIST_FIND_NEXT(start, item, test) do {\
3819 DLIST_FIND(start, item, test); \
3820 if (item) (item)=(item)->next; \
3821 } while(0)
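/* Example (illustrative; resume_key is a stand-in variable) of using
   DLIST_FIND_NEXT to resume one entry past the item matching a resume
   key, as the RAW_SEARCH_DATA_STANDARD case below does: */
#if 0
struct search_cache_item *start_at=NULL;
DLIST_FIND_NEXT(search_cache->items, start_at,
                start_at->file->standard.resume_key == resume_key);
/* start_at now points at the entry after the match, or is NULL when
   nothing matched (the ->next step is skipped on NULL) */
#endif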
3823 /* continue a search */
3824 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
3825 struct ntvfs_request *req, union smb_search_next *io,
3826 void *search_private,
3827 bool (*callback)(void *, const union smb_search_data *))
3829 struct proxy_private *private = ntvfs->private_data;
3830 struct search_state *state;
3831 struct ntvfs_handle *h=NULL;
3832 struct search_handle *s;
3833 const struct search_cache *search_cache=NULL;
3834 struct search_cache_item *start_at=NULL;
3835 uint16_t max_count;
3836 NTSTATUS status;
3838 SETUP_PID;
3840 if (! private->enabled_proxy_search) {
3841 return smb_raw_search_next(private->tree, req, io, search_private, callback);
3843 switch (io->generic.level) {
3844 case RAW_SEARCH_TRANS2:
3845 io->t2fnext.in.max_count=MIN(io->t2fnext.in.max_count,20);
3846 max_count = io->t2fnext.in.max_count;
3848 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->t2fnext.in.handle), struct ntvfs_handle);
3849 if (! h) return NT_STATUS_INVALID_HANDLE;
3850 /* convert handle into search_cache */
3851 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
3852 if (! s) return NT_STATUS_INVALID_HANDLE;
3853 search_cache=s->cache;
3854 DEBUG(5,("%s: RAW_SEARCH_TRANS2 find_next h=%d [real %d] count %d, cache=%p\n",__LOCATION__, io->t2fnext.in.handle, s->handle, max_count, search_cache));
3855 io->t2fnext.in.handle=s->handle;
3856 if (! search_cache) {
3857 break;
3860 /* warning if: uint16_t flags or const char *last_name have changed, abort cache */
3861 /* skip up to resume key */
3862 if (search_cache && search_cache->status == SEARCH_CACHE_COMPLETE) {
3863 DEBUG(5,("%s: seek resume position\n",__LOCATION__));
3864 /* work out where in the cache to continue from */
3865 switch (io->generic.data_level) {
3866 case RAW_SEARCH_DATA_STANDARD:
3867 case RAW_SEARCH_DATA_EA_SIZE:
3868 case RAW_SEARCH_DATA_EA_LIST:
3869 /* have a resume key? */
3870 DEBUG(5,("%s: type %x seek on %x\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.resume_key));
3871 DLIST_FIND_NEXT(search_cache->items, start_at, io->t2fnext.in.resume_key == start_at->file->standard.resume_key);
3872 break;
3873 case RAW_SEARCH_DATA_DIRECTORY_INFO: /* TODO: maybe these should be case-sensitive strcmp for some filesystems */
3874 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3875 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->directory_info.name.s)==0);
3876 break;
3877 case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
3878 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3879 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->full_directory_info.name.s)==0);
3880 break;
3881 case RAW_SEARCH_DATA_NAME_INFO:
3882 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3883 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->name_info.name.s)==0);
3884 break;
3885 case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
3886 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3887 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->both_directory_info.name.s)==0);
3888 break;
3889 case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
3890 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3891 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_full_directory_info.name.s)==0);
3892 break;
3893 case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
3894 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3895 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_both_directory_info.name.s)==0);
3896 break;
3897 case RAW_SEARCH_DATA_UNIX_INFO:
3898 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3899 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info.name)==0);
3900 break;
3901 case RAW_SEARCH_DATA_UNIX_INFO2:
3902 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3903 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info2.name.s)==0);
3904 break;
3905 default:
3906 if (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CONTINUE) {
3907 start_at = s->resume_item;
3908 } else {
3909 DEBUG(5,("%s: HELP! How can we resume?\n",__LOCATION__));
3910 start_at = s->resume_item;
3913 DEBUG(5,("%s: Start at %p\n",__LOCATION__,start_at));
3915 break;
3918 if (! search_cache) {
3919 DEBUG(5,("%s: No cache, pass-through\n",__LOCATION__));
3920 return smb_raw_search_next(private->tree, req, io, search_private, callback);
3922 //#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference((ctx),(ptr))
3923 //surely should be
3924 //#define talloc_reference(ctx, ptr) _talloc_reference((ctx),(ptr))?(ptr):(NULL) to preserve the type of ptr
3926 /* satisfy from cache */
3927 if (search_cache->status == SEARCH_CACHE_COMPLETE) {
3928 struct search_cache_item* item;
3929 uint16_t count=0;
3930 DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
3932 if (! start_at) {
3933 start_at = search_cache->items;
3936 DLIST_FIND(start_at, item, !(count < max_count && callback(search_private, item->file) && ++count) );
3937 io->t2fnext.out.count=count;
3938 s->resume_item=item;
3939 if (item == NULL) {
3940 DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
3941 io->t2fnext.out.end_of_search = true;
3942 } else {
3943 DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
3944 io->t2fnext.out.end_of_search = false;
3945 /* count the rest */
3946 DLIST_FOR_EACH(item, item, count++);
3947 DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
3949 /* is it the end? */
3950 if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3951 io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
3953 /* destroy handle */
3954 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
3955 ntvfs_handle_remove_backend_data(h, ntvfs);
3958 return NT_STATUS_OK;
3961 /* pass-through and fill-cache */
3962 state = talloc_zero(req, struct search_state);
3963 NT_STATUS_HAVE_NO_MEMORY(state);
3965 state->search_handle=talloc_reference(state, s);
3966 state->private=search_private;
3967 state->callback=callback;
3969 status = smb_raw_search_next(private->tree, req, io, state, find_callback);
3970 DEBUG(5,("%s: count from %d to %d\n",__LOCATION__,io->t2fnext.out.count,state->count));
3972 /* if closing, then close */
3973 if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3974 io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
3976 if (s->cache && (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
3977 ! io->t2fnext.out.end_of_search) {
3978 /* partial cache is useless */
3979 DLIST_REMOVE(private->search_caches, s->cache);
3980 //if (talloc_unlink(s, s->cache)==0) {
3981 //talloc_free(s->cache);
3983 s->cache=NULL;
3985 if (s->cache) {
3986 s->cache->status=SEARCH_CACHE_COMPLETE;
3987 /* Need to deal with the case when the client would not take them all but we still cache them
3988 if (state->count < io->t2fnext.out.count && io->t2fnext.out.end_of_search) {
3989 io->t2fnext.out.end_of_search = false; } */
3992 /* destroy handle */
3993 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
3994 ntvfs_handle_remove_backend_data(h, ntvfs);
3996 io->t2fnext.out.count=state->count;
3998 return status;
4001 /* close a search */
4002 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
4003 struct ntvfs_request *req, union smb_search_close *io)
4005 struct proxy_private *private = ntvfs->private_data;
4006 struct ntvfs_handle *h=NULL;
4007 struct search_handle *s;
4008 NTSTATUS status;
4010 SETUP_PID;
4012 if (! private->enabled_proxy_search) {
4013 return smb_raw_search_close(private->tree, io);
4015 switch (io->generic.level) {
4016 case RAW_SEARCH_TRANS2:
4017 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->findclose.in.handle), struct ntvfs_handle);
4018 if (! h) return NT_STATUS_INVALID_HANDLE;
4019 /* convert handle into search_cache */
4020 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
4021 if (! s) return NT_STATUS_INVALID_HANDLE;
4022 io->findclose.in.handle=s->handle; break;
4023 default:
4024 return smb_raw_search_close(private->tree, io);
4027 if (! s->cache) {
4028 status = smb_raw_search_close(private->tree, io);
4029 } else {
4030 if (s->cache->status != SEARCH_CACHE_COMPLETE) {
4031 /* cache is useless */
4032 DLIST_REMOVE(private->search_caches, s->cache);
4033 //if (talloc_unlink(s, s->cache)==0) {
4034 //talloc_free(s->cache);
4037 status = NT_STATUS_OK;
4040 s->h=NULL;
4041 ntvfs_handle_remove_backend_data(h, ntvfs);
4042 /* s MAY also be gone at this point, if h was free'd, unless there were
4043 pending responses, in which case they see s->h is NULL as a sign to stop */
4044 return status;
4048 /* a handler for async trans2 replies */
4050 static void async_trans2(struct smbcli_request *c_req)
4052 struct async_info *async = c_req->async.private;
4053 struct ntvfs_request *req = async->req;
4054 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
4055 talloc_free(async);
4056 req->async_states->send_fn(req);
4059 /* raw trans2 */
4060 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
4061 struct ntvfs_request *req,
4062 struct smb_trans2 *trans2)
4064 struct proxy_private *private = ntvfs->private_data;
4065 struct smbcli_request *c_req;
4067 if (private->map_trans2) {
4068 return NT_STATUS_NOT_IMPLEMENTED;
4071 SETUP_PID;
4072 #warning we should be mapping file handles here
4074 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4075 return smb_raw_trans2(private->tree, req, trans2);
4078 c_req = smb_raw_trans2_send(private->tree, trans2);
4080 ASYNC_RECV_TAIL(trans2, async_trans2);
4084 /* SMBtrans - not used on file shares */
4085 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
4086 struct ntvfs_request *req,
4087 struct smb_trans2 *trans2)
4089 return NT_STATUS_ACCESS_DENIED;
4093 /* a handler for async change notify replies */
4095 static void async_changenotify(struct smbcli_request *c_req)
4097 struct async_info *async = c_req->async.private;
4098 struct ntvfs_request *req = async->req;
4099 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
4100 talloc_free(async);
4101 req->async_states->send_fn(req);
4104 /* change notify request - always async */
4105 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
4106 struct ntvfs_request *req,
4107 union smb_notify *io)
4109 struct proxy_private *private = ntvfs->private_data;
4110 struct smbcli_request *c_req;
4111 int saved_timeout = private->transport->options.request_timeout;
4112 struct proxy_file *f;
4114 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
4115 return NT_STATUS_NOT_IMPLEMENTED;
4118 SETUP_PID;
4120 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
4121 if (!f) return NT_STATUS_INVALID_HANDLE;
4122 io->nttrans.in.file.fnum = f->fnum;
4124 /* this request doesn't make sense unless it's async */
4125 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4126 return NT_STATUS_INVALID_PARAMETER;
4129 /* we must not timeout on notify requests - they wait
4130 forever */
4131 private->transport->options.request_timeout = 0;
4133 c_req = smb_raw_changenotify_send(private->tree, io);
4135 private->transport->options.request_timeout = saved_timeout;
4137 ASYNC_RECV_TAIL(io, async_changenotify);
4141 /** A handler for converting from rpc struct replies to ntioctl */
4143 static NTSTATUS proxy_rpclite_map_async_send(
4144 struct ntvfs_module_context *ntvfs,
4145 struct ntvfs_request *req,
4146 void *io1, void *io2, NTSTATUS status)
4148 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
4149 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
4150 void* r=rpclite_send->struct_ptr;
4151 struct ndr_push* push;
4152 const struct ndr_interface_call* call=rpclite_send->call;
4153 enum ndr_err_code ndr_err;
4154 DATA_BLOB ndr;
4156 talloc_free(rpclite_send);
4158 DEBUG(5,("%s: converting r=%p back to ntioctl\n",__FUNCTION__, r));
4159 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4160 NT_STATUS_HAVE_NO_MEMORY(push);
4162 if (0) {
4163 push->flags |= LIBNDR_FLAG_BIGENDIAN;
4166 ndr_err = call->ndr_push(push, NDR_OUT, r);
4167 status=ndr_map_error2ntstatus(ndr_err);
4169 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4170 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
4171 nt_errstr(status)));
4172 return status;
4175 ndr=ndr_push_blob(push);
4176 //if (ndr.length > io->ntioctl.in.max_data) {
4177 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
4178 io->ntioctl.in.max_data, ndr.data));
4179 io->ntioctl.out.blob=ndr;
4180 return status;
4184 /** A handler for sending async rpclite Read replies that were mapped to union smb_read */
4186 static NTSTATUS rpclite_proxy_Read_map_async_send(
4187 struct ntvfs_module_context *ntvfs,
4188 struct ntvfs_request *req,
4189 void *io1, void *io2, NTSTATUS status)
4191 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
4192 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
4194 /* status here is a result of proxy_read; it doesn't reflect the status
4195 of the rpc transport or related calls, just the read operation */
4196 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4197 r->out.result=status;
4199 if (! NT_STATUS_IS_OK(status)) {
4200 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
4201 r->out.nread=0;
4202 r->out.flags=0;
4203 } else {
4204 ssize_t size=io->readx.out.nread;
4205 r->out.flags=0;
4206 r->out.nread=io->readx.out.nread;
4208 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
4209 declare_checksum(digest);
4210 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
4212 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
4213 dump_data (5, digest, sizeof(digest));
4214 DEBUG(5,("Cached digest\n"));
4215 dump_data (5, r->in.digest.digest, sizeof(digest));
4217 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
4218 r->out.flags=PROXY_USE_CACHE;
4219 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
4220 (long long)r->out.nread));
4221 if (r->in.flags & PROXY_VALIDATE) {
4222 r->out.flags |= PROXY_VALIDATE;
4223 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
4224 (long long)r->out.nread, (long long) io->readx.out.nread));
4226 goto done;
4228 DEBUG(5,("Cache does not match\n"));
4231 if (r->in.flags & PROXY_VALIDATE) {
4232 /* validate failed, shrink read to mincnt so we don't flood the link */
4233 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
4234 r->in.maxcnt, r->out.nread, MIN(r->out.nread, r->in.mincnt)));
4235 r->out.nread=MIN(r->out.nread, r->in.mincnt);
4236 size=r->out.nread;
4239 if (r->in.flags & PROXY_USE_ZLIB) {
4240 if (compress_block(io->readx.out.data, &size) ) {
4241 r->out.flags|=PROXY_USE_ZLIB;
4242 r->out.response.compress.count=size;
4243 r->out.response.compress.data=io->readx.out.data;
4244 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
4245 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
4246 goto done;
4250 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
4251 r->out.response.generic.count=io->readx.out.nread;
4252 r->out.response.generic.data=io->readx.out.data;
4255 done:
4257 /* Or should we return NT_STATUS_OK ?*/
4258 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
4260 /* the rpc transport succeeded even if the operation did not */
4261 return NT_STATUS_OK;
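/* In other words: NT_STATUS_OK from this map handler only means the
   rpclite reply was delivered; the outcome of the read itself travels
   separately in r->out.result set above, and callers must check both. */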
4265 /** RPC implementation of Read */
4267 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
4268 struct ntvfs_request *req, struct proxy_Read *r)
4270 struct proxy_private *private = ntvfs->private_data;
4271 union smb_read* io=talloc(req, union smb_read);
4272 NTSTATUS status;
4273 struct proxy_file *f;
4274 struct ntvfs_handle *h;
4276 NT_STATUS_HAVE_NO_MEMORY(io);
4278 /* if the next hop is a proxy, just repeat this call; also handle the VALIDATE
4279 check, which means having our own callback handlers too... */
4280 SETUP_PID;
4282 RPCLITE_SETUP_FILE_HERE(f, h);
4284 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
4285 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
4286 DEBUG(5,("Anticipated digest\n"));
4287 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
4289 /* If the remote end is a proxy, just fix up the file handle and pass through,
4290 but update the cache on the way back:
4291 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) { ... } */
4294 /* prepare for response */
4295 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
4296 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
4298 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
4299 return proxy_validate(ntvfs, req, r, f);
4302 /* pack up an smb_read request and dispatch here */
4303 io->readx.level=RAW_READ_READX;
4304 io->readx.in.file.ntvfs=h;
4305 io->readx.in.mincnt=r->in.mincnt;
4306 io->readx.in.maxcnt=r->in.maxcnt;
4307 io->readx.in.offset=r->in.offset;
4308 io->readx.in.remaining=r->in.remaining;
4309 /* and something to hold the answer */
4310 io->readx.out.data=r->out.response.generic.data;
4312 /* so we get to pack the io->*.out response */
4313 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
4314 NT_STATUS_NOT_OK_RETURN(status);
4316 /* so the read will get processed normally */
4317 return proxy_read(ntvfs, req, io);
4321 /** A handler for sending async rpclite Write replies */
4323 static NTSTATUS rpclite_proxy_Write_map_async_send(
4324 struct ntvfs_module_context *ntvfs,
4325 struct ntvfs_request *req,
4326 void *io1, void *io2, NTSTATUS status)
4328 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
4329 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
4331 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4332 r->out.result=status;
4334 r->out.nwritten=io->writex.out.nwritten;
4335 r->out.remaining=io->writex.out.remaining;
4337 /* the rpc transport succeeded even if the operation did not */
4338 return NT_STATUS_OK;
4342 /** RPC implementation of write */
4344 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
4345 struct ntvfs_request *req, struct proxy_Write *r)
4347 struct proxy_private *private = ntvfs->private_data;
4348 union smb_write* io=talloc(req, union smb_write);
4349 NTSTATUS status;
4350 struct proxy_file* f;
4351 struct ntvfs_handle *h;
4353 SETUP_PID;
4355 RPCLITE_SETUP_FILE_HERE(f,h);
4357 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
4358 r->in.count, r->in.offset, r->in.fnum));
4360 /* pack up an smb_write request and dispatch here */
4361 io->writex.level=RAW_WRITE_WRITEX;
4362 io->writex.in.file.ntvfs=h;
4363 io->writex.in.offset=r->in.offset;
4364 io->writex.in.wmode=r->in.mode;
4365 io->writex.in.count=r->in.count;
4367 /* and the data */
4368 if (PROXY_USE_ZLIB & r->in.flags) {
4369 ssize_t count=r->in.data.generic.count;
4370 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
4371 &count, r->in.count);
4372 if (count != r->in.count || !io->writex.in.data) {
4373 /* Didn't uncompress properly, but the RPC layer worked */
4374 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
4375 return NT_STATUS_OK;
4377 } else {
4378 io->writex.in.data=r->in.data.generic.data;
4381 /* so we get to pack the io->*.out response */
4382 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
4383 NT_STATUS_NOT_OK_RETURN(status);
4385 /* so the write will get processed normally */
4386 return proxy_write(ntvfs, req, io);
4390 /** RPC amalgamation of getinfo requests */
4392 struct proxy_getinfo_fragments;
4393 struct proxy_getinfo_fragmentses;
4395 /* holds one smbcli_request to satisfy part of one proxy_GetInfo request */
4396 struct proxy_getinfo_fragment {
4397 struct proxy_getinfo_fragment *prev, *next;
4398 struct proxy_getinfo_fragments *fragments;
4399 union smb_fileinfo *smb_fileinfo;
4400 struct smbcli_request *c_req;
4401 NTSTATUS status;
4404 /* holds references to the many fragment smbcli_requests that together make up one proxy_GetInfo request */
4405 struct proxy_getinfo_fragments {
4406 struct proxy_getinfo_fragments *prev, *next;
4407 struct proxy_getinfo_fragmentses *fragmentses;
4408 struct proxy_getinfo_fragment *fragments;
4409 uint32_t index;
4412 struct proxy_getinfo_fragmentses {
4413 struct proxy_getinfo_fragments *fragments;
4414 struct proxy_GetInfo *r;
4415 struct ntvfs_request *req;
4416 bool async;
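/* How one proxy_GetInfo call fans out (sketch of the structs above):
   proxy_getinfo_fragmentses (one per GetInfo request r)
     -> proxy_getinfo_fragments (one per r->in.info_tags index)
          -> proxy_getinfo_fragment (one per outstanding smbcli_request)
   async_proxy_getinfo below stores each answer in
   r->out.info_data[fragments->index] as its fragment completes. */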
4420 /* a handler for async getinfo replies */
4422 NTSTATUS async_proxy_getinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
4424 struct smbcli_request *c_req = async->c_req;
4425 struct ntvfs_request *req = async->req;
4426 struct proxy_file *f=async->f;
4427 struct proxy_getinfo_fragment *fragment=talloc_get_type_abort(io2, struct proxy_getinfo_fragment);
4428 struct proxy_getinfo_fragments* fragments=fragment->fragments;
4429 struct proxy_getinfo_fragmentses* fragmentses=fragments->fragmentses;
4430 struct proxy_GetInfo *r=talloc_get_type_abort(fragmentses->r, struct proxy_GetInfo);
4431 int c=fragments->index;
4432 struct info_data* d=&(r->out.info_data[c]);
4433 union smb_fileinfo *io=talloc_get_type_abort(io1, union smb_fileinfo);
4435 SMB_ASSERT(c_req == NULL || c_req == fragment->c_req);
4437 if (c_req) {
4438 switch (r->in.info_tags[0].tag_type) {
4439 case TAG_TYPE_FILE_INFO:
4440 status=smb_raw_fileinfo_recv(c_req, r, io);
4441 break;
4442 case TAG_TYPE_PATH_INFO:
4443 status=smb_raw_pathinfo_recv(c_req, r, io);
4444 break;
4445 default:
4446 status=NT_STATUS_INVALID_PARAMETER;
4448 c_req=NULL;
4451 /* stop the callback occurring more than once synchronously */
4452 fragment->c_req=NULL;
4454 DEBUG(5,("%s: async callback level %x %s\n",__FUNCTION__,io->generic.level, get_friendly_nt_error_msg (status)));
4455 switch (io->generic.level) {
4456 case RAW_FILEINFO_ALL_INFO:
4457 case RAW_FILEINFO_ALL_INFORMATION:
4458 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALL_INFO\n",__FUNCTION__));
4459 d->status_RAW_FILEINFO_ALL_INFO=status;
4461 /* don't blindly overwrite BASIC_INFORMATION as we may already have it */
4462 if (NT_STATUS_IS_OK(status)) {
4463 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4464 d->create_time=io->all_info.out.create_time;
4465 d->access_time=io->all_info.out.access_time;
4466 d->write_time=io->all_info.out.write_time;
4467 d->change_time=io->all_info.out.change_time;
4468 d->attrib=io->all_info.out.attrib;
4470 d->alloc_size=io->all_info.out.alloc_size;
4471 d->size=io->all_info.out.size;
4472 dump_data(5, io, sizeof(*io));
4473 d->nlink=io->all_info.out.nlink;
4474 d->delete_pending=io->all_info.out.delete_pending;
4475 d->directory=io->all_info.out.directory;
4476 d->ea_size=io->all_info.out.ea_size;
4477 /* io is sticking around for as long as d is */
4478 d->fname.s=io->all_info.out.fname.s;
4479 d->fname.count=io->all_info.out.fname.private_length;
4480 break;
4481 case RAW_FILEINFO_BASIC_INFO:
4482 case RAW_FILEINFO_BASIC_INFORMATION:
4483 DEBUG(5,("%s: async callback level RAW_FILEINFO_BASIC_INFORMATION\n",__FUNCTION__));
4484 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4485 d->create_time=io->basic_info.out.create_time;
4486 d->access_time=io->basic_info.out.access_time;
4487 d->write_time=io->basic_info.out.write_time;
4488 d->change_time=io->basic_info.out.change_time;
4489 d->attrib=io->basic_info.out.attrib;
4490 break;
4491 case RAW_FILEINFO_COMPRESSION_INFO:
4492 DEBUG(5,("%s: async callback level RAW_FILEINFO_COMPRESSION_INFO\n",__FUNCTION__));
4493 d->status_RAW_FILEINFO_COMPRESSION_INFO = status;
4494 d->compressed_size=io->compression_info.out.compressed_size;
4495 d->format=io->compression_info.out.format;
4496 d->unit_shift=io->compression_info.out.unit_shift;
4497 d->chunk_shift=io->compression_info.out.chunk_shift;
4498 d->cluster_shift=io->compression_info.out.cluster_shift;
4499 break;
4500 case RAW_FILEINFO_INTERNAL_INFORMATION:
4501 DEBUG(5,("%s: async callback level RAW_FILEINFO_INTERNAL_INFORMATION\n",__FUNCTION__));
4502 d->status_RAW_FILEINFO_INTERNAL_INFORMATION=status;
4503 d->file_id=io->internal_information.out.file_id;
4504 break;
4505 case RAW_FILEINFO_ACCESS_INFORMATION:
4506 DEBUG(5,("%s: async callback level RAW_FILEINFO_ACCESS_INFORMATION\n",__FUNCTION__));
4507 d->status_RAW_FILEINFO_ACCESS_INFORMATION=status;
4508 d->access_flags=io->access_information.out.access_flags;
4509 break;
4510 case RAW_FILEINFO_POSITION_INFORMATION:
4511 DEBUG(5,("%s: async callback level RAW_FILEINFO_POSITION_INFORMATION\n",__FUNCTION__));
4512 d->status_RAW_FILEINFO_POSITION_INFORMATION = status;
4513 d->position=io->position_information.out.position;
4514 break;
4515 case RAW_FILEINFO_MODE_INFORMATION:
4516 DEBUG(5,("%s: async callback level RAW_FILEINFO_MODE_INFORMATION\n",__FUNCTION__));
4517 d->status_RAW_FILEINFO_MODE_INFORMATION =status;
4518 d->mode=io->mode_information.out.mode;
4519 break;
4520 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
4521 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALIGNMENT_INFORMATION\n",__FUNCTION__));
4522 d->status_RAW_FILEINFO_ALIGNMENT_INFORMATION=status;
4523 d->alignment_requirement=io->alignment_information.out.alignment_requirement;
4524 break;
4525 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
4526 DEBUG(5,("%s: async callback level RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION\n",__FUNCTION__));
4527 d->status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=status;
4528 d->reparse_tag=io->attribute_tag_information.out.reparse_tag;
4529 d->reparse_attrib=io->attribute_tag_information.out.attrib;
4530 break;
4531 case RAW_FILEINFO_STREAM_INFO: {
4532 uint_t c;
4533 DEBUG(5,("%s: async callback level RAW_FILEINFO_STREAM_INFO %s,\n",__FUNCTION__));
4534 d->status_RAW_FILEINFO_STREAM_INFO=status;
4535 DEBUG(5,("Num Streams %d %s\n",io->stream_info.out.num_streams, get_friendly_nt_error_msg (status)));
4536 if (NT_STATUS_IS_OK(status)) {
4537 d->streams=talloc_zero_array(d, struct info_stream, io->stream_info.out.num_streams);
4538 if (! d->streams) {
4539 d->status_RAW_FILEINFO_STREAM_INFO=NT_STATUS_NO_MEMORY;
4540 } else {
4541 d->num_streams=io->stream_info.out.num_streams;
4542 for(c=0; c < io->stream_info.out.num_streams; c++) {
4543 d->streams[c].size = io->stream_info.out.streams[c].size;
4544 d->streams[c].alloc_size = io->stream_info.out.streams[c].alloc_size;
4545 d->streams[c].stream_name.s=io->stream_info.out.streams[c].stream_name.s;
4546 d->streams[c].stream_name.count=io->stream_info.out.streams[c].stream_name.private_length;
4550 break; }
4551 default:
4552 /* so... where's it from? */
4553 DEBUG(5,("Unexpected read level\n"));
4556 fragment->smb_fileinfo = NULL;
4557 fragment->c_req=NULL;
4559 /* are the fragments complete? */
4560 DLIST_REMOVE(fragments->fragments, fragment);
4561 /* if this index is complete, remove from fragmentses */
4562 if (! fragments->fragments) {
4563 DLIST_REMOVE(fragmentses->fragments, fragments);
4565 /* is that the end? */
4566 if (! fragmentses->fragments && fragmentses->async) {
4567 DEBUG(5,("Thats the end of the fragments, doing send\n"));
4568 /* call the send_fn */
4569 req=fragmentses->req;
4570 req->async_states->status=NT_STATUS_OK;
4571 DEBUG(5,("Fragments async response sending\n"));
4572 req->async_states->send_fn(req);
4574 DEBUG(5,("%s: Thats the end of the callback\n",__FUNCTION__));
4575 return status;
#define FINISH_GETINFO_FRAGMENT(r, io) do { \
	struct smbcli_request *c_req; \
	switch (r->in.info_tags[0].tag_type) { \
	case TAG_TYPE_FILE_INFO: \
		io->all_info.in.file.fnum=r->in.info_tags[0].info_tag.fnum; \
		c_req=smb_raw_fileinfo_send(private->tree, io); \
		break; \
	case TAG_TYPE_PATH_INFO: \
		io->all_info.in.file.path=r->in.info_tags[0].info_tag.path.s; \
		c_req=smb_raw_pathinfo_send(private->tree, io); \
		break; \
	default: \
		return NT_STATUS_INVALID_PARAMETER; \
	} \
	/* add a fragment collator */ \
	fragment->c_req=c_req; \
	/* use the same stateful async handler for them all... */ \
	{ void* req=NULL; \
		ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_proxy_getinfo, NT_STATUS_INTERNAL_ERROR); \
		ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_read_handler); \
	} \
	io=NULL; \
} while (0)
#define SETUP_GETINFO_FRAGMENT(io, LEVEL) do { \
	fragment=talloc_zero(fragments, struct proxy_getinfo_fragment); \
	NT_STATUS_HAVE_NO_MEMORY(fragment); \
	DLIST_ADD(fragments->fragments, fragment); \
	fragment->fragments=fragments; \
	io=talloc_zero(fragment, union smb_fileinfo); \
	NT_STATUS_HAVE_NO_MEMORY(io); \
	io->generic.level=LEVEL; \
} while (0)
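
/* Typical pairing (a sketch of what rpclite_proxy_Getinfo does below):
   SETUP_GETINFO_FRAGMENT allocates a fragment and a zeroed smb_fileinfo at
   the requested level; FINISH_GETINFO_FRAGMENT fills in the fnum or path,
   fires the smbcli request and chains async_proxy_getinfo as the collator:

	SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALL_INFO);
	FINISH_GETINFO_FRAGMENT(r, io);

   Both macros expect `fragments', `fragment', `private' and `f' to be in
   scope, and FINISH_GETINFO_FRAGMENT NULLs `io' so it can't be reused. */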
static NTSTATUS rpclite_proxy_Getinfo(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, struct proxy_GetInfo *r)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	union smb_fileinfo *io=NULL;
	NTSTATUS status;
	struct proxy_file* f;
	struct ntvfs_handle *h;
	struct proxy_getinfo_fragmentses *fragmentses;
	int c;

	SETUP_PID;

	DEBUG(5,("Opnum: proxy_Getinfo r=%p\n",r));

	DEBUG(5,("Converting %d handles for r=%p\n",r->in.count, r));
	for(c=0; c < r->in.count; c++) {
		if (r->in.info_tags[c].tag_type==TAG_TYPE_FILE_INFO) {
			RPCLITE_SETUP_THIS_FILE_HERE(r->in.info_tags[c].info_tag.fnum, f, h);
		}
	}

	if (PROXY_REMOTE_SERVER(private)) {
		DEBUG(5,("Remote proxy, doing transparent\n"));
		c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
		/* no need to add a receive handler, the ntioctl transport adds
		   the async chain handler which deals with the send_fn */
//		ADD_ASYNC_RECV_TAIL(c_req, r, NULL, f, rpclite_proxy_Getinfo_map_async_send, NT_STATUS_INTERNAL_ERROR);

		if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
			DEBUG(5,("%s:Sync waiting for nttrans response\n",__LOCATION__));
			return sync_chain_handler(c_req);
		} else {
			ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
			req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
			return NT_STATUS_OK;
		}
	}

	/* I thought this was done for me for [in,out] */
	r->out.info_data=talloc_zero_array(r, struct info_data, r->in.count);
	NT_STATUS_HAVE_NO_MEMORY(r->out.info_data);
	r->out.count = r->in.count;
	r->out.result = NT_STATUS_OK;

	fragmentses=talloc_zero(req, struct proxy_getinfo_fragmentses);
	NT_STATUS_HAVE_NO_MEMORY(fragmentses);
	fragmentses->r=r;
	fragmentses->req=req;
#warning if c is large, we need to do a few at a time according to resource limits
	for (c=0; c < r->in.count; c++) {
		struct proxy_getinfo_fragments *fragments;
		struct proxy_getinfo_fragment *fragment;

		fragments=talloc_zero(fragmentses, struct proxy_getinfo_fragments);
		NT_STATUS_HAVE_NO_MEMORY(fragments);
		DLIST_ADD(fragmentses->fragments, fragments);
		fragments->fragmentses=fragmentses;
		fragments->index=c;

		/* issue a set of getinfo requests */
		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALL_INFO);
		FINISH_GETINFO_FRAGMENT(r, io);

		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_COMPRESSION_INFO);
		FINISH_GETINFO_FRAGMENT(r, io);

		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_INTERNAL_INFORMATION);
		FINISH_GETINFO_FRAGMENT(r, io);

		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ACCESS_INFORMATION);
		FINISH_GETINFO_FRAGMENT(r, io);

		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_POSITION_INFORMATION);
		FINISH_GETINFO_FRAGMENT(r, io);

		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_MODE_INFORMATION);
		FINISH_GETINFO_FRAGMENT(r, io);

		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALIGNMENT_INFORMATION);
		FINISH_GETINFO_FRAGMENT(r, io);

		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
		FINISH_GETINFO_FRAGMENT(r, io);

		SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_STREAM_INFO);
		FINISH_GETINFO_FRAGMENT(r, io);
	}

	/* if ! async, wait for all requests to finish */

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		struct proxy_getinfo_fragments *fragments;
		struct proxy_getinfo_fragment *fragment;
		while ((fragments = fragmentses->fragments) &&
		       (fragment = fragments->fragments) &&
		       fragment->c_req) {
			sync_chain_handler(fragment->c_req);
			/* and because the whole fragment / fragments may be gone now... */
			continue;
		}
		return NT_STATUS_OK; /* see individual failures */
	}

	DEBUG(5,("%s: Setting async response\n",__FUNCTION__));
	fragmentses->async=true;
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
	return NT_STATUS_OK;
}
/* rpclite dispatch table */
#define RPC_PROXY_OPS 3
struct {
	uint32_t opnum;
	NTSTATUS (*handler)(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, void* r);
} rpcproxy_ops[RPC_PROXY_OPS]={
	{NDR_PROXY_READ, rpclite_proxy_Read},
	{NDR_PROXY_WRITE, rpclite_proxy_Write},
	{NDR_PROXY_GETINFO, rpclite_proxy_Getinfo}
};
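
/* Extending the dispatch is a two-line affair (illustrative only; the opnum
   constant and handler shown here are hypothetical, not part of this module):

	#define RPC_PROXY_OPS 4
	...
	{NDR_PROXY_SETINFO, rpclite_proxy_Setinfo}

   The table is indexed by opnum position in proxy_rpclite below, so entries
   must stay in opnum order and RPC_PROXY_OPS must match the entry count. */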
/* unmarshall ntioctl and rpc-dispatch, but push an async map handler to convert
   back from the rpc struct to ntioctl */
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
				struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	DATA_BLOB *request;
	struct ndr_syntax_id* syntax_id;
	uint32_t opnum;
	const struct ndr_interface_table *table;
	struct ndr_pull* pull;
	void* r;
	NTSTATUS status;
	struct async_rpclite_send *rpclite_send;
	enum ndr_err_code ndr_err;

	SETUP_PID;

	/* We don't care about io->generic.in.file, the ntvfs layer already proved it was valid,
	   our operations will have the fnum embedded in them anyway */
	DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,(int)io->ntioctl.in.blob.length));
	/* unpack the NDR */
	request=&io->ntioctl.in.blob;

	pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
	NT_STATUS_HAVE_NO_MEMORY(pull);
	/* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
	DEBUG(5,("%s pull init'd\n",__FUNCTION__));

	/* the blob is 4-aligned because it was memcpy'd */
	syntax_id=talloc_zero(pull, struct ndr_syntax_id);
	NT_STATUS_HAVE_NO_MEMORY(syntax_id);

	ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
	status=ndr_map_error2ntstatus(ndr_err);
	if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
		return status;
	}

	/* now find the struct ndr_interface_table * for this syntax_id */
	table=ndr_table_by_uuid(&syntax_id->uuid);
	if (! table) {
		DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
		return NT_STATUS_NO_GUID_TRANSLATION;
	}

	ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
	status=ndr_map_error2ntstatus(ndr_err);
	if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
		return status;
	}
	/* don't index table->calls with an unvalidated client-supplied opnum */
	if (opnum >= table->num_calls) {
		DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
		return NT_STATUS_PROCEDURE_NOT_FOUND;
	}
	DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));

	DEBUG(10,("rpc request data:\n"));
	dump_data(10, pull->data, pull->data_size);

	r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
			 table->calls[opnum].name);
	NT_STATUS_HAVE_NO_MEMORY(r);

	memset(r, 0, table->calls[opnum].struct_size);

	ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
	status=ndr_map_error2ntstatus(ndr_err);
	DEBUG(5,("%s opnum %d pulled r=%p status %s\n",__FUNCTION__,opnum,r,get_friendly_nt_error_msg (status)));
	NT_STATUS_NOT_OK_RETURN(status);

	rpclite_send=talloc(req, struct async_rpclite_send);
	NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
	rpclite_send->call=&table->calls[opnum];
	rpclite_send->struct_ptr=r;
	/* need to push a conversion function to convert from r to io */
	status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
	NT_STATUS_NOT_OK_RETURN(status);

	/* Magically despatch the call based on syntax_id, table and opnum.
	   But there is no table of handlers.... so until then */
	if (0==strcasecmp(table->name,"rpcproxy")) {
		if (opnum >= RPC_PROXY_OPS) {
			DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
			return NT_STATUS_PROCEDURE_NOT_FOUND;
		}
		status = rpcproxy_ops[opnum].handler(ntvfs, req, r);
	} else {
		DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
			 GUID_string(debug_ctx(),&syntax_id->uuid)));
		return NT_STATUS_NO_GUID_TRANSLATION;
	}

	/* status is the status of the rpc layer. If it is NT_STATUS_OK then
	   the handler status is in r->out.result */
	DEBUG(5,("%s now map_async_finish: status=%s async=%d\n", __FUNCTION__,
		 get_friendly_nt_error_msg (status), req->async_states->state & NTVFS_ASYNC_STATE_ASYNC));

	return ntvfs_map_async_finish(req, status);
}
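
/* Wire format sketch: the rpclite payload carried in io->ntioctl.in.blob is
   simply a concatenation of three NDR-pushed items, matching what
   smbcli_ndr_request_ntioctl_send() below produces:

	[ndr_syntax_id]   interface uuid + if_version  (picks the table)
	[uint32 opnum]                                  (picks the call)
	[NDR_IN body]     table->calls[opnum].ndr_pull  (the request struct)

   There is no auth or fragmentation layer of its own; transport framing is
   borrowed from the ntioctl request that carries the blob. */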
/* unpack the ntioctl to make some rpc_struct */
NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
	struct proxy_private *proxy=async->proxy;
	struct smbcli_request *c_req = async->c_req;
	void* r=io1;
	struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
	union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
	const struct ndr_interface_call *calls=info->calls;
	enum ndr_err_code ndr_err;
	DATA_BLOB *response;
	struct ndr_pull* pull;

	DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
	DEBUG(5,("%s op %s ntioctl: %s\n",
		 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
	NT_STATUS_NOT_OK_RETURN(status);

	if (c_req) {
		DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
		status = smb_raw_ioctl_recv(c_req, io, io);
#define SESSION_INFO proxy->remote_server, proxy->remote_share
		/* this status is the ntioctl wrapper status */
		if (! NT_STATUS_IS_OK(status)) {
			DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
				 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
			if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
			return NT_STATUS_UNSUCCESSFUL;
		}
	}

	dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);

	response=&io->ntioctl.out.blob;
	pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
	/* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */

	NT_STATUS_HAVE_NO_MEMORY(pull);

	ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
#warning can we free pull here?
	status=ndr_map_error2ntstatus(ndr_err);

	DEBUG(5,("END %s op status %s\n",
		 __FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}
/*
  send an ntioctl request based on an NDR encoding.
*/
struct smbcli_request *smbcli_ndr_request_ntioctl_send(
		struct smbcli_tree *tree,
		struct ntvfs_module_context *ntvfs,
		const struct ndr_interface_table *table,
		uint32_t opnum,
		void *r)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request * c_req;
	struct ndr_push *push;
	NTSTATUS status;
	DATA_BLOB request;
	enum ndr_err_code ndr_err;
	union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
	if (! io) return NULL;

	/* setup for a ndr_push_* call, we can't free push until the message
	   actually hits the wire */
	push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
	if (!push) return NULL;

	/* first push the interface table identifiers */
	ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
	status=ndr_map_error2ntstatus(ndr_err);

	if (! NT_STATUS_IS_OK(status)) return NULL;

	ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
	status=ndr_map_error2ntstatus(ndr_err);
	if (! NT_STATUS_IS_OK(status)) return NULL;

	if (0) {
		push->flags |= LIBNDR_FLAG_BIGENDIAN;
	}

	/* push the structure into a blob */
	ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
	status=ndr_map_error2ntstatus(ndr_err);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
			 nt_errstr(status)));
		return NULL;
	}

	/* retrieve the blob */
	request = ndr_push_blob(push);

	io->ntioctl.level=RAW_IOCTL_NTIOCTL;
	io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
	io->ntioctl.in.file.fnum=private->nttrans_fnum;
	io->ntioctl.in.fsctl=false;
	io->ntioctl.in.filter=0;
	io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
	io->ntioctl.in.blob=request;

	DEBUG(10,("smbcli_request packet:\n"));
	dump_data(10, request.data, request.length);

	c_req = smb_raw_ioctl_send(tree, io);

	if (! c_req) {
		return NULL;
	}

	dump_data(10, c_req->out.data, c_req->out.data_size);

	{ void* req=NULL;
		struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
		info->io=io;
		info->table=table;
		info->opnum=opnum;
		info->calls=&table->calls[opnum];
		ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
	}

	return c_req;
}
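
/* Usage sketch (see proxy_smb_raw_read_send/proxy_smb_raw_write_send below
   for the real callers): allocate the call struct, fill in r->in, send, then
   chain an unmapping callback onto the returned smbcli_request:

	struct proxy_Read *r=talloc_zero(io, struct proxy_Read);
	...fill r->in...
	c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs,
				&ndr_table_rpcproxy, NDR_PROXY_READ, r);
	ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);

   ntioctl_rpc_unmap above is chained automatically and pulls NDR_OUT back
   into r before any caller-supplied tail handler runs. */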
/*
  client helpers, mapping between proxy RPC calls and smbcli_* calls.
*/

/* If the sync_chain_handler is called directly it unplugs the async handler
   which (as well as preventing loops) will also avoid req->send_fn being
   called - which is also nice! */
NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
{
	struct async_info *async=NULL;
	/* the first callback which will actually receive the c_req response */
	struct async_info_map *async_map;
	NTSTATUS status=NT_STATUS_OK;
	struct async_info_map** chain;

	DEBUG(5,("%s\n",__FUNCTION__));
	if (! c_req) return NT_STATUS_UNSUCCESSFUL;

	/* if there is a handler installed, it is using async_info to chain */
	if (c_req->async.fn) {
		/* not safe to talloc_free async if send_fn has been called for the request
		   against which async was allocated, so steal it (and free below) or neither */
		async = talloc_get_type_abort(c_req->async.private, struct async_info);
		talloc_steal(NULL, async);
		chain=&async->chain;
		async_map = talloc_get_type_abort(*chain, struct async_info_map);
	} else {
		chain=(struct async_info_map**)&c_req->async.private;
		async_map = talloc_get_type_abort(*chain, struct async_info_map);
	}

	/* unplug c_req->async.fn as if a callback handler calls smb_*_recv
	   in order to receive the response, smbcli_transport_finish_recv will
	   call us again and then call the c_req->async.fn.
	   Perhaps we should merely call smbcli_request_receive() IF
	   c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
	   help multi-part replies... except all parts are received before the
	   callback if a handler WAS set */
	c_req->async.fn=NULL;

	/* Should we raise an error? Should we simple_recv? */
	while(async_map) {
		/* remove this one from the list before we call. We do this in case
		   some callbacks free their async_map but also so that callbacks
		   can navigate the async_map chain to add additional callbacks to
		   the end - e.g. so that tag-along reads can call send_fn after
		   the send_fn of the request they tagged along to, thus preserving
		   the async response order - which may be a waste of time? */
		DLIST_REMOVE(*chain, async_map);

		DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
		if (async_map->fn) {
			status=async_map->fn(async_map->async,
					     async_map->parms1, async_map->parms2, status);
		}
		DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
		/* Note: the callback may have added to the chain */
#warning async_maps have a null talloc_context, it is unclear who should own them
		/* it can't be c_req as it stops us chaining more than one, maybe it
		   should be req but there isn't always a req. However sync_chain_handler
		   will always free it if called */
		DEBUG(6,("Will free async map %p\n",async_map));
#warning put me back
		talloc_free(async_map);
		DEBUG(6,("Free'd async_map\n"));
		if (*chain)
			async_map=talloc_get_type_abort(*chain, struct async_info_map);
		else
			async_map=NULL;
		DEBUG(6,("Switch to async_map %p\n",async_map));

		/* The first callback will have read c_req, thus talloc_free'ing it,
		   so we don't let the other callbacks get hurt playing with it */
		if (async_map && async_map->async)
			async_map->async->c_req=NULL;
	}

	talloc_free(async);

	DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
	return status;
}
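
/* Chain mechanics, in brief: ADD_ASYNC_RECV_TAIL appends an async_info_map
   node {fn, async, parms1, parms2} to the chain hung off c_req, so a single
   response can be post-processed by several handlers in order. A sketch of
   the drain loop above:

	while ((m = *chain)) {
		DLIST_REMOVE(*chain, m);
		status = m->fn(m->async, m->parms1, m->parms2, status);
		talloc_free(m);
	}

   Each callback receives the previous callback's status, so any handler can
   veto the rest of the chain by returning an error (see the
   NT_STATUS_NOT_OK_RETURN calls in the unmap handlers above). */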
/* If the async handler is called, then the send_fn is called */
static void async_chain_handler(struct smbcli_request *c_req)
{
	struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
	struct ntvfs_request *req = async->req;
	NTSTATUS status;

	if (c_req->state <= SMBCLI_REQUEST_RECV) {
		/* looks like the async handler has been called synchronously,
		   before the reply has actually been received */
		smb_panic("async_chain_handler called synchronously");
	}

	status=sync_chain_handler(c_req);

	/* Should we insist that a chain'd handler does this?
	   Which makes it hard to intercept the data by adding handlers
	   before the send_fn handler sends it... */
	if (req) {
		DEBUG(5,("%s send_fn on req=%p\n",__FUNCTION__,req));
		req->async_states->status=status;
		req->async_states->send_fn(req);
	}
}
/* unpack the rpc struct to make some smb_write */
NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
				       void* io1, void* io2, NTSTATUS status)
{
	union smb_write* io =talloc_get_type(io1, union smb_write);
	struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);

	DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg (status)));
	DEBUG(3,("Write response for offset=%lld\n",(long long)io->generic.in.offset));
	NT_STATUS_NOT_OK_RETURN(status);

	status=r->out.result;
	DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	NT_STATUS_NOT_OK_RETURN(status);

	io->generic.out.remaining = r->out.remaining;
	io->generic.out.nwritten = r->out.nwritten;

	DEBUG(5,("END: %s status %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg (status)));
	return status;
}
/* upgrade from smb to NDR and then send.
   The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response */
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
						union smb_write *io,
						struct proxy_file *f)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_tree *tree=private->tree;

	if (PROXY_REMOTE_SERVER(private)) {
		struct smbcli_request *c_req;
		struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
		ssize_t size;

		if (! r) return NULL;

		size=io->generic.in.count;
		/* upgrade the write */
		r->in.fnum = io->generic.in.file.fnum;
		r->in.offset = io->generic.in.offset;
		r->in.count = io->generic.in.count;
		r->in.mode = io->generic.in.wmode;
//		r->in.remaining = io->generic.in.remaining;
#warning remove this
		/* prepare to lie */
		r->out.nwritten=r->in.count;
		r->out.remaining=0;

		/* try to compress */
#warning compress!
		r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
		if (r->in.data.compress.data) {
			r->in.data.compress.count=size;
			r->in.flags = PROXY_USE_ZLIB;
		} else {
			r->in.flags = 0;
			/* we'll honour const, honest gov */
			r->in.data.generic.data=discard_const(io->generic.in.data);
			r->in.data.generic.count=io->generic.in.count;
		}

		c_req = smbcli_ndr_request_ntioctl_send(private->tree,
							ntvfs,
							&ndr_table_rpcproxy,
							NDR_PROXY_WRITE, r);
		if (! c_req) return NULL;

		/* yeah, filthy abuse of f */
		{ void* req=NULL;
			ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
		}

		return c_req;
	} else {
		return smb_raw_write_send(tree, io);
	}
}
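
/* Design note: compress_block_talloc (as used above) is expected to return
   NULL when compression fails, in which case the write falls back to sending
   the generic data without PROXY_USE_ZLIB. A hedged sketch of a caller-side
   guard with the same shape:

	ssize_t size=in_count;
	uint8_t *z=compress_block_talloc(r, in_data, &size);
	if (z && size < in_count) {
		... send compressed, count=size, flags|=PROXY_USE_ZLIB ...
	} else {
		... send in_data verbatim, flags=0 ...
	}

   Checking size < in_count (not done above) would also skip incompressible
   payloads that zlib happens to inflate. */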
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
			     union smb_write *io,
			     struct proxy_file *f)
{
	struct proxy_private *proxy = ntvfs->private_data;
	struct smbcli_tree *tree=proxy->tree;

	if (PROXY_REMOTE_SERVER(proxy)) {
		struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
		return sync_chain_handler(c_req);
	} else {
		struct smbcli_request *c_req = smb_raw_write_send(tree, io);
		return smb_raw_write_recv(c_req, io);
	}
}
/* unpack the rpc struct to make some smb_read response */
NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
				      void* io1, void* io2, NTSTATUS status)
{
	union smb_read* io =talloc_get_type_abort(io1, union smb_read);
	struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
	struct proxy_file *f = async->f;
	struct proxy_private *private=async->proxy;

	DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));
	NT_STATUS_NOT_OK_RETURN(status);

	status=r->out.result;
	DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
		 get_friendly_nt_error_msg(status)));
	NT_STATUS_NOT_OK_RETURN(status);

	io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
	io->generic.out.compaction_mode = 0;

	if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
		/* Use the io we already set up!
		   if out.flags & PROXY_VALIDATE, we may need to validate more in
		   the cache than r->out.nread would suggest, see io->generic.out.nread */
		if (r->out.flags & PROXY_VALIDATE)
			io->generic.out.nread=io->generic.in.maxcnt;
		DEBUG(5,("Using cached data: size=%lld\n",
			 (long long) io->generic.out.nread));
		return status;
	}

	if (r->in.flags & PROXY_VALIDATE) {
		DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
		/* turn off validate on this file */
		//cache_handle_novalidate(f);
#warning turn off validate on this file - do an nread<maxcnt later
	}

	if (r->in.flags & PROXY_USE_CACHE) {
		DEBUG(5,("Cached data did not match\n"));
	}

	io->generic.out.nread = r->out.nread;

	/* we may need to uncompress */
	if (r->out.flags & PROXY_USE_ZLIB) {
		ssize_t size=r->out.response.compress.count;
		DEBUG(5,("%s: uncompress, %lld wanted %lld or %lld\n",__LOCATION__,
			 (long long int)size,
			 (long long int)io->generic.in.maxcnt,
			 (long long int)io->generic.in.mincnt));
		if (size > io->generic.in.mincnt) {
			/* we did a bulk read for the cache */
			uint8_t *data=talloc_size(io, io->generic.in.maxcnt);
			DEBUG(5,("%s: bulk uncompress to %p\n",__LOCATION__,data));
			if (! uncompress_block_to(data,
						  r->out.response.compress.data, &size,
						  io->generic.in.maxcnt) ||
			    size != r->out.nread) {
				status=NT_STATUS_INVALID_USER_BUFFER;
			} else {
				DEBUG(5,("%s: uncompressed\n",__LOCATION__));
				/* copy as much as they can take */
				io->generic.out.nread=MIN(io->generic.in.mincnt, size);
				memcpy(io->generic.out.data, data, io->generic.out.nread);
				/* copy the rest to the cache */
				cache_handle_save(f, data,
						  size,
						  io->generic.in.offset);
			}
		} else if (! uncompress_block_to(io->generic.out.data,
						 r->out.response.compress.data, &size,
						 io->generic.in.maxcnt) ||
			   size != r->out.nread) {
			io->generic.out.nread=size;
			status=NT_STATUS_INVALID_USER_BUFFER;
		}
	} else if (io->generic.out.data != r->out.response.generic.data) {
		//Assert(r->out.nread == r->out.generic.out.count);
		memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
	}

	if (r->out.cache_name.s && f && f->cache) {
		int result;
		setenv("WAFS_CACHE_REMOTE_NAME",r->out.cache_name.s,1);
		setenv("WAFS_CACHE_LOCAL_NAME",f->cache->cache_name,1);
		setenv("WAFS_REMOTE_SERVER",private->remote_server,1);
		DEBUG(5,("%s running cache transfer command: %s\n",__LOCATION__,getenv("WAFS_CACHE_REMOTE_NAME")));
		result=system(getenv("WAFS_CACHE_TRANSFER"));
		DEBUG(5,("%s cache transfer command result %d\n",__LOCATION__,result));
		// now set cache to make the whole local file valid
		cache_validated(f->cache, cache_len(f->cache));
	}

	return status;
}
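
/* Response paths, in summary (a reading aid, not new behaviour):
   1. r->out.flags has PROXY_USE_CACHE or PROXY_VALIDATE: the remote end
      confirmed our digest, so the locally cached bytes already sitting in
      io->generic.out.data are used as-is.
   2. r->out.flags has PROXY_USE_ZLIB: inflate into io->generic.out.data, or
      into a bulk scratch buffer first when the server returned more than
      mincnt so the surplus can be cache_handle_save'd.
   3. otherwise: plain bytes, memcpy'd only if the NDR pull didn't already
      decode straight into our buffer. */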
/* Warning: assumes that if io->generic.out.nread is not zero, then some
   data has been pre-read into io->generic.out.data and can be used for
   proxy<->proxy optimized reads */
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io,
					       struct proxy_file *f,
					       struct proxy_Read *r)
{
	struct proxy_private *private = ntvfs->private_data;
#warning we are using out.nread as an out-of-band parameter
	if (PROXY_REMOTE_SERVER(private)) {

		struct smbcli_request *c_req;
		if (! r) {
			r=talloc_zero(io, struct proxy_Read);
			if (! r) return NULL;
			r->in.mincnt = io->generic.in.mincnt;
		}

		r->in.fnum = io->generic.in.file.fnum;
		r->in.read_for_execute=io->generic.in.read_for_execute;
		r->in.offset = io->generic.in.offset;
		r->in.maxcnt = io->generic.in.maxcnt;
		r->in.remaining = io->generic.in.remaining;
		r->in.flags |= PROXY_USE_ZLIB;
		if (! (r->in.flags & PROXY_VALIDATE) &&
		    io->generic.out.data && io->generic.out.nread > 0) {
			/* maybe we should limit the digest size to MIN(nread, maxcnt) to
			   permit the caller to provide a larger nread as part of
			   a split read */
			checksum_block(r->in.digest.digest, io->generic.out.data,
				       io->generic.out.nread);

			if (io->generic.out.nread > r->in.maxcnt) {
				DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
			} else {
				r->in.mincnt = io->generic.out.nread;
				r->in.maxcnt = io->generic.out.nread;
				r->in.flags |= PROXY_USE_CACHE;
				/* PROXY_VALIDATE will have been set by the caller */
			}
		}

		if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
			DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
			dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
		}

		c_req = smbcli_ndr_request_ntioctl_send(private->tree,
							ntvfs,
							&ndr_table_rpcproxy,
							NDR_PROXY_READ, r);
		if (! c_req) return NULL;

		{ void* req=NULL;
			ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
		}

		return c_req;
	} else {
		return smb_raw_read_send(private->tree, io);
	}
}

NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io,
			    struct proxy_file *f)
{
	struct proxy_private *proxy = ntvfs->private_data;
	struct smbcli_tree *tree=proxy->tree;

	if (PROXY_REMOTE_SERVER(proxy)) {
		struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
		return sync_chain_handler(c_req);
	} else {
		struct smbcli_request *c_req = smb_raw_read_send(tree, io);
		return smb_raw_read_recv(c_req, io);
	}
}
/*
  initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
*/
NTSTATUS ntvfs_proxy_init(void)
{
	NTSTATUS ret;
	struct ntvfs_ops ops;
	NTVFS_CURRENT_CRITICAL_SIZES(vers);

	ZERO_STRUCT(ops);

	/* fill in the name and type */
	ops.name = "proxy";
	ops.type = NTVFS_DISK;

	/* fill in all the operations */
	ops.connect = proxy_connect;
	ops.disconnect = proxy_disconnect;
	ops.unlink = proxy_unlink;
	ops.chkpath = proxy_chkpath;
	ops.qpathinfo = proxy_qpathinfo;
	ops.setpathinfo = proxy_setpathinfo;
	ops.open = proxy_open;
	ops.mkdir = proxy_mkdir;
	ops.rmdir = proxy_rmdir;
	ops.rename = proxy_rename;
	ops.copy = proxy_copy;
	ops.ioctl = proxy_ioctl;
	ops.read = proxy_read;
	ops.write = proxy_write;
	ops.seek = proxy_seek;
	ops.flush = proxy_flush;
	ops.close = proxy_close;
	ops.exit = proxy_exit;
	ops.lock = proxy_lock;
	ops.setfileinfo = proxy_setfileinfo;
	ops.qfileinfo = proxy_qfileinfo;
	ops.fsinfo = proxy_fsinfo;
	ops.lpq = proxy_lpq;
	ops.search_first = proxy_search_first;
	ops.search_next = proxy_search_next;
	ops.search_close = proxy_search_close;
	ops.trans = proxy_trans;
	ops.logoff = proxy_logoff;
	ops.async_setup = proxy_async_setup;
	ops.cancel = proxy_cancel;
	ops.notify = proxy_notify;
	ops.trans2 = proxy_trans2;

	/* register ourselves with the NTVFS subsystem. We register
	   under the name 'proxy'. */
	ret = ntvfs_register(&ops, &vers);

	if (!NT_STATUS_IS_OK(ret)) {
		DEBUG(0,("Failed to register PROXY backend!\n"));
	}

	return ret;
}