/*
  Unix SMB/PROXY implementation.

  CIFS PROXY NTVFS filesystem backend

  Copyright (C) Andrew Tridgell 2003
  Copyright (C) James J Myers 2003 <myersjj@samba.org>
  Copyright (C) Sam Liddicott <sam@liddicott.com>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.

  TODO:
    - new read-ahead
    - delete cache
    - share cache states between processes
    - update to latest samba
    - limit dirmons etc
    - mapi delegated creds
*/

#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx
#define __LOCATION__ (talloc_asprintf(debug_ctx(),"%s:%d %s",__FILE__,__LINE__,__FUNCTION__))
#define PROXY_NTIOCTL_MAXDATA 0x2000000

#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"
#include "librpc/gen_ndr/proxy.h"
#include "smb_server/smb_server.h"

#define fstrcmp(a,b) strcasecmp((a),(b))
#define fstrncmp(a,b,len) strncasecmp((a),(b),(len))

#define LOAD_CACHE_FILE_DATA(dest, src) do { \
        dest.create_time=src.create_time; \
        dest.access_time=src.access_time; \
        dest.write_time=src.write_time; \
        dest.change_time=src.change_time; \
        dest.attrib=src.attrib; \
        dest.alloc_size=src.alloc_size; \
        dest.size=src.size; \
        dest.file_type=src.file_type; \
        dest.ipc_state=src.ipc_state; \
        dest.is_directory=src.is_directory; \
        dest.delete_pending=0; \
} while(0)

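/* Illustrative sketch (not a call site from this section): load cached
   metadata from an ntcreatex wire response, assuming dest is a
   proxy_file_info_data and src the out-union of a union smb_open:

        LOAD_CACHE_FILE_DATA(f->metadata->info_data, io->generic.out);

   The dest.delete_pending=0 above suggests dest is the cache-side struct. */
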
/* taken from #include "librpc/gen_ndr/proxy.h" */
struct proxy_file_info_data {
        /* first three are from ntcreatex */
        uint16_t file_type;
        uint16_t ipc_state;
        uint8_t is_directory;
        NTSTATUS status_RAW_FILEINFO_BASIC_INFORMATION;
        uint32_t attrib;       /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
        NTTIME create_time;    /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
        NTTIME access_time;    /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
        NTTIME write_time;     /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
        NTTIME change_time;    /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
        NTSTATUS status_RAW_FILEINFO_ALL_INFO;
        uint32_t ea_size;      /* RAW_FILEINFO_ALL_INFO */
        uint64_t alloc_size;   /* RAW_FILEINFO_ALL_INFO */
        uint64_t size;         /* RAW_FILEINFO_ALL_INFO */
        uint32_t nlink;        /* RAW_FILEINFO_ALL_INFO */
        struct sws fname;      /* RAW_FILEINFO_ALL_INFO */
        uint8_t delete_pending; /* RAW_FILEINFO_ALL_INFO */
        uint8_t directory;     /* RAW_FILEINFO_ALL_INFO */
        NTSTATUS status_RAW_FILEINFO_COMPRESSION_INFO;
        uint64_t compressed_size; /* RAW_FILEINFO_COMPRESSION_INFO */
        uint16_t format;       /* RAW_FILEINFO_COMPRESSION_INFO */
        uint8_t unit_shift;    /* RAW_FILEINFO_COMPRESSION_INFO */
        uint8_t chunk_shift;   /* RAW_FILEINFO_COMPRESSION_INFO */
        uint8_t cluster_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
        NTSTATUS status_RAW_FILEINFO_INTERNAL_INFORMATION;
        uint64_t file_id;      /* RAW_FILEINFO_INTERNAL_INFORMATION */
        NTSTATUS status_RAW_FILEINFO_ACCESS_INFORMATION;
        uint32_t access_flags; /* RAW_FILEINFO_ACCESS_INFORMATION */
        NTSTATUS status_RAW_FILEINFO_POSITION_INFORMATION;
        uint64_t position;     /* RAW_FILEINFO_POSITION_INFORMATION */
        NTSTATUS status_RAW_FILEINFO_MODE_INFORMATION;
        uint32_t mode;         /* RAW_FILEINFO_MODE_INFORMATION */
        NTSTATUS status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
        uint32_t alignment_requirement; /* RAW_FILEINFO_ALIGNMENT_INFORMATION */
        NTSTATUS status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
        uint32_t reparse_tag;    /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
        uint32_t reparse_attrib; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
        NTSTATUS status_RAW_FILEINFO_STREAM_INFO;
        uint32_t num_streams;  /* RAW_FILEINFO_STREAM_INFO */
        struct info_stream *streams; /* RAW_FILEINFO_STREAM_INFO */
};

#define valid_RAW_FILEINFO_BASIC_INFORMATION 1
#define valid_RAW_FILEINFO_ALL_INFO 2
#define valid_RAW_FILEINFO_COMPRESSION_INFO 3
#define valid_RAW_FILEINFO_INTERNAL_INFORMATION 4
#define valid_RAW_FILEINFO_STANDARD_INFO 8
#define valid_RAW_FILEINFO_ACCESS_INFORMATION 16
#define valid_RAW_FILEINFO_POSITION_INFORMATION 32
#define valid_RAW_FILEINFO_MODE_INFORMATION 64
#define valid_RAW_FILEINFO_ALIGNMENT_INFORMATION 128
#define valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION 256
#define valid_RAW_FILEINFO_STREAM_INFO 512

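/* Illustrative (hypothetical) use of the valid bits: check that a cache
   entry carries the level we need before serving from it, e.g.

        if (f->metadata && (f->metadata->valid & valid_RAW_FILEINFO_BASIC_INFORMATION)) {
                status = proxy_cache_info(io, f->metadata, NULL);
        }

   proxy_cache_info() below both serves the request and reports validity. */
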
struct file_metadata {
        int count;
        int valid;
        struct proxy_file_info_data info_data;
};

struct proxy_file {
        struct proxy_file *prev, *next;
        struct proxy_private* proxy;
        uint16_t fnum;
        struct ntvfs_handle *h;
        struct cache_file_entry *cache;
        /* filename might not be a char*, but if so, _size includes null */
        void* filename;
        int filename_size;
        int readahead_pending;
        /* *_OPLOCK_RETURN values */
        int oplock;
        /* read-only, shareable normal file open, can be cloned by similar opens */
        bool can_clone;
        /* If we have an oplock, then the file is NOT bigger than size, which lets
           us optimize reads */
        struct file_metadata *metadata;
};

struct proxy_private;

struct search_handle {
        struct search_handle *prev, *next;
        struct proxy_private *proxy;
        struct ntvfs_handle *h;
        uint16_t handle;
        union {
                struct smb_search_id id;
                uint32_t resume_key;
        } resume_index;
        struct search_cache_item *resume_item;
        enum smb_search_level level;
        enum smb_search_data_level data_level;
        /* search cache (if any) being used */
        struct search_cache *cache;
};

struct search_cache_item {
        struct search_cache_item *prev, *next;
        enum smb_search_data_level data_level;
        struct cache_file_entry *cache;
        union smb_search_data *file;
        struct file_metadata *metadata;
};

enum search_cache_status {
        SEARCH_CACHE_INCOMPLETE,
        SEARCH_CACHE_COMPLETE,
        SEARCH_CACHE_DEAD
};

struct fdirmon;
typedef void*(fdirmon_callback_fn)(void* data, struct fdirmon* fdirmon);
//NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS)

struct fdirmon {
        struct fdirmon *prev, *next;
        struct search_cache_item *items;

        struct proxy_private *proxy;

        union smb_notify *notify_io;
        struct smbcli_request *notify_req;
        uint16_t dir_fnum;
        char* dir;
        struct fdirmon_callback {
                struct fdirmon_callback *prev, *next;
                fdirmon_callback_fn *fn;
                void* data;
        } *callbacks;
};

struct search_cache {
        struct search_cache *prev, *next;
        struct search_cache_item *items;

        struct proxy_private *proxy;
        enum search_cache_status status;

        union smb_notify *notify_io;
        struct smbcli_request *notify_req;
        uint16_t dir_fnum;
        char* dir;

        struct search_cache_key {
                enum smb_search_level level;
                enum smb_search_data_level data_level;
                uint16_t search_attrib;
                const char *pattern;
                /* these only for trans2 */
                uint16_t flags;
                uint32_t storage_type;
        } key;
};

struct search_state {
        struct search_handle *search_handle;
        void* private;
        smbcli_search_callback callback;
        struct search_cache_item *last_item;
        uint16_t count;     /* how many the client has received */
        uint16_t all_count; /* how many we have received */
};

struct fs_attribute_info {
        uint32_t fs_attr;
        uint32_t max_file_component_length;
        struct smb_wire_string fs_type;
};

/* this is stored in ntvfs_private */
struct proxy_private {
        struct smbcli_tree *tree;
        struct smbcli_transport *transport;
        struct ntvfs_module_context *ntvfs;
        struct async_info *pending;
        struct proxy_file *files;
        struct proxy_file *closed_files;
        struct fdirmon *dirmons;
        struct search_cache *search_caches; /* caches of find-first data */
        struct search_handle *search_handles; /* open search handles */
        bool map_generic;
        bool map_trans2;
        bool cache_enabled;
        int cache_readahead; /* default read-ahead window size */
        int cache_readaheadblock; /* size of each read-ahead request */
        ssize_t cache_validatesize; /* chunk size to validate, results in a read of this size on the remote server */
        char *remote_server;
        char *remote_share;
        struct cache_context *cache;
        struct fs_attribute_info *fs_attribute_info;
        int readahead_spare; /* number of pending non-user-generated requests */
        bool fake_oplock; /* useful for testing, smbclient never asks for an oplock */
        bool fake_valid;  /* useful for testing, treat cached metadata as valid */
        uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
        bool enabled_cache_info;
        bool enabled_proxy_search;
        bool enabled_open_clone;
        bool enabled_extra_protocol;
        bool enabled_qpathinfo;
};

struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
        struct async_info *next, *prev;
        struct proxy_private *proxy;
        struct ntvfs_request *req;
        struct smbcli_request *c_req;
        struct proxy_file *f;
        struct async_info_map *chain;
        void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
        struct async_info_map *next, *prev;
        NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
        void *parms1;
        void *parms2;
        struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
        void* io;
        const struct ndr_interface_call *calls;
        const struct ndr_interface_table *table;
        uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
        const struct ndr_interface_call* call;
        void* struct_ptr;
};

#define SETUP_PID private->tree->session->pid = req->smbpid

#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
        RPCLITE_SETUP_THIS_FILE_HERE(r->in.fnum, f, h); \
} while (0)

#define RPCLITE_SETUP_THIS_FILE_HERE(FNUM, f, h) do { \
        if ((h = ntvfs_find_handle(private->ntvfs, req, FNUM)) && \
            (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
                FNUM = f->fnum; \
        } else { \
                r->out.result = NT_STATUS_INVALID_HANDLE; \
                return NT_STATUS_OK; \
        } \
} while (0)

#define SETUP_FILE_HERE(f) do { \
        f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
        if (!f) return NT_STATUS_INVALID_HANDLE; \
        io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
        struct proxy_file *f; \
        SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
        SETUP_PID; \
        SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)

#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in KB */

#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

#define PROXY_FAKE_VALID "proxy:fake-valid"
#define PROXY_FAKE_VALID_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true

/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
        ((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
         && (strcmp("A:",(private)->tree->device)==0) \
         && ((private)->nttrans_fnum!=0) \
         && ((private)->enabled_extra_protocol))

/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
                              struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
        struct smbcli_tree *tree,
        struct ntvfs_module_context *ntvfs,
        const struct ndr_interface_table *table,
        uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
                                               union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
                            union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
                                                union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
                             union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);

struct smb_wire_string talloc_smb_wire_string_dup(void* mem_ctx, const struct smb_wire_string* string)
{
        struct smb_wire_string result;
        result.private_length=string->private_length;
        result.s=talloc_strndup(mem_ctx, string->s, string->private_length);
        DEBUG(5,("%s: %s\n",__FUNCTION__, string->s));
        return result;
}

#define sws_dup(mem_ctx, dest, src) (\
        dest=talloc_smb_wire_string_dup(mem_ctx, &(src)), \
        (dest.s==NULL && src.s!=NULL))

/* These need replacing with something more canonical perhaps */
static char* talloc_dirname(void* mem_ctx, const char* path) {
        const char* dir;

        if ((dir=strrchr(path,'\\'))) {
                return talloc_strndup(mem_ctx, path, (dir - path));
        } else {
                return talloc_strdup(mem_ctx,"");
        }
}

/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
        struct proxy_private *private = p_private;
        NTSTATUS status;
        struct ntvfs_handle *h = NULL;
        struct proxy_file *f;
        bool result=true;

        /* because we clone handles, there may be more than one match */
        for (f=private->files; f; f=f->next) {
                if (f->fnum != fnum) continue;
                h = f->h;

                if (level==OPLOCK_BREAK_TO_LEVEL_II) {
                        f->oplock=LEVEL_II_OPLOCK_RETURN;
                } else {
                        /* If we don't have an oplock, then we can't rely on the cache */
                        cache_handle_stale(f);
                        f->oplock=NO_OPLOCK_RETURN;
                }

                DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
                status = ntvfs_send_oplock_break(private->ntvfs, h, level);
                if (!NT_STATUS_IS_OK(status)) result=false;
        }
        if (!h) {
                DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
        }

        return result;
}

/*
  get the file handle from the client's fnum (from ntvfs/ipc/vfs_ipc.c, at metze's suggestion)
*/
static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
                                              struct ntvfs_request *req,
                                              uint16_t fnum)
{
        DATA_BLOB key;
        uint16_t _fnum;

        /*
         * the fnum is already in host byteorder
         * but ntvfs_handle_search_by_wire_key() expects
         * network byteorder
         */
        SSVAL(&_fnum, 0, fnum);
        key = data_blob_const(&_fnum, 2);

        return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
}

/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
                              struct ntvfs_request *req, const char *sharename)
{
        NTSTATUS status;
        struct proxy_private *private;
        const char *host, *user, *pass, *domain, *remote_share;
        struct smb_composite_connect io;
        struct composite_context *creq;
        struct share_config *scfg = ntvfs->ctx->config;
        int nttrans_fnum;

        struct cli_credentials *credentials;
        bool machine_account;

        /* Here we need to determine which server to connect to.
         * For now we use parametric options, type proxy.
         * Later we will use security=server and auth_server.c.
         */
        host = share_string_option(scfg, PROXY_SERVER, NULL);
        user = share_string_option(scfg, PROXY_USER, NULL);
        pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
        domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
        remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
        if (!remote_share) {
                remote_share = sharename;
        }

        machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

        private = talloc_zero(ntvfs, struct proxy_private);
        if (!private) {
                return NT_STATUS_NO_MEMORY;
        }

        ntvfs->private_data = private;

        if (!host) {
                DEBUG(1,("PROXY backend: You must supply server\n"));
                return NT_STATUS_INVALID_PARAMETER;
        }

        if (user && pass) {
                DEBUG(5, ("PROXY backend: Using specified password\n"));
                credentials = cli_credentials_init(private);
                if (!credentials) {
                        return NT_STATUS_NO_MEMORY;
                }
                cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
                cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
                if (domain) {
                        cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
                }
                cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
        } else if (machine_account) {
                DEBUG(5, ("PROXY backend: Using machine account\n"));
                credentials = cli_credentials_init(private);
                cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
                if (domain) {
                        cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
                }
                status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
        } else if (req->session_info->credentials) {
                DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
                credentials = req->session_info->credentials;
        } else {
                DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* connect to the server, using the smbd event context */
        io.in.dest_host = host;
        io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
        io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
        io.in.called_name = host;
        io.in.credentials = credentials;
        io.in.fallback_to_anonymous = false;
        io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
        io.in.service = remote_share;
        io.in.service_type = "?????";
        io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
        io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
        lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
        lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

        creq = smb_composite_connect_send(&io, private,
                                          lp_resolve_context(ntvfs->ctx->lp_ctx),
                                          ntvfs->ctx->event_ctx);
        status = smb_composite_connect_recv(creq, private);
        NT_STATUS_NOT_OK_RETURN(status);

        private->tree = io.out.tree;

        private->transport = private->tree->session->transport;
        SETUP_PID;
        private->ntvfs = ntvfs;

        ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
        NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
        ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
        NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

        /* we need to receive oplock break requests from the server */
        smbcli_oplock_handler(private->transport, oplock_handler, private);

        private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

        private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

        private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

        if (strcmp("A:",private->tree->device)==0) {
                private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
                private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
                private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
                                                                 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
                private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
                private->fake_valid = share_bool_option(scfg, PROXY_FAKE_VALID, PROXY_FAKE_VALID_DEFAULT);
                private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
                private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
                private->enabled_cache_info=true;
                private->enabled_proxy_search=true;
                private->enabled_open_clone=true;
                private->enabled_extra_protocol=true;
                private->enabled_qpathinfo=true;

                DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
                         remote_share, private->tree->device,private->tree->fs_type,
                         (private->cache_enabled)?"enabled":"disabled",
                         private->cache_readahead));
        } else {
                private->cache_enabled = false;
                DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
                         remote_share, private->tree->device,private->tree->fs_type));
        }

        private->remote_server = strlower_talloc(private, host);
        private->remote_share = strlower_talloc(private, remote_share);

        /* Some proxy operations are not performed on files, so open a handle
           now that we can use for such things. We won't bother to close it on
           shutdown, as the remote server ought to be able to close it for us,
           and we might be shutting down because the remote server went away,
           so we don't want to delay further. */
        nttrans_fnum=smbcli_nt_create_full(private->tree, "\\",
                                           0, /* flags; assumed 0, this line was lost */
                                           SEC_FILE_READ_DATA,
                                           FILE_ATTRIBUTE_NORMAL,
                                           NTCREATEX_SHARE_ACCESS_MASK,
                                           NTCREATEX_DISP_OPEN,
                                           NTCREATEX_OPTIONS_DIRECTORY,
                                           NTCREATEX_IMPERSONATION_IMPERSONATION);
        if (nttrans_fnum < 0) {
                DEBUG(5,("Could not open handle for ntioctl %d\n",nttrans_fnum));
                //return NT_STATUS_UNSUCCESSFUL;
        }
        private->nttrans_fnum=nttrans_fnum;
        DEBUG(5,("Got nttrans handle %d\n",private->nttrans_fnum));

        return NT_STATUS_OK;
}

/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
        struct proxy_private *private = ntvfs->private_data;
        struct async_info *a, *an;
        struct search_cache *s;

        /* first clean up the caches, because they have a pending request that
           they would otherwise try to clean up later and fail during talloc_free */
        for (s=private->search_caches; s; s=s->next) {
                if (s->notify_req) {
                        talloc_unlink(s, s->notify_req);
                        s->notify_req=NULL;
                }
                s->dir_fnum=65535;
        }

        /* then clean up pending requests */
        for (a=private->pending; a; a = an) {
                an = a->next;
                smbcli_request_destroy(a->c_req);
                talloc_free(a);
        }

        talloc_free(private);
        ntvfs->private_data = NULL;

        return NT_STATUS_OK;
}

/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
        DLIST_REMOVE(async->proxy->pending, async);
        return 0;
}

/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct ntvfs_request *req = async->req;
        req->async_states->status = smbcli_request_simple_recv(c_req);
        talloc_free(async);
        req->async_states->send_fn(req);
}

/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
        type=check; \
        t=t; \
} while (0)

/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
        if (!c_req) return (error); \
        ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain); \
        if (! c_req->async.private) return (error); \
} while(0)

#define ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain) do { \
        TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
        { \
                struct async_info *async; \
                async = talloc(req, struct async_info); \
                if (async) { \
                        async->parms = io; \
                        async->req = req; \
                        async->f = file; \
                        async->proxy = private; \
                        async->c_req = c_req; \
                        async->chain = achain; \
                        DLIST_ADD(private->pending, async); \
                        c_req->async.private = async; \
                        talloc_set_destructor(async, async_info_destructor); \
                } \
        } \
        c_req->async.fn = async_fn; \
} while (0)

#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
        if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
        TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
        { \
                struct async_info *async; \
                async = talloc(req, struct async_info); \
                if (!async) return NT_STATUS_NO_MEMORY; \
                async->parms = io; \
                async->req = req; \
                async->f = file; \
                async->proxy = private; \
                async->c_req = c_req; \
                DLIST_ADD(private->pending, async); \
                c_req->async.private = async; \
                talloc_set_destructor(async, async_info_destructor); \
        } \
        c_req->async.fn = async_fn; \
        req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
        return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)

/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   If async->c_req is NULL then an earlier chain has already received the
   request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record:
     static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed,
   and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *);
   chained async_info_map will be in c_req->async.private */

#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
        if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
        ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
        if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
        ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
        req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
        return NT_STATUS_OK; \
} while(0)

/* debug trace of ADD_ASYNC_RECV_TAIL arguments; the macro's opening and
   closing lines were lost in the source, so this wrapper is a reconstruction
   (assumed) around the surviving DEBUG body */
#define DEBUG_ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn) do { \
        DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
                 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL, \
                 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
                 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
                 file, file?"file":"null", file?"file":"null", #async_fn)); \
} while(0)

#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
        if (! creq) { \
                DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no creq\n",__FUNCTION__)); \
                return (error); \
        } else { \
                struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
                if (! async_map) { \
                        DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map\n",__FUNCTION__)); \
                        return (error); \
                } \
                async_map->async=talloc(async_map, struct async_info); \
                if (! async_map->async) { \
                        DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map->async\n",__FUNCTION__)); \
                        return (error); \
                } \
                async_map->parms1=io1; \
                async_map->parms2=io2; \
                async_map->fn=async_fn; \
                async_map->async->parms = io1; \
                async_map->async->req = req; \
                async_map->async->f = file; \
                async_map->async->proxy = private; \
                async_map->async->c_req = creq; \
                /* If async_chain_handler is installed, get the list from param */ \
                if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
                        struct async_info *i=creq->async.private; \
                        DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
                } else if (creq->async.fn) { \
                        /* incompatible handler installed */ \
                        DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL incompatible handler already installed\n",__FUNCTION__)); \
                        return (error); \
                } else { \
                        DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
                } \
        } \
} while(0)

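/* Sketch of the chained model described above (assumed context: c_req is an
   in-flight request, io and io2 are request parms, f a proxy_file). First
   queue extra steps, then install the chain manager:

        ADD_ASYNC_RECV_TAIL(c_req, io, io2, f, async_read_fragment, NT_STATUS_NO_MEMORY);
        ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);

   async_chain_handler then receives the reply once and calls each queued
   fn(async, parms1, parms2, status) in order. */
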
static void async_dirmon_notify(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct ntvfs_request *req = async->req;
        struct fdirmon *dirmon;
        struct fdirmon_callback *callback;
        struct proxy_private *proxy = async->proxy;
        int f;

        NTSTATUS status;

        dirmon = talloc_get_type_abort((void*)async->f, struct fdirmon);
        DEBUG(5,("%s: dirmon %s invalidated\n",__LOCATION__, dirmon->dir));

        status = smb_raw_changenotify_recv(c_req, req, async->parms);
        DEBUG(5,("%s: update status %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));

        if (dirmon->notify_req) {
                talloc_unlink(dirmon, dirmon->notify_req);
                dirmon->notify_req=NULL;
        }
        /* Mark closed cached files as invalid if they changed, as they will be
           assuming cache is valid if a dirmon exists and hasn't invalidated it */
        for(f=0; f<dirmon->notify_io->nttrans.out.num_changes; f++) {
                DEBUG(1,("DIRMON: %s changed\n",dirmon->notify_io->nttrans.out.changes[f].name.s));
        }
        DLIST_FOR_EACH(dirmon->callbacks, callback, callback->fn(callback->data, dirmon));

        /* So nothing can find it even if there are still in-use references */
        DLIST_REMOVE(proxy->dirmons, dirmon);
        if (dirmon->dir_fnum!=65535) {
                struct smbcli_request *req;
                union smb_close close_parms;
                close_parms.close.level = RAW_CLOSE_CLOSE;
                close_parms.close.in.file.fnum = dirmon->dir_fnum;
                close_parms.close.in.write_time = 0;

                /* the destructor may be called from a notify response and won't be able
                   to wait on this close response, not that we care anyway */
                req=smb_raw_close_send(proxy->tree, &close_parms);

                DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, dirmon->dir_fnum, req));
                dirmon->dir_fnum=65535;
        }
        talloc_free(async);
        talloc_free(dirmon);
}

struct fdirmon* get_fdirmon(struct proxy_private *proxy, const char* path, bool dir_only) {
        const char *file;
        int pathlen;

        if ((file=strrchr(path,'\\'))) {
                if (dir_only) {
                        pathlen = file - path;
                        file++;
                } else {
                        pathlen=strlen(path);
                }
        } else {
                file = path;
                pathlen = 0;
        }

        struct fdirmon *dirmon;
        /* see if we have a matching dirmon */
        DLIST_FIND(proxy->dirmons, dirmon, (strlen(dirmon->dir) == pathlen && fstrncmp(path, dirmon->dir, pathlen)==0));
        if (! dirmon) {
                int saved_timeout;

                DEBUG(5,("%s: allocating new dirmon for %s\n",__FUNCTION__,path));
                dirmon=talloc_zero(proxy, struct fdirmon);
                if (! dirmon) {
                        goto error;
                }
                if (! (dirmon->dir=talloc_strndup(dirmon, path, pathlen))) {
                        goto error;
                }
                if (! (dirmon->notify_io=talloc_zero(dirmon, union smb_notify))) {
                        goto error;
                }

                dirmon->dir_fnum=smbcli_nt_create_full(proxy->tree, dirmon->dir,
                                                       0, /* flags; assumed 0, this line was lost */
                                                       SEC_FILE_READ_DATA,
                                                       FILE_ATTRIBUTE_NORMAL,
                                                       NTCREATEX_SHARE_ACCESS_MASK,
                                                       NTCREATEX_DISP_OPEN,
                                                       NTCREATEX_OPTIONS_DIRECTORY,
                                                       NTCREATEX_IMPERSONATION_IMPERSONATION);

                if (dirmon->dir_fnum==65535) {
                        DEBUG(5,("%s: smbcli_nt_create_full %s failed\n",__FUNCTION__, dirmon->dir));
                        goto error;
                }

                saved_timeout = proxy->transport->options.request_timeout;
                /* request notify changes on cache before we start to fill it */
                dirmon->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
                dirmon->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
                dirmon->notify_io->nttrans.in.file.fnum=dirmon->dir_fnum;
                dirmon->notify_io->nttrans.in.recursive=false;
                dirmon->notify_io->nttrans.in.buffer_size=10240;
                proxy->transport->options.request_timeout = 0;
                dirmon->notify_req=smb_raw_changenotify_send(proxy->tree, dirmon->notify_io);
                /* Make the request hang around so we can tell if it needs cancelling */
                proxy->transport->options.request_timeout = saved_timeout;

                if (! dirmon->notify_req) {
                        goto error;
                } else {
                        struct ntvfs_request *req=NULL;
                        struct smbcli_request *c_req=dirmon->notify_req;
                        union smb_notify *io=dirmon->notify_io;
                        struct proxy_private *private=proxy;

                        talloc_reference(dirmon, dirmon->notify_req);
                        ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_dirmon_notify,
                                                    (void*) dirmon, c_req->async.private);
                        DLIST_ADD(private->dirmons, dirmon);
                }
        }

        return dirmon;
error:
        DEBUG(3,("%s: failed to allocate dirmon\n",__FUNCTION__));
        talloc_free(dirmon);
        return NULL;
}

bool dirmon_add_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data) {
        struct fdirmon_callback *callback=talloc_zero(dirmon, struct fdirmon_callback);
        if (! callback) {
                return false;
        }
        callback->data=data;
        callback->fn=fn;
        DLIST_ADD(dirmon->callbacks, callback);
        return true;
}

/* try and unify the cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
        (io->generic.level == RAW_OPEN_NTCREATEX && \
         io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) \
        ?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window)) \
        :(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))

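/* Illustrative (hypothetical) call from an open handler, once a proxy_file f
   and union smb_open *io are in hand:

        f->cache = cache_open(private->cache, f, io, f->oplock, private->cache_readahead);

   Note the expansion is an unparenthesised ?: expression, so it is safest
   used on its own as a whole expression like this. */
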
struct search_cache* find_partial_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
        struct search_cache* result;
        DLIST_FIND(search_cache, result,
                   (result->key.level == search_cache_key->level) &&
                   (result->key.data_level == search_cache_key->data_level) &&
                   (result->key.search_attrib == search_cache_key->search_attrib) &&
                   (result->key.flags == search_cache_key->flags) &&
                   (result->key.storage_type == search_cache_key->storage_type) &&
                   (fstrcmp(result->key.pattern, search_cache_key->pattern) == 0));
        DEBUG(5,("%s: found %p\n",__LOCATION__,result));
        return result;
}

struct search_cache* find_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
        struct search_cache* result = find_partial_search_cache(search_cache, search_cache_key);
        if (result && result->status == SEARCH_CACHE_COMPLETE) {
                DEBUG(5,("%s: found complete %p\n",__LOCATION__,result));
                return result;
        }
        DEBUG(5,("%s: found INCOMPLETE %p\n",__LOCATION__,result));
        return NULL;
}

uint16_t smbsrv_fnum(struct ntvfs_handle *h) {
        uint16_t fnum;
        smbsrv_push_fnum((uint8_t *)&fnum, 0, h);
        return SVAL(&fnum, 0);
}

static void async_search_cache_notify(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct ntvfs_request *req = async->req;
        struct search_cache *search_cache;
        NTSTATUS status;

        DEBUG(5,("%s: search cache %p invalidated\n",__LOCATION__, (void*)async->f));

        search_cache = talloc_get_type_abort((void*)async->f, struct search_cache);

        status = smb_raw_changenotify_recv(c_req, req, async->parms);

        DEBUG(5,("%s: update status is %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));

        search_cache->notify_req=NULL;
        /* dispose of the search_cache */
        search_cache->status=SEARCH_CACHE_DEAD;
        /* So nothing can find it even if there are still in-use references */
        DLIST_REMOVE(search_cache->proxy->search_caches, search_cache);
        /* free it */
        //talloc_steal(async, search_cache);
        //talloc_unlink(async->proxy, search_cache);
        if (search_cache->dir_fnum!=65535) {
                struct smbcli_request *req;
                union smb_close close_parms;
                close_parms.close.level = RAW_CLOSE_CLOSE;
                close_parms.close.in.file.fnum = search_cache->dir_fnum;
                close_parms.close.in.write_time = 0;

                /* the destructor may be called from a notify response and won't be able
                   to wait on this close response, not that we care anyway */
                req=smb_raw_close_send(search_cache->proxy->tree, &close_parms);

                DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, search_cache->dir_fnum, req));
                search_cache->dir_fnum=65535;
        }
        talloc_free(async);
}

/*
  destroy a search handle
*/
static int search_handle_destructor(struct search_handle *s)
{
        DLIST_REMOVE(s->proxy->search_handles, s);
        DEBUG(5,("%s: handle destructor %p\n",__LOCATION__,s));
        return 0;
}

static int search_cache_destructor(struct search_cache *s)
{
        NTSTATUS status;

        DLIST_REMOVE(s->proxy->search_caches, s);
        DEBUG(5,("%s: cache destructor %p\n",__LOCATION__,s));
        if (s->notify_req) {
                status=smb_raw_ntcancel(s->notify_req);
                s->notify_req=NULL;
                DEBUG(5,("%s: Cancel notification %s\n",__LOCATION__,get_friendly_nt_error_msg (status)));
        }
        if (s->dir_fnum!=65535) {
                struct smbcli_request *req;
                union smb_close close_parms;
                close_parms.close.level = RAW_CLOSE_CLOSE;
                close_parms.close.in.file.fnum = s->dir_fnum;
                close_parms.close.in.write_time = 0;

                /* the destructor may be called from a notify response and won't be able
                   to wait on this close response, not that we care anyway */
                req=smb_raw_close_send(s->proxy->tree, &close_parms);

                DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, s->dir_fnum, req));
                s->dir_fnum=65535;
        }
        return 0;
}

struct search_cache* new_search_cache(struct proxy_private *private, struct search_cache_key* key) {
        /* need to opendir the folder being searched so we can get a notification */
        uint16_t dir_fnum=65535;
        struct search_cache *search_cache=NULL;

        search_cache=talloc_zero(private, struct search_cache);
        DEBUG(5,("%s: Start new cache %p for %s\n",__LOCATION__, search_cache, key->pattern));
        if (! search_cache) {
                return NULL;
        }
        search_cache->proxy=private;
        if (! (search_cache->dir=talloc_dirname(search_cache, key->pattern))) {
                goto error;
        }
        if (! (search_cache->notify_io=talloc_zero(search_cache, union smb_notify))) {
                goto error;
        }
        search_cache->key=*key;
        /* make a private copy of the pattern now that we need it AND have something to own it */
        if (! (search_cache->key.pattern=talloc_strdup(search_cache, search_cache->key.pattern))) {
                goto error;
        }
        dir_fnum=smbcli_nt_create_full(private->tree, search_cache->dir,
                                       0, /* flags; assumed 0, this line was lost */
                                       SEC_FILE_READ_DATA,
                                       FILE_ATTRIBUTE_NORMAL,
                                       NTCREATEX_SHARE_ACCESS_MASK,
                                       NTCREATEX_DISP_OPEN,
                                       NTCREATEX_OPTIONS_DIRECTORY,
                                       NTCREATEX_IMPERSONATION_IMPERSONATION);
        DEBUG(5,("%s: %d=opendir on %s\n",__LOCATION__,dir_fnum, search_cache->dir));
        if (dir_fnum==65535) {
                goto error;
        }
        /* The destructor will close the handle */
        talloc_set_destructor(search_cache, search_cache_destructor);
        search_cache->dir_fnum=dir_fnum;
        DEBUG(5,("%s: Start new cache %p, dir_fnum %d\n",__LOCATION__, search_cache, dir_fnum));

        {
                int saved_timeout = private->transport->options.request_timeout;

                /* request notify changes on cache before we start to fill it */
                search_cache->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
                search_cache->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
                search_cache->notify_io->nttrans.in.file.fnum=dir_fnum;
                search_cache->notify_io->nttrans.in.recursive=false;
                search_cache->notify_io->nttrans.in.buffer_size=1024;
                private->transport->options.request_timeout = 0;
                search_cache->notify_req=smb_raw_changenotify_send(private->tree, search_cache->notify_io);
                /* Make the request hang around so we can tell if it needs cancelling */
                private->transport->options.request_timeout = saved_timeout;

                if (! search_cache->notify_req) {
                        goto error;
                } else {
                        struct ntvfs_request *req=NULL;
                        struct smbcli_request *c_req=search_cache->notify_req;
                        union smb_notify *io=search_cache->notify_io;

                        talloc_reference(search_cache, search_cache->notify_req);
                        ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_search_cache_notify,
                                                    (void*) search_cache, c_req->async.private);
                        DLIST_ADD_END(private->search_caches, search_cache, struct search_cache*);
                }
        }

        return search_cache;
error:
        talloc_free(search_cache);
        return NULL;
}

/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
                             struct ntvfs_request *req, union smb_unlink *unl)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        /* see if the front end will allow us to perform this
           function asynchronously. */
        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_unlink(private->tree, unl);
        }

        c_req = smb_raw_unlink_send(private->tree, unl);

        SIMPLE_ASYNC_TAIL;
}

/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
        struct async_info *async = c_req->async.private;
        struct ntvfs_request *req = async->req;
        req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
        talloc_free(async);
        req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
                            struct ntvfs_request *req, union smb_ioctl *io)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
            && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
                return proxy_rpclite(ntvfs, req, io);
        }

        SETUP_PID_AND_FILE;

        /* see if the front end will allow us to perform this
           function asynchronously. */
        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_ioctl(private->tree, req, io);
        }

        c_req = smb_raw_ioctl_send(private->tree, io);

        ASYNC_RECV_TAIL(io, async_ioctl);
}

/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
                              struct ntvfs_request *req, union smb_chkpath *cp)
{
        struct proxy_private *private = ntvfs->private_data;
        struct smbcli_request *c_req;

        SETUP_PID;

        if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                return smb_raw_chkpath(private->tree, cp);
        }

        c_req = smb_raw_chkpath_send(private->tree, cp);

        SIMPLE_ASYNC_TAIL;
}

static bool find_search_cache_item(const char* path,
                                   struct search_cache **search_cache,
                                   struct search_cache_item **item) {
        struct search_cache *s=*search_cache;
        struct search_cache_item *i=*item;
        const char* file;
        int dir_len;

        /* see if we can satisfy from a directory cache */
        DEBUG(5,("%s: Looking for pathinfo: '%s'\n",__LOCATION__,path));
        if ((file=strrchr(path,'\\'))) {
                dir_len = file - path;
                /* point past the \ */
                file++;
        } else {
                file = path;
                dir_len = 0;
        }

        /* convert an empty path to . so we can find it in the cache */
        if (! *file) {
                file=".";
        }

        DEBUG(5,("%s: Path='%s' File='%s'\n",__LOCATION__,path, file));

        /* Note we don't care if the cache is partial, as long as it has a hit */
        while(s) {
                /* One day we may support all directory levels */
                DLIST_FIND(s, s, (s->key.data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
                                  strlen(s->dir)==dir_len &&
                                  fstrncmp(s->dir, path, dir_len)==0));
                if (! s) {
                        break;
                }
                DEBUG(5,("%s: found cache %p\n",__LOCATION__,s));
                /* search s for io->generic.in.file.path */
                DLIST_FIND(s->items, i, (i->data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
                                         ((i->file->both_directory_info.name.s &&
                                           fstrcmp(i->file->both_directory_info.name.s, file) ==0) ||
                                          (i->file->both_directory_info.short_name.s &&
                                           fstrcmp(i->file->both_directory_info.short_name.s, file)==0)
                                         )));
                DEBUG(5,("%s: found cache %p item %p\n",__LOCATION__,s, i));
                if (i) {
                        *item=i;
                        *search_cache=s;
                        return true;
                }
                s=s->next;
                DEBUG(5,("%s: continue search at %p\n",__LOCATION__,s));
        }
        *item=i;
        *search_cache=s;
        return false;
}

static void proxy_set_cache_info(struct file_metadata *metadata, struct proxy_GetInfo *r) {
        /* only set this if it was responded... I think they all are responded... */
        metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION) /*||
            NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)*/) {
                metadata->info_data.create_time=r->out.info_data[0].create_time;
                metadata->info_data.access_time=r->out.info_data[0].access_time;
                metadata->info_data.write_time=r->out.info_data[0].write_time;
                metadata->info_data.change_time=r->out.info_data[0].change_time;
                metadata->info_data.attrib=r->out.info_data[0].attrib;
                metadata->valid|=valid_RAW_FILEINFO_BASIC_INFORMATION;
        }
        metadata->info_data.status_RAW_FILEINFO_ALL_INFO=r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)) {
                metadata->info_data.ea_size=r->out.info_data[0].ea_size;
                metadata->info_data.alloc_size=r->out.info_data[0].alloc_size;
                metadata->info_data.size=r->out.info_data[0].size;
                metadata->info_data.nlink=r->out.info_data[0].nlink;
                /* Are we duping this right? Would talloc_reference be ok? */
                //f->metadata->info_data.fname=
                metadata->info_data.fname.s=talloc_memdup(metadata, r->out.info_data[0].fname.s, r->out.info_data[0].fname.count);
                metadata->info_data.fname.count=r->out.info_data[0].fname.count;
                metadata->info_data.delete_pending=r->out.info_data[0].delete_pending;
                metadata->info_data.directory=r->out.info_data[0].directory;
                metadata->valid|=valid_RAW_FILEINFO_ALL_INFO | valid_RAW_FILEINFO_STANDARD_INFO;
        }
        metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO=r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO)) {
                metadata->info_data.compressed_size=r->out.info_data[0].compressed_size;
                metadata->info_data.format=r->out.info_data[0].format;
                metadata->info_data.unit_shift=r->out.info_data[0].unit_shift;
                metadata->info_data.chunk_shift=r->out.info_data[0].chunk_shift;
                metadata->info_data.cluster_shift=r->out.info_data[0].cluster_shift;
                metadata->valid|=valid_RAW_FILEINFO_COMPRESSION_INFO;
        }
        metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION)) {
                metadata->info_data.file_id=r->out.info_data[0].file_id;
                metadata->valid|=valid_RAW_FILEINFO_INTERNAL_INFORMATION;
        }
        metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION)) {
                metadata->info_data.access_flags=r->out.info_data[0].access_flags;
                metadata->valid|=valid_RAW_FILEINFO_ACCESS_INFORMATION;
        }
        metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION)) {
                metadata->info_data.position=r->out.info_data[0].position;
                metadata->valid|=valid_RAW_FILEINFO_POSITION_INFORMATION;
        }
        metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION)) {
                metadata->info_data.mode=r->out.info_data[0].mode;
                metadata->valid|=valid_RAW_FILEINFO_MODE_INFORMATION;
        }
        metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION)) {
                metadata->info_data.alignment_requirement=r->out.info_data[0].alignment_requirement;
                metadata->valid|=valid_RAW_FILEINFO_ALIGNMENT_INFORMATION;
        }
        metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION)) {
                metadata->info_data.reparse_tag=r->out.info_data[0].reparse_tag;
                metadata->info_data.reparse_attrib=r->out.info_data[0].reparse_attrib;
                metadata->valid|=valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
        }
        metadata->info_data.status_RAW_FILEINFO_STREAM_INFO=r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO;
        if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO)) {
                metadata->info_data.num_streams=r->out.info_data[0].num_streams;
                talloc_free(metadata->info_data.streams);
                metadata->info_data.streams=talloc_steal(metadata, r->out.info_data[0].streams);
                metadata->valid|=valid_RAW_FILEINFO_STREAM_INFO;
        }
}

1381 /* satisfy a file-info request from cache */
1382 NTSTATUS proxy_cache_info(union smb_fileinfo *io, struct file_metadata *metadata, bool *valid)
1384 #define SET_VALID(FLAG) do { \
1385 if (valid) *valid=!!(metadata->valid & valid_ ## FLAG); \
1386 DEBUG(5,("%s check %s=%d (%x)\n",__FUNCTION__, #FLAG, !!(metadata->valid & valid_ ## FLAG), metadata->valid)); \
1387 } while(0)
1388 /* and now serve the request from the cache */
1389 switch(io->generic.level) {
1390 case RAW_FILEINFO_BASIC_INFORMATION:
1391 SET_VALID(RAW_FILEINFO_BASIC_INFORMATION);
1392 io->basic_info.out.create_time=metadata->info_data.create_time;
1393 io->basic_info.out.access_time=metadata->info_data.access_time;
1394 io->basic_info.out.write_time=metadata->info_data.write_time;
1395 io->basic_info.out.change_time=metadata->info_data.change_time;
1396 io->basic_info.out.attrib=metadata->info_data.attrib;
1397 return metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION;
1398 case RAW_FILEINFO_ALL_INFO:
1399 SET_VALID(RAW_FILEINFO_ALL_INFO);
1400 io->all_info.out.create_time=metadata->info_data.create_time;
1401 io->all_info.out.access_time=metadata->info_data.access_time;
1402 io->all_info.out.write_time=metadata->info_data.write_time;
1403 io->all_info.out.change_time=metadata->info_data.change_time;
1404 io->all_info.out.attrib=metadata->info_data.attrib;
1405 io->all_info.out.alloc_size=metadata->info_data.alloc_size;
1406 io->all_info.out.size=metadata->info_data.size;
1407 io->all_info.out.directory=metadata->info_data.directory;
1408 io->all_info.out.nlink=metadata->info_data.nlink;
1409 io->all_info.out.delete_pending=metadata->info_data.delete_pending;
1410 io->all_info.out.fname.s=metadata->info_data.fname.s;
1411 io->all_info.out.fname.private_length=metadata->info_data.fname.count;
1412 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1413 case RAW_FILEINFO_STANDARD_INFO:
1414 case RAW_FILEINFO_STANDARD_INFORMATION:
1415 SET_VALID(RAW_FILEINFO_ALL_INFO);
1416 io->standard_info.out.alloc_size=metadata->info_data.alloc_size;
1417 io->standard_info.out.size=metadata->info_data.size;
1418 io->standard_info.out.directory=metadata->info_data.directory;
1419 io->standard_info.out.nlink=metadata->info_data.nlink; /* may be wrong */
1420 io->standard_info.out.delete_pending=metadata->info_data.delete_pending;
1421 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1422 case RAW_FILEINFO_EA_INFO:
1423 case RAW_FILEINFO_EA_INFORMATION:
1424 SET_VALID(RAW_FILEINFO_ALL_INFO);
1425 io->ea_info.out.ea_size=metadata->info_data.ea_size;
1426 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1427 case RAW_FILEINFO_COMPRESSION_INFO:
1428 SET_VALID(RAW_FILEINFO_COMPRESSION_INFO);
1429 io->compression_info.out.compressed_size=metadata->info_data.compressed_size;
1430 io->compression_info.out.format=metadata->info_data.format;
1431 io->compression_info.out.unit_shift=metadata->info_data.unit_shift;
1432 io->compression_info.out.chunk_shift=metadata->info_data.chunk_shift;
1433 io->compression_info.out.cluster_shift=metadata->info_data.cluster_shift;
1434 return metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO;
1435 case RAW_FILEINFO_INTERNAL_INFORMATION:
1436 SET_VALID(RAW_FILEINFO_INTERNAL_INFORMATION);
1437 io->internal_information.out.file_id=metadata->info_data.file_id;
1438 return metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION;
1439 case RAW_FILEINFO_ACCESS_INFORMATION:
1440 SET_VALID(RAW_FILEINFO_ACCESS_INFORMATION);
1441 io->access_information.out.access_flags=metadata->info_data.access_flags;
1442 return metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION;
1443 case RAW_FILEINFO_POSITION_INFORMATION:
1444 SET_VALID(RAW_FILEINFO_POSITION_INFORMATION);
1445 io->position_information.out.position=metadata->info_data.position;
1446 return metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION;
1447 case RAW_FILEINFO_MODE_INFORMATION:
1448 SET_VALID(RAW_FILEINFO_MODE_INFORMATION);
1449 io->mode_information.out.mode=metadata->info_data.mode;
1450 return metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION;
1451 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1452 SET_VALID(RAW_FILEINFO_ALIGNMENT_INFORMATION);
1453 io->alignment_information.out.alignment_requirement=metadata->info_data.alignment_requirement;
1454 return metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
1455 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1456 SET_VALID(RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
1457 io->attribute_tag_information.out.reparse_tag=metadata->info_data.reparse_tag;
1458 io->attribute_tag_information.out.attrib=metadata->info_data.reparse_attrib;
1459 return metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
1460 case RAW_FILEINFO_STREAM_INFO:
1461 case RAW_FILEINFO_STREAM_INFORMATION:
1462 SET_VALID(RAW_FILEINFO_STREAM_INFO);
1463 io->stream_info.out.num_streams=metadata->info_data.num_streams;
1464 if (metadata->info_data.num_streams > 0) {
1465 io->stream_info.out.streams = talloc_zero_array(io, struct stream_struct, metadata->info_data.num_streams);
1466 int c;
1467 if (! io->stream_info.out.streams) {
1468 if (valid) *valid=false;
1469 io->stream_info.out.num_streams=0;
1470 return NT_STATUS_NO_MEMORY;
1471 }
1472 for (c=0; c<io->stream_info.out.num_streams; c++) {
1473 io->stream_info.out.streams[c].size = metadata->info_data.streams[c].size;
1474 io->stream_info.out.streams[c].alloc_size = metadata->info_data.streams[c].alloc_size;
1475 io->stream_info.out.streams[c].stream_name.s = talloc_reference(io, metadata->info_data.streams[c].stream_name.s);
1476 io->stream_info.out.streams[c].stream_name.private_length = metadata->info_data.streams[c].stream_name.count;
1477 }
1478 } else {
1479 io->stream_info.out.streams=NULL;
1480 }
1481 return metadata->info_data.status_RAW_FILEINFO_STREAM_INFO;
1482 default:
1483 DEBUG(5,("%s: Unknown request\n",__FUNCTION__));
1484 if (valid) *valid=false;
1485 return NT_STATUS_INTERNAL_ERROR;
1486 }
1487 }
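/* A minimal sketch (hypothetical caller, not part of this backend) of the
   proxy_cache_info() contract implemented above: the NTSTATUS returned is
   the cached status of the underlying RAW_FILEINFO fetch, and *valid says
   whether the cache could answer this info level at all; the status is
   only meaningful when valid comes back true. */
#if 0
static NTSTATUS example_try_cached_info(union smb_fileinfo *io,
					struct file_metadata *metadata)
{
	bool valid=false;
	NTSTATUS status=proxy_cache_info(io, metadata, &valid);
	if (valid) return status; /* answered entirely from cached metadata */
	/* otherwise go to the server (or promote to a proxy GetInfo) */
	return NT_STATUS_NOT_SUPPORTED;
}
#endif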
1489 /*
1490 a handler for async qpathinfo replies
1491 */
1492 static void async_qpathinfo(struct smbcli_request *c_req)
1494 struct async_info *async = c_req->async.private;
1495 struct ntvfs_request *req = async->req;
1496 req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
1497 talloc_free(async);
1498 req->async_states->send_fn(req);
1501 static NTSTATUS async_proxy_qpathinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1503 struct proxy_private *private = async->proxy;
1504 struct smbcli_request *c_req = async->c_req;
1505 struct ntvfs_request *req = async->req;
1506 struct proxy_file *f = talloc_get_type_abort(async->f, struct proxy_file);
1507 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1508 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1510 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1511 req->async_states->status=status;
1513 /* It's good to check the overall status, but we also need to check the status of each sub-message */
1514 NT_STATUS_NOT_OK_RETURN(status);
1516 /* populate the cache, and then fill the request from the cache */
1517 /* Assuming that r->in.count == 1 */
1518 SMB_ASSERT(r->out.count==1);
1519 DEBUG(5,("%s: Combined status of meta request: %s\n",__LOCATION__, get_friendly_nt_error_msg (r->out.info_data[0].status)));
1520 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1522 DEBUG(5,("%s: will set cache item=%p metadata=%p r=%p\n",__LOCATION__, f, f?f->metadata:NULL, r));
1523 proxy_set_cache_info(f->metadata, r);
1525 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1526 DEBUG(5,("%s: set final response of original request to: %s\n",__LOCATION__, get_friendly_nt_error_msg (req->async_states->status)));
1528 return req->async_states->status;
1529 }
1531 static void async_qpathinfo_notify(void* data, struct fdirmon* dirmon) {
1532 struct proxy_file* file=data;
1534 DEBUG(5,("%s: qpathinfo cache %s destroyed\n",__LOCATION__,file->filename));
1535 DLIST_REMOVE(file->proxy->closed_files, file);
1536 talloc_free(file);
1537 }
1539 /*
1540 return info on a pathname
1541 */
1542 static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
1543 struct ntvfs_request *req, union smb_fileinfo *io)
1545 struct proxy_private *private = ntvfs->private_data;
1546 struct smbcli_request *c_req;
1547 struct proxy_file *f=NULL;
1548 const char* path;
1550 SETUP_PID;
1552 /* Look for closed files */
1553 if (private->enabled_qpathinfo) {
1554 int len=strlen(io->generic.in.file.path)+1;
1555 DEBUG(5,("%s: Looking for cached metadata for: %s\n",__LOCATION__,io->generic.in.file.path));
1556 DLIST_FIND(private->closed_files, f,
1557 (len==f->filename_size && fstrncmp(io->generic.in.file.path, f->filename, f->filename_size)==0));
1558 if (f) {
1559 /* stop cache going away while we are using it */
1560 talloc_reference(req, f);
1561 }
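/* A sketch (illustration only, never called) of the key semantics the
   DLIST_FIND above relies on: keys are the path plus its terminating NUL
   (hence the +1), compared case-insensitively because fstrncmp is defined
   as strncasecmp. */
#if 0
static bool example_closed_file_matches(const struct proxy_file *f,
					const char *path)
{
	int len=strlen(path)+1; /* include the NUL, mirroring filename_size */
	return len==f->filename_size &&
		fstrncmp(path, f->filename, f->filename_size)==0;
}
#endif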
1563 /* upgrade the request */
1564 switch(io->generic.level) {
1565 case RAW_FILEINFO_STANDARD_INFO:
1566 case RAW_FILEINFO_STANDARD_INFORMATION:
1567 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1568 case RAW_FILEINFO_ALL_INFO:
1569 case RAW_FILEINFO_COMPRESSION_INFO:
1570 case RAW_FILEINFO_INTERNAL_INFORMATION:
1571 case RAW_FILEINFO_ACCESS_INFORMATION:
1572 case RAW_FILEINFO_POSITION_INFORMATION:
1573 case RAW_FILEINFO_MODE_INFORMATION:
1574 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1575 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1576 case RAW_FILEINFO_STREAM_INFO:
1577 case RAW_FILEINFO_STREAM_INFORMATION:
1578 case RAW_FILEINFO_EA_INFO:
1579 case RAW_FILEINFO_EA_INFORMATION:
1580 DEBUG(5,("%s: item is %p\n",__FUNCTION__, f));
1581 if (f && f->metadata) {
1582 NTSTATUS status;
1583 bool valid;
1584 DEBUG(5,("%s: Using cached metadata %x (item=%p)\n",__FUNCTION__, f->metadata->valid, f));
1585 status=proxy_cache_info(io, f->metadata, &valid);
1586 if (valid) return status;
1587 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1589 /* construct an item to hold the cache if we need to */
1590 if (! f && private->enabled_cache_info && PROXY_REMOTE_SERVER(private) && (f=talloc_zero(private, struct proxy_file))) {
1591 struct fdirmon* dirmon;
1592 dirmon=get_fdirmon(private, io->generic.in.file.path, true);
1593 if (f && dirmon) {
1594 f->proxy=private;
1595 dirmon_add_callback(dirmon, async_qpathinfo_notify, f);
1597 f->filename=talloc_strdup(f, io->generic.in.file.path);
1598 f->filename_size=strlen(f->filename)+1;
1599 f->metadata=talloc_zero(f, struct file_metadata);
1600 /* should not really add unless we succeeded */
1601 DLIST_ADD(private->closed_files, f);
1602 } else {
1603 talloc_free(f);
1604 f=NULL;
1607 if (f && f->metadata && private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1608 struct proxy_GetInfo *r;
1609 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1611 r=talloc_zero(req, struct proxy_GetInfo);
1612 NT_STATUS_HAVE_NO_MEMORY(r);
1614 r->in.count=1;
1615 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1616 r->in.info_tags[0].tag_type=TAG_TYPE_PATH_INFO;
1617 /* 1+ to get the null */
1618 r->in.info_tags[0].info_tag.path.count=1+strlen(io->generic.in.file.path);
1619 r->in.info_tags[0].info_tag.path.s=io->generic.in.file.path;
1620 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1621 /* the callback handler will populate the cache and respond from the cache */
1622 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qpathinfo, NT_STATUS_INTERNAL_ERROR);
1624 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1625 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1626 return sync_chain_handler(c_req);
1627 } else {
1628 void* f=NULL; /* for the ASYNC_RECV_TAIL_HANDLER_ORPHAN macro */
1629 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1630 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1631 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1632 return NT_STATUS_OK;
1637 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1638 return smb_raw_pathinfo(private->tree, req, io);
1641 c_req = smb_raw_pathinfo_send(private->tree, io);
1643 ASYNC_RECV_TAIL(io, async_qpathinfo);
1644 }
1646 /*
1647 a handler for async qfileinfo replies
1648 */
1649 static void async_qfileinfo(struct smbcli_request *c_req)
1651 struct async_info *async = c_req->async.private;
1652 struct ntvfs_request *req = async->req;
1653 req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1654 talloc_free(async);
1655 req->async_states->send_fn(req);
1656 }
1658 static NTSTATUS async_proxy_qfileinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1660 struct proxy_private *private = async->proxy;
1661 struct smbcli_request *c_req = async->c_req;
1662 struct ntvfs_request *req = async->req;
1663 struct proxy_file *f = async->f;
1664 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1665 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1667 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1668 req->async_states->status=status;
1670 NT_STATUS_NOT_OK_RETURN(status);
1672 /* populate the cache, and then fill the request from the cache */
1673 /* Assuming that r->in.count == 1 */
1674 SMB_ASSERT(r->out.count==1);
1675 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1677 proxy_set_cache_info(f->metadata, r);
1679 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1681 return req->async_states->status;
1682 }
1684 /*
1685 query info on an open file
1686 */
1687 static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
1688 struct ntvfs_request *req, union smb_fileinfo *io)
1690 struct proxy_private *private = ntvfs->private_data;
1691 struct smbcli_request *c_req;
1692 struct proxy_file *f;
1693 bool valid=false;
1694 NTSTATUS status;
1696 SETUP_PID;
1698 SETUP_FILE_HERE(f);
1700 /* upgrade the request */
1701 switch(io->generic.level) {
1702 case RAW_FILEINFO_STANDARD_INFO:
1703 case RAW_FILEINFO_STANDARD_INFORMATION:
1704 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1705 case RAW_FILEINFO_ALL_INFO:
1706 case RAW_FILEINFO_COMPRESSION_INFO:
1707 case RAW_FILEINFO_INTERNAL_INFORMATION:
1708 case RAW_FILEINFO_ACCESS_INFORMATION:
1709 case RAW_FILEINFO_POSITION_INFORMATION:
1710 case RAW_FILEINFO_MODE_INFORMATION:
1711 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1712 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1713 case RAW_FILEINFO_STREAM_INFO:
1714 case RAW_FILEINFO_STREAM_INFORMATION:
1715 case RAW_FILEINFO_EA_INFO:
1716 case RAW_FILEINFO_EA_INFORMATION:
1717 DEBUG(5,("%s: oplock is %d\n",__FUNCTION__, f->oplock));
1718 if (f->oplock) {
1719 DEBUG(5,("%s: %p Using cached metadata %x (fnum=%d)\n",__FUNCTION__, f, f->metadata->valid, f->fnum));
1720 status=proxy_cache_info(io, f->metadata, &valid);
1721 if (valid) return status;
1722 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1724 if (private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1725 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1726 struct proxy_GetInfo *r=talloc_zero(req, struct proxy_GetInfo);
1727 NT_STATUS_HAVE_NO_MEMORY(r);
1728 r->in.count=1;
1729 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1730 r->in.info_tags[0].tag_type=TAG_TYPE_FILE_INFO;
1731 r->in.info_tags[0].info_tag.fnum=io->generic.in.file.fnum;
1732 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1733 /* the callback handler will populate the cache and respond from the cache */
1734 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qfileinfo, NT_STATUS_INTERNAL_ERROR);
1736 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1737 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1738 return sync_chain_handler(c_req);
1739 } else {
1740 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1741 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1742 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1743 return NT_STATUS_OK;
1748 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1749 return smb_raw_fileinfo(private->tree, req, io);
1752 c_req = smb_raw_fileinfo_send(private->tree, io);
1754 ASYNC_RECV_TAIL(io, async_qfileinfo);
1755 }
1757 /*
1758 set info on a pathname
1759 */
1760 static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
1761 struct ntvfs_request *req, union smb_setfileinfo *st)
1763 struct proxy_private *private = ntvfs->private_data;
1764 struct smbcli_request *c_req;
1766 SETUP_PID;
1768 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1769 return smb_raw_setpathinfo(private->tree, st);
1772 c_req = smb_raw_setpathinfo_send(private->tree, st);
1774 SIMPLE_ASYNC_TAIL;
1775 }
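/* The shape above (and in mkdir/rmdir/rename below) is the plain
   passthrough pattern of this backend: synchronous callers get the
   blocking smb_raw_*() call, async-capable callers get the _send()
   variant with SIMPLE_ASYNC_TAIL wiring the reply back. A sketch with a
   hypothetical call (smb_raw_frob does not exist) for comparison: */
#if 0
static NTSTATUS proxy_frob(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, union smb_frob *fb)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_frob(private->tree, fb);
	}
	c_req = smb_raw_frob_send(private->tree, fb);
	SIMPLE_ASYNC_TAIL;
}
#endif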
1778 /*
1779 a handler for async open replies
1780 */
1781 static void async_open(struct smbcli_request *c_req)
1783 struct async_info *async = c_req->async.private;
1784 struct proxy_private *proxy = async->proxy;
1785 struct ntvfs_request *req = async->req;
1786 struct proxy_file *f = async->f;
1787 union smb_open *io = async->parms;
1788 union smb_handle *file;
1790 talloc_free(async);
1791 req->async_states->status = smb_raw_open_recv(c_req, req, io);
1792 SMB_OPEN_OUT_FILE(io, file);
1793 f->fnum = file->fnum;
1794 file->ntvfs = NULL;
1795 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1796 req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
1797 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1798 file->ntvfs = f->h;
1799 DLIST_ADD(proxy->files, f);
1801 f->oplock=io->generic.out.oplock_level;
1803 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1804 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1805 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1807 if (proxy->cache_enabled) {
1808 struct search_cache_item *item=NULL;
1809 struct search_cache *s=proxy->search_caches;
1810 /* If we are still monitoring the file for changes we can
1811 retain the previous cache state, [if it is more recent than the monitor]! */
1812 /* yeah yeah what if there is more than one.... :-( */
1813 if (! (io->generic.level == RAW_OPEN_NTCREATEX &&
1814 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) &&
1815 find_search_cache_item(SMB_OPEN_IN_FILE(io), &s, &item) && item->cache) {
1816 DEBUG(5,("%s: Using cached file cache\n",__LOCATION__));
1817 f->cache=talloc_reference(f, item->cache);
1818 cache_beopen(f->cache);
1819 if (item->metadata) {
1820 *(f->metadata)=*(item->metadata);
1821 f->metadata->info_data.fname.s=talloc_strdup(f, item->metadata->info_data.fname.s);
1822 f->metadata->info_data.fname.count=item->metadata->info_data.fname.count;
1824 f->metadata->info_data.streams=talloc_zero_array(f, struct info_stream, f->metadata->info_data.num_streams);
1825 if (f->metadata->info_data.streams) {
1826 int c;
1827 for(c=0; c < f->metadata->info_data.num_streams; c++) {
1828 f->metadata->info_data.streams[c].size = item->metadata->info_data.streams[c].size;
1829 f->metadata->info_data.streams[c].alloc_size = item->metadata->info_data.streams[c].alloc_size;
1830 f->metadata->info_data.streams[c].stream_name.s= talloc_strdup(f, item->metadata->info_data.streams[c].stream_name.s);
1831 f->metadata->info_data.streams[c].stream_name.count=item->metadata->info_data.streams[c].stream_name.count;
1834 f->metadata->count=1;
1836 } else {
1837 f->cache=cache_open(proxy->cache, f, io, f->oplock, proxy->cache_readahead);
1838 if (proxy->fake_valid) {
1839 cache_handle_validated(f, cache_handle_len(f));
1841 if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
1842 if (item) {
1843 item->cache = talloc_reference(item, f->cache);
1844 item->metadata=talloc_reference(item, f->metadata);
1845 DEBUG(5,("%s: Caching file cache for later\n",__LOCATION__));
1846 } else {
1847 DEBUG(5,("%s: NOT Caching file cache for later\n",__LOCATION__));
1852 failed:
1853 req->async_states->send_fn(req);
1854 }
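/* Note the ordering above: metadata is only loaded once the open has
   succeeded, and f->oplock records the level the server actually granted;
   later paths (proxy_read and read_ahead below) treat a non-zero oplock
   as licence to trust info_data.size without another server round trip. */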
1856 /*
1857 open a file
1858 */
1859 static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
1860 struct ntvfs_request *req, union smb_open *io)
1862 struct proxy_private *private = ntvfs->private_data;
1863 struct smbcli_request *c_req;
1864 struct ntvfs_handle *h;
1865 struct proxy_file *f, *clone;
1866 NTSTATUS status;
1867 void *filename;
1868 int filename_size;
1869 uint16_t fnum;
1871 SETUP_PID;
1873 if (io->generic.level != RAW_OPEN_GENERIC &&
1874 private->map_generic) {
1875 return ntvfs_map_open(ntvfs, req, io);
1878 status = ntvfs_handle_new(ntvfs, req, &h);
1879 #warning should we free this handle if the open fails?
1880 NT_STATUS_NOT_OK_RETURN(status);
1882 f = talloc_zero(h, struct proxy_file);
1883 NT_STATUS_HAVE_NO_MEMORY(f);
1884 f->proxy=private;
1886 /* If the file is being opened read only and we already have a read-only
1887 handle for this file, then just clone and ref-count the handle */
1888 /* First calculate the filename key */
1889 if (io->generic.level == RAW_OPEN_NTCREATEX &&
1890 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) {
1891 filename_size=sizeof(uint64_t);
1892 filename=io->generic.in.fname;
1893 } else {
1894 filename=SMB_OPEN_IN_FILE(io);
1895 filename_size=strlen(filename)+1;
1897 f->filename=talloc_memdup(f, filename, filename_size);
1898 f->filename_size=filename_size;
1899 f->h = h;
1900 f->can_clone= (io->generic.in.access_mask & NTCREATEX_SHARE_ACCESS_MASK) == NTCREATEX_SHARE_ACCESS_READ &&
1901 (io->generic.in.impersonation == NTCREATEX_IMPERSONATION_IMPERSONATION) &&
1902 (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY) == 0 &&
1903 (io->generic.in.open_disposition != NTCREATEX_DISP_CREATE) &&
1904 (io->generic.in.open_disposition != NTCREATEX_DISP_SUPERSEDE);
1905 /* see if we have a matching open file */
1906 clone=NULL;
1907 if (f->can_clone) for (clone=private->files; clone; clone=clone->next) {
1908 if (clone->can_clone && filename_size == clone->filename_size &&
1909 memcmp(filename, clone->filename, filename_size)==0) {
1910 break;
1914 /* if clone is not null, then we found a match */
1915 if (private->enabled_open_clone && clone) {
1916 union smb_handle *file;
1918 DEBUG(5,("%s: clone handle %d\n",__FUNCTION__,clone->fnum));
1919 SMB_OPEN_OUT_FILE(io, file);
1920 f->fnum = clone->fnum;
1921 file->ntvfs = NULL;
1922 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1923 NT_STATUS_NOT_OK_RETURN(status);
1924 file->ntvfs = f->h;
1925 DLIST_ADD(private->files, f);
1926 /* but be sure to share the same metadata cache */
1927 f->metadata=talloc_reference(f, clone->metadata);
1928 f->metadata->count++;
1929 f->oplock=clone->oplock;
1930 f->cache=talloc_reference(f, clone->cache);
1931 /* We don't need to reduce the oplocks for both files if we are read-only */
1932 /* if (clone->oplock==EXCLUSIVE_OPLOCK_RETURN ||
1933 clone->oplock==BATCH_OPLOCK_RETURN) {
1934 DEBUG(5,("%s: Breaking clone oplock from %d\n",__LOCATION__, clone->oplock));
1935 clone->oplock==LEVEL_II_OPLOCK_RETURN;
1936 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_LEVEL_II);
1937 //if (!NT_STATUS_IS_OK(status)) result=false;
1938 } else if (clone->oplock==LEVEL_II_OPLOCK_RETURN) {
1939 DEBUG(5,("%s: Breaking clone oplock from %d, cache no longer valid\n",__LOCATION__, clone->oplock));
1940 cache_handle_stale(f);
1941 clone->oplock=NO_OPLOCK_RETURN;
1942 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_NONE);
1943 //if (!NT_STATUS_IS_OK(status)) result=false;
1946 f->oplock=clone->oplock;
1947 /* and fake the rest of the response struct */
1948 io->generic.out.oplock_level=f->oplock;
1949 io->generic.out.create_action=NTCREATEX_ACTION_EXISTED;
1950 io->generic.out.create_time=f->metadata->info_data.create_time;
1951 io->generic.out.access_time=f->metadata->info_data.access_time;
1952 io->generic.out.write_time=f->metadata->info_data.write_time;
1953 io->generic.out.change_time=f->metadata->info_data.change_time;
1954 io->generic.out.attrib=f->metadata->info_data.attrib;
1955 io->generic.out.alloc_size=f->metadata->info_data.alloc_size;
1956 io->generic.out.size=f->metadata->info_data.size;
1957 io->generic.out.file_type=f->metadata->info_data.file_type;
1958 io->generic.out.ipc_state=f->metadata->info_data.ipc_state;
1959 io->generic.out.is_directory=f->metadata->info_data.is_directory;
1960 /* optional return values matching SMB2 tagged
1961 values in the call */
1962 //io->generic.out.maximal_access;
1963 return NT_STATUS_OK;
1964 }
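/* Note on the clone bookkeeping above: a read-only re-open shares the
   server fnum, oplock level, cache and metadata of the existing handle,
   and metadata->count records how many handles share the metadata, so the
   last handle torn down knows it holds the final reference. */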
1965 f->metadata=talloc_zero(f, struct file_metadata);
1966 NT_STATUS_HAVE_NO_MEMORY(f->metadata);
1967 f->metadata->count=1;
1969 /* if oplocks aren't requested, optionally override and request them */
1970 if (! (io->generic.in.flags & (OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK))
1971 && private->fake_oplock) {
1972 io->generic.in.flags |= OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK;
1975 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1976 union smb_handle *file;
1978 status = smb_raw_open(private->tree, req, io);
1979 NT_STATUS_NOT_OK_RETURN(status);
1981 SMB_OPEN_OUT_FILE(io, file);
1982 f->fnum = file->fnum;
1983 file->ntvfs = NULL;
1984 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1985 NT_STATUS_NOT_OK_RETURN(status);
1986 file->ntvfs = f->h;
1987 DLIST_ADD(private->files, f);
1989 f->oplock=io->generic.out.oplock_level;
1991 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1992 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1993 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1995 if (private->cache_enabled) {
1996 f->cache=cache_open(private->cache, f, io, f->oplock, private->cache_readahead);
1997 if (private->fake_valid) {
1998 cache_handle_validated(f, cache_handle_len(f));
2000 if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
2003 return NT_STATUS_OK;
2006 c_req = smb_raw_open_send(private->tree, io);
2008 ASYNC_RECV_TAIL_F(io, async_open, f);
2009 }
2011 /*
2012 create a directory
2013 */
2014 static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
2015 struct ntvfs_request *req, union smb_mkdir *md)
2017 struct proxy_private *private = ntvfs->private_data;
2018 struct smbcli_request *c_req;
2020 SETUP_PID;
2022 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2023 return smb_raw_mkdir(private->tree, md);
2026 c_req = smb_raw_mkdir_send(private->tree, md);
2028 SIMPLE_ASYNC_TAIL;
2029 }
2031 /*
2032 remove a directory
2033 */
2034 static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
2035 struct ntvfs_request *req, struct smb_rmdir *rd)
2037 struct proxy_private *private = ntvfs->private_data;
2038 struct smbcli_request *c_req;
2040 SETUP_PID;
2042 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2043 return smb_raw_rmdir(private->tree, rd);
2045 c_req = smb_raw_rmdir_send(private->tree, rd);
2047 SIMPLE_ASYNC_TAIL;
2048 }
2050 /*
2051 rename a set of files
2052 */
2053 static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
2054 struct ntvfs_request *req, union smb_rename *ren)
2056 struct proxy_private *private = ntvfs->private_data;
2057 struct smbcli_request *c_req;
2059 SETUP_PID;
2061 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2062 return smb_raw_rename(private->tree, ren);
2065 c_req = smb_raw_rename_send(private->tree, ren);
2067 SIMPLE_ASYNC_TAIL;
2068 }
2070 /*
2071 copy a set of files
2072 */
2073 static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
2074 struct ntvfs_request *req, struct smb_copy *cp)
2076 return NT_STATUS_NOT_SUPPORTED;
2077 }
2079 /* we only define this separately so we can easily spot read calls in
2080 pending based on ( c_req->private.fn == async_read_handler ) */
2081 static void async_read_handler(struct smbcli_request *c_req)
2083 async_chain_handler(c_req);
2084 }
2086 NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2088 struct proxy_private *private = async->proxy;
2089 struct smbcli_request *c_req = async->c_req;
2090 struct proxy_file *f = async->f;
2091 union smb_read *io = async->parms;
2093 /* if request is not already received by a chained handler, read it */
2094 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2096 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2097 f->readahead_pending, private->readahead_spare));
2099 f->readahead_pending--;
2100 private->readahead_spare++;
2102 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2103 f->readahead_pending, private->readahead_spare));
2105 return status;
2106 }
2108 /*
2109 a handler for async read replies - speculative read-aheads.
2110 It merely saves in the cache. The async chain handler will call send_fn if
2111 there is one, or if sync_chain_handler is used the send_fn is called by
2112 the ntvfs back end.
2113 */
2114 NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2116 struct smbcli_request *c_req = async->c_req;
2117 struct proxy_file *f = async->f;
2118 union smb_read *io = async->parms;
2120 /* if request is not already received by a chained handler, read it */
2121 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2123 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2124 get_friendly_nt_error_msg(status)));
2126 NT_STATUS_NOT_OK_RETURN(status);
2128 /* if it was a validate read we don't need to save anything unless it failed.
2129 Until we use Proxy_read structs we can't tell, so guess */
2130 if (io->generic.out.nread == io->generic.in.maxcnt &&
2131 io->generic.in.mincnt < io->generic.in.maxcnt) {
2132 /* looks like a validate read, just move the validate pointer, the
2133 original read-request has already been satisfied from cache */
2134 DEBUG(3,("%s megavalidate suceeded, validate to %lld\n",__FUNCTION__,
2135 io->generic.in.offset + io->generic.out.nread));
2136 cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
2137 } else {
2138 DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
2139 cache_handle_save(f, io->generic.out.data,
2140 io->generic.out.nread,
2141 io->generic.in.offset);
2144 DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2145 return status;
2146 }
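/* Worked example of the guess above (illustrative numbers): a mega-validate
   read goes out as mincnt=4k, maxcnt=64k, so nread==maxcnt with
   mincnt<maxcnt means all 64k arrived and the validated window simply
   advances. An ordinary cache-fill read is sent with mincnt==maxcnt and so
   falls through to cache_handle_save() at its offset instead. */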
2148 /* handler for fragmented reads */
2149 NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2151 struct proxy_private *private = async->proxy;
2152 struct smbcli_request *c_req = async->c_req;
2153 struct ntvfs_request *req = async->req;
2154 struct proxy_file *f = async->f;
2155 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2156 /* this is the io against which the fragment is to be applied */
2157 union smb_read *io = talloc_get_type_abort(io1, union smb_read);
2158 /* this is the io for the read that issued the callback */
2159 union smb_read *io_frag = fragment->io_frag; /* async->parms; */
2160 struct async_read_fragments* fragments=fragment->fragments;
2162 /* if request is not already received by a chained handler, read it */
2163 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
2164 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2166 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2167 get_friendly_nt_error_msg(status)));
2169 fragment->status = status;
2171 /* remove fragment from fragments */
2172 DLIST_REMOVE(fragments->fragments, fragment);
2174 #warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
2175 /* in which case we will want to collate all responses and return a valid read
2176 for the leading NT_STATUS_OK fragments */
2178 /* did this one fail, inducing a general fragments failure? */
2179 if (!NT_STATUS_IS_OK(fragment->status)) {
2180 /* preserve the status of the fragment with the smallest offset
2181 when we can work out how */
2182 if (NT_STATUS_IS_OK(fragments->status)) {
2183 fragments->status=fragment->status;
2186 cache_handle_novalidate(f);
2187 DEBUG(5,("** Devalidated proxy due to read failure\n"));
2188 } else {
2189 /* No fragments have yet failed, keep collecting responses */
2190 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2191 /* Find memcpy window, copy data from the io_frag to the io */
2192 off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
2193 /* used to use mincnt */
2194 off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
2195 off_t end_offset=MIN(io_extent, extent);
2196 /* ASSERT(start_offset <= end_offset) */
2197 /* ASSERT(start_offset <= io_extent) */
2198 if (start_offset >= io_extent) {
2199 DEBUG(3,("useless read-ahead tagged on to: %s",__LOCATION__));
2200 } else {
2201 uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
2202 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2203 /* src == dst in cases where we did not latch onto someone else's
2204 read, but are handling our own */
2205 if (src != dst)
2206 memcpy(dst, src, end_offset - start_offset);
2209 /* There should be a better way to detect, but it needs the proxy rpc struct,
2210 not the smb_read struct */
2211 if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
2212 DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
2213 (long long) io_frag->generic.out.nread,
2214 (long long) io_frag->generic.in.mincnt,
2215 (long long) io_frag->generic.in.maxcnt));
2216 cache_handle_novalidate(f);
2219 /* We broke up the original read. If not enough of this sub-read has
2220 been read, and then some of the next block, it could leave holes!
2221 We will only acknowledge up to the first partial read, and treat
2222 it as a small read. If the server can return NT_STATUS_OK for a partial
2223 read then so can we, so we preserve the response.
2224 "enough" is all of it (maxcnt), except on the last block, when it has to
2225 be enough to fill io->generic.in.mincnt. We know it is the last block
2226 if nread is small but we could fill io->generic.in.mincnt */
2227 if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
2228 end_offset < io->generic.in.offset + io->generic.in.mincnt) {
2229 DEBUG(4,("Fragmented read only partially successful\n"));
2231 /* Shrink the master nread (or grow it to this size if we are the first partial read) */
2232 if (! fragments->partial ||
2233 (io->generic.in.offset + io->generic.out.nread) > extent) {
2234 io->generic.out.nread = extent - io->generic.in.offset;
2237 /* stop any further successes from extending the partial read */
2238 fragments->partial=true;
2239 } else {
2240 /* only grow the master nread if we haven't logged a partial read */
2241 if (! fragments->partial &&
2242 (io->generic.in.offset + io->generic.out.nread) < extent ) {
2243 io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
2244 }
2245 }
2246 }
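/* Worked example of the hole rule above (illustrative numbers): a 64k
   master read at offset 0 is split into two 32k fragments. If the first
   fragment returns only 10k, acknowledging the second fragment's 32k-64k
   data would leave a hole at 10k-32k, so nread is clamped to the 10k
   extent and fragments->partial stops later successes from growing it. */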
2248 /* Was it the last fragment, or do we know enough to send a response? */
2249 if (! fragments->fragments) {
2250 DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
2251 io->generic.out.nread, io->generic.in.mincnt,
2252 get_friendly_nt_error_msg(fragments->status)));
2253 if (fragments->async) {
2254 req->async_states->status=fragments->status;
2255 DEBUG(5,("Fragments async response sending\n"));
2256 #warning its not good freeing early if other pending requests have io allocated against this request which will now be freed
2257 /* esp. as they may be attached to by other reads. Maybe attachees should take a reference, but how will they
2258 know the top level they need to take a reference to.. */
2259 #warning should really queue a sender here, not call it
2260 req->async_states->send_fn(req);
2261 DEBUG(5,("Async response sent\n"));
2262 } else {
2263 DEBUG(5,("Fragments SYNC return\n"));
2267 /* because a c_req may be shared by many req, chained handlers must return
2268 a status pertaining to the general validity of this specific c_req, not
2269 to their own private processing of the c_req for the benefit of their req
2270 which is returned in fragments->status
2271 */
2272 return status;
2273 }
2275 /* Issue read-ahead X bytes where X is the window size calculated from
2276 server_latency * server_session_bandwidth,
2277 where latency is the idle (link) latency and bandwidth is less than or equal
2278 to the actual bandwidth available to the server.
2279 Read-ahead should honour locked areas in whatever way is necessary (who knows?)
2280 read_ahead is defined here and not in the cache engine because it requires too
2281 much knowledge of private structures
2282 */
2283 /* The concept is buggy unless we can tell the next proxy that these are
2284 read-aheads, otherwise chained proxy setups will each read-ahead of the
2285 read-ahead which can put a larger load on the final server.
2286 Also we probably need to distinguish between
2287 * cache-less read-ahead
2288 * cache-revalidating read-ahead
2289 */
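/* A sketch (hypothetical helper, not in this file) of the window sizing
   described above: the read-ahead window is essentially a bandwidth-delay
   product, e.g. 10ms of idle latency at 10MB/s of session bandwidth
   suggests keeping roughly 100KB in flight. */
#if 0
static ssize_t example_readahead_window(int latency_ms, ssize_t bytes_per_sec)
{
	/* bandwidth-delay product: bytes in flight needed to hide the latency */
	return (ssize_t)(((int64_t)bytes_per_sec * latency_ms) / 1000);
}
#endif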
2290 NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
2291 union smb_read *io, ssize_t as_read)
2293 struct proxy_private *private = ntvfs->private_data;
2294 struct smbcli_tree *tree = private->tree;
2295 struct cache_file_entry *cache;
2296 off_t next_position; /* this read offset+length+window */
2297 off_t end_position; /* position we read-ahead to */
2298 off_t cache_populated;
2299 off_t read_position, new_extent;
2301 if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
2302 DEBUG(5,("A\n"));
2303 if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
2304 DEBUG(5,("B\n"));
2305 cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
2306 DEBUG(5,("C\n"));
2307 /* don't read-ahead if we are in bulk validate mode */
2308 if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
2309 DEBUG(5,("D\n"));
2310 /* if we can't trust what we read-ahead anyway then don't bother although
2311 * if delta-reads are enabled we can do so in order to get something to
2312 * delta against */
2313 DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
2314 (long long int)(cache_len(cache)),
2315 (long long int)(cache->readahead_extent),
2316 (long long int)(as_read),
2317 cache->readahead_window,private->cache_readahead));
2318 if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
2319 DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
2320 cache->status));
2321 return NT_STATUS_UNSUCCESSFUL;
2324 /* as_read is the mincnt bytes of a request being made or the
2325 out.nread of completed sync requests
2326 Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
2327 then this may often NOT be the case if readahead_window < requestsize; so we will
2328 get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
2329 all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
2330 this and have failed sparse writes adjust the cache->readahead_extent back to actual size */
2332 /* predict the file pointers next position */
2333 next_position=io->generic.in.offset + as_read;
2335 /* if we know how big the file is, don't read beyond */
2336 if (f->oplock && next_position > f->metadata->info_data.size) {
2337 next_position = f->metadata->info_data.size;
2339 DEBUG(5,("Next position: %lld (%lld + %lld)\n",
2340 (long long int)next_position,
2341 (long long int)io->generic.in.offset,
2342 (long long int)as_read));
2343 /* calculate the limit of the validated or requested cache */
2344 cache_populated=MAX(cache->validated_extent, cache->readahead_extent);
2346 /* will the new read take us beyond the current extent without gaps? */
2347 if (cache_populated < io->generic.in.offset) {
2348 /* this read-ahead is a read-behind-pointer */
2349 new_extent=cache_populated;
2350 } else {
2351 new_extent=MAX(next_position, cache_populated);
2354 /* as far as we can tell new_extent is the smallest offset that doesn't
2355 have a pending read request on it. Of course if we got a short read then
2356 we will have a cache-gap which we can't handle and need to read from
2357 a shrunk readahead_extent, which we don't currently handle */
2358 read_position=new_extent;
2360 /* of course if we know how big the remote file is we should limit at that */
2361 /* we should also mark-out which read-ahead requests are pending so that we
2362 * don't repeat them while they are in-transit. */
2363 /* we can't really use next_position until we can have caches with holes
2364 UNLESS next_position < new_extent, because a next_position well before
2365 new_extent is no reason to extend it further; we only want to extend
2366 with read-aheads if we have cause to suppose the read-ahead data will
2367 be wanted, i.e. the next_position is near new_extent.
2368 So we can't justify reading beyond window+next_position, but if
2369 next_position is leaving gaps, we use new_extent instead */
2370 end_position=MIN(new_extent, next_position) + cache->readahead_window;
2371 if (f->oplock) {
2372 end_position=MIN(end_position, f->metadata->info_data.size);
2374 DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
2375 (long long int)read_position,
2376 (long long int)(next_position + cache->readahead_window),
2377 cache->readahead_window,
2378 (long long int)end_position,
2379 private->readahead_spare));
2380 /* do we even need to read? */
2381 if (! (read_position < end_position)) return NT_STATUS_OK;
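/* Worked example of the arithmetic above (illustrative numbers): a read of
   as_read=32k at offset=100k gives next_position=132k. With the cache
   populated to 120k there is no gap, so new_extent=MAX(132k,120k)=132k and
   read_position=132k; a 64k window then gives
   end_position=MIN(132k,132k)+64k=196k, i.e. 64k of read-ahead to issue,
   quota permitting. */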
2383 /* readahead_spare is for the whole session (mid/tid?) and may need sharing
2384 out over files and other tree-connects or something */
2385 while (read_position < end_position &&
2386 private->readahead_spare > 0) {
2387 struct smbcli_request *c_req = NULL;
2388 ssize_t read_remaining = end_position - read_position;
2389 ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
2390 MIN(read_remaining, private->cache_readaheadblock));
2391 void *req = NULL; /* for the ASYNC_RECV_TAIL_F_ORPHAN macro */
2392 uint8_t* data;
2393 union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);
2395 if (! io_copy)
2396 return NT_STATUS_NO_MEMORY;
2398 #warning we are ignoring read_for_execute as far as the cache goes
2399 io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
2400 io_copy->generic.in.offset=read_position;
2401 io_copy->generic.in.mincnt=read_block;
2402 io_copy->generic.in.maxcnt=read_block;
2403 /* what is generic.in.remaining for? */
2404 io_copy->generic.in.remaining = MIN(65535,read_remaining);
2405 io_copy->generic.out.nread=0;
2407 #warning someone must own io_copy, tree, maybe?
2408 data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
2409 DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
2410 if (! data) {
2411 talloc_free(io_copy);
2412 return NT_STATUS_NO_MEMORY;
2414 io_copy->generic.out.data=data;
2416 /* are we able to pull anything from the cache to validate this read-ahead?
2417 NOTE: there is no point in reading ahead merely to re-validate the
2418 cache if we don't have oplocks and can't save it....
2419 ... or maybe there is if we think a read will come that can be matched
2420 up to this response while it is still on the wire */
2421 #warning so we need to distinguish between pipe-line read-ahead and revalidation
2422 if (/*(cache->status & CACHE_READ)!=0 && */
2423 cache_len(cache) >
2424 (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
2425 cache->validated_extent <
2426 (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
2427 ssize_t pre_fill;
2429 pre_fill = cache_raw_read(cache, data,
2430 io_copy->generic.in.offset,
2431 io_copy->generic.in.maxcnt);
2432 DEBUG(5,("Data read into %p %d\n",data, pre_fill));
2433 if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
2434 io_copy->generic.out.nread=pre_fill;
2435 read_block=pre_fill;
2439 c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
2441 if (c_req) {
2442 private->readahead_spare--;
2443 f->readahead_pending++;
2444 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
2445 if (cache->readahead_extent < read_position+read_block)
2446 cache->readahead_extent=read_position+read_block;
2447 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2448 /* so we can decrease read-ahead counter for this session */
2449 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
2450 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);
2452 /* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
2453 talloc_steal(c_req->async.private, c_req);
2454 talloc_steal(c_req->async.private, io_copy);
2455 read_position+=read_block;
2456 } else {
2457 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
2458 talloc_free(io_copy);
2459 break;
2463 DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
2464 return NT_STATUS_OK;
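/* Worked example of the block sizing in the loop above: with max_xmit=61440
   the wire ceiling is 61440-(MIN_SMB_SIZE+32) bytes per request, so with
   cache_readaheadblock=32k and 100k of window remaining, read_block=32k and
   the loop keeps issuing 32k reads until the window, the readahead_spare
   quota, or a failed send stops it. */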
2467 struct proxy_validate_parts_parts {
2468 struct proxy_Read* r;
2469 struct ntvfs_request *req;
2470 struct proxy_file *f;
2471 struct async_read_fragments *fragments;
2472 off_t offset;
2473 ssize_t remaining;
2474 bool complete;
2475 declare_checksum(digest);
2476 struct MD5Context context;
2477 };
2479 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
2480 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
2481 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2482 struct proxy_validate_parts_parts *parts);
2484 /* this will be the new struct proxy_Read based read function, for now
2485 it just deals with non-cache-based validate to a regular server */
2486 static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
2487 struct ntvfs_request *req,
2488 struct proxy_Read *r,
2489 struct proxy_file *f)
2491 struct proxy_private *private = ntvfs->private_data;
2492 struct proxy_validate_parts_parts *parts;
2493 struct async_read_fragments *fragments;
2494 NTSTATUS status;
2496 if (!f) return NT_STATUS_INVALID_HANDLE;
2498 DEBUG(5,("%s: fnum=%d **** %lld bytes \n\n\n\n",__LOCATION__,f->fnum,(long long int)r->in.maxcnt));
2500 parts = talloc_zero(req, struct proxy_validate_parts_parts);
2501 DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
2502 NT_STATUS_HAVE_NO_MEMORY(parts);
2504 fragments = talloc_zero(parts, struct async_read_fragments);
2505 NT_STATUS_HAVE_NO_MEMORY(fragments);
2507 parts->fragments=fragments;
2509 parts->r=r;
2510 parts->f=f;
2511 parts->req=req;
2512 /* processed offset */
2513 parts->offset=r->in.offset;
2514 parts->remaining=r->in.maxcnt;
2515 fragments->async=true;
2517 MD5Init (&parts->context);
2519 /* start a read-loop which will continue in the callback until it is
2520 all done */
2521 status=proxy_validate_parts(ntvfs, parts);
2522 if (parts->complete) {
2523 /* Make sure we are not async */
2524 DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
2525 return proxy_validate_complete(parts);
2528 /* Assert if status!=NT_STATUS_OK then parts->complete==true */
2529 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
2530 DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
2531 return status;
2532 }
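/* A sketch (hypothetical helper, not in this file) of the digest protocol
   driven from here: the requester sends the MD5 of its cached span in
   r->in.digest, each fragment read from the server is folded into
   parts->context in file order, and proxy_validate_complete() compares
   the two. */
#if 0
static bool example_digest_matches(struct proxy_Read *r,
				   const uint8_t *data, size_t nread)
{
	struct MD5Context ctx;
	declare_checksum(digest);
	MD5Init(&ctx);
	MD5Update(&ctx, data, nread); /* real flow: once per fragment, in order */
	MD5Final(digest, &ctx);
	return memcmp(digest, r->in.digest.digest, sizeof(digest))==0;
}
#endif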
2534 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
2536 NTSTATUS status;
2537 struct proxy_Read* r=parts->r;
2538 struct proxy_file *f=parts->f;
2540 DEBUG(5,("%s: %d/%d bytes \n\n\n\n",__LOCATION__,r->out.nread,r->in.maxcnt));
2542 MD5Final(parts->digest, &parts->context);
2544 status = parts->fragments->status;
2545 r->out.result = status;
2546 r->out.response.generic.count=r->out.nread;
2547 r->out.cache_name.count=0;
2549 DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg (status),
2550 r->out.response.generic.count));
2552 DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
2553 dump_data (5, r->in.digest.digest, sizeof(parts->digest));
2554 DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
2555 dump_data (5, parts->digest, sizeof(parts->digest));
2557 if (NT_STATUS_IS_OK(status) &&
2558 (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
2559 r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
2560 DEBUG(5,("======= VALIDATED FINE \n\n\n"));
2561 } else {
2562 if (r->in.flags & PROXY_USE_ZLIB) {
2563 ssize_t size = r->out.response.generic.count;
2564 DEBUG(5,("======= VALIDATED WRONG; compress size %d \n\n\n",size));
2565 if (compress_block(r->out.response.generic.data, &size) ) {
2566 r->out.flags|=PROXY_USE_ZLIB;
2567 r->out.response.compress.count=size;
2568 r->out.response.compress.data=r->out.response.generic.data;
2569 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2570 __LOCATION__,r->out.nread,size,size*100/r->out.nread));
2573 /* return cache filename as a ghastly hack for now */
2574 r->out.cache_name.s=f->cache->cache_name;
2575 r->out.cache_name.count=strlen(r->out.cache_name.s)+1;
2576 DEBUG(5,("%s: writing cache name: %s\n",__LOCATION__, f->cache->cache_name));
2577 /* todo: what about tiny files? buffer too small; don't validate tiny files <1K */
2580 /* assert: this must only be true if we are in a callback */
2581 if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
2582 /* we are async complete, we need to call the sendfn */
2583 parts->req->async_states->status=status;
2584 DEBUG(5,("Fragments async response sending\n"));
2586 parts->req->async_states->send_fn(parts->req);
2587 return NT_STATUS_OK;
2588 }
2589 return status;
2590 }
2592 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2594 struct smbcli_request *c_req = async->c_req;
2595 struct ntvfs_request *req = async->req;
2596 struct proxy_file *f = async->f;
2597 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2598 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2599 /* this is the io against which the fragment is to be applied */
2600 struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
2601 struct proxy_Read* r=parts->r;
2602 /* this is the io for the read that issued the callback */
2603 union smb_read *io_frag = fragment->io_frag;
2604 struct async_read_fragments* fragments=fragment->fragments;
2606 /* if request is not already received by a chained handler, read it */
2607 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2608 DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2609 DEBUG(5,("\n\n%s: parts=%p c_req=%p io_frag=%p read %lld\n",__LOCATION__,parts, c_req, io_frag,(long long int)io_frag->generic.out.nread));
2611 fragment->status=status;
2613 if (NT_STATUS_IS_OK(status)) {
2614 /* TODO: If we are not sequentially "next", queue until we can do it */
2615 /* log this data in r->out.generic.data */
2616 /* Find memcpy window, copy data from the io_frag to the io */
2618 /* Also write validate to cache */
2619 if (f && f->cache) {
2620 cache_save(f->cache, io_frag->generic.out.data, io_frag->generic.out.nread, io_frag->generic.in.offset);
2623 /* extent is the last byte we (don't) read for this frag */
2624 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2625 /* start_offset is the file offset we first care about */
2626 off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
2627 /* Don't want to go past mincnt cos we don't have the buffer */
2628 off_t io_extent=r->in.offset + r->in.mincnt;
2629 off_t end_offset=MIN(io_extent, extent);
2631 /* ASSERT(start_offset <= end_offset) */
2632 /* ASSERT(start_offset <= io_extent) */
2633 /* Don't copy beyond buffer */
2634 if (! (start_offset >= io_extent)) {
2635 uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
2636 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2637 /* src == dst in cases where we did not latch onto someone else's
2638 read, but are handling our own */
2639 if (src != dst)
2640 memcpy(dst, src, end_offset - start_offset);
2641 r->out.nread=end_offset - r->in.offset;
2642 DEBUG(5,("%s: nread %lld ++++++++++++++++++\n", __LOCATION__,(long long int)r->out.nread));
2645 MD5Update(&parts->context, io_frag->generic.out.data,
2646 io_frag->generic.out.nread);
2648 parts->fragments->status=status;
2649 status=proxy_validate_parts(ntvfs, parts);
2650 } else {
2651 parts->fragments->status=status;
2654 DLIST_REMOVE(fragments->fragments, fragment);
2655 /* this will free the io_frag too */
2656 talloc_free(fragment);
2658 if (parts->complete || NT_STATUS_IS_ERR(status)) {
2659 /* this will call sendfn, the chain handler won't know... but
2660 should have no more handlers queued */
2661 return proxy_validate_complete(parts);
2664 return NT_STATUS_OK;
2665 }
2667 /* continue a read loop, possibly from a callback */
2668 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2669 struct proxy_validate_parts_parts *parts)
2671 struct proxy_private *private = ntvfs->private_data;
2672 union smb_read *io_frag;
2673 struct async_read_fragment *fragment;
2674 struct smbcli_request *c_req = NULL;
2675 ssize_t size=private->tree->session->transport->negotiate.max_xmit \
2676 - (MIN_SMB_SIZE+32);
2678 /* Have we already read enough? */
2679 if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
2680 parts->complete=true;
2681 return NT_STATUS_OK;
2682 }
2684 size=MIN(size, parts->remaining);
2686 fragment=talloc_zero(parts->fragments, struct async_read_fragment);
2687 NT_STATUS_HAVE_NO_MEMORY(fragment);
2689 io_frag = talloc_zero(fragment, union smb_read);
2690 NT_STATUS_HAVE_NO_MEMORY(io_frag);
2692 io_frag->generic.out.data = talloc_size(io_frag, size);
2693 NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);
2695 io_frag->generic.level = RAW_READ_GENERIC;
2696 io_frag->generic.in.file.fnum = parts->r->in.fnum;
2697 io_frag->generic.in.offset = parts->offset;
2698 io_frag->generic.in.mincnt = size;
2699 io_frag->generic.in.maxcnt = size;
2700 io_frag->generic.in.remaining = 0;
2701 #warning maybe true is more permissive?
2702 io_frag->generic.in.read_for_execute = false;
2704 DEBUG(5,("%s: issue part read offset=%lld, size=%lld,%lld\n",__LOCATION__,
2705 (long long int)io_frag->generic.in.offset,
2706 (long long int)io_frag->generic.in.mincnt,
2707 (long long int)io_frag->generic.in.maxcnt));
2709 //c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
2710 c_req = smb_raw_read_send(private->tree, io_frag);
2711 NT_STATUS_HAVE_NO_MEMORY(c_req);
2713 parts->offset+=size;
2714 parts->remaining-=size;
2715 fragment->c_req = c_req;
2716 fragment->io_frag = io_frag;
2717 fragment->fragments=parts->fragments;
2718 DLIST_ADD(parts->fragments->fragments, fragment);
2720 { void* req=NULL; /* for the ASYNC_RECV_TAIL_F_ORPHAN macro */
2721 ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
2722 ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
2725 DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__LOCATION__,parts, c_req, io_frag));
2727 return NT_STATUS_OK;
2728 }
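/* Worked example of the fragmentation above: with max_xmit=61440 each
   fragment carries up to 61440-(MIN_SMB_SIZE+32) bytes, so validating
   r->in.maxcnt=256k takes five reads. They are issued one at a time, each
   callback (async_proxy_validate_parts) folding its data into the MD5
   context and issuing the next, until offset reaches in.offset+maxcnt. */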
2730 /*
2731 read from a file
2732 */
2733 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
2734 struct ntvfs_request *req, union smb_read *io)
2736 struct proxy_private *private = ntvfs->private_data;
2737 struct smbcli_request *c_req;
2738 struct proxy_file *f;
2739 struct async_read_fragments *fragments=NULL;
2740 /* how much of read-from-cache is certainly valid */
2741 ssize_t valid=0;
2742 off_t offset=io->generic.in.offset+valid;
2743 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
2745 SETUP_PID;
2747 if (io->generic.level != RAW_READ_GENERIC &&
2748 private->map_generic) {
2749 return ntvfs_map_read(ntvfs, req, io);
2752 SETUP_FILE_HERE(f);
2754 DEBUG(3,("\n%s() fnum=%d offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
2755 io->generic.in.file.fnum,
2756 io->generic.in.offset,
2757 io->generic.in.mincnt,
2758 io->generic.in.maxcnt));
2760 io->generic.out.nread=0;
2762 /* if we have oplocks and know the file's size, don't even ask the server
2763 for more */
2764 if (f->oplock) {
2765 if (io->generic.in.offset >= f->metadata->info_data.size) {
2766 io->generic.in.mincnt=0;
2767 io->generic.in.maxcnt=0;
2768 io->generic.out.nread=0;
2769 DEBUG(5,("Reading beyond known length %lld; return 0\n",(long long)f->metadata->info_data.size));
2770 return NT_STATUS_OK;
2771 } else {
2772 io->generic.in.mincnt=MIN(io->generic.in.mincnt,
2773 f->metadata->info_data.size - io->generic.in.offset);
2774 io->generic.in.maxcnt=MIN(io->generic.in.maxcnt,
2775 f->metadata->info_data.size - io->generic.in.offset);
2777 DEBUG(5,("Oplock and known size, limiting read to %lld (s=%d)\n",
2778 f->metadata->info_data.size, io->generic.in.mincnt));
2782 /* attempt to read from cache. if nread becomes non-zero then we
2783 have cache to validate. Instead of returning "valid" value, cache_read
2784 should probably return an async_read_fragment structure */
2786 if (private->cache_enabled) {
2787 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
2789 if (NT_STATUS_IS_OK(status)) {
2790 /* if we read enough valid data, return it */
2791 if (valid > 0 && valid>=io->generic.in.mincnt) {
2792 /* valid will not be bigger than maxcnt */
2793 io->generic.out.nread=valid;
2794 DEBUG(1,("Read from cache offset=%d size=%d\n",
2795 (int)(io->generic.in.offset),
2796 (int)(io->generic.out.nread)) );
2797 return status;
2800 DEBUG(5,("Cache read status: %s\n",get_friendly_nt_error_msg (status)));
2803 fragments=talloc_zero(req, struct async_read_fragments);
2804 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
2805 /* See if there are pending reads that would satisfy this request
2806 We have a validated read up to io->generic.out.nread. Anything between
2807 this and mincnt MUST be read, but we could first try and attach to
2808 any pending read-ahead on the same file.
2809 If those read-aheads fail we will re-issue a regular read from the
2810 callback handler and hope it hasn't taken too long. */
2812 /* offset is the extent of the file from which we still need to find
2813 matching read-requests. */
2814 offset=io->generic.in.offset+valid;
2815 /* limit is the byte beyond the last byte for which we need a request.
2816 This used to be mincnt, but is now maxcnt to cope with validate reads.
2817 Maybe we can switch back to mincnt when proxy_read struct is used
2818 instead of smb_read.
2819 */
2820 limit=io->generic.in.offset+io->generic.in.maxcnt;
2822 while (offset < limit) {
2823 /* Should look for the read-ahead with offset <= in.offset+out.nread
2824 with the longest span, but there is only likely to be one anyway so
2825 just take the first */
2826 struct async_info* pending=private->pending;
2827 union smb_read *readahead_io=NULL;
2828 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
2829 while(pending) {
2830 if (pending->c_req->async.fn == async_read_handler) {
2831 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
2832 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
2834 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
2835 readahead_io->generic.in.offset <= offset &&
2836 readahead_io->generic.in.offset +
2837 readahead_io->generic.in.mincnt > offset) break;
2839 readahead_io=NULL;
2840 pending=pending->next;
2842 /* ASSERT(readahead_io == pending->c_req->async.params) */
2843 if (pending && readahead_io) {
2844 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2845 fragment->fragments=fragments;
2846 fragment->io_frag=readahead_io;
2847 fragment->c_req = pending->c_req;
2848 /* we found one, so attach to it. We DO need a talloc_reference
2849 because the original send_fn might be called before ALL chained
2850 handlers, and our handler will call its own send_fn first. ugh.
2851 Maybe we need to separate reverse-mapping callbacks from data users? */
2852 /* Note: the read-ahead io is passed as io, and our req io is
2853 in io_frag->io */
2854 //talloc_reference(req, pending->req);
2855 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
2856 readahead_io->generic.in.offset,
2857 readahead_io->generic.in.mincnt));
2858 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
2859 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2860 DEBUG(5,("Attached OK\n"));
2861 #warning we don't want to return if we fail to attach, just break
2862 DLIST_ADD(fragments->fragments, fragment);
2863 /* updated offset for which we have reads */
2864 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
2865 } else {
2866 /* there are no pending reads to fill this so issue one up to
2867 the maximum supported read size. We could see when the next
2868 pending read is (if any) and only read up till there... later...
2869 Issue a fragment request for what is left, clone io.
2870 In the case that there were no fragments this will be the original read
2871 but with a cloned io struct */
2872 off_t next_offset;
2873 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
2874 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2875 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
2876 ssize_t offset_inc=offset-io_frag->generic.in.offset;
2877 /* 250 is a guess at ndr rpc overheads */
2878 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
2879 private->tree->session->transport->negotiate.max_xmit) \
2880 - (MIN_SMB_SIZE+32);
2881 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
2882 readsize=MIN(limit-offset, readsize);
2884 DEBUG(5,("Issuing direct read\n"));
2885 /* reduce the cached read (if any). nread is unsigned */
2886 if (io_frag->generic.out.nread > offset_inc) {
2887 io_frag->generic.out.nread-=offset_inc;
2888 /* don't make nread buffer look too big */
2889 if (io_frag->generic.out.nread > readsize)
2890 io_frag->generic.out.nread = readsize;
2891 } else {
2892 io_frag->generic.out.nread=0;
2893 }
2894 /* adjust the data pointer so we read to the right place */
2895 io_frag->generic.out.data+=offset_inc;
2896 io_frag->generic.in.offset=offset;
2897 io_frag->generic.in.maxcnt=readsize;
2898 /* we don't mind mincnt being smaller if this is the last frag,
2899 and we can already handle it being bigger but never reached...
2900 The spell would be:
2901 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt); */
2903 io_frag->generic.in.mincnt=readsize;
2904 fragment->fragments=fragments;
2905 fragment->io_frag=io_frag;
2906 #warning attach to send_fn handler
2907 /* what if someone attaches to us? Our send_fn is called from our
2908 chained handler which will be before their handler and io will
2909 already be freed. We need to keep a reference to the io and the data
2910 but we don't know where it came from in order to take a reference.
2911 We need therefore to tackle calling of send_fn AFTER all other handlers */
2913 /* Calculate next offset (in advance) */
2914 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
2916 /* if we are (going to be) the last fragment and we are in VALIDATE
2917 mode, see if we can do a bulk validate now.
2918 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
2919 don't do a validate on a receive validate read */
2921 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
2922 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
2923 ssize_t length=private->cache_validatesize;
2924 declare_checksum(digest);
2926 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
2927 length, (unsigned long long) offset));
2928 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
2929 /* no point in doing it if md5'd length < current out.nread
2930 remember: out.data contains this request's cached response
2931 if validate succeeds */
2932 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
2933 /* upgrade the read, allocate the proxy_read struct here
2934 and fill in the extras, no more out-of-band stuff */
2935 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
2936 dump_data (5, digest, sizeof(digest));
2938 r=talloc_zero(io_frag, struct proxy_Read);
2939 memcpy(r->in.digest.digest, digest, sizeof(digest));
2940 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
2941 io_frag->generic.in.maxcnt = length;
2942 r->in.mincnt=io_frag->generic.in.mincnt;
2943 /* the proxy send function will calculate the checksum based on *data */
2944 } else {
2945 /* try bulk read */
2946 if (f->oplock) {
2947 DEBUG(5,("%s: *** faking bulkd read\n\n",__LOCATION__));
2948 r=talloc_zero(io_frag, struct proxy_Read);
2949 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;//| PROXY_USE_ZLIB;
2950 io_frag->generic.in.maxcnt = MIN(f->metadata->info_data.size, private->cache_validatesize);
2952 r->in.mincnt=io_frag->generic.in.mincnt;
2953 } else {
2954 /* not enough in cache to make it worthwhile anymore */
2955 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
2956 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
2958 //cache_handle_novalidate(f);
2961 }
2962 } else {
2963 if (f->cache && f->cache->status & CACHE_VALIDATE) {
2964 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
2965 (long long) next_offset,
2966 (long long) limit));
2970 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
2971 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
2972 io_frag->generic.in.maxcnt));
2973 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
2974 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
2975 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
2976 fragment->c_req=c_req;
2977 DLIST_ADD(fragments->fragments, fragment);
2978 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2979 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2980 DEBUG(5,("Frag response chained\n"));
2981 /* normally we would only install the chain_handler if we wanted async
2982 response, but as it is the async_read_fragment handler that calls send_fn
2983 based on fragments->async, instead of async_chain_handler, we don't
2984 need to worry about this call completing async'ly while we are
2985 waiting on the other attached calls. Otherwise we would not attach
2986 the async_chain_handler (via async_read_handler) because of the wait
2987 below */
2988 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
2989 void* req=NULL;
2990 /* call async_chain_handler, not the read handler, so that folk can't
2991 attach to it, till we solve the problem above */
2992 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
2993 }
2994 offset = next_offset;
2995 }
2996 DEBUG(5,("Next fragment\n"));
2999 /* do we still need a final fragment? Issue a read */
3001 DEBUG(5,("No frags left to read\n"));
3004 /* issue new round of read-aheads */
3005 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
3006 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
3007 DEBUG(5,("== Done Read aheads\n"));
3009 /* If we have fragments but we are not called async, we must sync-wait on them */
3010 /* did we map the entire request to pending reads? */
3011 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3012 struct async_read_fragment *fragment;
3013 DEBUG(5,("Sync waiting\n"));
3014 /* fragments get freed during the chain_handler so we start at
3015 the top each time */
3016 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
3017 /* Any fragments async handled while we sync-wait on one
3018 will remove themselves from the list and not get sync waited */
3019 sync_chain_handler(fragment->c_req);
3020 /* if we have a non-ok result AND we know we have all the responses
3021 up to extent, then we could quit the loop early and change the
3022 fragments->async to true so the final irrelevant responses would
3023 come async and we could send our response now - but we don't
3024 track that detail until we have cache-maps that we can use to
3025 track the responded fragments and combine responded linear extents
3026 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
3028 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
3029 return fragments->status;
3032 DEBUG(5,("Async returning\n"));
3033 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
3034 return NT_STATUS_OK;
3035 }
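/* Editorial sketch, not from the original source: the loop above carves
   [offset, limit) into fragments no bigger than one request can carry.
   A self-contained model of that clamping arithmetic, with "overhead"
   standing in for the guessed MIN_SMB_SIZE+32 header cost above: */
#if 0
static ssize_t frag_readsize(off_t offset, off_t limit,
                             ssize_t max_xmit, ssize_t ntioctl_max,
                             ssize_t overhead)
{
        /* smallest of the ioctl ceiling and the negotiated SMB buffer */
        ssize_t readsize = (ntioctl_max < max_xmit ? ntioctl_max : max_xmit)
                           - overhead;
        if (readsize > 0xFFFF) readsize = 0xFFFF; /* 16-bit SMB read count */
        if (readsize > limit - offset) readsize = limit - offset;
        return readsize; /* bytes this fragment should request */
}
#endif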
3038 /* a handler to de-fragment async write replies back to one request.
3039 Can cope with out-of-order async responses by waiting for all responses
3040 on an NT_STATUS_OK case so that nwritten is properly adjusted */
3042 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3044 struct smbcli_request *c_req = async->c_req;
3045 struct ntvfs_request *req = async->req;
3046 struct proxy_file *f=async->f;
3047 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
3048 /* this is the io against which the fragment is to be applied */
3049 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
3050 /* this is the io for the write that issued the callback */
3051 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
3052 struct async_write_fragments* fragments=fragment->fragments;
3053 ssize_t extent=0;
3055 /* if request is not already received by a chained handler, read it */
3056 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
3057 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
3059 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
3060 get_friendly_nt_error_msg(status)));
3062 fragment->status = status;
3064 DLIST_REMOVE(fragments->fragments, fragment);
3066 /* did this one fail? */
3067 if (! NT_STATUS_IS_OK(fragment->status)) {
3068 if (NT_STATUS_IS_OK(fragments->status)) {
3069 fragments->status=fragment->status;
3071 } else {
3072 /* No fragments have yet failed, keep collecting responses */
3073 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
3075 /* we broke up the write so it could all be written. If only some has
3076 been written of this block, and then some of the next block,
3077 it could leave unwritten holes! We will only acknowledge up to the
3078 first partial write, and let the client deal with it.
3079 If server can return NT_STATUS_OK for a partial write so can we */
3080 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
3081 DEBUG(4,("Fragmented write only partially successful\n"));
3083 /* Shrink the master nwritten */
3084 if ( ! fragments->partial ||
3085 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
3086 io->generic.out.nwritten = extent - io->generic.in.offset;
3088 /* stop any further successes from extending the partial write */
3089 fragments->partial=true;
3090 } else {
3091 /* only grow the master nwritten if we haven't logged a partial write */
3092 if (! fragments->partial &&
3093 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
3094 io->generic.out.nwritten = extent - io->generic.in.offset;
3099 /* if this was the last fragment, clean up */
3100 if (! fragments->fragments) {
3101 DEBUG(5,("Async write re-fragmented with %d of %d\n",
3102 io->generic.out.nwritten,
3103 io->generic.in.count));
3104 if (NT_STATUS_IS_OK(fragments->status)) {
3105 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
3106 io->generic.in.offset);
3107 if (f->metadata->info_data.size < io->generic.in.offset+io->generic.in.count) {
3108 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3111 if (fragments->async) {
3112 req->async_states->status=fragments->status;
3113 #warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
3114 req->async_states->send_fn(req);
3115 DEBUG(5,("Async response sent\n"));
3116 } else {
3117 DEBUG(5,("Fragments SYNC return\n"));
3121 return status;
3122 }
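/* Editorial sketch, not from the original source: the nwritten bookkeeping
   above reduced to its rule. Fragments may complete out of order, but the
   master reply must only acknowledge bytes up to the end of the first
   partially-written fragment, otherwise holes would be acknowledged.
   Assumes the fragments are visited in offset order: */
#if 0
struct frag_result { off_t offset; size_t requested; size_t written; };

static size_t ack_extent(const struct frag_result *frag, int n, off_t base)
{
        size_t ack = 0;
        int i;
        for (i = 0; i < n; i++) {
                ack = frag[i].offset - base + frag[i].written;
                if (frag[i].written != frag[i].requested) {
                        break; /* first hole: acknowledge no further */
                }
        }
        return ack;
}
#endif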
3125 /* a handler for async write replies */
3127 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3129 struct smbcli_request *c_req = async->c_req;
3130 struct ntvfs_request *req = async->req;
3131 struct proxy_file *f=async->f;
3132 union smb_write *io=async->parms;
3134 if (c_req)
3135 status = smb_raw_write_recv(c_req, async->parms);
3137 cache_handle_save(f, io->generic.in.data,
3138 io->generic.out.nwritten,
3139 io->generic.in.offset);
3141 return status;
3145 /* write to a file */
3147 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
3148 struct ntvfs_request *req, union smb_write *io)
3150 struct proxy_private *private = ntvfs->private_data;
3151 struct smbcli_request *c_req;
3152 struct proxy_file *f;
3154 SETUP_PID;
3156 if (io->generic.level != RAW_WRITE_GENERIC &&
3157 private->map_generic) {
3158 return ntvfs_map_write(ntvfs, req, io);
3160 SETUP_FILE_HERE(f);
3162 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
3163 #warning ERROR get rid of this
3164 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3165 NTSTATUS status;
3166 if (PROXY_REMOTE_SERVER(private)) {
3167 /* Do a proxy write */
3168 status=proxy_smb_raw_write(ntvfs, io, f);
3169 } else if (io->generic.in.count >
3170 private->tree->session->transport->negotiate.max_xmit) {
3172 /* smbcli_write can deal with large writes, which are bigger than
3173 tree->session->transport->negotiate.max_xmit */
3174 ssize_t size=smbcli_write(private->tree,
3175 io->generic.in.file.fnum,
3176 io->generic.in.wmode,
3177 io->generic.in.data,
3178 io->generic.in.offset,
3179 io->generic.in.count);
3181 if (size==io->generic.in.count || size > 0) {
3182 io->generic.out.nwritten=size;
3183 status=NT_STATUS_OK;
3184 } else {
3185 status=NT_STATUS_UNSUCCESSFUL;
3187 } else {
3188 status=smb_raw_write(private->tree, io);
3191 /* Save write in cache */
3192 if (NT_STATUS_IS_OK(status)) {
3193 cache_handle_save(f, io->generic.in.data,
3194 io->generic.out.nwritten,
3195 io->generic.in.offset);
3196 if (f->metadata->info_data.size <
3197 io->generic.in.offset+io->generic.in.count) {
3198 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3202 return status;
3205 /* smb_raw_write_send can't deal with large writes, which are bigger than
3206 tree->session->transport->negotiate.max_xmit so we have to break it up
3207 trying to preserve the async nature of the call as much as possible */
3208 if (PROXY_REMOTE_SERVER(private)) {
3209 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
3210 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3211 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3212 } else if (io->generic.in.count <=
3213 private->tree->session->transport->negotiate.max_xmit) {
3214 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
3215 c_req = smb_raw_write_send(private->tree, io);
3216 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3217 } else {
3218 ssize_t remaining = io->generic.in.count;
3219 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
3220 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
3221 int done = 0;
3222 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
3224 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
3225 __FUNCTION__, io->generic.in.count,
3226 private->tree->session->transport->negotiate.max_xmit));
3228 fragments->io = io;
3229 io->generic.out.nwritten=0;
3230 io->generic.out.remaining=0;
3232 do {
3233 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
3234 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
3235 ssize_t size = MIN(block, remaining);
3237 fragment->fragments = fragments;
3238 fragment->io_frag = io_frag;
3240 io_frag->generic.level = io->generic.level;
3241 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
3242 io_frag->generic.in.wmode = io->generic.in.wmode;
3243 io_frag->generic.in.count = size;
3244 io_frag->generic.in.offset = io->generic.in.offset + done;
3245 io_frag->generic.in.data = io->generic.in.data + done;
3247 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
3248 if (! c_req) {
3249 /* let pending requests clean-up when ready */
3250 fragments->status=NT_STATUS_UNSUCCESSFUL;
3251 talloc_steal(NULL, fragments);
3252 DEBUG(3,("Can't send request fragment\n"));
3253 return NT_STATUS_UNSUCCESSFUL;
3256 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
3257 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
3258 fragment->c_req=c_req;
3259 DLIST_ADD(fragments->fragments, fragment);
3261 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3262 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
3263 DEBUG(5,("Frag response chained\n"));
3265 remaining -= size;
3266 done += size;
3267 } while(remaining > 0);
3269 /* this strategy has the callback chain attached to each c_req, so we
3270 don't use the ASYNC_RECV_TAIL* to install a general one */
3271 }
3273 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
3274 }
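/* Editorial sketch, not from the original source: the do/while above in
   miniature. A write of count bytes that exceeds the negotiated max_xmit
   is carved into blocks, each sent as its own fragment at a sliding
   offset within the original buffer: */
#if 0
static void split_write(off_t offset, size_t count, size_t block)
{
        size_t done = 0;
        do {
                size_t size = (count - done) < block ? (count - done) : block;
                /* ... send one fragment covering [offset+done, offset+done+size) ... */
                done += size;
        } while (done < count);
}
#endif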
3277 /* a handler for async seek replies */
3279 static void async_seek(struct smbcli_request *c_req)
3281 struct async_info *async = c_req->async.private;
3282 struct ntvfs_request *req = async->req;
3283 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
3284 talloc_free(async);
3285 req->async_states->send_fn(req);
3289 /* seek in a file */
3291 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
3292 struct ntvfs_request *req,
3293 union smb_seek *io)
3295 struct proxy_private *private = ntvfs->private_data;
3296 struct smbcli_request *c_req;
3298 SETUP_PID_AND_FILE;
3300 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3301 return smb_raw_seek(private->tree, io);
3304 c_req = smb_raw_seek_send(private->tree, io);
3306 ASYNC_RECV_TAIL(io, async_seek);
3307 }
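/* Editorial note, not from the original source: proxy_seek is the plainest
   example of the dispatch idiom used by most calls in this backend. The
   smb_raw_xxx names below are placeholders, not real functions: */
#if 0
if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
        return smb_raw_xxx(private->tree, io);   /* caller wants the answer now */
}
c_req = smb_raw_xxx_send(private->tree, io);     /* fire the request ... */
ASYNC_RECV_TAIL(io, async_xxx);                  /* ... and park a reply handler */
#endif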
3310 /* flush a file */
3312 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
3313 struct ntvfs_request *req,
3314 union smb_flush *io)
3316 struct proxy_private *private = ntvfs->private_data;
3317 struct smbcli_request *c_req;
3319 SETUP_PID;
3320 switch (io->generic.level) {
3321 case RAW_FLUSH_FLUSH:
3322 SETUP_FILE;
3323 break;
3324 case RAW_FLUSH_ALL:
3325 io->generic.in.file.fnum = 0xFFFF;
3326 break;
3327 case RAW_FLUSH_SMB2:
3328 return NT_STATUS_INVALID_LEVEL;
3331 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3332 return smb_raw_flush(private->tree, io);
3335 c_req = smb_raw_flush_send(private->tree, io);
3337 SIMPLE_ASYNC_TAIL;
3341 /* close a file */
3343 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
3344 struct ntvfs_request *req, union smb_close *io)
3346 struct proxy_private *private = ntvfs->private_data;
3347 struct smbcli_request *c_req;
3348 struct proxy_file *f;
3349 union smb_close io2;
3350 bool can_clone;
3352 SETUP_PID;
3354 if (io->generic.level != RAW_CLOSE_GENERIC &&
3355 private->map_generic) {
3356 return ntvfs_map_close(ntvfs, req, io);
3358 SETUP_FILE_HERE(f);
3359 /* we free the backend data before we use this value, so save it */
3360 can_clone=f->can_clone;
3361 /* Note, we aren't freeing f, or its h, here. Should we?
3362 even if file-close fails, we'll remove it from the list,
3363 what else would we do? Maybe we should not remove until
3364 after the proxied call completes? */
3365 DLIST_REMOVE(private->files, f);
3367 /* Don't send the close on cloned handles unless we are the last one */
3368 if (f->metadata && --(f->metadata->count)) {
3369 DEBUG(5,("%s: Fake close of %d, %d left\n",__FUNCTION__,f->fnum, f->metadata->count));
3370 return NT_STATUS_OK;
3372 DEBUG(5,("%s: Real close of %d\n",__FUNCTION__, f->fnum));
3373 /* only close the cache if we aren't keeping references */
3374 //cache_close(f->cache);
3376 /* possibly samba can't do RAW_CLOSE_SEND yet */
3377 if (! (c_req = smb_raw_close_send(private->tree, io))) {
3378 if (io->generic.level == RAW_CLOSE_GENERIC) {
3379 ZERO_STRUCT(io2);
3380 io2.close.level = RAW_CLOSE_CLOSE;
3381 io2.close.in.file = io->generic.in.file;
3382 io2.close.in.write_time = io->generic.in.write_time;
3383 io = &io2;
3385 c_req = smb_raw_close_send(private->tree, io);
3386 /* destroy handle */
3387 ntvfs_handle_remove_backend_data(f->h, ntvfs);
3390 /* If it is read-only, don't bother waiting for the result */
3391 if (can_clone) {
3392 DEBUG(5,("%s: not waiting for close response fnum=%d\n",__FUNCTION__,f->fnum));
3393 return NT_STATUS_OK;
3396 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3397 return smbcli_request_simple_recv(c_req);
3399 DEBUG(0,("%s\n",__LOCATION__));
3400 SIMPLE_ASYNC_TAIL;
3404 /* exit - closing files open by the pid */
3406 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
3407 struct ntvfs_request *req)
3409 struct proxy_private *private = ntvfs->private_data;
3410 struct smbcli_request *c_req;
3412 SETUP_PID;
3414 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3415 return smb_raw_exit(private->tree->session);
3418 c_req = smb_raw_exit_send(private->tree->session);
3420 SIMPLE_ASYNC_TAIL;
3424 /* logoff - closing files open by the user */
3426 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
3427 struct ntvfs_request *req)
3429 /* we can't do this right in the proxy backend .... */
3430 return NT_STATUS_OK;
3434 /* setup for an async call - nothing to do yet */
3436 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
3437 struct ntvfs_request *req,
3438 void *private)
3440 return NT_STATUS_OK;
3444 /* cancel an async call */
3446 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
3447 struct ntvfs_request *req)
3449 struct proxy_private *private = ntvfs->private_data;
3450 struct async_info *a;
3452 /* find the matching request */
3453 for (a=private->pending;a;a=a->next) {
3454 if (a->req == req) {
3455 break;
3459 if (a == NULL) {
3460 return NT_STATUS_INVALID_PARAMETER;
3463 return smb_raw_ntcancel(a->c_req);
3467 /* lock a byte range */
3469 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
3470 struct ntvfs_request *req, union smb_lock *io)
3472 struct proxy_private *private = ntvfs->private_data;
3473 struct smbcli_request *c_req;
3475 SETUP_PID;
3477 if (io->generic.level != RAW_LOCK_GENERIC &&
3478 private->map_generic) {
3479 return ntvfs_map_lock(ntvfs, req, io);
3481 SETUP_FILE;
3483 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3484 return smb_raw_lock(private->tree, io);
3487 c_req = smb_raw_lock_send(private->tree, io);
3488 SIMPLE_ASYNC_TAIL;
3492 /* set info on an open file */
3494 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
3495 struct ntvfs_request *req,
3496 union smb_setfileinfo *io)
3498 struct proxy_private *private = ntvfs->private_data;
3499 struct smbcli_request *c_req;
3501 SETUP_PID_AND_FILE;
3503 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3504 return smb_raw_setfileinfo(private->tree, io);
3506 c_req = smb_raw_setfileinfo_send(private->tree, io);
3508 SIMPLE_ASYNC_TAIL;
3513 /* a handler for async fsinfo replies */
3515 static void async_fsinfo(struct smbcli_request *c_req)
3517 struct async_info *async = c_req->async.private;
3518 struct ntvfs_request *req = async->req;
3519 union smb_fsinfo *fs = async->parms;
3520 struct proxy_private *private = async->proxy;
3522 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, fs);
3524 if (NT_STATUS_IS_OK(req->async_states->status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3525 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3526 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3527 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3528 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3529 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3530 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3534 talloc_free(async);
3535 req->async_states->send_fn(req);
3539 /* return filesystem space info */
3541 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
3542 struct ntvfs_request *req, union smb_fsinfo *fs)
3544 struct proxy_private *private = ntvfs->private_data;
3545 struct smbcli_request *c_req;
3547 SETUP_PID;
3549 DEBUG(5,("%s: level %x\n",__LOCATION__,fs->generic.level));
3550 /* this value is easy to cache */
3551 if ((fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3552 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO) &&
3553 private->fs_attribute_info) {
3554 DEBUG(5,("%s: using cached fsinfo\n",__LOCATION__));
3555 fs->attribute_info.out.fs_attr=private->fs_attribute_info->fs_attr;
3556 fs->attribute_info.out.max_file_component_length=private->fs_attribute_info->max_file_component_length;
3557 fs->attribute_info.out.fs_type=talloc_smb_wire_string_dup(req, &(private->fs_attribute_info->fs_type));
3558 return NT_STATUS_OK;
3561 /* QFS Proxy */
3562 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
3563 fs->proxy_info.out.major_version=1;
3564 fs->proxy_info.out.minor_version=0;
3565 fs->proxy_info.out.capability=0;
3566 return NT_STATUS_OK;
3569 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3570 NTSTATUS status = smb_raw_fsinfo(private->tree, req, fs);
3571 if (NT_STATUS_IS_OK(status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3572 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3573 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3574 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3575 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3576 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3577 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3580 return status;
3582 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
3584 ASYNC_RECV_TAIL(fs, async_fsinfo);
3588 /* return print queue info */
3590 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
3591 struct ntvfs_request *req, union smb_lpq *lpq)
3593 return NT_STATUS_NOT_SUPPORTED;
3597 /* find_first / find_next caching.
3598 For now, cache based on directory,search_attributes,search_pattern,ea stuff
3599 Consider in response:
3600 * search id
3601 * search count
3602 * end of search
3603 * ea stuff */
3606 static union smb_search_data *smb_search_data_dup(void* mem_ctx, const union smb_search_data *file, enum smb_search_data_level data_level) {
3607 union smb_search_data *result;
3608 struct smb_wire_string *name;
3610 result=talloc_zero(mem_ctx, union smb_search_data);
3611 if (! result) {
3612 return result;
3615 *result = *file;
3617 switch(data_level) {
3618 case RAW_SEARCH_DATA_SEARCH:
3619 if (! (result->search.name=talloc_strdup(mem_ctx, file->search.name))) goto error;
3620 break;
3621 case RAW_SEARCH_DATA_STANDARD:
3622 if (sws_dup(result, result->standard.name, file->standard.name)) goto error;
3623 break;
3624 case RAW_SEARCH_DATA_EA_SIZE:
3625 if (sws_dup(result, result->ea_size.name, file->ea_size.name)) goto error;
3626 break;
3627 case RAW_SEARCH_DATA_EA_LIST:
3628 if (sws_dup(result, result->ea_list.name, file->ea_list.name)) goto error;
3629 break;
3630 case RAW_SEARCH_DATA_DIRECTORY_INFO:
3631 if (sws_dup(result, result->directory_info.name, file->directory_info.name)) goto error;
3632 break;
3633 case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
3634 if (sws_dup(result, result->full_directory_info.name, file->full_directory_info.name)) goto error;
3635 break;
3636 case RAW_SEARCH_DATA_NAME_INFO:
3637 if (sws_dup(result, result->name_info.name, file->name_info.name)) goto error;
3638 break;
3639 case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
3640 if (sws_dup(result, result->both_directory_info.name, file->both_directory_info.name)) goto error;
3641 if (sws_dup(result, result->both_directory_info.short_name, file->both_directory_info.short_name)) goto error;
3642 break;
3643 case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
3644 if (sws_dup(result, result->id_full_directory_info.name, file->id_full_directory_info.name)) goto error;
3645 break;
3646 case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
3647 if (sws_dup(result, result->id_both_directory_info.name, file->id_both_directory_info.name)) goto error;
3648 if (sws_dup(result, result->id_both_directory_info.short_name, file->id_both_directory_info.short_name)) goto error;
3649 break;
3650 case RAW_SEARCH_DATA_UNIX_INFO:
3651 if (! (result->unix_info.name=talloc_strdup(mem_ctx, file->unix_info.name))) goto error;
3652 break;
3653 case RAW_SEARCH_DATA_UNIX_INFO2:
3654 if (sws_dup(result, result->unix_info2.name, file->unix_info2.name)) goto error;
3655 break;
3656 default:
3657 DEBUG(5,("%s: Error can't dup an unknown file data type: %x\n", __LOCATION__, data_level));
3658 goto error;
3660 return result;
3661 error:
3662 talloc_free(result);
3663 return NULL;
3664 }
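/* Editorial note, not from the original source: sws_dup is defined earlier
   in this file; the sketch below is only a guess at its shape - a talloc
   duplicate of an smb_wire_string that evaluates non-zero on allocation
   failure, matching how the callers above goto error on a true result: */
#if 0
#define sws_dup(mem_ctx, dest, src) ( \
        (dest) = talloc_smb_wire_string_dup((mem_ctx), &(src)), \
        ((dest).s == NULL && (src).s != NULL))
#endif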
3666 /* callback function for search first/next */
3667 static bool find_callback(void *private, const union smb_search_data *file)
3669 struct search_state *state = (struct search_state *)private;
3670 struct search_handle *search_handle = state->search_handle;
3671 bool status;
3673 /* if we have a cache, copy this data */
3674 if (search_handle->cache) {
3675 struct search_cache_item *item = talloc_zero(search_handle->cache, struct search_cache_item);
3676 DEBUG(5,("%s: Copy %p to cache %p\n", __LOCATION__, item, search_handle->cache));
3677 if (item) {
3678 item->data_level=search_handle->data_level;
3679 item->file = smb_search_data_dup(item, file, item->data_level);
3680 if (! item->file) {
3681 talloc_free(item);
3682 item=NULL;
3685 if (item) {
3686 /* optimization to save enumerating the entire list each time, to find the end.
3687 the cached last_item is very short lived, it doesn't matter if something has
3688 been added since, as long as it hasn't been removed */
3689 if (state->last_item) {
3690 DLIST_ADD_END(state->last_item, item, struct search_cache_item*);
3691 } else {
3692 DLIST_ADD_END(search_handle->cache->items, item, struct search_cache_item*);
3694 state->last_item=item;
3695 state->all_count++;
3696 } else {
3697 DEBUG(5,("%s: Could not add name to search cache %p, invalidating cache\n", __LOCATION__, search_handle->cache));
3698 /* dear me, the whole cache will be invalid if we miss data */
3699 search_handle->cache->status=SEARCH_CACHE_DEAD;
3700 /* remove from the list of caches to use */
3701 DLIST_REMOVE(search_handle->cache->proxy->search_caches, search_handle->cache);
3702 /* Make it feel unwanted */
3703 talloc_unlink(private, search_handle->cache);
3704 talloc_unlink(search_handle, search_handle->cache);
3705 //if (talloc_unlink(search_handle, search_handle->cache)==0) {
3706 //talloc_free(search_handle->cache);
3708 /* stop us using it for this search too */
3709 search_handle->cache=NULL;
3713 status=state->callback(state->private, file);
3714 if (status) {
3715 state->count++;
3717 return status;
3721 /* list files in a directory matching a wildcard pattern */
3723 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
3724 struct ntvfs_request *req, union smb_search_first *io,
3725 void *search_private,
3726 bool (*callback)(void *, const union smb_search_data *))
3728 struct proxy_private *private = ntvfs->private_data;
3729 struct search_state *state;
3730 struct search_cache *search_cache=NULL;
3731 struct search_cache_key search_cache_key={0};
3732 struct ntvfs_handle *h=NULL;
3733 struct search_handle *s;
3734 uint16_t max_count;
3735 NTSTATUS status;
3737 SETUP_PID;
3739 if (! private->enabled_proxy_search) {
3740 return smb_raw_search_first(private->tree, req, io, search_private, callback);
3742 switch (io->generic.level) {
3743 /* case RAW_SEARCH_DATA_SEARCH:
3744 search_cache_key.search_attrib=io->search_first.in.search_attrib;
3745 search_cache_key.pattern=io->search_first.in.pattern;
3746 max_count = io->search_first.in.max_count;
3747 search_cache = find_search_cache(private->search_cache, &search_cache_key);
3748 break;*/
3749 case RAW_SEARCH_TRANS2:
3750 io->t2ffirst.in.max_count=MIN(io->t2ffirst.in.max_count,80);
3751 max_count = io->t2ffirst.in.max_count;
3753 search_cache_key.level=io->generic.level;
3754 search_cache_key.data_level=io->generic.data_level;
3755 search_cache_key.search_attrib=io->t2ffirst.in.search_attrib;
3756 search_cache_key.pattern=io->t2ffirst.in.pattern;
3757 search_cache_key.flags=io->t2ffirst.in.flags;
3758 search_cache_key.storage_type=io->t2ffirst.in.storage_type;
3759 /* try and find a search cache that is complete */
3760 search_cache = find_search_cache(private->search_caches, &search_cache_key);
3762 /* do handle mapping for TRANS2 */
3763 status = ntvfs_handle_new(ntvfs, req, &h);
3764 NT_STATUS_NOT_OK_RETURN(status);
3766 DEBUG(5,("%s: RAW_SEARCH_TRANS2 %s limit %d, cache=%p level=%x\n",__LOCATION__, search_cache_key.pattern, max_count, search_cache, search_cache_key.data_level));
3767 break;
3768 default: /* won't cache or proxy this */
3769 return smb_raw_search_first(private->tree, req, io, search_private, callback);
3772 /* finish setting up mapped handle */
3773 if (h) {
3774 s = talloc_zero(h, struct search_handle);
3775 NT_STATUS_HAVE_NO_MEMORY(s);
3776 s->proxy=private;
3777 talloc_set_destructor(s, search_handle_destructor);
3778 s->h=h;
3779 s->level=io->generic.level;
3780 s->data_level=io->generic.data_level;
3781 status = ntvfs_handle_set_backend_data(s->h, private->ntvfs, s);
3782 NT_STATUS_NOT_OK_RETURN(status);
3783 DLIST_ADD(private->search_handles, s);
3784 DEBUG(5,("%s: map handle create %d\n",__LOCATION__, smbsrv_fnum(h)));
3787 /* satisfy from cache */
3788 if (search_cache) {
3789 struct search_cache_item* item=search_cache->items;
3790 uint16_t count=0;
3792 /* stop cache going away while we are using it */
3793 s->cache = talloc_reference(s, search_cache);
3794 DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
3795 /* Don't offer over the limit, but only count those that were accepted */
3796 DLIST_FIND(search_cache->items, item, !(count < max_count && callback(search_private, item->file) && ++count) );
3797 io->t2ffirst.out.count=count;
3798 s->resume_item=item;
3799 /* just because callback didn't accept any doesn't mean we are finished */
3800 if (item == NULL) {
3801 /* currently only caching for t2ffirst */
3802 io->t2ffirst.out.end_of_search = true;
3803 DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
3804 } else {
3805 /* count the rest */
3806 io->t2ffirst.out.end_of_search = false;
3807 DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
3808 DLIST_FOR_EACH(item, item, count++);
3809 DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
3812 if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3813 io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE)
3815 /* destroy handle */
3816 ntvfs_handle_remove_backend_data(h, ntvfs);
3817 io->t2ffirst.out.handle=0;
3818 } else {
3819 /* now map handle */
3820 io->t2ffirst.out.handle=smbsrv_fnum(h);
3822 return NT_STATUS_OK;
3825 state = talloc_zero(req, struct search_state);
3826 NT_STATUS_HAVE_NO_MEMORY(state);
3828 /* if there isn't a matching cache already being generated by another search,
3829 start one, unless FLAG_TRANS2_FIND_BACKUP_INTENT which is always live */
3830 if (!(io->t2ffirst.in.flags & FLAG_TRANS2_FIND_BACKUP_INTENT) &&
3831 find_partial_search_cache(private->search_caches, &search_cache_key) == NULL) {
3832 /* need to opendir the folder being searched so we can get a notification */
3833 struct search_cache *search_cache=NULL;
3835 search_cache=new_search_cache(private, &search_cache_key);
3836 /* Stop cache going away while we are using it */
3837 if (search_cache) {
3838 s->cache=talloc_reference(s, search_cache);
3842 /* stop the handle going away while we are using it */
3843 state->search_handle=talloc_reference(state, s);
3844 state->private=search_private;
3845 state->callback=callback;
3847 status=smb_raw_search_first(private->tree, req, io, state, find_callback);
3848 // if (! NT_STATUS_IS_OK(status)) {
3849 // return (status);
3850 // }
3851 if (! NT_STATUS_IS_OK(status)) {
3852 if (s->cache) {
3853 DLIST_REMOVE(private->search_caches, s->cache);
3854 talloc_unlink(private, s->cache);
3855 talloc_unlink(s, s->cache);
3856 //if (talloc_unlink(s, s->cache)==0) {
3857 //talloc_free(s->cache);
3859 s->cache=NULL;
3861 s->h=NULL;
3862 ntvfs_handle_remove_backend_data(h, ntvfs);
3863 return (status);
3865 // DEBUG(1,("%s: %p; %s\n",__LOCATION__,io,get_friendly_nt_error_msg (status)));
3866 DEBUG(5,("%s: max %d, got %d, copied %d; %s\n",__LOCATION__,io->t2ffirst.out.count,state->all_count, state->count,get_friendly_nt_error_msg (status)));
3868 #warning check NT_STATUS_IS_OK ?
3869 if (io->t2ffirst.out.end_of_search) {
3870 /* cache might have gone away if there was a problem filling it */
3871 if (s->cache) {
3872 DEBUG(5,("B\n"));
3873 s->cache->status = SEARCH_CACHE_COMPLETE;
3874 DEBUG(5,("%s: Cache %p filled in first go!\n",__LOCATION__, s->cache));
3877 if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3878 io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) {
3879 DEBUG(5,("%s: Closing search\n",__LOCATION__));
3880 /* destroy partial cache */
3881 if (s->cache && (io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
3882 ! io->t2ffirst.out.end_of_search) {
3883 DEBUG(5,("%s: Destroying cache %p\n",__LOCATION__, s->cache));
3884 /* cache is no good now! */
3885 DLIST_REMOVE(private->search_caches, s->cache);
3886 talloc_unlink(private, s->cache);
3887 talloc_unlink(s, s->cache);
3888 //if (talloc_unlink(s, s->cache)==0) {
3889 //talloc_free(s->cache);
3891 s->cache=NULL;
3893 if (s->cache) {
3894 s->cache->status=SEARCH_CACHE_COMPLETE;
3896 /* Need to deal with the case when the client would not take them all but we still cache them
3897 if (state->count < io->t2ffirst.out.count && io->t2ffirst.out.end_of_search) {
3898 io->t2ffirst.out.end_of_search = false;
3899 //s->resume_item = state->last_item; */
3901 /* destroy handle */
3902 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
3903 ntvfs_handle_remove_backend_data(h, ntvfs);
3904 io->t2ffirst.out.handle=0;
3905 } else {
3906 s->handle = io->t2ffirst.out.handle;
3907 io->t2ffirst.out.handle=smbsrv_fnum(h);
3909 io->t2ffirst.out.count=state->count;
3910 return status;
3913 #define DLIST_FIND_NEXT(start, item, test) do {\
3914 DLIST_FIND(start, item, test); \
3915 if (item) (item)=(item)->next; \
3916 } while(0)
3917 #define DLIST_TALLOC_FREE(list) do {\
3918 while(list) { \
3919 void *tmp=(list); \
3920 (list)=(list)->next; \
3921 talloc_free(tmp); \
3923 } while(0)
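/* Editorial example, not from the original source: how DLIST_FIND_NEXT is
   used below to resume a cached search one item past a resume key
   (search_cache and io come from the surrounding function): */
#if 0
struct search_cache_item *start_at = NULL;
DLIST_FIND_NEXT(search_cache->items, start_at,
                io->t2fnext.in.resume_key == start_at->file->standard.resume_key);
/* start_at is now the item after the match, or NULL if nothing matched */
#endif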
3925 /* continue a search */
3926 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
3927 struct ntvfs_request *req, union smb_search_next *io,
3928 void *search_private,
3929 bool (*callback)(void *, const union smb_search_data *))
3931 struct proxy_private *private = ntvfs->private_data;
3932 struct search_state *state;
3933 struct ntvfs_handle *h=NULL;
3934 struct search_handle *s;
3935 const struct search_cache *search_cache=NULL;
3936 struct search_cache_item *start_at=NULL;
3937 uint16_t max_count;
3938 NTSTATUS status;
3940 SETUP_PID;
3942 if (! private->enabled_proxy_search) {
3943 return smb_raw_search_next(private->tree, req, io, search_private, callback);
3945 switch (io->generic.level) {
3946 case RAW_SEARCH_TRANS2:
3947 io->t2fnext.in.max_count=MIN(io->t2fnext.in.max_count,80);
3948 max_count = io->t2fnext.in.max_count;
3950 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->t2fnext.in.handle), struct ntvfs_handle);
3951 if (! h) return NT_STATUS_INVALID_HANDLE;
3952 /* convert handle into search_cache */
3953 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
3954 if (! s) return NT_STATUS_INVALID_HANDLE;
3955 search_cache=s->cache;
3956 DEBUG(5,("%s: RAW_SEARCH_TRANS2 find_next h=%d [real %d] count %d, cache=%p\n",__LOCATION__, io->t2fnext.in.handle, s->handle, max_count, search_cache));
3957 io->t2fnext.in.handle=s->handle;
3958 if (! search_cache) {
3959 break;
3962 /* warning if: uint16_t flags or const char *last_name have changed, abort cache */
3963 /* skip up to resume key */
3964 /* TODO: resume key may be PRIOR to where we left off... in which case
3965 we need to avoid duplicating values */
3966 if (search_cache /*&& search_cache->status == SEARCH_CACHE_COMPLETE*/) {
3967 DEBUG(5,("%s: seek resume position\n",__LOCATION__));
3968 /* work out where in the cache to continue from */
3969 switch (io->generic.data_level) {
3970 case RAW_SEARCH_DATA_STANDARD:
3971 case RAW_SEARCH_DATA_EA_SIZE:
3972 case RAW_SEARCH_DATA_EA_LIST:
3973 /* have a resume key? */
3974 DEBUG(5,("%s: type %x seek on %x\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.resume_key));
3975 DLIST_FIND_NEXT(search_cache->items, start_at, io->t2fnext.in.resume_key == start_at->file->standard.resume_key);
3976 break;
3977 case RAW_SEARCH_DATA_DIRECTORY_INFO: /* TODO: maybe these should be strcasecmp for some filesystems */
3978 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3979 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->directory_info.name.s)==0);
3980 break;
3981 case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
3982 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3983 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->full_directory_info.name.s)==0);
3984 break;
3985 case RAW_SEARCH_DATA_NAME_INFO:
3986 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3987 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->name_info.name.s)==0);
3988 break;
3989 case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
3990 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3991 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->both_directory_info.name.s)==0);
3992 break;
3993 case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
3994 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3995 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_full_directory_info.name.s)==0);
3996 break;
3997 case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
3998 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3999 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_both_directory_info.name.s)==0);
4000 break;
4001 case RAW_SEARCH_DATA_UNIX_INFO:
4002 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4003 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info.name)==0);
4004 break;
4005 case RAW_SEARCH_DATA_UNIX_INFO2:
4006 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4007 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info2.name.s)==0);
4008 break;
4009 default:
4010 if (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CONTINUE) {
4011 start_at = s->resume_item;
4012 } else {
4013 DEBUG(5,("%s: HELP! How can we resume?\n",__LOCATION__));
4014 start_at = s->resume_item;
4017 DEBUG(5,("%s: Start at %p\n",__LOCATION__,start_at));
4019 break;
4022 if (! search_cache) {
4023 DEBUG(5,("%s: No cache, pass-through\n",__LOCATION__));
4024 return smb_raw_search_next(private->tree, req, io, search_private, callback);
4026 //#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference((ctx),(ptr))
4027 //surely should be
4028 //#define talloc_reference(ctx, ptr) _talloc_reference((ctx),(ptr))?(ptr):(NULL) to preserve the type of ptr
4030 /* satisfy from cache */
4031 if (search_cache->status == SEARCH_CACHE_COMPLETE) {
4032 struct search_cache_item* item;
4033 uint16_t count=0;
4034 DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
4036 if (! start_at) {
4037 start_at = search_cache->items;
4040 DLIST_FIND(start_at, item, !(count < max_count && callback(search_private, item->file) && ++count) );
4041 io->t2fnext.out.count=count;
4042 s->resume_item=item;
4043 if (item == NULL) {
4044 DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
4045 io->t2fnext.out.end_of_search = true;
4046 } else {
4047 DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
4048 io->t2fnext.out.end_of_search = false;
4049 /* count the rest */
4050 DLIST_FOR_EACH(item, item, count++);
4051 DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
4053 /* is it the end? */
4054 if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
4055 io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
4057 /* destroy handle */
4058 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
4059 ntvfs_handle_remove_backend_data(h, ntvfs);
4062 return NT_STATUS_OK;
4065 /* pass-through and fill-cache */
4066 if (start_at) {
4067 /* risk of duplicate data */
4068 DEBUG(5,("\n\n\nCache-populating search has resumed but NOT where we left off!\n\n\n-d"));
4069 /* free everything from start_at onwards through start_at-> next*/
4070 /* cut from the list */
4071 start_at->prev->next=NULL;
4072 start_at->prev=NULL;
4073 /* now how to free a list? */
4074 DLIST_TALLOC_FREE(start_at);
4076 state = talloc_zero(req, struct search_state);
4077 NT_STATUS_HAVE_NO_MEMORY(state);
4079 state->search_handle=talloc_reference(state, s);
4080 state->private=search_private;
4081 state->callback=callback;
4083 status = smb_raw_search_next(private->tree, req, io, state, find_callback);
4084 if (! NT_STATUS_IS_OK(status)) {
4085 if (s->cache) {
4086 DLIST_REMOVE(private->search_caches, s->cache);
4087 talloc_unlink(private, s->cache);
4088 talloc_unlink(s, s->cache);
4089 //if (talloc_unlink(s, s->cache)==0) {
4090 //talloc_free(s->cache);
4092 s->cache=NULL;
4094 s->h=NULL;
4095 ntvfs_handle_remove_backend_data(h, ntvfs);
4096 return (status);
4099 DEBUG(5,("%s: max %d, got %d, copied %d; %s\n",__LOCATION__,io->t2fnext.out.count,state->all_count, state->count,get_friendly_nt_error_msg (status)));
4101 /* if closing, then close */
4102 if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
4103 io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
4105 if (s->cache && (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
4106 ! io->t2fnext.out.end_of_search) {
4107 /* partial cache is useless */
4108 DLIST_REMOVE(private->search_caches, s->cache);
4109 talloc_unlink(private, s->cache);
4110 talloc_unlink(s, s->cache);
4111 //if (talloc_unlink(s, s->cache)==0) {
4112 //talloc_free(s->cache);
4114 s->cache=NULL;
4116 if (s->cache) {
4117 s->cache->status=SEARCH_CACHE_COMPLETE;
4118 /* Need to deal with the case when the client would not take them all but we still cache them
4119 if (state->count < io->t2fnext.out.count && io->t2fnext.out.end_of_search) {
4120 io->t2fnext.out.end_of_search = false; } */
4123 /* destroy handle */
4124 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
4125 ntvfs_handle_remove_backend_data(h, ntvfs);
4127 io->t2fnext.out.count=state->count;
4129 return status;
4132 /* close a search */
4133 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
4134 struct ntvfs_request *req, union smb_search_close *io)
4136 struct proxy_private *private = ntvfs->private_data;
4137 struct ntvfs_handle *h=NULL;
4138 struct search_handle *s;
4139 NTSTATUS status;
4141 SETUP_PID;
4143 if (! private->enabled_proxy_search) {
4144 return smb_raw_search_close(private->tree, io);
4146 switch (io->generic.level) {
4147 case RAW_SEARCH_TRANS2:
4148 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->findclose.in.handle), struct ntvfs_handle);
4149 if (! h) return NT_STATUS_INVALID_HANDLE;
4150 /* convert handle into search_cache */
4151 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
4152 if (! s) return NT_STATUS_INVALID_HANDLE;
4153 io->findclose.in.handle=s->handle;
4154 default:
4155 return smb_raw_search_close(private->tree, io);
4158 if (! s->cache) {
4159 status = smb_raw_search_close(private->tree, io);
4160 } else {
4161 if (s->cache->status != SEARCH_CACHE_COMPLETE) {
4162 /* cache is useless */
4163 DLIST_REMOVE(private->search_caches, s->cache);
4164 talloc_unlink(private, s->cache);
4165 talloc_unlink(s, s->cache);
4166 //if (talloc_unlink(s, s->cache)==0) {
4167 //talloc_free(s->cache);
4170 status = NT_STATUS_OK;
4173 s->h=NULL;
4174 ntvfs_handle_remove_backend_data(h, ntvfs);
4175 /* s MAY also be gone at this point, if h was freed, unless there were
4176 pending responses, in which case they see s->h is NULL as a sign to stop */
4177 return status;
4181 /* a handler for async trans2 replies */
4183 static void async_trans2(struct smbcli_request *c_req)
4185 struct async_info *async = c_req->async.private;
4186 struct ntvfs_request *req = async->req;
4187 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
4188 talloc_free(async);
4189 req->async_states->send_fn(req);
4192 /* raw trans2 */
4193 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
4194 struct ntvfs_request *req,
4195 struct smb_trans2 *trans2)
4197 struct proxy_private *private = ntvfs->private_data;
4198 struct smbcli_request *c_req;
4200 if (private->map_trans2) {
4201 return NT_STATUS_NOT_IMPLEMENTED;
4204 SETUP_PID;
4205 #warning we should be mapping file handles here
4207 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4208 return smb_raw_trans2(private->tree, req, trans2);
4211 c_req = smb_raw_trans2_send(private->tree, trans2);
4213 ASYNC_RECV_TAIL(trans2, async_trans2);
4217 /* SMBtrans - not used on file shares */
4218 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
4219 struct ntvfs_request *req,
4220 struct smb_trans2 *trans2)
4222 return NT_STATUS_ACCESS_DENIED;
4226 /* a handler for async change notify replies */
4228 static void async_changenotify(struct smbcli_request *c_req)
4230 struct async_info *async = c_req->async.private;
4231 struct ntvfs_request *req = async->req;
4232 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
4233 talloc_free(async);
4234 req->async_states->send_fn(req);
4237 /* change notify request - always async */
4238 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
4239 struct ntvfs_request *req,
4240 union smb_notify *io)
4242 struct proxy_private *private = ntvfs->private_data;
4243 struct smbcli_request *c_req;
4244 int saved_timeout = private->transport->options.request_timeout;
4245 struct proxy_file *f;
4247 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
4248 return NT_STATUS_NOT_IMPLEMENTED;
4251 SETUP_PID;
4253 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
4254 if (!f) return NT_STATUS_INVALID_HANDLE;
4255 io->nttrans.in.file.fnum = f->fnum;
4257 /* this request doesn't make sense unless it's async */
4258 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4259 return NT_STATUS_INVALID_PARAMETER;
4262 /* we must not timeout on notify requests - they wait
4263 forever */
4264 private->transport->options.request_timeout = 0;
4266 c_req = smb_raw_changenotify_send(private->tree, io);
4268 private->transport->options.request_timeout = saved_timeout;
4270 ASYNC_RECV_TAIL(io, async_changenotify);
4274 /* A handler for converting from rpc struct replies to ntioctl */
4276 static NTSTATUS proxy_rpclite_map_async_send(
4277 struct ntvfs_module_context *ntvfs,
4278 struct ntvfs_request *req,
4279 void *io1, void *io2, NTSTATUS status)
4281 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
4282 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
4283 void* r=rpclite_send->struct_ptr;
4284 struct ndr_push* push;
4285 const struct ndr_interface_call* call=rpclite_send->call;
4286 enum ndr_err_code ndr_err;
4287 DATA_BLOB ndr;
4289 talloc_free(rpclite_send);
4291 DEBUG(5,("%s: converting r=%p back to ntiocl\n",__FUNCTION__, r));
4292 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4293 NT_STATUS_HAVE_NO_MEMORY(push);
4295 if (0) {
4296 push->flags |= LIBNDR_FLAG_BIGENDIAN;
4299 ndr_err = call->ndr_push(push, NDR_OUT, r);
4300 status=ndr_map_error2ntstatus(ndr_err);
4302 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4303 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
4304 nt_errstr(status)));
4305 return status;
4308 ndr=ndr_push_blob(push);
4309 //if (ndr.length > io->ntioctl.in.max_data) {
4310 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
4311 io->ntioctl.in.max_data, ndr.data));
4312 io->ntioctl.out.blob=ndr;
4313 return status;
4317 /* A handler for sending async rpclite Read replies that were mapped to union smb_read */
4319 static NTSTATUS rpclite_proxy_Read_map_async_send(
4320 struct ntvfs_module_context *ntvfs,
4321 struct ntvfs_request *req,
4322 void *io1, void *io2, NTSTATUS status)
4324 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
4325 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
4327 /* status here is a result of proxy_read, it doesn't reflect the status
4328 of the rpc transport or related calls, just the read operation */
4329 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4330 r->out.result=status;
4332 if (! NT_STATUS_IS_OK(status)) {
4333 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
4334 r->out.nread=0;
4335 r->out.flags=0;
4336 } else {
4337 ssize_t size=io->readx.out.nread;
4338 r->out.flags=0;
4339 r->out.nread=io->readx.out.nread;
4341 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
4342 declare_checksum(digest);
4343 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
4345 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
4346 dump_data (5, digest, sizeof(digest));
4347 DEBUG(5,("Cached digest\n"));
4348 dump_data (5, r->in.digest.digest, sizeof(digest));
4350 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
4351 r->out.flags=PROXY_USE_CACHE;
4352 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
4353 (long long)r->out.nread));
4354 if (r->in.flags & PROXY_VALIDATE) {
4355 r->out.flags |= PROXY_VALIDATE;
4356 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
4357 (long long)r->out.nread, (long long) io->readx.out.nread));
4359 goto done;
4361 DEBUG(5,("Cache does not match\n"));
4364 if (r->in.flags & PROXY_VALIDATE) {
4365 /* validate failed, shrink read to mincnt - so we don't fill the link */
4366 r->out.nread=MIN(r->out.nread, r->in.mincnt);
4367 size=r->out.nread;
4368 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
4369 r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
4372 if (r->in.flags & PROXY_USE_ZLIB) {
4373 if (compress_block(io->readx.out.data, &size) ) {
4374 r->out.flags|=PROXY_USE_ZLIB;
4375 r->out.response.compress.count=size;
4376 r->out.response.compress.data=io->readx.out.data;
4377 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
4378 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
4379 goto done;
4383 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
4384 r->out.response.generic.count=io->readx.out.nread;
4385 r->out.response.generic.data=io->readx.out.data;
4388 done:
4390 /* Or should we return NT_STATUS_OK ?*/
4391 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
4393 /* the rpc transport succeeded even if the operation did not */
4394 return NT_STATUS_OK;
4395 }
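/* Editorial sketch, not from the original source: the reply selection above
   in order of preference; the names here are hypothetical: */
#if 0
enum read_reply { REPLY_CACHED, REPLY_ZLIB, REPLY_RAW };

static enum read_reply pick_reply(int digests_match, int zlib_requested,
                                  int zlib_shrank)
{
        if (digests_match) return REPLY_CACHED;               /* PROXY_USE_CACHE */
        if (zlib_requested && zlib_shrank) return REPLY_ZLIB; /* PROXY_USE_ZLIB */
        return REPLY_RAW;                                     /* plain data */
}
#endif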
4398 /* RPC implementation of Read */
4400 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
4401 struct ntvfs_request *req, struct proxy_Read *r)
4403 struct proxy_private *private = ntvfs->private_data;
4404 union smb_read* io=talloc(req, union smb_read);
4405 NTSTATUS status;
4406 struct proxy_file *f;
4407 struct ntvfs_handle *h;
4409 NT_STATUS_HAVE_NO_MEMORY(io);
4411 /* if the next hop is a proxy, just repeat this call; also handle the VALIDATE
4412 check - that means having our own callback handlers too... */
4413 SETUP_PID;
4415 RPCLITE_SETUP_FILE_HERE(f, h);
4417 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
4418 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
4419 DEBUG(5,("Anticipated digest\n"));
4420 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
4422 /* If the remote end is a proxy, just fix up the file handle and pass through,
4423 but update cache on the way back
4424 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) { } */
4427 /* prepare for response */
4428 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
4429 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
4431 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
4432 return proxy_validate(ntvfs, req, r, f);
4435 /* pack up an smb_read request and dispatch here */
4436 io->readx.level=RAW_READ_READX;
4437 io->readx.in.file.ntvfs=h;
4438 io->readx.in.mincnt=r->in.mincnt;
4439 io->readx.in.maxcnt=r->in.maxcnt;
4440 io->readx.in.offset=r->in.offset;
4441 io->readx.in.remaining=r->in.remaining;
4442 /* and something to hold the answer */
4443 io->readx.out.data=r->out.response.generic.data;
4445 /* so we get to pack the io->*.out response */
4446 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
4447 NT_STATUS_NOT_OK_RETURN(status);
4449 /* so the read will get processed normally */
4450 return proxy_read(ntvfs, req, io);
4451 }
4453 /**
4454  * A handler for sending async rpclite Write replies
4455  */
4456 static NTSTATUS rpclite_proxy_Write_map_async_send(
4457 struct ntvfs_module_context *ntvfs,
4458 struct ntvfs_request *req,
4459 void *io1, void *io2, NTSTATUS status)
4460 {
4461 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
4462 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
4464 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4465 r->out.result=status;
4467 r->out.nwritten=io->writex.out.nwritten;
4468 r->out.remaining=io->writex.out.remaining;
4470 /* the rpc transport succeeded even if the operation did not */
4471 return NT_STATUS_OK;
4472 }
4474 /**
4475  * RPC implementation of Write
4476  */
4477 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
4478 struct ntvfs_request *req, struct proxy_Write *r)
4479 {
4480 struct proxy_private *private = ntvfs->private_data;
4481 union smb_write* io=talloc(req, union smb_write);
4482 NTSTATUS status;
4483 struct proxy_file* f;
4484 struct ntvfs_handle *h;
4486 SETUP_PID;
4488 RPCLITE_SETUP_FILE_HERE(f,h);
4490 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
4491 r->in.count, r->in.offset, r->in.fnum));
4493 /* pack up an smb_write request and dispatch here */
4494 io->writex.level=RAW_WRITE_WRITEX;
4495 io->writex.in.file.ntvfs=h;
4496 io->writex.in.offset=r->in.offset;
4497 io->writex.in.wmode=r->in.mode;
4498 io->writex.in.count=r->in.count;
4500 /* and the data */
4501 if (PROXY_USE_ZLIB & r->in.flags) {
4502 ssize_t count=r->in.data.generic.count;
4503 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
4504 &count, r->in.count);
4505 if (count != r->in.count || !io->writex.in.data) {
4506 /* Didn't uncompress properly, but the RPC layer worked */
4507 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
4508 return NT_STATUS_OK;
4509 }
4510 } else {
4511 io->writex.in.data=r->in.data.generic.data;
4512 }
4514 /* so we get to pack the io->*.out response */
4515 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
4516 NT_STATUS_NOT_OK_RETURN(status);
4518 /* so the read will get processed normally */
4519 return proxy_write(ntvfs, req, io);
4520 }
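/*
  Note the asymmetry with proxy_Read: a PROXY_USE_ZLIB write is inflated
  here on the server side and repackaged as a plain RAW_WRITE_WRITEX, and a
  failed inflate is reported in r->out.result while the rpc transport itself
  still returns NT_STATUS_OK.
*/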
4522 /**
4523  * RPC amalgamation of getinfo requests
4524  */
4525 struct proxy_getinfo_fragments;
4526 struct proxy_getinfo_fragmentses;
4528 /* holds one smbcli_request to satisfy part of one proxy_GetInfo request */
4529 struct proxy_getinfo_fragment {
4530 struct proxy_getinfo_fragment *prev, *next;
4531 struct proxy_getinfo_fragments *fragments;
4532 union smb_fileinfo *smb_fileinfo;
4533 struct smbcli_request *c_req;
4534 NTSTATUS status;
4535 };
4537 /* holds the fragment smbcli_requests that together satisfy one file (one info_tag) of a proxy_GetInfo request */
4538 struct proxy_getinfo_fragments {
4539 struct proxy_getinfo_fragments *prev, *next;
4540 struct proxy_getinfo_fragmentses *fragmentses;
4541 struct proxy_getinfo_fragment *fragments;
4542 uint32_t index;
4543 };
4545 struct proxy_getinfo_fragmentses {
4546 struct proxy_getinfo_fragments *fragments;
4547 struct proxy_GetInfo *r;
4548 struct ntvfs_request *req;
4549 bool async;
4550 };
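/*
  Ownership sketch of the getinfo amalgamation (one-to-many, twice over):

      proxy_GetInfo r
        fragmentses              - one per proxy_GetInfo call
          fragments (index=c)    - one per info_tag (file) in the call
            fragment             - one per RAW_FILEINFO_* level issued
              c_req              - the smbcli_request satisfying that level

  As each c_req completes, async_proxy_getinfo() fills in
  r->out.info_data[c] and unlinks its fragment; when the last fragment of
  the last fragments is gone and we are async, the ntvfs send_fn fires.
*/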
4552 /*
4553   a handler for async getinfo replies
4554 */
4555 NTSTATUS async_proxy_getinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
4556 {
4557 struct smbcli_request *c_req = async->c_req;
4558 struct ntvfs_request *req = async->req;
4559 struct proxy_file *f=async->f;
4560 struct proxy_getinfo_fragment *fragment=talloc_get_type_abort(io2, struct proxy_getinfo_fragment);
4561 struct proxy_getinfo_fragments* fragments=fragment->fragments;
4562 struct proxy_getinfo_fragmentses* fragmentses=fragments->fragmentses;
4563 struct proxy_GetInfo *r=talloc_get_type_abort(fragmentses->r, struct proxy_GetInfo);
4564 int c=fragments->index;
4565 struct info_data* d=&(r->out.info_data[c]);
4566 union smb_fileinfo *io=talloc_get_type_abort(io1, union smb_fileinfo);
4568 SMB_ASSERT(c_req == NULL || c_req == fragment->c_req);
4570 if (c_req) {
4571 switch (r->in.info_tags[0].tag_type) {
4572 case TAG_TYPE_FILE_INFO:
4573 status=smb_raw_fileinfo_recv(c_req, r, io);
4574 break;
4575 case TAG_TYPE_PATH_INFO:
4576 status=smb_raw_pathinfo_recv(c_req, r, io);
4577 break;
4578 default:
4579 status=NT_STATUS_INVALID_PARAMETER;
4580 }
4581 c_req=NULL;
4582 }
4584 /* stop the callback occurring more than once sync'ly */
4585 fragment->c_req=NULL;
4587 DEBUG(5,("%s: async callback level %x %s\n",__FUNCTION__,io->generic.level, get_friendly_nt_error_msg (status)));
4588 switch (io->generic.level) {
4589 case RAW_FILEINFO_ALL_INFO:
4590 case RAW_FILEINFO_ALL_INFORMATION:
4591 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALL_INFO\n",__FUNCTION__));
4592 d->status_RAW_FILEINFO_ALL_INFO=status;
4594 /* don't blindly overwrite BASIC_INFORMATION as we may already have it - NB the "1 ||" below currently defeats this check and always overwrites */
4595 if (1 || NT_STATUS_IS_OK(status)) {
4596 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4597 d->create_time=io->all_info.out.create_time;
4598 d->access_time=io->all_info.out.access_time;
4599 d->write_time=io->all_info.out.write_time;
4600 d->change_time=io->all_info.out.change_time;
4601 d->attrib=io->all_info.out.attrib;
4603 d->alloc_size=io->all_info.out.alloc_size;
4604 d->size=io->all_info.out.size;
4605 dump_data(5, (uint8_t *)io, sizeof(*io));
4606 d->nlink=io->all_info.out.nlink;
4607 d->delete_pending=io->all_info.out.delete_pending;
4608 d->directory=io->all_info.out.directory;
4609 d->ea_size=io->all_info.out.ea_size;
4610 /* io is sticking around for as long as d is */
4611 d->fname.s=io->all_info.out.fname.s;
4612 d->fname.count=io->all_info.out.fname.private_length;
4613 break;
4614 case RAW_FILEINFO_BASIC_INFO:
4615 case RAW_FILEINFO_BASIC_INFORMATION:
4616 DEBUG(5,("%s: async callback level RAW_FILEINFO_BASIC_INFORMATION\n",__FUNCTION__));
4617 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4618 d->create_time=io->basic_info.out.create_time;
4619 d->access_time=io->basic_info.out.access_time;
4620 d->write_time=io->basic_info.out.write_time;
4621 d->change_time=io->basic_info.out.change_time;
4622 d->attrib=io->basic_info.out.attrib;
4623 break;
4624 case RAW_FILEINFO_COMPRESSION_INFO:
4625 DEBUG(5,("%s: async callback level RAW_FILEINFO_COMPRESSION_INFO\n",__FUNCTION__));
4626 d->status_RAW_FILEINFO_COMPRESSION_INFO = status;
4627 d->compressed_size=io->compression_info.out.compressed_size;
4628 d->format=io->compression_info.out.format;
4629 d->unit_shift=io->compression_info.out.unit_shift;
4630 d->chunk_shift=io->compression_info.out.chunk_shift;
4631 d->cluster_shift=io->compression_info.out.cluster_shift;
4632 break;
4633 case RAW_FILEINFO_INTERNAL_INFORMATION:
4634 DEBUG(5,("%s: async callback level RAW_FILEINFO_INTERNAL_INFORMATION\n",__FUNCTION__));
4635 d->status_RAW_FILEINFO_INTERNAL_INFORMATION=status;
4636 d->file_id=io->internal_information.out.file_id;
4637 break;
4638 case RAW_FILEINFO_ACCESS_INFORMATION:
4639 DEBUG(5,("%s: async callback level RAW_FILEINFO_ACCESS_INFORMATION\n",__FUNCTION__));
4640 d->status_RAW_FILEINFO_ACCESS_INFORMATION=status;
4641 d->access_flags=io->access_information.out.access_flags;
4642 break;
4643 case RAW_FILEINFO_POSITION_INFORMATION:
4644 DEBUG(5,("%s: async callback level RAW_FILEINFO_POSITION_INFORMATION\n",__FUNCTION__));
4645 d->status_RAW_FILEINFO_POSITION_INFORMATION = status;
4646 d->position=io->position_information.out.position;
4647 break;
4648 case RAW_FILEINFO_MODE_INFORMATION:
4649 DEBUG(5,("%s: async callback level RAW_FILEINFO_MODE_INFORMATION\n",__FUNCTION__));
4650 d->status_RAW_FILEINFO_MODE_INFORMATION =status;
4651 d->mode=io->mode_information.out.mode;
4652 break;
4653 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
4654 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALIGNMENT_INFORMATION\n",__FUNCTION__));
4655 d->status_RAW_FILEINFO_ALIGNMENT_INFORMATION=status;
4656 d->alignment_requirement=io->alignment_information.out.alignment_requirement;
4657 break;
4658 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
4659 DEBUG(5,("%s: async callback level RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION\n",__FUNCTION__));
4660 d->status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=status;
4661 d->reparse_tag=io->attribute_tag_information.out.reparse_tag;
4662 d->reparse_attrib=io->attribute_tag_information.out.attrib;
4663 break;
4664 case RAW_FILEINFO_STREAM_INFO: {
4665 uint_t c;
4666 DEBUG(5,("%s: async callback level RAW_FILEINFO_STREAM_INFO\n",__FUNCTION__));
4667 d->status_RAW_FILEINFO_STREAM_INFO=status;
4668 DEBUG(5,("Num Streams %d %s\n",io->stream_info.out.num_streams, get_friendly_nt_error_msg (status)));
4669 if (NT_STATUS_IS_OK(status)) {
4670 d->streams=talloc_zero_array(d, struct info_stream, io->stream_info.out.num_streams);
4671 if (! d->streams) {
4672 d->status_RAW_FILEINFO_STREAM_INFO=NT_STATUS_NO_MEMORY;
4673 } else {
4674 d->num_streams=io->stream_info.out.num_streams;
4675 for(c=0; c < io->stream_info.out.num_streams; c++) {
4676 d->streams[c].size = io->stream_info.out.streams[c].size;
4677 d->streams[c].alloc_size = io->stream_info.out.streams[c].alloc_size;
4678 d->streams[c].stream_name.s=io->stream_info.out.streams[c].stream_name.s;
4679 d->streams[c].stream_name.count=io->stream_info.out.streams[c].stream_name.private_length;
4680 }
4681 }
4682 }
4683 break; }
4684 default:
4685 /* so... where's it from? */
4686 DEBUG(5,("Unexpected getinfo level %x\n", io->generic.level));
4687 }
4689 fragment->smb_fileinfo = NULL;
4690 fragment->c_req=NULL;
4692 /* are the fragments complete? */
4693 DLIST_REMOVE(fragments->fragments, fragment);
4694 /* if this index is complete, remove from fragmentses */
4695 if (! fragments->fragments) {
4696 DLIST_REMOVE(fragmentses->fragments, fragments);
4697 }
4698 /* is that the end? */
4699 if (! fragmentses->fragments && fragmentses->async) {
4700 DEBUG(5,("That's the end of the fragments, doing send\n"));
4701 /* call the send_fn */
4702 req=fragmentses->req;
4703 req->async_states->status=NT_STATUS_OK;
4704 DEBUG(5,("Fragments async response sending\n"));
4705 req->async_states->send_fn(req);
4706 }
4707 DEBUG(5,("%s: That's the end of the callback\n",__FUNCTION__));
4708 return status;
4709 }
4711 #define FINISH_GETINFO_FRAGMENT(r, io) do { \
4712 struct smbcli_request *c_req; \
4713 switch (r->in.info_tags[0].tag_type) { \
4714 case TAG_TYPE_FILE_INFO: \
4715 io->all_info.in.file.fnum=r->in.info_tags[0].info_tag.fnum; \
4716 c_req=smb_raw_fileinfo_send(private->tree, io); \
4717 break; \
4718 case TAG_TYPE_PATH_INFO: \
4719 io->all_info.in.file.path=r->in.info_tags[0].info_tag.path.s; \
4720 c_req=smb_raw_pathinfo_send(private->tree, io); \
4721 break; \
4722 default: \
4723 return NT_STATUS_INVALID_PARAMETER; \
4725 /* Add fragment collator */ \
4726 fragment->c_req=c_req; \
4727 /* use the same stateful async handler for them all... */ \
4728 { void* req=NULL; /* dummy req to satisfy the reference inside ADD_ASYNC_RECV_TAIL */ \
4729 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_proxy_getinfo, NT_STATUS_INTERNAL_ERROR); \
4730 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler); \
4731 } \
4732 io=NULL; \
4733 } while (0)
4735 #define SETUP_GETINFO_FRAGMENT(io, LEVEL) do { \
4736 fragment=talloc_zero(fragments, struct proxy_getinfo_fragment); \
4737 NT_STATUS_HAVE_NO_MEMORY(fragment); \
4738 DLIST_ADD(fragments->fragments, fragment); \
4739 fragment->fragments=fragments; \
4740 io=talloc_zero(fragment, union smb_fileinfo); \
4741 NT_STATUS_HAVE_NO_MEMORY(io); \
4742 io->generic.level=LEVEL; \
4743 } while (0)
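/*
  The two macros above are always used as a pair:

      SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALL_INFO);  -- alloc fragment + io
      FINISH_GETINFO_FRAGMENT(r, io);                     -- send it, hook collator

  Beware that FINISH_GETINFO_FRAGMENT contains a bare
  "return NT_STATUS_INVALID_PARAMETER;" for an unknown tag type, so it
  returns from the *enclosing* function.
*/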
4745 static NTSTATUS rpclite_proxy_Getinfo(struct ntvfs_module_context *ntvfs,
4746 struct ntvfs_request *req, struct proxy_GetInfo *r)
4747 {
4748 struct proxy_private *private = ntvfs->private_data;
4749 struct smbcli_request *c_req;
4750 union smb_fileinfo *io=NULL;
4751 NTSTATUS status;
4752 struct proxy_file* f;
4753 struct ntvfs_handle *h;
4754 struct proxy_getinfo_fragmentses *fragmentses;
4755 int c;
4757 SETUP_PID;
4759 DEBUG(5,("Opnum: proxy_Getinfo r=%p\n",r));
4761 DEBUG(5,("Converting %d handles for r=%p\n",r->in.count, r));
4762 for(c=0; c < r->in.count; c++) {
4763 if (r->in.info_tags[c].tag_type==TAG_TYPE_FILE_INFO) {
4764 RPCLITE_SETUP_THIS_FILE_HERE(r->in.info_tags[c].info_tag.fnum, f, h);
4765 }
4766 }
4768 if (PROXY_REMOTE_SERVER(private)) {
4769 DEBUG(5,("Remote proxy, doing transparent\n"));
4770 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
4771 /* No need to add a receive handler, the ntioctl transport adds
4772    the async chain handler which deals with the send_fn */
4773 // ADD_ASYNC_RECV_TAIL(c_req, r, NULL, f, rpclite_proxy_Getinfo_map_async_send, NT_STATUS_INTERNAL_ERROR);
4775 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4776 DEBUG(5,("%s:Sync waiting for nttrans response\n",__LOCATION__));
4777 return sync_chain_handler(c_req);
4778 } else {
4779 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
4780 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4781 return NT_STATUS_OK;
4782 }
4783 }
4785 /* I thought this was done for me for [in,out] */
4786 r->out.info_data=talloc_zero_array(r, struct info_data, r->in.count);
4787 NT_STATUS_HAVE_NO_MEMORY(r->out.info_data);
4788 r->out.count = r->in.count;
4789 r->out.result = NT_STATUS_OK;
4791 fragmentses=talloc_zero(req, struct proxy_getinfo_fragmentses);
4792 NT_STATUS_HAVE_NO_MEMORY(fragmentses);
4793 fragmentses->r=r;
4794 fragmentses->req=req;
4796 #warning if count is large, we need to do a few at a time according to resource limits
4797 for (c=0; c < r->in.count; c++) {
4798 struct proxy_getinfo_fragments *fragments;
4799 struct proxy_getinfo_fragment *fragment;
4801 fragments=talloc_zero(fragmentses, struct proxy_getinfo_fragments);
4802 NT_STATUS_HAVE_NO_MEMORY(fragments);
4803 DLIST_ADD(fragmentses->fragments, fragments);
4804 fragments->fragmentses=fragmentses;
4805 fragments->index=c;
4807 /* Issue a set of getinfo requests */
4808 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALL_INFO);
4809 FINISH_GETINFO_FRAGMENT(r, io);
4811 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_BASIC_INFORMATION);
4812 FINISH_GETINFO_FRAGMENT(r, io);
4814 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_COMPRESSION_INFO);
4815 FINISH_GETINFO_FRAGMENT(r, io);
4817 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_INTERNAL_INFORMATION);
4818 FINISH_GETINFO_FRAGMENT(r, io);
4820 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ACCESS_INFORMATION);
4821 FINISH_GETINFO_FRAGMENT(r, io);
4823 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_POSITION_INFORMATION);
4824 FINISH_GETINFO_FRAGMENT(r, io);
4826 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_MODE_INFORMATION);
4827 FINISH_GETINFO_FRAGMENT(r, io);
4829 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALIGNMENT_INFORMATION);
4830 FINISH_GETINFO_FRAGMENT(r, io);
4832 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
4833 FINISH_GETINFO_FRAGMENT(r, io);
4835 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_STREAM_INFO);
4836 FINISH_GETINFO_FRAGMENT(r, io);
4837 }
4839 /* If ! async, wait for all requests to finish */
4841 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4842 struct proxy_getinfo_fragments *fragments;
4843 struct proxy_getinfo_fragment *fragment;
4844 while ((fragments = fragmentses->fragments) &&
4845 (fragment = fragments->fragments) &&
4846 fragment->c_req) {
4847 sync_chain_handler(fragment->c_req);
4848 /* and because the whole fragment / fragments may be gone now... */
4849 continue;
4850 }
4851 return NT_STATUS_OK; /* see individual failures */
4852 }
4854 DEBUG(5,("%s: Setting async response\n",__FUNCTION__));
4855 fragmentses->async=true;
4856 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4857 return NT_STATUS_OK;
4858 }
4860 /* rpclite dispatch table */
4861 #define RPC_PROXY_OPS 3
4862 struct {
4863 uint32_t opnum;
4864 NTSTATUS (*handler)(struct ntvfs_module_context *ntvfs,
4865 struct ntvfs_request *req, void* r);
4866 } rpcproxy_ops[RPC_PROXY_OPS]={
4867 {NDR_PROXY_READ, rpclite_proxy_Read},
4868 {NDR_PROXY_WRITE, rpclite_proxy_Write},
4869 {NDR_PROXY_GETINFO, rpclite_proxy_Getinfo}
4870 };
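/*
  NOTE: proxy_rpclite() below indexes this table directly as
  rpcproxy_ops[opnum], so entries must stay in opnum order; the opnum
  member is informational only.
*/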
4872 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
4873 back from rpc struct to ntioctl */
4874 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
4875 struct ntvfs_request *req, union smb_ioctl *io)
4876 {
4877 struct proxy_private *private = ntvfs->private_data;
4878 DATA_BLOB *request;
4879 struct ndr_syntax_id* syntax_id;
4880 uint32_t opnum;
4881 const struct ndr_interface_table *table;
4882 struct ndr_pull* pull;
4883 void* r;
4884 NTSTATUS status;
4885 struct async_rpclite_send *rpclite_send;
4886 enum ndr_err_code ndr_err;
4888 SETUP_PID;
4890 /* We don't care about io->generic.in.file, ntvfs layer already proved it was valid,
4891 our operations will have the fnum embedded in them anyway */
4892 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,(int)io->ntioctl.in.blob.length));
4893 /* unpack the NDR */
4894 request=&io->ntioctl.in.blob;
4896 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4897 NT_STATUS_HAVE_NO_MEMORY(pull);
4898 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
4899 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
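/*
  Layout of the rpclite request blob, as pulled apart below (and as pushed
  by smbcli_ndr_request_ntioctl_send() later in this file):

      ndr_syntax_id      interface uuid + version
      uint32             opnum
      NDR_IN payload     arguments for table->calls[opnum]
*/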
4901 /* the blob is 4-aligned because it was memcpy'd */
4902 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
4903 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
4905 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
4906 status=ndr_map_error2ntstatus(ndr_err);
4907 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4908 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
4909 return status;
4910 }
4912 /* now find the struct ndr_interface_table * for this syntax_id */
4913 table=ndr_table_by_uuid(&syntax_id->uuid);
4914 if (! table) ndr_table_init(); /* lazy-load the built-in interface tables and retry */
4915 table=ndr_table_by_uuid(&syntax_id->uuid);
4917 if (! table) {
4918 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
4919 return NT_STATUS_NO_GUID_TRANSLATION;
4920 }
4922 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
4923 status=ndr_map_error2ntstatus(ndr_err);
4924 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4925 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
4926 return status;
4927 }
4928 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
4930 DEBUG(10,("rpc request data:\n"));
4931 dump_data(10, pull->data, pull->data_size);
4932 if (opnum >= table->num_calls) return NT_STATUS_PROCEDURE_NOT_FOUND; /* guard before indexing calls[] */
4933 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
4934 table->calls[opnum].name);
4935 NT_STATUS_HAVE_NO_MEMORY(r);
4937 memset(r, 0, table->calls[opnum].struct_size);
4939 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
4940 status=ndr_map_error2ntstatus(ndr_err);
4941 DEBUG(5,("%s opnum %d pulled r=%p status %s\n",__FUNCTION__,opnum,r,get_friendly_nt_error_msg (status)));
4942 NT_STATUS_NOT_OK_RETURN(status);
4944 rpclite_send=talloc(req, struct async_rpclite_send);
4945 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
4946 rpclite_send->call=&table->calls[opnum];
4947 rpclite_send->struct_ptr=r;
4948 /* need to push conversion function to convert from r to io */
4949 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
4950 NT_STATUS_NOT_OK_RETURN(status);
4952 /* Magically despatch the call based on syntax_id, table and opnum.
4953    But there is no table of handlers... so until there is, despatch by name: */
4954 if (0==strcasecmp(table->name,"rpcproxy")) {
4955 if (opnum >= RPC_PROXY_OPS) {
4956 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
4957 return NT_STATUS_PROCEDURE_NOT_FOUND;
4958 }
4959 status = rpcproxy_ops[opnum].handler(ntvfs, req, r);
4960 } else {
4961 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
4962 GUID_string(debug_ctx(),&syntax_id->uuid)));
4963 return NT_STATUS_NO_GUID_TRANSLATION;
4964 }
4966 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
4967 the handler status is in r->out.result */
4968 DEBUG(5,("%s now map_async_finish: status=%s async=%d\n", __FUNCTION__,
4969 get_friendly_nt_error_msg (status), req->async_states->state & NTVFS_ASYNC_STATE_ASYNC));
4971 return ntvfs_map_async_finish(req, status);
4972 }
4974 /* unpack the ntioctl to make some rpc_struct */
4975 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
4976 {
4977 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
4978 struct proxy_private *proxy=async->proxy;
4979 struct smbcli_request *c_req = async->c_req;
4980 void* r=io1;
4981 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
4982 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
4983 const struct ndr_interface_call *calls=info->calls;
4984 enum ndr_err_code ndr_err;
4985 DATA_BLOB *response;
4986 struct ndr_pull* pull;
4988 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
4989 DEBUG(5,("%s op %s ntioctl: %s\n",
4990 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
4991 NT_STATUS_NOT_OK_RETURN(status);
4993 if (c_req) {
4994 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
4995 status = smb_raw_ioctl_recv(c_req, io, io);
4996 #define SESSION_INFO proxy->remote_server, proxy->remote_share
4997 /* This status is the ntioctl wrapper status */
4998 if (! NT_STATUS_IS_OK(status)) {
4999 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
5000 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
5001 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
5002 return NT_STATUS_UNSUCCESSFUL;
5003 }
5004 }
5006 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
5008 response=&io->ntioctl.out.blob;
5009 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
5010 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
5012 NT_STATUS_HAVE_NO_MEMORY(pull);
5014 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
5015 #warning can we free pull here?
5016 status=ndr_map_error2ntstatus(ndr_err);
5018 DEBUG(5,("END %s op status %s\n",
5019 __FUNCTION__, get_friendly_nt_error_msg(status)));
5020 return status;
5021 }
5023 /*
5024   send an ntioctl request based on an NDR encoding.
5025 */
5026 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
5027 struct smbcli_tree *tree,
5028 struct ntvfs_module_context *ntvfs,
5029 const struct ndr_interface_table *table,
5030 uint32_t opnum,
5031 void *r)
5032 {
5033 struct proxy_private *private = ntvfs->private_data;
5034 struct smbcli_request * c_req;
5035 struct ndr_push *push;
5036 NTSTATUS status;
5037 DATA_BLOB request;
5038 enum ndr_err_code ndr_err;
5039 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
5040 if (! io) return NULL;
5042 /* setup for a ndr_push_* call, we can't free push until the message
5043 actually hits the wire */
5044 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
5045 if (!push) return NULL;
5047 /* first push interface table identifiers */
5048 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
5049 status=ndr_map_error2ntstatus(ndr_err);
5051 if (! NT_STATUS_IS_OK(status)) return NULL;
5053 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
5054 status=ndr_map_error2ntstatus(ndr_err);
5055 if (! NT_STATUS_IS_OK(status)) return NULL;
5057 if (0) { /* disabled: test-only toggle to force big-endian marshalling */
5058 push->flags |= LIBNDR_FLAG_BIGENDIAN;
5059 }
5061 /* push the structure into a blob */
5062 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
5063 status=ndr_map_error2ntstatus(ndr_err);
5064 if (!NT_STATUS_IS_OK(status)) {
5065 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
5066 nt_errstr(status)));
5067 return NULL;
5068 }
5070 /* retrieve the blob */
5071 request = ndr_push_blob(push);
5073 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
5074 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
5075 io->ntioctl.in.file.fnum=private->nttrans_fnum;
5076 io->ntioctl.in.fsctl=false;
5077 io->ntioctl.in.filter=0;
5078 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
5079 io->ntioctl.in.blob=request;
5081 DEBUG(10,("smbcli_request packet:\n"));
5082 dump_data(10, request.data, request.length);
5084 c_req = smb_raw_ioctl_send(tree, io);
5086 if (! c_req) {
5087 return NULL;
5088 }
5090 dump_data(10, c_req->out.data, c_req->out.data_size);
5092 { void* req=NULL;
5093 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
5094 info->io=io;
5095 info->table=table;
5096 info->opnum=opnum;
5097 info->calls=&table->calls[opnum];
5098 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
5099 }
5101 return c_req;
5102 }
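/*
  Typical use, a sketch only (proxy_smb_raw_read_send() below is the real
  thing):

      struct proxy_Read *r=talloc_zero(io, struct proxy_Read);
      ... fill in r->in ...
      c_req=smbcli_ndr_request_ntioctl_send(private->tree, ntvfs,
                                            &ndr_table_rpcproxy,
                                            NDR_PROXY_READ, r);
      ... ADD_ASYNC_RECV_TAIL a handler that unpacks r->out into the smb io ...

  By the time that handler runs, the ntioctl_rpc_unmap handler installed
  above has already pulled the NDR_OUT payload back into r.
*/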
5104 /*
5105   client helpers, mapping between proxy RPC calls and smbcli_* calls.
5106 */
5108 /*
5109    If the sync_chain_handler is called directly it unplugs the async handler
5110    which (as well as preventing loops) will also avoid req->send_fn being
5111    called - which is also nice! */
5112 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
5113 {
5114 struct async_info *async=NULL;
5115 /* the first callback which will actually receive the c_req response */
5116 struct async_info_map *async_map;
5117 NTSTATUS status=NT_STATUS_OK;
5118 struct async_info_map** chain;
5120 DEBUG(5,("%s\n",__FUNCTION__));
5121 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
5123 /* If there is a handler installed, it is using async_info to chain */
5124 if (c_req->async.fn) {
5125 /* not safe to talloc_free async if send_fn has been called for the request
5126 against which async was allocated, so steal it (and free below) or neither */
5127 async = talloc_get_type_abort(c_req->async.private, struct async_info);
5128 talloc_steal(NULL, async);
5129 chain=&async->chain;
5130 async_map = talloc_get_type_abort(*chain, struct async_info_map);
5131 } else {
5132 chain=(struct async_info_map**)&c_req->async.private;
5133 async_map = talloc_get_type_abort(*chain, struct async_info_map);
5134 }
5136 /* unplug c_req->async.fn as if a callback handler calls smb_*_recv
5137 in order to receive the response, smbcli_transport_finish_recv will
5138 call us again and then call the c-req->async.fn
5139 Perhaps we should merely call smbcli_request_receive() IF
5140 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
5141 help multi-part replies... except all parts are received before the
5142 callback if a handler WAS set */
5143 c_req->async.fn=NULL;
5145 /* Should we raise an error? Should we simple_recv? */
5146 while(async_map) {
5147 /* remove this one from the list before we call. We do this in case
5148 some callbacks free their async_map but also so that callbacks
5149 can navigate the async_map chain to add additional callbacks to
5150 the end - e.g. so that tag-along reads can call send_fn after
5151 the send_fn of the request they tagged along to, thus preserving
5152 the async response order - which may be a waste of time? */
5153 DLIST_REMOVE(*chain, async_map);
5155 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
5156 if (async_map->fn) {
5157 status=async_map->fn(async_map->async,
5158 async_map->parms1, async_map->parms2, status);
5159 }
5160 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
5161 /* Note: the callback may have added to the chain */
5162 #warning Async_maps have a null talloc_context, it is unclear who should own them
5163 /* it can't be c_req as it stops us chaining more than one, maybe it
5164 should be req but there isn't always a req. However sync_chain_handler
5165 will always free it if called */
5166 DEBUG(6,("Will free async map %p\n",async_map));
5167 #warning put me back
5168 talloc_free(async_map);
5169 DEBUG(6,("Free'd async_map\n"));
5170 if (*chain)
5171 async_map=talloc_get_type_abort(*chain, struct async_info_map);
5172 else
5173 async_map=NULL;
5174 DEBUG(6,("Switch to async_map %p\n",async_map));
5176 /* The first callback will have read c_req, thus talloc_free'ing it,
5177 so we don't let the other callbacks get hurt playing with it */
5178 if (async_map && async_map->async)
5179 async_map->async->c_req=NULL;
5180 }
5182 talloc_free(async);
5184 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
5185 return status;
5186 }
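/*
  The contract between the two chain handlers: call sync_chain_handler()
  directly to drain a request synchronously (req->send_fn is never called),
  or leave async_chain_handler installed as c_req->async.fn and the chain
  runs when the transport completes, finishing with
  req->async_states->send_fn().
*/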
5188 /* If the async handler is called, then the send_fn is called */
5189 static void async_chain_handler(struct smbcli_request *c_req)
5190 {
5191 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
5192 struct ntvfs_request *req = async->req;
5193 NTSTATUS status;
5195 if (c_req->state <= SMBCLI_REQUEST_RECV) {
5196 /* Looks like the async handler has been called sync'ly */
5197 smb_panic("async_chain_handler called synchronously");
5198 }
5200 status=sync_chain_handler(c_req);
5202 /* Should we insist that a chain'd handler does this?
5203 Which makes it hard to intercept the data by adding handlers
5204 before the send_fn handler sends it... */
5205 if (req) {
5206 DEBUG(5,("%s send_fn on req=%p\n",__FUNCTION__,req));
5207 req->async_states->status=status;
5208 req->async_states->send_fn(req);
5209 }
5210 }
5212 /* unpack the rpc struct to make some smb_write */
5213 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
5214 void* io1, void* io2, NTSTATUS status)
5215 {
5216 union smb_write* io =talloc_get_type(io1, union smb_write);
5217 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
5219 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
5220 get_friendly_nt_error_msg (status)));
5221 DEBUG(3,("Write response for offset=%lld\n",(long long)io->generic.in.offset));
5222 NT_STATUS_NOT_OK_RETURN(status);
5224 status=r->out.result;
5225 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
5226 NT_STATUS_NOT_OK_RETURN(status);
5228 io->generic.out.remaining = r->out.remaining;
5229 io->generic.out.nwritten = r->out.nwritten;
5231 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
5232 get_friendly_nt_error_msg (status)));
5233 return status;
5234 }
5236 /* upgrade from smb to NDR and then send.
5237 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
5238 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
5239 union smb_write *io,
5240 struct proxy_file *f)
5241 {
5242 struct proxy_private *private = ntvfs->private_data;
5243 struct smbcli_tree *tree=private->tree;
5245 if (PROXY_REMOTE_SERVER(private)) {
5246 struct smbcli_request *c_req;
5247 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
5248 ssize_t size;
5250 if (! r) return NULL;
5252 size=io->generic.in.count;
5253 /* upgrade the write */
5254 r->in.fnum = io->generic.in.file.fnum;
5255 r->in.offset = io->generic.in.offset;
5256 r->in.count = io->generic.in.count;
5257 r->in.mode = io->generic.in.wmode;
5258 // r->in.remaining = io->generic.in.remaining;
5259 #warning remove this
5260 /* prepare to lie */
5261 r->out.nwritten=r->in.count;
5262 r->out.remaining=0;
5264 /* try to compress */
5265 #warning compress!
5266 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
5267 if (r->in.data.compress.data) {
5268 r->in.data.compress.count=size;
5269 r->in.flags = PROXY_USE_ZLIB;
5270 } else {
5271 r->in.flags = 0;
5272 /* we'll honour const, honest gov */
5273 r->in.data.generic.data=discard_const(io->generic.in.data);
5274 r->in.data.generic.count=io->generic.in.count;
5275 }
5277 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5278 ntvfs,
5279 &ndr_table_rpcproxy,
5280 NDR_PROXY_WRITE, r);
5281 if (! c_req) return NULL;
5283 /* yeah, filthy abuse of f */
5284 { void* req=NULL;
5285 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
5286 }
5288 return c_req;
5289 } else {
5290 return smb_raw_write_send(tree, io);
5291 }
5292 }
5294 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
5295 union smb_write *io,
5296 struct proxy_file *f)
5297 {
5298 struct proxy_private *proxy = ntvfs->private_data;
5299 struct smbcli_tree *tree=proxy->tree;
5301 if (PROXY_REMOTE_SERVER(proxy)) {
5302 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
5303 return sync_chain_handler(c_req);
5304 } else {
5305 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
5306 return smb_raw_write_recv(c_req, io);
5307 }
5308 }
5310 /* unpack the rpc struct to make some smb_read response */
5311 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
5312 void* io1, void* io2, NTSTATUS status)
5313 {
5314 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
5315 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
5316 struct proxy_file *f = async->f;
5317 struct proxy_private *private=async->proxy;
5319 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
5320 get_friendly_nt_error_msg(status)));
5321 NT_STATUS_NOT_OK_RETURN(status);
5323 status=r->out.result;
5324 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
5325 get_friendly_nt_error_msg(status)));
5326 NT_STATUS_NOT_OK_RETURN(status);
5328 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
5329 io->generic.out.compaction_mode = 0;
5331 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5332 /* Use the io we already set up!
5333    if out.flags & PROXY_VALIDATE, we may need to validate more in the
5334    cache than r->out.nread would suggest, see io->generic.out.nread */
5335 if (r->out.flags & PROXY_VALIDATE)
5336 io->generic.out.nread=io->generic.in.maxcnt;
5337 DEBUG(5,("Using cached data: size=%lld\n",
5338 (long long) io->generic.out.nread));
5339 return status;
5340 }
5342 if (r->in.flags & PROXY_VALIDATE) {
5343 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
5344 /* turn off validate on this file */
5345 //cache_handle_novalidate(f);
5346 #warning turn off validate on this file - do an nread<maxcnt later
5347 }
5349 if (r->in.flags & PROXY_USE_CACHE) {
5350 DEBUG(5,("Cached data did not match\n"));
5351 }
5353 io->generic.out.nread = r->out.nread;
5355 /* we may need to uncompress */
5356 if (r->out.flags & PROXY_USE_ZLIB) {
5357 ssize_t size=r->out.response.compress.count;
5358 DEBUG(5,("%s: uncompress, %lld wanted %lld or %lld\n",__LOCATION__,
5359 (long long int)size,
5360 (long long int)io->generic.in.maxcnt,
5361 (long long int)io->generic.in.mincnt));
5362 if (size > io->generic.in.mincnt) {
5363 /* we did a bulk read for the cache */
5364 uint8_t *data=talloc_size(io, io->generic.in.maxcnt);
5365 DEBUG(5,("%s: bulk uncompress to %p\n",__LOCATION__,data));
5366 if (! uncompress_block_to(data,
5367 r->out.response.compress.data, &size,
5368 io->generic.in.maxcnt) ||
5369 size != r->out.nread) {
5370 status=NT_STATUS_INVALID_USER_BUFFER;
5371 } else {
5372 DEBUG(5,("%s: uncompressed\n",__LOCATION__));
5373 /* copy as much as they can take */
5374 io->generic.out.nread=MIN(io->generic.in.mincnt, size);
5375 memcpy(io->generic.out.data, data, io->generic.out.nread);
5376 /* copy the rest to the cache */
5377 cache_handle_save(f, data,
5378 size,
5379 io->generic.in.offset);
5380 }
5381 } else if (! uncompress_block_to(io->generic.out.data,
5382 r->out.response.compress.data, &size,
5383 io->generic.in.maxcnt) ||
5384 size != r->out.nread) {
5385 io->generic.out.nread=size;
5386 status=NT_STATUS_INVALID_USER_BUFFER;
5387 }
5388 } else if (io->generic.out.data != r->out.response.generic.data) {
5389 //Assert(r->out.nread == r->out.generic.out.count);
5390 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
5391 }
5392 if (r->out.cache_name.s && r->out.cache_name.count && f && f->cache) {
5393 int result;
5394 setenv("WAFS_CACHE_REMOTE_NAME",r->out.cache_name.s,1);
5395 setenv("WAFS_CACHE_LOCAL_NAME",f->cache->cache_name,1);
5396 setenv("WAFS_REMOTE_SERVER",private->remote_server,1);
5397 DEBUG(5,("WAFS_CACHE_REMOTE_NAME=%s [cache_name]\nWAFS_CACHE_LOCAL_NAME=%s\nWAFS_REMOTE_SERVER=%s\n\n",getenv("WAFS_CACHE_REMOTE_NAME"),getenv("WAFS_CACHE_LOCAL_NAME"),getenv("WAFS_REMOTE_SERVER")));
5398 DEBUG(5,("%s running cache transfer command: %s\n",__LOCATION__,getenv("WAFS_CACHE_TRANSFER")));
5399 result=system(getenv("WAFS_CACHE_TRANSFER"));
5400 DEBUG(5,("%s cache transfer command result %d\n",__LOCATION__,result));
5401 // now set cache to make whole local file valid
5402 cache_validated(f->cache, cache_len(f->cache));
5403 }
5405 return status;
5406 }
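/*
  External cache-transfer hook used above: if WAFS_CACHE_TRANSFER names a
  command it is run with WAFS_CACHE_REMOTE_NAME, WAFS_CACHE_LOCAL_NAME and
  WAFS_REMOTE_SERVER in its environment, and is expected to populate the
  local cache file, which is then marked valid along its whole length.
  An illustrative (not shipped) setting, assuming the remote cache file is
  reachable over rsync:

      WAFS_CACHE_TRANSFER='rsync "$WAFS_REMOTE_SERVER:$WAFS_CACHE_REMOTE_NAME" "$WAFS_CACHE_LOCAL_NAME"'
*/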
5408 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
5409 data has been pre-read into io->generic.out.data and can be used for
5410 proxy<->proxy optimized reads */
5411 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
5412 union smb_read *io,
5413 struct proxy_file *f,
5414 struct proxy_Read *r)
5415 {
5416 struct proxy_private *private = ntvfs->private_data;
5417 #warning we are using out.nread as an out-of-band parameter
5418 if (PROXY_REMOTE_SERVER(private)) {
5420 struct smbcli_request *c_req;
5421 if (! r) {
5422 r=talloc_zero(io, struct proxy_Read);
5423 if (! r) return NULL;
5424 r->in.mincnt = io->generic.in.mincnt;
5425 }
5428 r->in.fnum = io->generic.in.file.fnum;
5429 r->in.read_for_execute=io->generic.in.read_for_execute;
5430 r->in.offset = io->generic.in.offset;
5431 r->in.maxcnt = io->generic.in.maxcnt;
5432 r->in.remaining = io->generic.in.remaining;
5433 r->in.flags |= PROXY_USE_ZLIB;
5434 if (! (r->in.flags & PROXY_VALIDATE) &&
5435 io->generic.out.data && io->generic.out.nread > 0) {
5436 /* maybe we should limit digest size to MIN(nread, maxcnt) to
5437    permit the caller to provide a larger nread as part of
5438    a split read */
5439 checksum_block(r->in.digest.digest, io->generic.out.data,
5440 io->generic.out.nread);
5442 if (io->generic.out.nread > r->in.maxcnt) {
5443 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
5444 } else {
5445 r->in.mincnt = io->generic.out.nread;
5446 r->in.maxcnt = io->generic.out.nread;
5447 r->in.flags |= PROXY_USE_CACHE;
5448 /* PROXY_VALIDATE will have been set by caller */
5449 }
5450 }
5452 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5453 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
5454 dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
5455 }
5457 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5458 ntvfs,
5459 &ndr_table_rpcproxy,
5460 NDR_PROXY_READ, r);
5461 if (! c_req) return NULL;
5463 { void* req=NULL;
5464 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
5465 }
5467 return c_req;
5468 } else {
5469 return smb_raw_read_send(private->tree, io);
5470 }
5471 }
5473 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
5474 union smb_read *io,
5475 struct proxy_file *f)
5476 {
5477 struct proxy_private *proxy = ntvfs->private_data;
5478 struct smbcli_tree *tree=proxy->tree;
5480 if (PROXY_REMOTE_SERVER(proxy)) {
5481 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
5482 return sync_chain_handler(c_req);
5483 } else {
5484 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
5485 return smb_raw_read_recv(c_req, io);
5486 }
5487 }
5490 /*
5491   initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
5492 */
5493 NTSTATUS ntvfs_proxy_init(void)
5494 {
5495 NTSTATUS ret;
5496 struct ntvfs_ops ops;
5497 NTVFS_CURRENT_CRITICAL_SIZES(vers);
5499 ZERO_STRUCT(ops);
5501 /* fill in the name and type */
5502 ops.name = "proxy";
5503 ops.type = NTVFS_DISK;
5505 /* fill in all the operations */
5506 ops.connect = proxy_connect;
5507 ops.disconnect = proxy_disconnect;
5508 ops.unlink = proxy_unlink;
5509 ops.chkpath = proxy_chkpath;
5510 ops.qpathinfo = proxy_qpathinfo;
5511 ops.setpathinfo = proxy_setpathinfo;
5512 ops.open = proxy_open;
5513 ops.mkdir = proxy_mkdir;
5514 ops.rmdir = proxy_rmdir;
5515 ops.rename = proxy_rename;
5516 ops.copy = proxy_copy;
5517 ops.ioctl = proxy_ioctl;
5518 ops.read = proxy_read;
5519 ops.write = proxy_write;
5520 ops.seek = proxy_seek;
5521 ops.flush = proxy_flush;
5522 ops.close = proxy_close;
5523 ops.exit = proxy_exit;
5524 ops.lock = proxy_lock;
5525 ops.setfileinfo = proxy_setfileinfo;
5526 ops.qfileinfo = proxy_qfileinfo;
5527 ops.fsinfo = proxy_fsinfo;
5528 ops.lpq = proxy_lpq;
5529 ops.search_first = proxy_search_first;
5530 ops.search_next = proxy_search_next;
5531 ops.search_close = proxy_search_close;
5532 ops.trans = proxy_trans;
5533 ops.logoff = proxy_logoff;
5534 ops.async_setup = proxy_async_setup;
5535 ops.cancel = proxy_cancel;
5536 ops.notify = proxy_notify;
5537 ops.trans2 = proxy_trans2;
5539 /* register ourselves with the NTVFS subsystem. We register
5540 under the name 'proxy'. */
5541 ret = ntvfs_register(&ops, &vers);
5543 if (!NT_STATUS_IS_OK(ret)) {
5544 DEBUG(0,("Failed to register PROXY backend!\n"));
5545 }
5547 return ret;
5548 }
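/*
  Once registered, the backend is selected per share with the standard
  samba4 ntvfs mechanism, e.g. in smb.conf:

      [cachedshare]
          ntvfs handler = proxy
*/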