Have vfs_proxy search caches use dirmon framework to save handles
[Samba/vfs_proxy.git] / source4 / ntvfs / proxy / vfs_proxy.c
bloba81d2ac5f6871e0d40f8d540942bc97338e40fc2
/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.
  TODO:
    New read-ahead
    Delete cache
    Share cache states between processes
    Update to latest samba
    limit dirmons etc
    mapi delegated creds
*/
/* Abort hard on talloc failure rather than limping on with NULL pointers. */
#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx

/* Talloc'd "file:line function" string for debug messages.
   NOTE(review): identifiers starting with double underscore are reserved
   for the implementation; kept only because callers use this name. */
#define __LOCATION__ (talloc_asprintf(debug_ctx(),"%s:%d %s",__FILE__,__LINE__,__FUNCTION__))

/* Maximum data bytes carried by one PROXY ntioctl round-trip (32 MiB). */
#define PROXY_NTIOCTL_MAXDATA 0x2000000
39 #include "includes.h"
40 #include "libcli/raw/libcliraw.h"
41 #include "libcli/smb_composite/smb_composite.h"
42 #include "auth/auth.h"
43 #include "auth/credentials/credentials.h"
44 #include "ntvfs/ntvfs.h"
45 #include "../lib/util/dlinklist.h"
46 #include "param/param.h"
47 #include "libcli/resolve/resolve.h"
48 #include "libcli/libcli.h"
49 #include "libcli/raw/ioctl.h"
50 #include "librpc/gen_ndr/ndr_misc.h"
51 #include "librpc/gen_ndr/ndr_proxy.h"
52 #include "librpc/ndr/ndr_table.h"
53 #include "lib/cache/cache.h"
54 #include "lib/compression/zlib.h"
55 #include "libcli/raw/raw_proto.h"
56 #include "librpc/gen_ndr/proxy.h"
57 #include "smb_server/smb_server.h"
/* "fast" string comparisons are case-insensitive in SMB land */
#define fstrcmp(a,b) strcasecmp((a),(b))
#define fstrncmp(a,b,len) strncasecmp((a),(b),(len))

/* Copy the cacheable attributes from one file-info structure to another.
   delete_pending is deliberately forced to 0: a cached entry must never
   claim the file is scheduled for deletion. */
#define LOAD_CACHE_FILE_DATA(dest, src) do { \
	dest.create_time=src.create_time; \
	dest.access_time=src.access_time; \
	dest.write_time=src.write_time; \
	dest.change_time=src.change_time; \
	dest.attrib=src.attrib; \
	dest.alloc_size=src.alloc_size; \
	dest.size=src.size; \
	dest.file_type=src.file_type; \
	dest.ipc_state=src.ipc_state; \
	dest.is_directory=src.is_directory; \
	dest.delete_pending=0; \
} while(0)
76 /* taken from #include "librpc/gen_ndr/proxy.h" */
77 struct proxy_file_info_data {
78 /* first three are from ntcreatex */
79 uint16_t file_type;
80 uint16_t ipc_state;
81 uint8_t is_directory;
82 NTSTATUS status_RAW_FILEINFO_BASIC_INFORMATION;
83 uint32_t attrib; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
84 NTTIME create_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
85 NTTIME access_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
86 NTTIME write_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
87 NTTIME change_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
88 NTSTATUS status_RAW_FILEINFO_ALL_INFO;
89 uint32_t ea_size; /* RAW_FILEINFO_ALL_INFO */
90 uint64_t alloc_size; /* RAW_FILEINFO_ALL_INFO */
91 uint64_t size; /* RAW_FILEINFO_ALL_INFO */
92 uint32_t nlink; /* RAW_FILEINFO_ALL_INFO */
93 struct sws fname; /* RAW_FILEINFO_ALL_INFO */
94 uint8_t delete_pending; /* RAW_FILEINFO_ALL_INFO */
95 uint8_t directory; /* RAW_FILEINFO_ALL_INFO */
96 NTSTATUS status_RAW_FILEINFO_COMPRESSION_INFO;
97 uint64_t compressed_size; /* RAW_FILEINFO_COMPRESSION_INFO */
98 uint16_t format; /* RAW_FILEINFO_COMPRESSION_INFO */
99 uint8_t unit_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
100 uint8_t chunk_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
101 uint8_t cluster_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
102 NTSTATUS status_RAW_FILEINFO_INTERNAL_INFORMATION;
103 uint64_t file_id; /* RAW_FILEINFO_INTERNAL_INFORMATION */
104 NTSTATUS status_RAW_FILEINFO_ACCESS_INFORMATION;
105 uint32_t access_flags; /* RAW_FILEINFO_ACCESS_INFORMATION */
106 NTSTATUS status_RAW_FILEINFO_POSITION_INFORMATION;
107 uint64_t position; /* RAW_FILEINFO_POSITION_INFORMATION */
108 NTSTATUS status_RAW_FILEINFO_MODE_INFORMATION;
109 uint32_t mode; /* RAW_FILEINFO_MODE_INFORMATION */
110 NTSTATUS status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
111 uint32_t alignment_requirement; /* RAW_FILEINFO_ALIGNMENT_INFORMATION */
112 NTSTATUS status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
113 uint32_t reparse_tag; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
114 uint32_t reparse_attrib; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
115 NTSTATUS status_RAW_FILEINFO_STREAM_INFO;
116 uint32_t num_streams; /* RAW_FILEINFO_STREAM_INFO */
117 struct info_stream *streams; /* RAW_FILEINFO_STREAM_INFO */
/* Bits for file_metadata.valid, naming which RAW_FILEINFO levels in
   proxy_file_info_data currently hold fresh data.
   NOTE(review): COMPRESSION_INFO (3) and INTERNAL_INFORMATION (4) break the
   power-of-two pattern of the rest; as bitmask flags, 3 aliases 1|2.
   Values kept as-is — verify intended encoding against upstream. */
#define valid_RAW_FILEINFO_BASIC_INFORMATION 1
#define valid_RAW_FILEINFO_ALL_INFO 2
#define valid_RAW_FILEINFO_COMPRESSION_INFO 3
#define valid_RAW_FILEINFO_INTERNAL_INFORMATION 4
#define valid_RAW_FILEINFO_STANDARD_INFO 8
#define valid_RAW_FILEINFO_ACCESS_INFORMATION 16
#define valid_RAW_FILEINFO_POSITION_INFORMATION 32
#define valid_RAW_FILEINFO_MODE_INFORMATION 64
#define valid_RAW_FILEINFO_ALIGNMENT_INFORMATION 128
#define valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION 256
#define valid_RAW_FILEINFO_STREAM_INFO 512
132 struct file_metadata {
133 int count;
134 int valid;
135 struct proxy_file_info_data info_data;
/* per open-file state, linked into proxy_private.files */
struct proxy_file {
	struct proxy_file *prev, *next;
	struct proxy_private* proxy;
	uint16_t fnum;              /* fnum on the upstream server */
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	/* filename might not be a char*, but if so, _size includes null */
	void* filename;
	int filename_size;
	int readahead_pending;
	/* *_OPLOCK_RETURN values */
	int oplock;
	/* read-only, shareable normal file open, can be cloned by similar opens */
	bool can_clone;
	/* If we have an oplock, then the file is NOT bigger than size, which lets
	   us optimize reads */
	struct file_metadata *metadata;
};
157 struct proxy_private;
159 struct search_handle {
160 struct search_handle *prev, *next;
161 struct proxy_private *proxy;
162 struct ntvfs_handle *h;
163 uint16_t handle;
164 union {
165 struct smb_search_id id;
166 uint32_t resume_key;
167 } resume_index;
168 struct search_cache_item *resume_item;
169 enum smb_search_level level;
170 enum smb_search_data_level data_level;
171 /* search cache (if any) being used */
172 struct search_cache *cache;
175 struct search_cache_item {
176 struct search_cache_item *prev, *next;
177 enum smb_search_data_level data_level;
178 struct cache_file_entry *cache;
179 union smb_search_data *file;
180 struct file_metadata *metadata;
182 enum search_cache_status {
183 SEARCH_CACHE_INCOMPLETE,
184 SEARCH_CACHE_COMPLETE,
185 SEARCH_CACHE_DEAD
struct fdirmon;
typedef void(fdirmon_callback_fn)(void* data, struct fdirmon* fdirmon);
//NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS)

/* directory monitor: watches one remote directory via change-notify and
   fans each notification out to the registered callbacks */
struct fdirmon {
	struct fdirmon *prev, *next;
	struct search_cache_item *items;

	struct proxy_private *proxy;

	union smb_notify *notify_io;          /* pending change-notify params */
	struct smbcli_request *notify_req;    /* in-flight change-notify request */
	uint16_t dir_fnum;                    /* handle on the watched directory */
	char* dir;
	struct fdirmon_callback {
		struct fdirmon_callback *prev, *next;
		fdirmon_callback_fn *fn;
		void* data;
	} *callbacks;
};
209 struct search_cache {
210 struct search_cache *prev, *next;
211 struct search_cache_item *items;
213 struct proxy_private *proxy;
214 enum search_cache_status status;
216 struct fdirmon* dirmon;
217 char* dir;
219 struct search_cache_key {
220 enum smb_search_level level;
221 enum smb_search_data_level data_level;
222 uint16_t search_attrib;
223 const char *pattern;
224 /* these only for trans2 */
225 uint16_t flags;
226 uint32_t storage_type;
227 } key;
229 struct search_state {
230 struct search_handle *search_handle;
231 void* private;
232 smbcli_search_callback callback;
233 struct search_cache_item *last_item;
234 uint16_t count; /* count how many client receives */
235 uint16_t all_count; /* count how many we receive */
238 struct fs_attribute_info {
239 uint32_t fs_attr;
240 uint32_t max_file_component_length;
241 struct smb_wire_string fs_type;
/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;            /* in-flight async requests */
	struct proxy_file *files;
	struct proxy_file *closed_files;
	struct fdirmon *dirmons;
	struct search_cache *search_caches;    /* cache's of find-first data */
	struct search_handle *search_handles;  /* cache's of find-first data */
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead;       /* default read-ahead window size */
	int cache_readaheadblock;  /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	struct fs_attribute_info *fs_attribute_info;
	int readahead_spare;   /* amount of pending non-user generated requests */
	bool fake_oplock;      /* useful for testing, smbclient never asks for oplock */
	bool fake_valid;       /* useful for testing, smbclient never asks for oplock */
	uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
	bool enabled_cache_info;
	bool enabled_proxy_search;
	bool enabled_open_clone;
	bool enabled_extra_protocol;
	bool enabled_qpathinfo;
};
276 struct async_info_map;
278 /* a structure used to pass information to an async handler */
279 struct async_info {
280 struct async_info *next, *prev;
281 struct proxy_private *proxy;
282 struct ntvfs_request *req;
283 struct smbcli_request *c_req;
284 struct proxy_file *f;
285 struct async_info_map *chain;
286 void *parms;
289 /* used to chain async callbacks */
290 struct async_info_map {
291 struct async_info_map *next, *prev;
292 NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
293 void *parms1;
294 void *parms2;
295 struct async_info *async;
298 struct ntioctl_rpc_unmap_info {
299 void* io;
300 const struct ndr_interface_call *calls;
301 const struct ndr_interface_table *table;
302 uint32_t opnum;
305 /* a structure used to pass information to an async handler */
306 struct async_rpclite_send {
307 const struct ndr_interface_call* call;
308 void* struct_ptr;
/* impersonate the client's pid on the upstream connection */
#define SETUP_PID private->tree->session->pid = req->smbpid

#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
	RPCLITE_SETUP_THIS_FILE_HERE(r->in.fnum, f, h); \
} while (0)

/* map the client fnum to our backend file; on failure answer the rpclite
   call with INVALID_HANDLE (the transport itself succeeded, hence OK) */
#define RPCLITE_SETUP_THIS_FILE_HERE(FNUM, f, h) do { \
	if ((h = ntvfs_find_handle(private->ntvfs, req, FNUM)) && \
	    (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
		FNUM = f->fnum; \
	} else { \
		r->out.result = NT_STATUS_INVALID_HANDLE; \
		return NT_STATUS_OK; \
	} \
} while (0)

/* translate the ntvfs handle in a generic io union into an upstream fnum */
#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)
/* parametric share-option names for the proxy backend */
#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
/* value is in KB (proxy_connect scales it by 1024), so 256 = 256KB.
   NOTE(review): the original comment claimed "10M", contradicting the value. */
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256

#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

#define PROXY_FAKE_VALID "proxy:fake-valid"
#define PROXY_FAKE_VALID_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true

/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	 && (strcmp("A:",private->tree->device)==0) \
	 && (private->nttrans_fnum!=0) \
	 && (private->enabled_extra_protocol))
390 /* A few forward declarations */
391 static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
392 static void async_chain_handler(struct smbcli_request *c_req);
393 static void async_read_handler(struct smbcli_request *c_req);
394 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
395 struct ntvfs_request *req, union smb_ioctl *io);
397 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
398 struct smbcli_tree *tree,
399 struct ntvfs_module_context *ntvfs,
400 const struct ndr_interface_table *table,
401 uint32_t opnum, void *r);
402 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
403 union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
404 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
405 union smb_read *io, struct proxy_file *f);
406 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
407 union smb_write *io, struct proxy_file *f);
408 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
409 union smb_write *io, struct proxy_file *f);
410 static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);
412 struct smb_wire_string talloc_smb_wire_string_dup(void* mem_ctx, const struct smb_wire_string* string)
414 struct smb_wire_string result;
415 result.private_length=string->private_length;
416 result.s=talloc_strndup(mem_ctx, string->s, string->private_length);
417 DEBUG(5,("%s: %s\n",__FUNCTION__, string->s));
418 return result;
421 #define sws_dup(mem_ctx, dest, src) (\
422 dest=talloc_smb_wire_string_dup(NULL, &(src)), \
423 (dest.s==NULL && src.s!=NULL))
/* These needs replacing with something more canonical perhaps */
/* return the directory part of a backslash-separated path, or "" if the
   path has no directory component; result is talloc'd on mem_ctx */
static char* talloc_dirname(void* mem_ctx, const char* path) {
	const char* dir;

	if ((dir=strrchr(path,'\\'))) {
		return talloc_strndup(mem_ctx, path, (dir - path));
	} else {
		return talloc_strdup(mem_ctx,"");
	}
}
437 a handler for oplock break events from the server - these need to be passed
438 along to the client
440 static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
442 struct proxy_private *private = p_private;
443 NTSTATUS status;
444 struct ntvfs_handle *h = NULL;
445 struct proxy_file *f;
446 bool result=true;
448 /* because we clone handles, there may be more than one match */
449 for (f=private->files; f; f=f->next) {
450 if (f->fnum != fnum) continue;
451 h = f->h;
453 if (level==OPLOCK_BREAK_TO_LEVEL_II) {
454 f->oplock=LEVEL_II_OPLOCK_RETURN;
455 } else {
456 /* If we don't have an oplock, then we can't rely on the cache */
457 cache_handle_stale(f);
458 f->oplock=NO_OPLOCK_RETURN;
461 DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
462 status = ntvfs_send_oplock_break(private->ntvfs, h, level);
463 if (!NT_STATUS_IS_OK(status)) result=false;
465 if (!h) {
466 DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
468 return result;
471 /* need to pass error upstream and then close? */
472 static void transport_dead(struct smbcli_transport *transport, NTSTATUS status, void* p_private) {
473 struct proxy_private *private = p_private;
474 struct async_info *a;
476 /* first cleanup pending requests */
477 if (transport->pending_recv) {
478 struct smbcli_request *req = transport->pending_recv;
479 req->state = SMBCLI_REQUEST_ERROR;
480 req->status = status;
481 DLIST_REMOVE(transport->pending_recv, req);
482 if (req->async.fn) {
483 req->async.fn(req);
486 // smbsrv_terminate_connection(private->ntvfs,"Upstream hates us");
490 get file handle from clients fnum, (from ntvfs/ipc/vfs_ipc.c at metze suggestion)
492 static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
493 struct ntvfs_request *req,
494 uint16_t fnum)
496 DATA_BLOB key;
497 uint16_t _fnum;
500 * the fnum is already in host byteorder
501 * but ntvfs_handle_search_by_wire_key() expects
502 * network byteorder
504 SSVAL(&_fnum, 0, fnum);
505 key = data_blob_const(&_fnum, 2);
507 return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
511 connect to a share - used when a tree_connect operation comes in.
513 static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
514 struct ntvfs_request *req, const char *sharename)
516 NTSTATUS status;
517 struct proxy_private *private;
518 const char *host, *user, *pass, *domain, *remote_share;
519 struct smb_composite_connect io;
520 struct composite_context *creq;
521 struct share_config *scfg = ntvfs->ctx->config;
522 int nttrans_fnum;
524 struct cli_credentials *credentials;
525 bool machine_account;
527 /* Here we need to determine which server to connect to.
528 * For now we use parametric options, type proxy.
529 * Later we will use security=server and auth_server.c.
531 host = share_string_option(scfg, PROXY_SERVER, NULL);
532 user = share_string_option(scfg, PROXY_USER, NULL);
533 pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
534 domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
535 remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
536 if (!remote_share) {
537 remote_share = sharename;
540 machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);
542 private = talloc_zero(ntvfs, struct proxy_private);
543 if (!private) {
544 return NT_STATUS_NO_MEMORY;
547 ntvfs->private_data = private;
549 if (!host) {
550 DEBUG(1,("PROXY backend: You must supply server\n"));
551 return NT_STATUS_INVALID_PARAMETER;
554 if (user && pass) {
555 DEBUG(5, ("PROXY backend: Using specified password\n"));
556 credentials = cli_credentials_init(private);
557 if (!credentials) {
558 return NT_STATUS_NO_MEMORY;
560 cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
561 cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
562 if (domain) {
563 cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
565 cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
566 } else if (machine_account) {
567 DEBUG(5, ("PROXY backend: Using machine account\n"));
568 credentials = cli_credentials_init(private);
569 cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
570 if (domain) {
571 cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
573 status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
574 if (!NT_STATUS_IS_OK(status)) {
575 return status;
577 } else if (req->session_info->credentials) {
578 DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
579 credentials = req->session_info->credentials;
580 } else {
581 DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
582 return NT_STATUS_INVALID_PARAMETER;
585 /* connect to the server, using the smbd event context */
586 io.in.dest_host = host;
587 io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
588 io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
589 io.in.called_name = host;
590 io.in.credentials = credentials;
591 io.in.fallback_to_anonymous = false;
592 io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
593 io.in.service = remote_share;
594 io.in.service_type = "?????";
595 io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
596 io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
597 lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
598 lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);
600 creq = smb_composite_connect_send(&io, private,
601 lp_resolve_context(ntvfs->ctx->lp_ctx),
602 ntvfs->ctx->event_ctx);
603 status = smb_composite_connect_recv(creq, private);
604 NT_STATUS_NOT_OK_RETURN(status);
606 private->tree = io.out.tree;
608 private->transport = private->tree->session->transport;
609 SETUP_PID;
610 private->ntvfs = ntvfs;
612 ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
613 NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
614 ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
615 NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);
617 /* we need to receive oplock break requests from the server */
618 smbcli_oplock_handler(private->transport, oplock_handler, private);
620 /* we also want to know when the transport goes bad */
621 private->transport->transport_dead.handler = transport_dead;
622 private->transport->transport_dead.private = private;
624 private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);
626 private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);
628 private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);
630 if (strcmp("A:",private->tree->device)==0) {
631 private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
632 private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
633 private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
634 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
635 private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
636 private->fake_valid = share_bool_option(scfg, PROXY_FAKE_VALID, PROXY_FAKE_VALID_DEFAULT);
637 private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
638 private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
639 private->enabled_cache_info=true;
640 private->enabled_proxy_search=true;
641 private->enabled_open_clone=true;
642 private->enabled_extra_protocol=true;
643 private->enabled_qpathinfo=true;
645 DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
646 remote_share, private->tree->device,private->tree->fs_type,
647 (private->cache_enabled)?"enabled":"disabled",
648 private->cache_readahead));
649 } else {
650 private->cache_enabled = false;
651 DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
652 remote_share, private->tree->device,private->tree->fs_type));
655 private->remote_server = strlower_talloc(private, host);
656 private->remote_share = strlower_talloc(private, remote_share);
658 /* some proxy operations will not be performed on files, so open a handle
659 now that we can use for such things. We won't bother to close it on
660 shutdown, as the remote server ought to be able to close it for us
661 and we might be shutting down because the remote server went away and
662 so we don't want to delay further */
663 nttrans_fnum=smbcli_nt_create_full(private->tree, "\\",
665 SEC_FILE_READ_DATA,
666 FILE_ATTRIBUTE_NORMAL,
667 NTCREATEX_SHARE_ACCESS_MASK,
668 NTCREATEX_DISP_OPEN,
669 NTCREATEX_OPTIONS_DIRECTORY,
670 NTCREATEX_IMPERSONATION_IMPERSONATION);
671 if (nttrans_fnum < 0) {
672 DEBUG(5,("Could not open handle for ntioctl %d\n",private->nttrans_fnum));
673 //return NT_STATUS_UNSUCCESSFUL;
675 private->nttrans_fnum=nttrans_fnum;
676 DEBUG(5,("Got nttrans handle %d\n",private->nttrans_fnum));
678 return NT_STATUS_OK;
682 disconnect from a share
684 static void async_search_cache_notify(void *data, struct fdirmon *dirmon);
685 static void dirmon_remove_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data);
686 static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
688 struct proxy_private *private = ntvfs->private_data;
689 struct async_info *a, *an;
690 struct search_cache *s;
692 /* first clean up caches because they have a pending request that
693 they will try and clean up later and fail during talloc_free */
694 for (s=private->search_caches; s; s=s->next) {
695 if (s->dirmon) {
696 dirmon_remove_callback (s->dirmon, async_search_cache_notify, s);
697 s->dirmon=NULL;
701 /* first cleanup pending requests */
702 for (a=private->pending; a; a = an) {
703 an = a->next;
704 smbcli_request_destroy(a->c_req);
705 talloc_free(a);
708 talloc_free(private);
709 ntvfs->private_data = NULL;
711 return NT_STATUS_OK;
715 destroy an async info structure
717 static int async_info_destructor(struct async_info *async)
719 DLIST_REMOVE(async->proxy->pending, async);
720 return 0;
724 a handler for simple async replies
725 this handler can only be used for functions that don't return any
726 parameters (those that just return a status code)
728 static void async_simple(struct smbcli_request *c_req)
730 struct async_info *async = c_req->async.private;
731 struct ntvfs_request *req = async->req;
732 req->async_states->status = smbcli_request_simple_recv(c_req);
733 talloc_free(async);
734 req->async_states->send_fn(req);
737 /* hopefully this will optimize away */
738 #define TYPE_CHECK(type,check) do { \
739 type=check; \
740 t=t; \
741 } while (0)
743 /* save some typing for the simple functions */
744 #define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
745 if (!c_req) return (error); \
746 ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain); \
747 if (! c_req->async.private) return (error); \
748 MAKE_SYNC_ERROR_ASYNC(c_req, error); \
749 } while(0)
751 #define ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain) do { \
752 TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
754 struct async_info *async; \
755 async = talloc(req, struct async_info); \
756 if (async) { \
757 async->parms = io; \
758 async->req = req; \
759 async->f = file; \
760 async->proxy = private; \
761 async->c_req = c_req; \
762 async->chain = achain; \
763 DLIST_ADD(private->pending, async); \
764 c_req->async.private = async; \
765 talloc_set_destructor(async, async_info_destructor); \
768 c_req->async.fn = async_fn; \
769 } while (0)
771 #define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
772 if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
773 TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
775 struct async_info *async; \
776 async = talloc(req, struct async_info); \
777 if (!async) return NT_STATUS_NO_MEMORY; \
778 async->parms = io; \
779 async->req = req; \
780 async->f = file; \
781 async->proxy = private; \
782 async->c_req = c_req; \
783 DLIST_ADD(private->pending, async); \
784 c_req->async.private = async; \
785 talloc_set_destructor(async, async_info_destructor); \
787 c_req->async.fn = async_fn; \
788 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
789 MAKE_SYNC_ERROR_ASYNC(c_req, NT_STATUS_UNSUCCESSFUL); \
790 return NT_STATUS_OK; \
791 } while (0)
793 static void vasync_timer(struct event_context * ec, struct timed_event *te,
794 struct timeval tv, void *data) {
795 struct smbcli_request *c_req = talloc_get_type_abort(data, struct smbcli_request);
797 DEBUG(5,("Calling async timer on c_req %p with req %p\n",c_req->async.fn, c_req)); \
798 c_req->async.fn(c_req);
801 #define MAKE_SYNC_ERROR_ASYNC(c_req, error) do { \
802 if (c_req && c_req->state >= SMBCLI_REQUEST_DONE) { \
803 /* NOTE: the timer struct is allocated against c_req, so if the c_req */ \
804 /* handler is called manually, the timer will be destroyed with c_req */ \
805 if (! event_add_timed(private->ntvfs->ctx->event_ctx, c_req, \
806 timeval_current_ofs(0, 0), \
807 vasync_timer, \
808 c_req)) return (error); \
809 DEBUG(5,("Queueing async timer on c_req %p with req %p\n",c_req->async.fn, c_req)); \
811 } while(0)
813 #define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)
815 #define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   And if async->c_req is NULL then an earlier chain has already rec'd the
   request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record
   static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed
   and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *)
   chained async_info_map will be in c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)

/* NOTE(review): the "#define" header line of this tracing macro was lost in
   extraction; the name and parameter list below are reconstructed from the
   body — verify against upstream before relying on it. */
#define DUMP_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn) \
	DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
		creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL,\
		io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
		io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
		file, file?"file":"null", file?"file":"null", #async_fn))

/* append a chained handler (fn, io1, io2) to creq's handler chain, creating
   the chain if this is the first chained handler */
#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
	if (! creq) { \
		DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no creq\n",__FUNCTION__)); \
		return (error); \
	} else { \
		struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
		if (! async_map) { \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map\n",__FUNCTION__)); \
			return (error); \
		} \
		async_map->async=talloc(async_map, struct async_info); \
		if (! async_map->async) { \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map->async\n",__FUNCTION__)); \
			return (error); \
		} \
		async_map->parms1=io1; \
		async_map->parms2=io2; \
		async_map->fn=async_fn; \
		async_map->async->parms = io1; \
		async_map->async->req = req; \
		async_map->async->f = file; \
		async_map->async->proxy = private; \
		async_map->async->c_req = creq; \
		/* If async_chain_handler is installed, get the list from param */ \
		if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
			struct async_info *i=creq->async.private; \
			DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
		} else if (creq->async.fn) { \
			/* incompatible handler installed */ \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL incompatible handler already installed\n",__FUNCTION__)); \
			return (error); \
		} else { \
			DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
		} \
	} \
} while(0)
/* Completion handler for a dirmon's pending change-notify request.
 * The watched directory changed (or the notify completed abnormally):
 * invoke every registered callback, unlist the dirmon from the proxy,
 * close its directory handle and free it.  The fdirmon pointer is
 * smuggled through async->f. */
889 static void async_dirmon_notify(struct smbcli_request *c_req)
891 struct async_info *async = c_req->async.private;
892 struct ntvfs_request *req = async->req;
893 struct fdirmon *dirmon;
894 struct fdirmon_callback *callback;
895 struct proxy_private *proxy = async->proxy;
896 int f;
898 NTSTATUS status;
900 dirmon = talloc_get_type_abort((void*)async->f, struct fdirmon);
901 DEBUG(5,("%s: dirmon %s invalidated\n",__LOCATION__, dirmon->dir));
903 status = smb_raw_changenotify_recv(c_req, req, async->parms);
904 DEBUG(5,("%s: update status %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));
/* the notify request has now completed; drop the extra reference taken
 * when it was posted so it can be reclaimed */
906 if (dirmon->notify_req) {
907 talloc_unlink(dirmon, dirmon->notify_req);
908 dirmon->notify_req=NULL;
910 /* Mark closed cached files as invalid if they changed, as they will be
911 assuming cache is valid if a dirmon exists and hasn't invalidated it */
912 for(f=0; f<dirmon->notify_io->nttrans.out.num_changes; f++) {
913 DEBUG(1,("DIRMON: %s changed\n",dirmon->notify_io->nttrans.out.changes[f].name.s));
915 DLIST_FOR_EACH(dirmon->callbacks, callback, callback->fn(callback->data, dirmon));
916 /* So nothing can find it even if there are still in-use references */
917 DLIST_REMOVE(proxy->dirmons, dirmon);
/* 65535 is the invalid-fnum sentinel used for dirmon handles in this file */
918 if (dirmon->dir_fnum!=65535) {
919 struct smbcli_request *req;
920 union smb_close close_parms;
921 close_parms.close.level = RAW_CLOSE_CLOSE;
922 close_parms.close.in.file.fnum = dirmon->dir_fnum;
923 close_parms.close.in.write_time = 0;
925 /* destructor may be called from a notify response and won't be able
926 to wait on this close response, not that we care anyway */
927 req=smb_raw_close_send(proxy->tree, &close_parms);
929 DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, dirmon->dir_fnum, req));
930 dirmon->dir_fnum=65535;
932 talloc_free(async);
933 talloc_free(dirmon);
/* Find (or create) a directory monitor for 'path'.  With dir_only=true the
 * component after the final '\\' is stripped so the containing directory is
 * watched; otherwise the whole path is used.  A freshly created dirmon opens
 * the directory, posts a change-notify that never times out, installs
 * async_dirmon_notify as its orphan completion handler and is linked into
 * proxy->dirmons.  Returns NULL on any failure. */
936 struct fdirmon* get_fdirmon(struct proxy_private *proxy, const char* path, bool dir_only) {
937 const char *file;
938 int pathlen;
940 if ((file=strrchr(path,'\\'))) {
941 if (dir_only) {
942 pathlen = file - path;
943 file++;
944 } else {
945 pathlen=strlen(path);
947 } else {
948 file = path;
949 pathlen = 0;
952 struct fdirmon *dirmon;
953 /* see if we have a matching dirmon */
954 DLIST_FIND(proxy->dirmons, dirmon, (strlen(dirmon->dir) == pathlen && fstrncmp(path, dirmon->dir, pathlen)==0));
955 if (! dirmon) {
956 int saved_timeout;
958 DEBUG(5,("%s: allocating new dirmon for %s\n",__FUNCTION__,path));
959 dirmon=talloc_zero(proxy, struct fdirmon);
960 if (! dirmon) {
961 goto error;
963 if (! (dirmon->dir=talloc_strndup(dirmon, path, pathlen))) {
964 goto error;
966 if (! (dirmon->notify_io=talloc_zero(dirmon, union smb_notify))) {
967 goto error;
970 dirmon->dir_fnum=smbcli_nt_create_full(proxy->tree, dirmon->dir,
972 SEC_FILE_READ_DATA,
973 FILE_ATTRIBUTE_NORMAL,
974 NTCREATEX_SHARE_ACCESS_MASK,
975 NTCREATEX_DISP_OPEN,
976 NTCREATEX_OPTIONS_DIRECTORY,
977 NTCREATEX_IMPERSONATION_IMPERSONATION);
/* 65535 is the invalid-fnum sentinel */
979 if (dirmon->dir_fnum==65535) {
980 DEBUG(5,("%s: smbcli_nt_create_full %s failed\n",__FUNCTION__, dirmon->dir));
981 goto error;
984 saved_timeout = proxy->transport->options.request_timeout;
985 /* request notify changes on cache before we start to fill it */
986 dirmon->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
987 dirmon->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
988 dirmon->notify_io->nttrans.in.file.fnum=dirmon->dir_fnum;
989 dirmon->notify_io->nttrans.in.recursive=false;
990 dirmon->notify_io->nttrans.in.buffer_size=10240;
/* timeout 0 while sending so the long-lived notify is never timed out */
991 proxy->transport->options.request_timeout = 0;
992 dirmon->notify_req=smb_raw_changenotify_send(proxy->tree, dirmon->notify_io);
993 /* Make the request hang around so we can tell if it needs cancelling */
994 proxy->transport->options.request_timeout = saved_timeout;
996 if (! dirmon->notify_req) {
997 goto error;
998 }else {
/* local names req/c_req/io/private exist to satisfy the identifiers the
 * ASYNC_RECV_TAIL_F_ORPHAN_NE macro expects in scope */
999 struct ntvfs_request *req=NULL;
1000 struct smbcli_request *c_req=dirmon->notify_req;
1001 union smb_notify *io=dirmon->notify_io;
1002 struct proxy_private *private=proxy;
1004 talloc_reference(dirmon, dirmon->notify_req);
1005 ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_dirmon_notify,
1006 (void*) dirmon, c_req->async.private);
1007 DLIST_ADD(private->dirmons, dirmon);
1011 return dirmon;
1012 error:
1013 DEBUG(3,("%s: failed to allocate dirmon\n",__FUNCTION__));
1014 talloc_free(dirmon);
1015 return NULL;
1018 static bool dirmon_add_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data) {
1019 struct fdirmon_callback *callback=talloc_zero(dirmon, struct fdirmon_callback);
1020 if (! callback) {
1021 return false;
1023 callback->data=data;
1024 callback->fn=fn;
1025 DLIST_ADD(dirmon->callbacks, callback);
1026 return true;
1029 static void dirmon_remove_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data) {
1030 struct fdirmon_callback *callback;
1032 for(callback=dirmon->callbacks; callback; callback=callback->next) {
1033 if (callback->data==data && callback->fn==fn) {
1034 DLIST_REMOVE(dirmon->callbacks, callback);
/* try and unify cache open function interface with this macro */
/* Dispatch to cache_fileid_open for NTCREATEX open-by-file-id requests
 * (fname then actually carries a 64-bit file id), otherwise to
 * cache_filename_open.  The whole expansion and the io argument are now
 * fully parenthesized so the macro is safe inside larger expressions. */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	((((io)->generic.level == RAW_OPEN_NTCREATEX) && \
	  ((io)->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID)) \
	 ?(cache_fileid_open(cache_context, f, (const uint64_t*)((io)->generic.in.fname), oplock, readahead_window)) \
	 :(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window)))
1046 struct search_cache* find_partial_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
1047 struct search_cache* result;
1048 DLIST_FIND(search_cache, result,
1049 (result->key.level == search_cache_key->level) &&
1050 (result->key.data_level == search_cache_key->data_level) &&
1051 (result->key.search_attrib == search_cache_key->search_attrib) &&
1052 (result->key.flags == search_cache_key->flags) &&
1053 (result->key.storage_type == search_cache_key->storage_type) &&
1054 (fstrcmp(result->key.pattern, search_cache_key->pattern) == 0));
1055 DEBUG(5,("%s: found %p\n",__LOCATION__,result));
1056 return result;
1058 struct search_cache* find_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
1059 struct search_cache* result = find_partial_search_cache(search_cache, search_cache_key);
1060 if (result && result->status == SEARCH_CACHE_COMPLETE) {
1061 DEBUG(5,("%s: found complete %p\n",__LOCATION__,result));
1062 return result;
1064 DEBUG(5,("%s: found INCOMPLETE %p\n",__LOCATION__,result));
1065 return NULL;
1068 uint16_t smbsrv_fnum(struct ntvfs_handle *h) {
1069 uint16_t fnum;
1070 smbsrv_push_fnum((uint8_t *)&fnum, 0, h);
1071 return SVAL(&fnum, 0);
1074 static void async_search_cache_notify(void *data, struct fdirmon *dirmon) {
1075 struct search_cache *s=talloc_get_type_abort(data, struct search_cache);
1077 DEBUG(5,("%s: cache notify %p,%s/%s\n",__LOCATION__,s, s->dir, s->key.pattern));
1078 s->dirmon=NULL;
1079 /* dispose of the search_cache */
1080 s->status=SEARCH_CACHE_DEAD;
1081 /* So nothing can find it even if there are still in-use references */
1082 DLIST_REMOVE(s->proxy->search_caches, s);
1083 /* free it */
1084 //talloc_steal(async, search_cache);
1085 talloc_unlink(s->proxy, s);
1089 destroy a search handle
1091 static int search_handle_destructor(struct search_handle *s)
1093 DLIST_REMOVE(s->proxy->search_handles, s);
1094 DEBUG(5,("%s: handle destructor %p\n",__LOCATION__,s));
1095 return 0;
1097 static int search_cache_destructor(struct search_cache *s)
1099 NTSTATUS status;
1101 DLIST_REMOVE(s->proxy->search_caches, s);
1102 DEBUG(5,("%s: cache destructor %p,%s/%s\n",__LOCATION__,s, s->dir, s->key.pattern));
1103 if (s->dirmon) {
1104 dirmon_remove_callback(s->dirmon, async_search_cache_notify, s);
1105 s->dirmon=NULL;
1107 return 0;
/* Allocate a search cache for 'key'.  The directory being searched is
 * watched via a shared dirmon so the cache is invalidated (via
 * async_search_cache_notify) whenever it changes.  Returns NULL on any
 * failure; partially built caches are released through the error path. */
1110 struct search_cache* new_search_cache(struct proxy_private *private, struct search_cache_key* key) {
1111 /* need to opendir the folder being searched so we can get a notification */
1112 struct search_cache *search_cache=NULL;
1114 search_cache=talloc_zero(private, struct search_cache);
1115 DEBUG(5,("%s: Start new cache %p for %s\n",__LOCATION__, search_cache, key->pattern));
1116 if (! search_cache) {
1117 return NULL;
1119 search_cache->proxy=private;
1120 if (! (search_cache->dir=talloc_dirname(search_cache, key->pattern))) {
1121 goto error;
/* shallow copy of the key first ... */
1123 search_cache->key=*key;
1124 /* make private copy of pattern now that we need it AND have something to own it */
1125 if (! (search_cache->key.pattern=talloc_strdup(search_cache, search_cache->key.pattern))) {
1126 goto error;
1129 search_cache->dirmon=get_fdirmon(private, search_cache->dir, true);
1130 if (! search_cache->dirmon) {
1131 goto error;
1133 /* The destructor will close the handle */
1134 talloc_set_destructor(search_cache, search_cache_destructor);
1136 DEBUG(5,("%s: Start new cache %p, dir_fnum %p\n",__LOCATION__, search_cache, search_cache->dirmon));
1138 if (! dirmon_add_callback(search_cache->dirmon, async_search_cache_notify, search_cache)) {
1139 goto error;
1140 } else {
1141 DLIST_ADD_END(private->search_caches, search_cache, struct search_cache*);
1144 return search_cache;
1145 error:
1146 talloc_free(search_cache);
1147 return NULL;
1151 delete a file - the dirtype specifies the file types to include in the search.
1152 The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
1154 static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
1155 struct ntvfs_request *req, union smb_unlink *unl)
1157 struct proxy_private *private = ntvfs->private_data;
1158 struct smbcli_request *c_req;
1160 SETUP_PID;
1162 /* see if the front end will allow us to perform this
1163 function asynchronously. */
1164 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1165 return smb_raw_unlink(private->tree, unl);
1168 c_req = smb_raw_unlink_send(private->tree, unl);
1170 SIMPLE_ASYNC_TAIL;
1174 a handler for async ioctl replies
1176 static void async_ioctl(struct smbcli_request *c_req)
1178 struct async_info *async = c_req->async.private;
1179 struct ntvfs_request *req = async->req;
1180 req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
1181 talloc_free(async);
1182 req->async_states->send_fn(req);
1186 ioctl interface
1188 static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
1189 struct ntvfs_request *req, union smb_ioctl *io)
1191 struct proxy_private *private = ntvfs->private_data;
1192 struct smbcli_request *c_req;
1194 if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
1195 && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
1196 return proxy_rpclite(ntvfs, req, io);
1199 SETUP_PID_AND_FILE;
1201 /* see if the front end will allow us to perform this
1202 function asynchronously. */
1203 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1204 return smb_raw_ioctl(private->tree, req, io);
1207 c_req = smb_raw_ioctl_send(private->tree, io);
1209 ASYNC_RECV_TAIL(io, async_ioctl);
1213 check if a directory exists
1215 static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
1216 struct ntvfs_request *req, union smb_chkpath *cp)
1218 struct proxy_private *private = ntvfs->private_data;
1219 struct smbcli_request *c_req;
1221 SETUP_PID;
1223 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1224 return smb_raw_chkpath(private->tree, cp);
1227 c_req = smb_raw_chkpath_send(private->tree, cp);
1229 SIMPLE_ASYNC_TAIL;
/* Look up the file component of 'path' in the chain of directory search
 * caches.  On a hit, *search_cache and *item are set and true is returned.
 * On a miss they receive the last values scanned (NULL when the chain is
 * exhausted) and false is returned.
 * NOTE(review): on a miss where no cache matches the directory, *item is
 * written back from the caller's input value, which may be uninitialized -
 * callers should pass *item pre-set to NULL; confirm against call sites. */
1232 static bool find_search_cache_item(const char* path,
1233 struct search_cache **search_cache,
1234 struct search_cache_item **item) {
1235 struct search_cache *s=*search_cache;
1236 struct search_cache_item *i=*item;
1237 const char* file;
1238 int dir_len;
1240 /* see if we can satisfy from a directory cache */
1241 DEBUG(5,("%s: Looking for pathinfo: '%s'\n",__LOCATION__,path));
1242 if ((file=strrchr(path,'\\'))) {
1243 dir_len = file - path;
1244 /* point past the \ */
1245 file++;
1246 } else {
1247 file = path;
1248 dir_len = 0;
1250 /* convert empty path to . so we can find it in the cache */
1251 if (! *file) {
1252 file=".";
1254 DEBUG(5,("%s: Path='%s' File='%s'\n",__LOCATION__,path, file));
1256 /* Note we don't care if the cache is partial, as long as it has a hit */
1257 while(s) {
1258 /* One day we may support all directory levels */
1259 DLIST_FIND(s, s, (s->key.data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
1260 strlen(s->dir)==dir_len &&
1261 fstrncmp(s->dir, path, dir_len)==0));
1262 if (! s) {
1263 break;
1265 DEBUG(5,("%s: found cache %p\n",__LOCATION__,s));
1266 /* search s for io->generic.in.file.path */
/* match on either the long or the 8.3 short name */
1267 DLIST_FIND(s->items, i, (i->data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
1268 ((i->file->both_directory_info.name.s &&
1269 fstrcmp(i->file->both_directory_info.name.s, file) ==0) ||
1270 (i->file->both_directory_info.short_name.s &&
1271 fstrcmp(i->file->both_directory_info.short_name.s, file)==0)
1272 )));
1273 DEBUG(5,("%s: found cache %p item %p\n",__LOCATION__,s, i));
1274 if (i) {
1275 *item=i;
1276 *search_cache=s;
1277 return true;
/* this cache had no item for the file; try the next matching cache */
1279 s=s->next;
1280 DEBUG(5,("%s: continue search at %p\n",__LOCATION__,s));
1282 *item=i;
1283 *search_cache=s;
1284 return false;
1287 static void proxy_set_cache_info(struct file_metadata *metadata, struct proxy_GetInfo *r) {
1288 /* only set this if it was responded... I think they all are responded... */
1289 metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION;
1290 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION) /*||
1291 /*NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)*/) {
1292 metadata->info_data.create_time=r->out.info_data[0].create_time;
1293 metadata->info_data.access_time =r->out.info_data[0].access_time;
1294 metadata->info_data.write_time=r->out.info_data[0].write_time;
1295 metadata->info_data.change_time=r->out.info_data[0].change_time;
1296 metadata->info_data.attrib=r->out.info_data[0].attrib;
1297 metadata->valid|=valid_RAW_FILEINFO_BASIC_INFORMATION;
1299 metadata->info_data.status_RAW_FILEINFO_ALL_INFO=r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO;
1300 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)) {
1301 metadata->info_data.ea_size=r->out.info_data[0].ea_size;
1302 metadata->info_data.alloc_size=r->out.info_data[0].alloc_size;
1303 metadata->info_data.size=r->out.info_data[0].size;
1304 metadata->info_data.nlink=r->out.info_data[0].nlink;
1305 /* Are we duping this right? Would talloc_reference be ok? */
1306 //f->metadata->info_data.fname=
1307 metadata->info_data.fname.s=talloc_memdup(metadata, r->out.info_data[0].fname.s, r->out.info_data[0].fname.count);
1308 metadata->info_data.fname.count=r->out.info_data[0].fname.count;
1309 metadata->info_data.delete_pending=r->out.info_data[0].delete_pending;
1310 metadata->info_data.directory=r->out.info_data[0].directory;
1311 metadata->valid|=valid_RAW_FILEINFO_ALL_INFO | valid_RAW_FILEINFO_STANDARD_INFO;;
1313 metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO=r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO;
1314 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO)) {
1315 metadata->info_data.compressed_size=r->out.info_data[0].compressed_size;
1316 metadata->info_data.format=r->out.info_data[0].format;
1317 metadata->info_data.unit_shift=r->out.info_data[0].unit_shift;
1318 metadata->info_data.chunk_shift=r->out.info_data[0].chunk_shift;
1319 metadata->info_data.cluster_shift=r->out.info_data[0].cluster_shift;
1320 metadata->valid|=valid_RAW_FILEINFO_COMPRESSION_INFO;
1322 metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION;
1323 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION)) {
1324 metadata->info_data.file_id=r->out.info_data[0].file_id;
1325 metadata->valid|=valid_RAW_FILEINFO_INTERNAL_INFORMATION;
1327 metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION;
1328 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION)) {
1329 metadata->info_data.access_flags=r->out.info_data[0].access_flags;
1330 metadata->valid|=valid_RAW_FILEINFO_ACCESS_INFORMATION;
1332 metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION;
1333 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION)) {
1334 metadata->info_data.position=r->out.info_data[0].position;
1335 metadata->valid|=valid_RAW_FILEINFO_POSITION_INFORMATION;
1337 metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION;
1338 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION)) {
1339 metadata->info_data.mode=r->out.info_data[0].mode;
1340 metadata->valid|=valid_RAW_FILEINFO_MODE_INFORMATION;
1342 metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
1343 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION)) {
1344 metadata->info_data.alignment_requirement=r->out.info_data[0].alignment_requirement;
1345 metadata->valid|=valid_RAW_FILEINFO_ALIGNMENT_INFORMATION;
1347 metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
1348 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION)) {
1349 metadata->info_data.reparse_tag=r->out.info_data[0].reparse_tag;
1350 metadata->info_data.reparse_attrib=r->out.info_data[0].reparse_attrib;
1351 metadata->valid|=valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
1353 metadata->info_data.status_RAW_FILEINFO_STREAM_INFO=r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO;
1354 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO)) {
1355 metadata->info_data.num_streams=r->out.info_data[0].num_streams;
1356 talloc_free(metadata->info_data.streams);
1357 metadata->info_data.streams=talloc_steal(metadata, r->out.info_data[0].streams);
1358 metadata->valid|=valid_RAW_FILEINFO_STREAM_INFO;
1361 /* satisfy a file-info request from cache */
1362 NTSTATUS proxy_cache_info(union smb_fileinfo *io, struct file_metadata *metadata, bool *valid)
1364 #define SET_VALID(FLAG) do { \
1365 if (valid) *valid=!!(metadata->valid & valid_ ## FLAG); \
1366 DEBUG(5,("%s check %s=%d (%x)\n",__FUNCTION__, #FLAG, !!(metadata->valid & valid_ ## FLAG), metadata->valid)); \
1367 } while(0)
1368 /* and now serve the request from the cache */
1369 switch(io->generic.level) {
1370 case RAW_FILEINFO_BASIC_INFORMATION:
1371 SET_VALID(RAW_FILEINFO_BASIC_INFORMATION);
1372 io->basic_info.out.create_time=metadata->info_data.create_time;
1373 io->basic_info.out.access_time=metadata->info_data.access_time;
1374 io->basic_info.out.write_time=metadata->info_data.write_time;
1375 io->basic_info.out.change_time=metadata->info_data.change_time;
1376 io->basic_info.out.attrib=metadata->info_data.attrib;
1377 return metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION;
1378 case RAW_FILEINFO_ALL_INFO:
1379 SET_VALID(RAW_FILEINFO_ALL_INFO);
1380 io->all_info.out.create_time=metadata->info_data.create_time;
1381 io->all_info.out.access_time=metadata->info_data.access_time;
1382 io->all_info.out.write_time=metadata->info_data.write_time;
1383 io->all_info.out.change_time=metadata->info_data.change_time;
1384 io->all_info.out.attrib=metadata->info_data.attrib;
1385 io->all_info.out.alloc_size=metadata->info_data.alloc_size;
1386 io->all_info.out.size=metadata->info_data.size;
1387 io->all_info.out.directory=metadata->info_data.directory;
1388 io->all_info.out.nlink=metadata->info_data.nlink;
1389 io->all_info.out.delete_pending=metadata->info_data.delete_pending;
1390 io->all_info.out.fname.s=metadata->info_data.fname.s;
1391 io->all_info.out.fname.private_length=metadata->info_data.fname.count;
1392 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1393 case RAW_FILEINFO_STANDARD_INFO:
1394 case RAW_FILEINFO_STANDARD_INFORMATION:
1395 SET_VALID(RAW_FILEINFO_ALL_INFO);
1396 io->standard_info.out.alloc_size=metadata->info_data.alloc_size;
1397 io->standard_info.out.size=metadata->info_data.size;
1398 io->standard_info.out.directory=metadata->info_data.directory;
1399 io->standard_info.out.nlink=metadata->info_data.nlink; /* may be wrong */
1400 io->standard_info.out.delete_pending=metadata->info_data.delete_pending;
1401 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1402 case RAW_FILEINFO_EA_INFO:
1403 case RAW_FILEINFO_EA_INFORMATION:
1404 SET_VALID(RAW_FILEINFO_ALL_INFO);
1405 io->ea_info.out.ea_size=metadata->info_data.ea_size;
1406 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1407 case RAW_FILEINFO_COMPRESSION_INFO:
1408 SET_VALID(RAW_FILEINFO_COMPRESSION_INFO);
1409 io->compression_info.out.compressed_size=metadata->info_data.compressed_size;
1410 io->compression_info.out.format=metadata->info_data.format;
1411 io->compression_info.out.unit_shift=metadata->info_data.unit_shift;
1412 io->compression_info.out.chunk_shift=metadata->info_data.chunk_shift;
1413 io->compression_info.out.cluster_shift=metadata->info_data.cluster_shift;
1414 return metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO;
1415 case RAW_FILEINFO_INTERNAL_INFORMATION:
1416 SET_VALID(RAW_FILEINFO_INTERNAL_INFORMATION);
1417 io->internal_information.out.file_id=metadata->info_data.file_id;
1418 return metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION;
1419 case RAW_FILEINFO_ACCESS_INFORMATION:
1420 SET_VALID(RAW_FILEINFO_ACCESS_INFORMATION);
1421 io->access_information.out.access_flags=metadata->info_data.access_flags;
1422 return metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION;
1423 case RAW_FILEINFO_POSITION_INFORMATION:
1424 SET_VALID(RAW_FILEINFO_POSITION_INFORMATION);
1425 io->position_information.out.position=metadata->info_data.position;
1426 return metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION;
1427 case RAW_FILEINFO_MODE_INFORMATION:
1428 SET_VALID(RAW_FILEINFO_MODE_INFORMATION);
1429 io->mode_information.out.mode=metadata->info_data.mode;
1430 return metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION;
1431 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1432 SET_VALID(RAW_FILEINFO_ALIGNMENT_INFORMATION);
1433 io->alignment_information.out.alignment_requirement=metadata->info_data.alignment_requirement;
1434 return metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
1435 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1436 SET_VALID(RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
1437 io->attribute_tag_information.out.reparse_tag=metadata->info_data.reparse_tag;
1438 io->attribute_tag_information.out.attrib=metadata->info_data.reparse_attrib;
1439 return metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
1440 case RAW_FILEINFO_STREAM_INFO:
1441 case RAW_FILEINFO_STREAM_INFORMATION:
1442 SET_VALID(RAW_FILEINFO_STREAM_INFO);
1443 io->stream_info.out.num_streams=metadata->info_data.num_streams;
1444 if (metadata->info_data.num_streams > 0) {
1445 io->stream_info.out.streams = talloc_zero_array(io, struct stream_struct, metadata->info_data.num_streams);
1446 int c;
1447 if (! io->stream_info.out.streams) {
1448 if (*valid) *valid=false;
1449 io->stream_info.out.num_streams=0;
1450 return NT_STATUS_NO_MEMORY;
1452 for (c=0; c<io->stream_info.out.num_streams; c++) {
1453 io->stream_info.out.streams[c].size = metadata->info_data.streams[c].size;
1454 io->stream_info.out.streams[c].alloc_size = metadata->info_data.streams[c].alloc_size;
1455 io->stream_info.out.streams[c].stream_name.s = talloc_reference(io, metadata->info_data.streams[c].stream_name.s);
1456 io->stream_info.out.streams[c].stream_name.private_length = metadata->info_data.streams[c].stream_name.count;
1458 } else {
1459 io->stream_info.out.streams=NULL;
1461 return metadata->info_data.status_RAW_FILEINFO_STREAM_INFO;
1462 default:
1463 DEBUG(5,("%s: Unknown request\n",__FUNCTION__));
1464 if (valid) *valid=false;
1465 return NT_STATUS_INTERNAL_ERROR;
1470 a handler for async qpathinfo replies
1472 static void async_qpathinfo(struct smbcli_request *c_req)
1474 struct async_info *async = c_req->async.private;
1475 struct ntvfs_request *req = async->req;
1476 req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
1477 talloc_free(async);
1478 req->async_states->send_fn(req);
1481 static NTSTATUS async_proxy_qpathinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1483 struct proxy_private *private = async->proxy;
1484 struct smbcli_request *c_req = async->c_req;
1485 struct ntvfs_request *req = async->req;
1486 struct proxy_file *f = talloc_get_type_abort(async->f, struct proxy_file);
1487 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1488 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1490 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1491 req->async_states->status=status;
1493 /* It's good to check for over-all status but we need to check status of each sub-message */
1494 NT_STATUS_NOT_OK_RETURN(status);
1496 /* populate the cache, and then fill the request from the cache */
1497 /* Assuming that r->count.in == 1 */
1498 SMB_ASSERT(r->out.count==1);
1499 DEBUG(5,("%s: Combined status of meta request: %s\n",__LOCATION__, get_friendly_nt_error_msg (r->out.info_data[0].status)));
1500 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1502 DEBUG(5,("%s: will set cache %p item=%p metadata=%p %p\n",__LOCATION__, f, f?f->metadata:NULL, r));
1503 proxy_set_cache_info(f->metadata, r);
1505 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1506 DEBUG(5,("%s: set final response of original request to: %s\n",__LOCATION__, get_friendly_nt_error_msg (req->async_states->status)));
1508 return req->async_states->status;
1511 static void async_qpathinfo_notify(void* data, struct fdirmon* dirmon) {
1512 struct proxy_file* file=data;
1514 DEBUG(5,("%s: qpathinfo cache %s destroyed\n",__LOCATION__,file->filename));
1515 DLIST_REMOVE(file->proxy->closed_files, file);
1516 talloc_free(file);
1520 return info on a pathname
/* When enabled, serve the reply from metadata cached for recently closed
 * files.  On a cache miss for an upgradeable info level, the request may be
 * "promoted" to a single proxy RPC (NDR_PROXY_GETINFO) that fetches all
 * info classes at once and repopulates the cache; otherwise fall back to a
 * plain smb_raw_pathinfo. */
1522 static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
1523 struct ntvfs_request *req, union smb_fileinfo *io)
1525 struct proxy_private *private = ntvfs->private_data;
1526 struct smbcli_request *c_req;
1527 struct proxy_file *f=NULL;
1528 const char* path;
1530 SETUP_PID;
1532 /* Look for closed files */
1533 if (private->enabled_qpathinfo) {
1534 int len=strlen(io->generic.in.file.path)+1;
1535 DEBUG(5,("%s: Looking for cached metadata for: %s\n",__LOCATION__,io->generic.in.file.path));
1536 DLIST_FIND(private->closed_files, f,
1537 (len==f->filename_size && fstrncmp(io->generic.in.file.path, f->filename, f->filename_size)==0));
1538 if (f) {
1539 /* stop cache going away while we are using it */
1540 talloc_reference(req, f);
1543 /* upgrade the request */
1544 switch(io->generic.level) {
1545 case RAW_FILEINFO_STANDARD_INFO:
1546 case RAW_FILEINFO_STANDARD_INFORMATION:
1547 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1548 case RAW_FILEINFO_ALL_INFO:
1549 case RAW_FILEINFO_COMPRESSION_INFO:
1550 case RAW_FILEINFO_INTERNAL_INFORMATION:
1551 case RAW_FILEINFO_ACCESS_INFORMATION:
1552 case RAW_FILEINFO_POSITION_INFORMATION:
1553 case RAW_FILEINFO_MODE_INFORMATION:
1554 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1555 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1556 case RAW_FILEINFO_STREAM_INFO:
1557 case RAW_FILEINFO_STREAM_INFORMATION:
1558 case RAW_FILEINFO_EA_INFO:
1559 case RAW_FILEINFO_EA_INFORMATION:
1560 DEBUG(5,("%s: item is %p\n",__FUNCTION__, f));
1561 if (f && f->metadata) {
1562 NTSTATUS status;
1563 bool valid;
1564 DEBUG(5,("%s: Using cached metadata %x (item=%p)\n",__FUNCTION__, f->metadata->valid, f));
1565 status=proxy_cache_info(io, f->metadata, &valid);
1566 if (valid) return status;
1567 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1569 /* construct an item to hold the cache if we need to */
1570 if (! f && private->enabled_cache_info && PROXY_REMOTE_SERVER(private) && (f=talloc_zero(private, struct proxy_file))) {
1571 struct fdirmon* dirmon;
/* watch the containing directory so the cache invalidates on change */
1572 dirmon=get_fdirmon(private, io->generic.in.file.path, true);
1573 if (f && dirmon) {
1574 f->proxy=private;
1575 dirmon_add_callback(dirmon, async_qpathinfo_notify, f);
1577 f->filename=talloc_strdup(f, io->generic.in.file.path);
1578 f->filename_size=strlen(f->filename)+1;
1579 f->metadata=talloc_zero(f, struct file_metadata);
1580 /* should not really add unless we succeeded */
1581 DLIST_ADD(private->closed_files, f);
1582 } else {
1583 talloc_free(f);
1584 f=NULL;
1587 if (f && f->metadata && private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1588 struct proxy_GetInfo *r;
1589 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1591 r=talloc_zero(req, struct proxy_GetInfo);
1592 NT_STATUS_HAVE_NO_MEMORY(r);
1594 r->in.count=1;
1595 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1596 r->in.info_tags[0].tag_type=TAG_TYPE_PATH_INFO;
1597 /* 1+ to get the null */
1598 r->in.info_tags[0].info_tag.path.count=1+strlen(io->generic.in.file.path);
1599 r->in.info_tags[0].info_tag.path.s=io->generic.in.file.path;
1600 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1601 /* the callback handler will populate the cache and respond from the cache */
1602 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qpathinfo, NT_STATUS_INTERNAL_ERROR);
1604 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1605 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1606 return sync_chain_handler(c_req);
1607 } else {
/* NOTE(review): f is deliberately shadowed to NULL here so the orphan
 * handler macro is installed without a file context - confirm intended */
1608 void* f=NULL;
1609 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1610 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1611 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1612 return NT_STATUS_OK;
/* not promoted: plain pathinfo, sync or async as the front end allows */
1617 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1618 return smb_raw_pathinfo(private->tree, req, io);
1621 c_req = smb_raw_pathinfo_send(private->tree, io);
1623 ASYNC_RECV_TAIL(io, async_qpathinfo);
1627 a handler for async qfileinfo replies
1629 static void async_qfileinfo(struct smbcli_request *c_req)
1631 struct async_info *async = c_req->async.private;
1632 struct ntvfs_request *req = async->req;
1633 req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1634 talloc_free(async);
1635 req->async_states->send_fn(req);
/* Chained handler for the proxied (RPC) qfileinfo path: receives the
   PROXY_GETINFO reply, stores the result in the file's metadata cache and
   then answers the original qfileinfo request from that cache.
   io1 is the caller's union smb_fileinfo, io2 the proxy_GetInfo RPC struct. */
1638 static NTSTATUS async_proxy_qfileinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1640 struct proxy_private *private = async->proxy;
1641 struct smbcli_request *c_req = async->c_req;
1642 struct ntvfs_request *req = async->req;
1643 struct proxy_file *f = async->f;
1644 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1645 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
/* if a chained handler upstream has not already consumed the reply, do so */
1647 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1648 req->async_states->status=status;
1650 NT_STATUS_NOT_OK_RETURN(status);
1652 /* populate the cache, and then fill the request from the cache */
1653 /* Assuming that r->count.in == 1 */
/* we sent exactly one info_tag, so exactly one result is expected back */
1654 SMB_ASSERT(r->out.count==1);
1655 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1657 proxy_set_cache_info(f->metadata, r);
/* NULL valid-pointer: the cache was just populated so it must satisfy us */
1659 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1661 return req->async_states->status;
1665 query info on a open file
/* Query info on an open file.  For the levels listed below we first try to
   answer from the locally cached metadata (valid while we hold an oplock);
   failing that, if the remote end is a proxy we promote the request to a
   single PROXY_GETINFO RPC that refreshes the whole cache; otherwise we
   fall through to an ordinary smb_raw_fileinfo. */
1667 static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
1668 struct ntvfs_request *req, union smb_fileinfo *io)
1670 struct proxy_private *private = ntvfs->private_data;
1671 struct smbcli_request *c_req;
1672 struct proxy_file *f;
1673 bool valid=false;
1674 NTSTATUS status;
1676 SETUP_PID;
1678 SETUP_FILE_HERE(f);
1680 /* upgrade the request */
/* only these info levels can be satisfied from / refreshed into the cache */
1681 switch(io->generic.level) {
1682 case RAW_FILEINFO_STANDARD_INFO:
1683 case RAW_FILEINFO_STANDARD_INFORMATION:
1684 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1685 case RAW_FILEINFO_ALL_INFO:
1686 case RAW_FILEINFO_COMPRESSION_INFO:
1687 case RAW_FILEINFO_INTERNAL_INFORMATION:
1688 case RAW_FILEINFO_ACCESS_INFORMATION:
1689 case RAW_FILEINFO_POSITION_INFORMATION:
1690 case RAW_FILEINFO_MODE_INFORMATION:
1691 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1692 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1693 case RAW_FILEINFO_STREAM_INFO:
1694 case RAW_FILEINFO_STREAM_INFORMATION:
1695 case RAW_FILEINFO_EA_INFO:
1696 case RAW_FILEINFO_EA_INFORMATION:
1697 DEBUG(5,("%s: oplock is %d\n",__FUNCTION__, f->oplock));
/* with an oplock nobody else can have changed the file, so trust the cache */
1698 if (f->oplock) {
1699 DEBUG(5,("%s: %p Using cached metadata %x (fnum=%d)\n",__FUNCTION__, f, f->metadata->valid, f->fnum));
1700 status=proxy_cache_info(io, f->metadata, &valid);
1701 if (valid) return status;
1702 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
/* cache miss: if talking proxy-to-proxy, fetch everything in one RPC */
1704 if (private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1705 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1706 struct proxy_GetInfo *r=talloc_zero(req, struct proxy_GetInfo);
1707 NT_STATUS_HAVE_NO_MEMORY(r);
1708 r->in.count=1;
1709 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1710 r->in.info_tags[0].tag_type=TAG_TYPE_FILE_INFO;
1711 r->in.info_tags[0].info_tag.fnum=io->generic.in.file.fnum;
1712 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1713 /* the callback handler will populate the cache and respond from the cache */
1714 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qfileinfo, NT_STATUS_INTERNAL_ERROR);
1716 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1717 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
/* caller can't go async: drive the chained handlers synchronously */
1718 return sync_chain_handler(c_req);
1719 } else {
1720 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1721 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1722 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1723 return NT_STATUS_OK;
/* non-cacheable level or no proxy upgrade: plain pass-through */
1728 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1729 return smb_raw_fileinfo(private->tree, req, io);
1732 c_req = smb_raw_fileinfo_send(private->tree, io);
1734 ASYNC_RECV_TAIL(io, async_qfileinfo);
1738 set info on a pathname
/* Set info on a pathname: straight pass-through to the remote server,
   synchronously if the request cannot go async, otherwise queued with the
   simple async completion tail. */
1740 static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
1741 struct ntvfs_request *req, union smb_setfileinfo *st)
1743 struct proxy_private *private = ntvfs->private_data;
1744 struct smbcli_request *c_req;
1746 SETUP_PID;
1748 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1749 return smb_raw_setpathinfo(private->tree, st);
1752 c_req = smb_raw_setpathinfo_send(private->tree, st);
1754 SIMPLE_ASYNC_TAIL;
1759 a handler for async open replies
/* Completion handler for async opens: records the new fnum, binds the
   proxy_file to the ntvfs handle, seeds the metadata cache from the open
   reply, and (if caching is enabled) either re-attaches a still-monitored
   search-cache entry or opens a fresh file cache. */
1761 static void async_open(struct smbcli_request *c_req)
1763 struct async_info *async = c_req->async.private;
1764 struct proxy_private *proxy = async->proxy;
1765 struct ntvfs_request *req = async->req;
1766 struct proxy_file *f = async->f;
1767 union smb_open *io = async->parms;
1768 union smb_handle *file;
1770 talloc_free(async);
1771 req->async_states->status = smb_raw_open_recv(c_req, req, io);
/* NOTE(review): file->fnum is copied before the status check below —
   presumably harmless on failure since we jump to failed:, but confirm */
1772 SMB_OPEN_OUT_FILE(io, file);
1773 f->fnum = file->fnum;
1774 file->ntvfs = NULL;
1775 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1776 req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
1777 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1778 file->ntvfs = f->h;
1779 DLIST_ADD(proxy->files, f);
1781 f->oplock=io->generic.out.oplock_level;
/* seed the metadata cache with the attributes returned by the open itself */
1783 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1784 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1785 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1787 if (proxy->cache_enabled) {
1788 struct search_cache_item *item=NULL;
1789 struct search_cache *s=proxy->search_caches;
1790 /* If we are still monitoring the file for changes we can
1791 retain the previous cache state, [if it is more recent that the monitor]! */
1792 /* yeah yeah what if there is more than one.... :-( */
/* open-by-file-id has no pathname to match against the search cache */
1793 if (! (io->generic.level == RAW_OPEN_NTCREATEX &&
1794 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) &&
1795 find_search_cache_item(SMB_OPEN_IN_FILE(io), &s, &item) && item->cache) {
1796 DEBUG(5,("%s: Using cached file cache\n",__LOCATION__));
1797 f->cache=talloc_reference(f, item->cache);
1798 cache_beopen(f->cache);
1799 if (item->metadata) {
/* struct copy, then deep-copy the talloc'd strings onto f */
1800 *(f->metadata)=*(item->metadata);
1801 f->metadata->info_data.fname.s=talloc_strdup(f, item->metadata->info_data.fname.s);
1802 f->metadata->info_data.fname.count=item->metadata->info_data.fname.count;
1804 f->metadata->info_data.streams=talloc_zero_array(f, struct info_stream, f->metadata->info_data.num_streams);
1805 if (f->metadata->info_data.streams) {
1806 int c;
1807 for(c=0; c < f->metadata->info_data.num_streams; c++) {
1808 f->metadata->info_data.streams[c].size = item->metadata->info_data.streams[c].size;
1809 f->metadata->info_data.streams[c].alloc_size = item->metadata->info_data.streams[c].alloc_size;
1810 f->metadata->info_data.streams[c].stream_name.s= talloc_strdup(f, item->metadata->info_data.streams[c].stream_name.s);
1811 f->metadata->info_data.streams[c].stream_name.count=item->metadata->info_data.streams[c].stream_name.count;
1814 f->metadata->count=1;
1816 } else {
/* no usable search-cache entry: start a fresh file cache */
1817 f->cache=cache_open(proxy->cache, f, io, f->oplock, proxy->cache_readahead);
1818 if (proxy->fake_valid) {
1819 cache_handle_validated(f, cache_handle_len(f));
1821 if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
/* stash this cache back on the search-cache item for later re-opens */
1822 if (item) {
1823 item->cache = talloc_reference(item, f->cache);
1824 item->metadata=talloc_reference(item, f->metadata);
1825 DEBUG(5,("%s: Caching file cache for later\n",__LOCATION__));
1826 } else {
1827 DEBUG(5,("%s: NOT Caching file cache for later\n",__LOCATION__));
1832 failed:
1833 req->async_states->send_fn(req);
1837 open a file
/* Open a file.  Non-generic opens are first mapped to generic form.  If an
   existing read-only handle for the same name can be shared ("cloned") we
   answer entirely from local state without a network round-trip; otherwise
   we issue the open (sync or async) and set up metadata/file caching. */
1839 static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
1840 struct ntvfs_request *req, union smb_open *io)
1842 struct proxy_private *private = ntvfs->private_data;
1843 struct smbcli_request *c_req;
1844 struct ntvfs_handle *h;
1845 struct proxy_file *f, *clone;
1846 NTSTATUS status;
1847 void *filename;
1848 int filename_size;
1849 uint16_t fnum;
1851 SETUP_PID;
1853 if (io->generic.level != RAW_OPEN_GENERIC &&
1854 private->map_generic) {
1855 return ntvfs_map_open(ntvfs, req, io);
1858 status = ntvfs_handle_new(ntvfs, req, &h);
1859 #warning should we free this handle if the open fails?
1860 NT_STATUS_NOT_OK_RETURN(status);
1862 f = talloc_zero(h, struct proxy_file);
1863 NT_STATUS_HAVE_NO_MEMORY(f);
1864 f->proxy=private;
1866 /* If the file is being opened read only and we already have a read-only
1867 handle for this file, then just clone and ref-count the handle */
1868 /* First calculate the filename key */
/* open-by-file-id keys on the 64-bit id, otherwise on the NUL-terminated path */
1869 if (io->generic.level == RAW_OPEN_NTCREATEX &&
1870 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) {
1871 filename_size=sizeof(uint64_t);
1872 filename=io->generic.in.fname;
1873 } else {
1874 filename=SMB_OPEN_IN_FILE(io);
1875 filename_size=strlen(filename)+1;
1877 f->filename=talloc_memdup(f, filename, filename_size);
1878 f->filename_size=filename_size;
1879 f->h = h;
/* cloneable: shared read-only, plain impersonation, not a directory, and
   not a disposition that would create/replace the file */
1880 f->can_clone= (io->generic.in.access_mask & NTCREATEX_SHARE_ACCESS_MASK) == NTCREATEX_SHARE_ACCESS_READ &&
1881 (io->generic.in.impersonation == NTCREATEX_IMPERSONATION_IMPERSONATION) &&
1882 (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY) == 0 &&
1883 (io->generic.in.open_disposition != NTCREATEX_DISP_CREATE) &&
1884 (io->generic.in.open_disposition != NTCREATEX_DISP_SUPERSEDE);
1885 /* see if we have a matching open file */
1886 clone=NULL;
1887 if (f->can_clone) for (clone=private->files; clone; clone=clone->next) {
1888 if (clone->can_clone && filename_size == clone->filename_size &&
1889 memcmp(filename, clone->filename, filename_size)==0) {
1890 break;
1894 /* if clone is not null, then we found a match */
1895 if (private->enabled_open_clone && clone) {
1896 union smb_handle *file;
1898 DEBUG(5,("%s: clone handle %d\n",__FUNCTION__,clone->fnum));
1899 SMB_OPEN_OUT_FILE(io, file);
/* share the remote fnum with the clone; no wire traffic needed */
1900 f->fnum = clone->fnum;
1901 file->ntvfs = NULL;
1902 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1903 NT_STATUS_NOT_OK_RETURN(status);
1904 file->ntvfs = f->h;
1905 DLIST_ADD(private->files, f);
1906 /* but be sure to share the same metadata cache */
1907 f->metadata=talloc_reference(f, clone->metadata);
1908 f->metadata->count++;
1909 f->oplock=clone->oplock;
1910 f->cache=talloc_reference(f, clone->cache);
1911 /* We don't need to reduce the oplocks for both files if we are read-only */
1912 /* if (clone->oplock==EXCLUSIVE_OPLOCK_RETURN ||
1913 clone->oplock==BATCH_OPLOCK_RETURN) {
1914 DEBUG(5,("%s: Breaking clone oplock from %d\n",__LOCATION__, clone->oplock));
1915 clone->oplock==LEVEL_II_OPLOCK_RETURN;
1916 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_LEVEL_II);
1917 //if (!NT_STATUS_IS_OK(status)) result=false;
1918 } else if (clone->oplock==LEVEL_II_OPLOCK_RETURN) {
1919 DEBUG(5,("%s: Breaking clone oplock from %d, cache no longer valid\n",__LOCATION__, clone->oplock));
1920 cache_handle_stale(f);
1921 clone->oplock=NO_OPLOCK_RETURN;
1922 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_NONE);
1923 //if (!NT_STATUS_IS_OK(status)) result=false;
1926 f->oplock=clone->oplock;
1927 /* and fake the rest of the response struct */
/* synthesize the open reply from the shared metadata cache */
1928 io->generic.out.oplock_level=f->oplock;
1929 io->generic.out.create_action=NTCREATEX_ACTION_EXISTED;
1930 io->generic.out.create_time=f->metadata->info_data.create_time;
1931 io->generic.out.access_time=f->metadata->info_data.access_time;
1932 io->generic.out.write_time=f->metadata->info_data.write_time;
1933 io->generic.out.change_time=f->metadata->info_data.change_time;
1934 io->generic.out.attrib=f->metadata->info_data.attrib;
1935 io->generic.out.alloc_size=f->metadata->info_data.alloc_size;
1936 io->generic.out.size=f->metadata->info_data.size;
1937 io->generic.out.file_type=f->metadata->info_data.file_type;
1938 io->generic.out.ipc_state=f->metadata->info_data.ipc_state;
1939 io->generic.out.is_directory=f->metadata->info_data.is_directory;
1940 /* optional return values matching SMB2 tagged
1941 values in the call */
1942 //io->generic.out.maximal_access;
1943 return NT_STATUS_OK;
/* no clone: this handle gets its own metadata cache */
1945 f->metadata=talloc_zero(f, struct file_metadata);
1946 NT_STATUS_HAVE_NO_MEMORY(f->metadata);
1947 f->metadata->count=1;
1949 /* if oplocks aren't requested, optionally override and request them */
1950 if (! (io->generic.in.flags & (OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK))
1951 && private->fake_oplock) {
1952 io->generic.in.flags |= OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK;
/* synchronous path: open, bind the handle, seed metadata and cache inline */
1955 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1956 union smb_handle *file;
1958 status = smb_raw_open(private->tree, req, io);
1959 NT_STATUS_NOT_OK_RETURN(status);
1961 SMB_OPEN_OUT_FILE(io, file);
1962 f->fnum = file->fnum;
1963 file->ntvfs = NULL;
1964 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1965 NT_STATUS_NOT_OK_RETURN(status);
1966 file->ntvfs = f->h;
1967 DLIST_ADD(private->files, f);
1969 f->oplock=io->generic.out.oplock_level;
1971 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1972 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1973 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1975 if (private->cache_enabled) {
1976 f->cache=cache_open(private->cache, f, io, f->oplock, private->cache_readahead);
1977 if (private->fake_valid) {
1978 cache_handle_validated(f, cache_handle_len(f));
1980 if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
1983 return NT_STATUS_OK;
/* async path: async_open() finishes handle/cache setup on completion */
1986 c_req = smb_raw_open_send(private->tree, io);
1988 ASYNC_RECV_TAIL_F(io, async_open, f);
1992 create a directory
/* Create a directory: simple pass-through to the remote server, sync or
   async depending on the request's capabilities. */
1994 static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
1995 struct ntvfs_request *req, union smb_mkdir *md)
1997 struct proxy_private *private = ntvfs->private_data;
1998 struct smbcli_request *c_req;
2000 SETUP_PID;
2002 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2003 return smb_raw_mkdir(private->tree, md);
2006 c_req = smb_raw_mkdir_send(private->tree, md);
2008 SIMPLE_ASYNC_TAIL;
2012 remove a directory
/* Remove a directory: simple pass-through to the remote server, sync or
   async depending on the request's capabilities. */
2014 static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
2015 struct ntvfs_request *req, struct smb_rmdir *rd)
2017 struct proxy_private *private = ntvfs->private_data;
2018 struct smbcli_request *c_req;
2020 SETUP_PID;
2022 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2023 return smb_raw_rmdir(private->tree, rd);
2025 c_req = smb_raw_rmdir_send(private->tree, rd);
2027 SIMPLE_ASYNC_TAIL;
2031 rename a set of files
/* Rename a set of files: simple pass-through to the remote server, sync or
   async depending on the request's capabilities. */
2033 static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
2034 struct ntvfs_request *req, union smb_rename *ren)
2036 struct proxy_private *private = ntvfs->private_data;
2037 struct smbcli_request *c_req;
2039 SETUP_PID;
2041 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2042 return smb_raw_rename(private->tree, ren);
2045 c_req = smb_raw_rename_send(private->tree, ren);
2047 SIMPLE_ASYNC_TAIL;
2051 copy a set of files
/* Server-side copy is not implemented by this backend. */
2053 static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
2054 struct ntvfs_request *req, struct smb_copy *cp)
2056 return NT_STATUS_NOT_SUPPORTED;
2059 /* we only define this separately so we can easily spot read calls in
2060 pending based on ( c_req->private.fn == async_read_handler ) */
/* Thin wrapper around async_chain_handler whose distinct function pointer
   lets pending read requests be recognised by address. */
2061 static void async_read_handler(struct smbcli_request *c_req)
2063 async_chain_handler(c_req);
/* Chained handler run when a read-ahead completes: returns the read-ahead
   quota slot to the session and decrements the per-file pending count. */
2066 NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2068 struct proxy_private *private = async->proxy;
2069 struct smbcli_request *c_req = async->c_req;
2070 struct proxy_file *f = async->f;
2071 union smb_read *io = async->parms;
2073 /* if request is not already received by a chained handler, read it */
2074 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2076 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2077 f->readahead_pending, private->readahead_spare));
/* release this request's read-ahead accounting */
2079 f->readahead_pending--;
2080 private->readahead_spare++;
2082 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2083 f->readahead_pending, private->readahead_spare));
2085 return status;
2089 a handler for async read replies - speculative read-aheads.
2090 It merely saves in the cache. The async chain handler will call send_fn if
2091 there is one, or if sync_chain_handler is used the send_fn is called by
2092 the ntvfs back end.
/* Chained handler for speculative read-aheads: on success either advances
   the cache's validated extent (for reads that look like re-validations) or
   saves the returned data into the cache. */
2094 NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2096 struct smbcli_request *c_req = async->c_req;
2097 struct proxy_file *f = async->f;
2098 union smb_read *io = async->parms;
2100 /* if request is not already received by a chained handler, read it */
2101 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2103 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2104 get_friendly_nt_error_msg(status)));
2106 NT_STATUS_NOT_OK_RETURN(status);
2108 /* if it was a validate read we don't need to save anything unless it failed.
2109 Until we use Proxy_read structs we can't tell, so guess */
/* heuristic: a full read with mincnt < maxcnt is assumed to be a validate */
2110 if (io->generic.out.nread == io->generic.in.maxcnt &&
2111 io->generic.in.mincnt < io->generic.in.maxcnt) {
2112 /* looks like a validate read, just move the validate pointer, the
2113 original read-request has already been satisfied from cache */
2114 DEBUG(3,("%s megavalidate suceeded, validate to %lld\n",__FUNCTION__,
2115 io->generic.in.offset + io->generic.out.nread));
2116 cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
2117 } else {
2118 DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
2119 cache_handle_save(f, io->generic.out.data,
2120 io->generic.out.nread,
2121 io->generic.in.offset);
2124 DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2125 return status;
2128 /* handler for fragmented reads */
/* Chained handler for one fragment of a fragmented read.  Copies the
   fragment's data into the master read (io1), tracks partial/short reads so
   the master nread never spans a hole, and fires the master response when
   the last fragment arrives.  io2 is this fragment's async_read_fragment. */
2129 NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2131 struct proxy_private *private = async->proxy;
2132 struct smbcli_request *c_req = async->c_req;
2133 struct ntvfs_request *req = async->req;
2134 struct proxy_file *f = async->f;
2135 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2136 /* this is the io against which the fragment is to be applied */
2137 union smb_read *io = talloc_get_type_abort(io1, union smb_read);
2138 /* this is the io for the read that issued the callback */
2139 union smb_read *io_frag = fragment->io_frag; /* async->parms; */
2140 struct async_read_fragments* fragments=fragment->fragments;
2142 /* if request is not already received by a chained handler, read it */
2143 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
2144 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2146 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2147 get_friendly_nt_error_msg(status)));
2149 fragment->status = status;
2151 /* remove fragment from fragments */
2152 DLIST_REMOVE(fragments->fragments, fragment);
2154 #warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
2155 /* in which case if we will want to collate all responses and return a valid read
2156 for the leading NT_STATUS_OK fragments */
2158 /* did this one fail, inducing a general fragments failure? */
2159 if (!NT_STATUS_IS_OK(fragment->status)) {
2160 /* preserve the status of the fragment with the smallest offset
2161 when we can work out how */
2162 if (NT_STATUS_IS_OK(fragments->status)) {
2163 fragments->status=fragment->status;
/* a failed fragment means the cache can no longer be trusted as validated */
2166 cache_handle_novalidate(f);
2167 DEBUG(5,("** Devalidated proxy due to read failure\n"));
2168 } else {
2169 /* No fragments have yet failed, keep collecting responses */
2170 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2171 /* Find memcpy window, copy data from the io_frag to the io */
2172 off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
2173 /* used to use mincnt */
2174 off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
2175 off_t end_offset=MIN(io_extent, extent);
2176 /* ASSERT(start_offset <= end_offset) */
2177 /* ASSERT(start_offset <= io_extent) */
/* fragment lies entirely beyond the master read: nothing to copy */
2178 if (start_offset >= io_extent) {
2179 DEBUG(3,("useless read-ahead tagged on to: %s",__LOCATION__));
2180 } else {
2181 uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
2182 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2183 /* src == dst in cases where we did not latch onto someone elses
2184 read, but are handling our own */
2185 if (src != dst)
2186 memcpy(dst, src, end_offset - start_offset);
2189 /* There should be a better way to detect, but it needs the proxy rpc struct
2190 not ths smb_read struct */
2191 if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
2192 DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
2193 (long long) io_frag->generic.out.nread,
2194 (long long) io_frag->generic.in.mincnt,
2195 (long long) io_frag->generic.in.maxcnt));
2196 cache_handle_novalidate(f);
2199 /* We broke up the original read. If not enough of this sub-read has
2200 been read, and then some of then next block, it could leave holes!
2201 We will only acknowledge up to the first partial read, and treat
2202 it as a small read. If server can return NT_STATUS_OK for a partial
2203 read so can we, so we preserve the response.
2204 "enough" is all of it (maxcnt), except on the last block, when it has to
2205 be enough to fill io->generic.in.mincnt. We know it is the last block
2206 if nread is small but we could fill io->generic.in.mincnt */
2207 if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
2208 end_offset < io->generic.in.offset + io->generic.in.mincnt) {
2209 DEBUG(4,("Fragmented read only partially successful\n"));
2211 /* Shrink the master nread (or grow to this size if we are first partial */
2212 if (! fragments->partial ||
2213 (io->generic.in.offset + io->generic.out.nread) > extent) {
2214 io->generic.out.nread = extent - io->generic.in.offset;
2217 /* stop any further successes from extending the partial read */
2218 fragments->partial=true;
2219 } else {
2220 /* only grow the master nwritten if we haven't logged a partial write */
2221 if (! fragments->partial &&
2222 (io->generic.in.offset + io->generic.out.nread) < extent ) {
2223 io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
2228 /* Was it the last fragment, or do we know enough to send a response? */
2229 if (! fragments->fragments) {
2230 DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
2231 io->generic.out.nread, io->generic.in.mincnt,
2232 get_friendly_nt_error_msg(fragments->status)));
2233 if (fragments->async) {
2234 req->async_states->status=fragments->status;
2235 DEBUG(5,("Fragments async response sending\n"));
2236 #warning its not good freeing early if other pending requests have io allocated against this request which will now be freed
2237 /* esp. as they may be attached to by other reads. Maybe attachees should be taking reference, but how will they
2238 know the top level they need to take reference too.. */
2239 #warning should really queue a sender here, not call it */
2240 req->async_states->send_fn(req);
2241 DEBUG(5,("Async response sent\n"));
2242 } else {
2243 DEBUG(5,("Fragments SYNC return\n"));
2247 /* because a c_req may be shared by many req, chained handlers must return
2248 a status pertaining to the general validity of this specific c_req, not
2249 to their own private processing of the c_req for the benefit of their req
2250 which is returned in fragments->status
2252 return status;
2255 /* Issue read-ahead X bytes where X is the window size calculation based on
2256 server_latency * server_session_bandwidth
2257 where latency is the idle (link) latency and bandwidth is less than or equal_to
2258 to actual bandwidth available to the server.
2259 Read-ahead should honour locked areas in whatever way is necessary (who knows?)
2260 read_ahead is defined here and not in the cache engine because it requires too
2261 much knowledge of private structures
2263 /* The concept is buggy unless we can tell the next proxy that these are
2264 read-aheads, otherwise chained proxy setups will each read-ahead of the
2265 read-ahead which can put a larger load on the final server.
2266 Also we probably need to distinguish between
2267 * cache-less read-ahead
2268 * cache-revalidating read-ahead
/* Issue speculative read-aheads after a read of as_read bytes at
   io->...offset.  Extends the cache's readahead_extent up to
   MIN(new_extent, next_position) + readahead_window (clamped to the known
   file size when we hold an oplock), limited by the session-wide
   readahead_spare quota.  Returns NT_STATUS_UNSUCCESSFUL when read-ahead
   does not apply; otherwise NT_STATUS_OK. */
2270 NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
2271 union smb_read *io, ssize_t as_read)
2273 struct proxy_private *private = ntvfs->private_data;
2274 struct smbcli_tree *tree = private->tree;
2275 struct cache_file_entry *cache;
2276 off_t next_position; /* this read offset+length+window */
2277 off_t end_position; /* position we read-ahead to */
2278 off_t cache_populated;
2279 off_t read_position, new_extent;
/* read-ahead only makes sense when the far side is another proxy */
2281 if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
2282 DEBUG(5,("A\n"));
2283 if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
2284 DEBUG(5,("B\n"));
2285 cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
2286 DEBUG(5,("C\n"));
2287 /* don't read-ahead if we are in bulk validate mode */
2288 if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
2289 DEBUG(5,("D\n"));
2290 /* if we can't trust what we read-ahead anyway then don't bother although
2291 * if delta-reads are enabled we can do so in order to get something to
2292 * delta against */
2293 DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
2294 (long long int)(cache_len(cache)),
2295 (long long int)(cache->readahead_extent),
2296 (long long int)(as_read),
2297 cache->readahead_window,private->cache_readahead));
2298 if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
2299 DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
2300 cache->status));
2301 return NT_STATUS_UNSUCCESSFUL;
2304 /* as_read is the mincnt bytes of a request being made or the
2305 out.nread of completed sync requests
2306 Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
2307 then this may often NOT be the case if readahead_window < requestsize; so we will
2308 get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
2309 all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
2310 this and have failed sparse writes adjust the cache->readahead_extent back to actual size */
2312 /* predict the file pointers next position */
2313 next_position=io->generic.in.offset + as_read;
2315 /* if we know how big the file is, don't read beyond */
/* f->oplock implies f->metadata->info_data.size can be trusted */
2316 if (f->oplock && next_position > f->metadata->info_data.size) {
2317 next_position = f->metadata->info_data.size;
2319 DEBUG(5,("Next position: %lld (%lld + %lld)\n",
2320 (long long int)next_position,
2321 (long long int)io->generic.in.offset,
2322 (long long int)as_read));
2323 /* calculate the limit of the validated or requested cache */
2324 cache_populated=MAX(cache->validated_extent, cache->readahead_extent);
2326 /* will the new read take us beyond the current extent without gaps? */
2327 if (cache_populated < io->generic.in.offset) {
2328 /* this read-ahead is a read-behind-pointer */
2329 new_extent=cache_populated;
2330 } else {
2331 new_extent=MAX(next_position, cache_populated);
2334 /* as far as we can tell new_extent is the smallest offset that doesn't
2335 have a pending read request on. Of course if we got a short read then
2336 we will have a cache-gap which we can't handle and need to read from
2337 a shrunk readahead_extent, which we don't currently handle */
2338 read_position=new_extent;
2340 /* of course if we know how big the remote file is we should limit at that */
2341 /* we should also mark-out which read-ahead requests are pending so that we
2342 * don't repeat them while they are in-transit. */
2343 /* we can't really use next_position until we can have caches with holes
2344 UNLESS next_position < new_extent, because a next_position well before
2345 new_extent is no reason to extend it further, we only want to extended
2346 with read-aheads if we have cause to suppose the read-ahead data will
2347 be wanted, i.e. the next_position is near new_extent.
2348 So we can't justify reading beyond window+next_position, but if
2349 next_position is leaving gaps, we use new_extent instead */
2350 end_position=MIN(new_extent, next_position) + cache->readahead_window;
2351 if (f->oplock) {
2352 end_position=MIN(end_position, f->metadata->info_data.size);
2354 DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
2355 (long long int)read_position,
2356 (long long int)(next_position + cache->readahead_window),
2357 cache->readahead_window,
2358 (long long int)end_position,
2359 private->readahead_spare));
2360 /* do we even need to read? */
2361 if (! (read_position < end_position)) return NT_STATUS_OK;
2363 /* readahead_spare is for the whole session (mid/tid?) and may need sharing
2364 out over files and other tree-connects or something */
/* issue block-sized read-aheads until the window is covered or quota runs out */
2365 while (read_position < end_position &&
2366 private->readahead_spare > 0) {
2367 struct smbcli_request *c_req = NULL;
2368 ssize_t read_remaining = end_position - read_position;
/* block size is limited by negotiated max_xmit less SMB header overhead */
2369 ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
2370 MIN(read_remaining, private->cache_readaheadblock));
2371 void *req = NULL; /* for the ASYNC_REC_TAIL_F_ORPHAN macro */
2372 uint8_t* data;
2373 union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);
2375 if (! io_copy)
2376 return NT_STATUS_NO_MEMORY;
2378 #warning we are ignoring read_for_execute as far as the cache goes
2379 io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
2380 io_copy->generic.in.offset=read_position;
2381 io_copy->generic.in.mincnt=read_block;
2382 io_copy->generic.in.maxcnt=read_block;
2383 /* what is generic.in.remaining for? */
2384 io_copy->generic.in.remaining = MIN(65535,read_remaining);
2385 io_copy->generic.out.nread=0;
2387 #warning someone must own io_copy, tree, maybe?
2388 data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
2389 DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
2390 if (! data) {
2391 talloc_free(io_copy);
2392 return NT_STATUS_NO_MEMORY;
2394 io_copy->generic.out.data=data;
2396 /* are we able to pull anything from the cache to validate this read-ahead?
2397 NOTE: there is no point in reading ahead merely to re-validate the
2398 cache if we don't have oplocks and can't save it....
2399 ... or maybe there is if we think a read will come that can be matched
2400 up to this response while it is still on the wire */
2401 #warning so we need to distinguish between pipe-line read-ahead and revalidation
/* pre-fill from cache so a matching response can validate rather than fetch */
2402 if (/*(cache->status & CACHE_READ)!=0 && */
2403 cache_len(cache) >
2404 (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
2405 cache->validated_extent <
2406 (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
2407 ssize_t pre_fill;
2409 pre_fill = cache_raw_read(cache, data,
2410 io_copy->generic.in.offset,
2411 io_copy->generic.in.maxcnt);
2412 DEBUG(5,("Data read into %p %d\n",data, pre_fill));
2413 if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
2414 io_copy->generic.out.nread=pre_fill;
2415 read_block=pre_fill;
2419 c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
2421 if (c_req) {
/* account for this in-flight read-ahead; undone by async_readahead_dec */
2422 private->readahead_spare--;
2423 f->readahead_pending++;
2424 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
2425 if (cache->readahead_extent < read_position+read_block)
2426 cache->readahead_extent=read_position+read_block;
2427 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2428 /* so we can decrease read-ahead counter for this session */
2429 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
2430 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);
2432 /* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
2433 talloc_steal(c_req->async.private, c_req);
2434 talloc_steal(c_req->async.private, io_copy);
2435 read_position+=read_block;
2436 } else {
2437 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
2438 talloc_free(io_copy);
2439 break;
2443 DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
2444 return NT_STATUS_OK;
/* State carried across the chain of asynchronous fragment reads issued by
   proxy_validate(): the original request, progress counters and a running
   MD5 over everything read so far. Allocated on the ntvfs request so it is
   freed with the request. */
2447 struct proxy_validate_parts_parts {
/* the PROXY_VALIDATE read being serviced; supplies in.offset/in.maxcnt and
   the client-side digest (r->in.digest.digest) we compare against */
2448 struct proxy_Read* r;
2449 struct ntvfs_request *req;
2450 struct proxy_file *f;
/* list of outstanding fragment reads plus their collective status */
2451 struct async_read_fragments *fragments;
/* file offset of the next fragment still to be issued */
2452 off_t offset;
/* bytes of r->in.maxcnt not yet covered by an issued fragment */
2453 ssize_t remaining;
/* set once all parts are read (or no more need issuing); triggers
   proxy_validate_complete() */
2454 bool complete;
/* finalized MD5 of the data read, filled in by MD5Final() on completion */
2455 declare_checksum(digest);
/* running MD5 context, updated as each fragment arrives */
2456 struct MD5Context context;
/* forward declarations for the validate read-loop trio below */
2459 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
2460 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
2461 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2462 struct proxy_validate_parts_parts *parts);
2464 /* this will be the new struct proxy_Read based read function, for now
2465 it just deals with non-cached based validate to a regular server */
/* Entry point for a PROXY_VALIDATE read: set up the parts/fragments state,
   start the fragment read-loop, and either complete synchronously (if the
   loop finished before returning) or mark the request async and let the
   callbacks finish it. Returns NT_STATUS_INVALID_HANDLE without a file. */
2466 static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
2467 struct ntvfs_request *req,
2468 struct proxy_Read *r,
2469 struct proxy_file *f)
/* NOTE(review): private appears unused in the visible body of this
   function — confirm before removing */
2471 struct proxy_private *private = ntvfs->private_data;
2472 struct proxy_validate_parts_parts *parts;
2473 struct async_read_fragments *fragments;
2474 NTSTATUS status;
2476 if (!f) return NT_STATUS_INVALID_HANDLE;
2478 DEBUG(5,("%s: fnum=%d **** %lld bytes \n\n\n\n",__LOCATION__,f->fnum,(long long int)r->in.maxcnt));
/* parts hangs off the request; fragments hangs off parts, so one free
   tears the whole state down */
2480 parts = talloc_zero(req, struct proxy_validate_parts_parts);
2481 DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
2482 NT_STATUS_HAVE_NO_MEMORY(parts);
2484 fragments = talloc_zero(parts, struct async_read_fragments);
2485 NT_STATUS_HAVE_NO_MEMORY(fragments);
2487 parts->fragments=fragments;
2489 parts->r=r;
2490 parts->f=f;
2491 parts->req=req;
2492 /* processed offset */
2493 parts->offset=r->in.offset;
2494 parts->remaining=r->in.maxcnt;
2495 fragments->async=true;
2497 MD5Init (&parts->context);
2499 /* start a read-loop which will continue in the callback until it is
2500 all done */
2501 status=proxy_validate_parts(ntvfs, parts);
2502 if (parts->complete) {
2503 /* Make sure we are not async */
2504 DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
2505 return proxy_validate_complete(parts);
2508 /* Assert if status!=NT_STATUS_OK then parts->complete==true */
2509 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
2510 DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
2511 return status;
/* Finish a PROXY_VALIDATE read: finalize the MD5 over all fragments,
   compare it with the digest the client sent, and fill in r->out
   accordingly. On a match only flags are returned (the client can use its
   cache); on a mismatch the freshly read data is returned, optionally
   zlib-compressed, together with the server-side cache file name. If the
   request had gone async this also invokes the request's send_fn. */
2514 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
2516 NTSTATUS status;
2517 struct proxy_Read* r=parts->r;
2518 struct proxy_file *f=parts->f;
2520 DEBUG(5,("%s: %d/%d bytes \n\n\n\n",__LOCATION__,r->out.nread,r->in.maxcnt));
2522 MD5Final(parts->digest, &parts->context);
/* the collective status of all fragment reads decides the overall result */
2524 status = parts->fragments->status;
2525 r->out.result = status;
2526 r->out.response.generic.count=r->out.nread;
2527 r->out.cache_name.count=0;
2529 DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg (status),
2530 r->out.response.generic.count));
2532 DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
2533 dump_data (5, r->in.digest.digest, sizeof(parts->digest));
2534 DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
2535 dump_data (5, parts->digest, sizeof(parts->digest));
2537 if (NT_STATUS_IS_OK(status) &&
2538 (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
/* digests match: the client's cached copy is valid, no data returned */
2539 r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
2540 DEBUG(5,("======= VALIDATED FINE \n\n\n"));
2541 } else {
2542 if (r->in.flags & PROXY_USE_ZLIB) {
2543 ssize_t size = r->out.response.generic.count;
2544 DEBUG(5,("======= VALIDATED WRONG; compress size %d \n\n\n",size));
/* compress_block compresses in place; on success switch the response
   to the compress form, aliasing the same buffer */
2545 if (compress_block(r->out.response.generic.data, &size) ) {
2546 r->out.flags|=PROXY_USE_ZLIB;
2547 r->out.response.compress.count=size;
2548 r->out.response.compress.data=r->out.response.generic.data;
/* NOTE(review): divides by r->out.nread — a zero nread here would
   fault; confirm nread>0 is guaranteed on this path */
2549 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2550 __LOCATION__,r->out.nread,size,size*100/r->out.nread));
2553 /* return cache filename as a ghastly hack for now */
/* NOTE(review): dereferences f->cache unconditionally — confirm the
   validate path cannot run with a cache-less handle */
2554 r->out.cache_name.s=f->cache->cache_name;
2555 r->out.cache_name.count=strlen(r->out.cache_name.s)+1;
2556 DEBUG(5,("%s: writing cache name: %s\n",__LOCATION__, f->cache->cache_name));
2557 /* todo: what about tiny files, buffer to small, don't validate tiny files <1K */
2560 /* assert: this must only be true if we are in a callback */
2561 if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
2562 /* we are async complete, we need to call the sendfn */
2563 parts->req->async_states->status=status;
2564 DEBUG(5,("Fragments async response sending\n"));
2566 parts->req->async_states->send_fn(parts->req);
2567 return NT_STATUS_OK;
2569 return status;
/* Callback for one fragment of a PROXY_VALIDATE read-loop: receive the raw
   read reply (unless a chained handler already did), save the data to the
   local cache, copy any overlap into the client's response buffer, fold the
   bytes into the running MD5, and issue the next fragment. When the loop is
   complete (or a fragment failed) hand off to proxy_validate_complete().
   io1 is the proxy_validate_parts_parts state, io2 the finished fragment. */
2572 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2574 struct smbcli_request *c_req = async->c_req;
2575 struct ntvfs_request *req = async->req;
2576 struct proxy_file *f = async->f;
2577 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2578 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2579 /* this is the io against which the fragment is to be applied */
2580 struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
2581 struct proxy_Read* r=parts->r;
2582 /* this is the io for the read that issued the callback */
2583 union smb_read *io_frag = fragment->io_frag;
2584 struct async_read_fragments* fragments=fragment->fragments;
2586 /* if request is not already received by a chained handler, read it */
2587 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2588 DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2589 DEBUG(5,("\n\n%s: parts=%p c_req=%p io_frag=%p read %lld\n",__LOCATION__,parts, c_req, io_frag,(long long int)io_frag->generic.out.nread));
2591 fragment->status=status;
2593 if (NT_STATUS_IS_OK(status)) {
2594 /* TODO: If we are not sequentially "next" the queue until we can do it */
2595 /* log this data in r->out.generic.data */
2596 /* Find memcpy window, copy data from the io_frag to the io */
2598 /* Also write validate to cache */
2599 if (f && f->cache) {
2600 cache_save(f->cache, io_frag->generic.out.data, io_frag->generic.out.nread, io_frag->generic.in.offset);
2603 /* extent is the last byte we (don't) read for this frag */
2604 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2605 /* start_offset is the file offset we first care about */
2606 off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
2607 /* Don't want to go past mincnt cos we don't have the buffer */
2608 off_t io_extent=r->in.offset + r->in.mincnt;
2609 off_t end_offset=MIN(io_extent, extent);
2611 /* ASSERT(start_offset <= end_offset) */
2612 /* ASSERT(start_offset <= io_extent) */
2613 /* Don't copy beyond buffer */
2614 if (! (start_offset >= io_extent)) {
2615 uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
2616 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2617 /* src == dst in cases where we did not latch onto someone elses
2618 read, but are handling our own */
2619 if (src != dst)
2620 memcpy(dst, src, end_offset - start_offset);
2621 r->out.nread=end_offset - r->in.offset;
2622 DEBUG(5,("%s: nread %lld ++++++++++++++++++\n", __LOCATION__,(long long int)r->out.nread));
/* every fragment's payload contributes to the validation digest,
   whether or not it fitted the client's buffer */
2625 MD5Update(&parts->context, io_frag->generic.out.data,
2626 io_frag->generic.out.nread);
2628 parts->fragments->status=status;
/* continue the loop: issue the next fragment (may set parts->complete) */
2629 status=proxy_validate_parts(ntvfs, parts);
2630 } else {
/* a failed fragment poisons the collective status; no further reads */
2631 parts->fragments->status=status;
2634 DLIST_REMOVE(fragments->fragments, fragment);
2635 /* this will free the io_frag too */
2636 talloc_free(fragment);
2638 if (parts->complete || NT_STATUS_IS_ERR(status)) {
2639 /* this will call sendfn, the chain handler won't know... but
2640 should have no more handlers queued */
2641 return proxy_validate_complete(parts);
2644 return NT_STATUS_OK;
2647 /* continue a read loop, possibly from a callback */
/* Issue the next raw-read fragment of a PROXY_VALIDATE loop, sized to fit
   the negotiated max_xmit. Sets parts->complete and returns early once
   parts->offset has covered r->in.offset + r->in.maxcnt. Each fragment gets
   its own io/buffer and is chained to async_proxy_validate_parts so the
   loop continues from the callback. */
2648 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2649 struct proxy_validate_parts_parts *parts)
2651 struct proxy_private *private = ntvfs->private_data;
2652 union smb_read *io_frag;
2653 struct async_read_fragment *fragment;
2654 struct smbcli_request *c_req = NULL;
/* largest read that fits one SMB: max_xmit less header overhead
   (MIN_SMB_SIZE+32) */
2655 ssize_t size=private->tree->session->transport->negotiate.max_xmit \
2656 - (MIN_SMB_SIZE+32);
2658 /* Have we already read enough? */
2659 if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
2660 parts->complete=true;
2661 return NT_STATUS_OK;
2664 size=MIN(size, parts->remaining);
/* fragment owns io_frag owns the data buffer: freeing the fragment in the
   callback releases everything */
2666 fragment=talloc_zero(parts->fragments, struct async_read_fragment);
2667 NT_STATUS_HAVE_NO_MEMORY(fragment);
2669 io_frag = talloc_zero(fragment, union smb_read);
2670 NT_STATUS_HAVE_NO_MEMORY(io_frag);
2672 io_frag->generic.out.data = talloc_size(io_frag, size);
2673 NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);
2675 io_frag->generic.level = RAW_READ_GENERIC;
2676 io_frag->generic.in.file.fnum = parts->r->in.fnum;
2677 io_frag->generic.in.offset = parts->offset;
2678 io_frag->generic.in.mincnt = size;
2679 io_frag->generic.in.maxcnt = size;
2680 io_frag->generic.in.remaining = 0;
2681 #warning maybe true is more permissive?
2682 io_frag->generic.in.read_for_execute = false;
2684 DEBUG(5,("%s: issue part read offset=%lld, size=%lld,%lld\n",__LOCATION__,
2685 (long long int)io_frag->generic.in.offset,
2686 (long long int)io_frag->generic.in.mincnt,
2687 (long long int)io_frag->generic.in.maxcnt));
2689 //c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
2690 c_req = smb_raw_read_send(private->tree, io_frag);
2691 NT_STATUS_HAVE_NO_MEMORY(c_req);
/* advance the loop cursor before the reply can arrive */
2693 parts->offset+=size;
2694 parts->remaining-=size;
2695 fragment->c_req = c_req;
2696 fragment->io_frag = io_frag;
2697 fragment->fragments=parts->fragments;
2698 DLIST_ADD(parts->fragments->fragments, fragment);
/* chain the validate callback, then install the generic read handler;
   req is deliberately NULL inside this scope for the macros */
2700 { void* req=NULL;
2701 ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
2702 ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
2705 DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__LOCATION__,parts, c_req, io_frag));
2707 return NT_STATUS_OK;
2711 read from a file
/* Main NTVFS read entry point. Strategy, in order:
   1. serve entirely from the local cache when enough validated bytes exist;
   2. attach to any matching in-flight read-ahead on the same fnum;
   3. fragment the remainder into direct reads, possibly promoting the last
      fragment to a bulk VALIDATE read against a remote proxy server;
   4. kick off fresh read-aheads and, if the caller cannot go async,
      sync-wait on all outstanding fragments before returning. */
2713 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
2714 struct ntvfs_request *req, union smb_read *io)
2716 struct proxy_private *private = ntvfs->private_data;
2717 struct smbcli_request *c_req;
2718 struct proxy_file *f;
2719 struct async_read_fragments *fragments=NULL;
2720 /* how much of read-from-cache is certainly valid */
2721 ssize_t valid=0;
2722 off_t offset=io->generic.in.offset+valid;
2723 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
2725 SETUP_PID;
2727 if (io->generic.level != RAW_READ_GENERIC &&
2728 private->map_generic) {
2729 return ntvfs_map_read(ntvfs, req, io);
2732 SETUP_FILE_HERE(f);
2734 DEBUG(3,("\n%s() fnum=%d offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
2735 io->generic.in.file.fnum,
2736 io->generic.in.offset,
2737 io->generic.in.mincnt,
2738 io->generic.in.maxcnt));
2740 io->generic.out.nread=0;
2742 /* if we have oplocks and know the files size, don't even ask the server
2743 for more */
2744 if (f->oplock) {
2745 if (io->generic.in.offset >= f->metadata->info_data.size) {
2746 io->generic.in.mincnt=0;
2747 io->generic.in.maxcnt=0;
2748 io->generic.out.nread=0;
2749 DEBUG(5,("Reading beyond known length %lld; return 0\n",(long long)f->metadata->info_data.size));
2750 return NT_STATUS_OK;
2751 } else {
/* clamp the request to the known EOF so we never over-ask */
2752 io->generic.in.mincnt=MIN(io->generic.in.mincnt,
2753 f->metadata->info_data.size - io->generic.in.offset);
2754 io->generic.in.maxcnt=MIN(io->generic.in.maxcnt,
2755 f->metadata->info_data.size - io->generic.in.offset);
2757 DEBUG(5,("Oplock and known size, limiting read to %lld (s=%d)\n",
2758 f->metadata->info_data.size, io->generic.in.mincnt));
2762 /* attempt to read from cache. if nread becomes non-zero then we
2763 have cache to validate. Instead of returning "valid" value, cache_read
2764 should probably return an async_read_fragment structure */
2766 if (private->cache_enabled) {
2767 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
2769 if (NT_STATUS_IS_OK(status)) {
2770 /* if we read enough valid data, return it */
2771 if (valid > 0 && valid>=io->generic.in.mincnt) {
2772 /* valid will not be bigger than maxcnt */
2773 io->generic.out.nread=valid;
2774 DEBUG(1,("Read from cache offset=%d size=%d\n",
2775 (int)(io->generic.in.offset),
2776 (int)(io->generic.out.nread)) );
2777 return status;
2780 DEBUG(5,("Cache read status: %s\n",get_friendly_nt_error_msg (status)));
/* NOTE(review): talloc_zero result is not checked for NULL here, unlike
   the other allocations in this file — confirm intent */
2783 fragments=talloc_zero(req, struct async_read_fragments);
2784 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
2785 /* See if there are pending reads that would satisfy this request
2786 We have a validated read up to io->generic.out.nread. Anything between
2787 this and mincnt MUST be read, but we could first try and attach to
2788 any pending read-ahead on the same file.
2789 If those read-aheads fail we will re-issue a regular read from the
2790 callback handler and hope it hasn't taken too long. */
2792 /* offset is the extentof the file from which we still need to find
2793 matching read-requests. */
2794 offset=io->generic.in.offset+valid;
2795 /* limit is the byte beyond the last byte for which we need a request.
2796 This used to be mincnt, but is now maxcnt to cope with validate reads.
2797 Maybe we can switch back to mincnt when proxy_read struct is used
2798 instead of smb_read.
2800 limit=io->generic.in.offset+io->generic.in.maxcnt;
2802 while (offset < limit) {
2803 /* Should look for the read-ahead with offset <= in.offset+out.nread
2804 with the longest span, but there is only likely to be one anyway so
2805 just take the first */
2806 struct async_info* pending=private->pending;
2807 union smb_read *readahead_io=NULL;
2808 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
/* scan the pending queue for a read-ahead on this fnum that covers
   the current offset */
2809 while(pending) {
2810 if (pending->c_req->async.fn == async_read_handler) {
2811 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
2812 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
2814 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
2815 readahead_io->generic.in.offset <= offset &&
2816 readahead_io->generic.in.offset +
2817 readahead_io->generic.in.mincnt > offset) break;
2819 readahead_io=NULL;
2820 pending=pending->next;
2822 /* ASSERT(readahead_io == pending->c_req->async.params) */
2823 if (pending && readahead_io) {
2824 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2825 fragment->fragments=fragments;
2826 fragment->io_frag=readahead_io;
2827 fragment->c_req = pending->c_req;
2828 /* we found one, so attach to it. We DO need a talloc_reference
2829 because the original send_fn might be called before ALL chained
2830 handlers, and our handler will call its own send_fn first. ugh.
2831 Maybe we need to seperate reverse-mapping callbacks with data users? */
2832 /* Note: the read-ahead io is passed as io, and our req io is
2833 in io_frag->io */
2834 //talloc_reference(req, pending->req);
2835 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
2836 readahead_io->generic.in.offset,
2837 readahead_io->generic.in.mincnt));
2838 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
2839 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2840 DEBUG(5,("Attached OK\n"));
2841 #warning we don't want to return if we fail to attach, just break
2842 DLIST_ADD(fragments->fragments, fragment);
2843 /* updated offset for which we have reads */
2844 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
2845 } else {
2846 /* there are no pending reads to fill this so issue one up to
2847 the maximum supported read size. We could see when the next
2848 pending read is (if any) and only read up till there... later...
2849 Issue a fragment request for what is left, clone io.
2850 In the case that there were no fragments this will be the orginal read
2851 but with a cloned io struct */
2852 off_t next_offset;
2853 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
2854 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2855 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
2856 ssize_t offset_inc=offset-io_frag->generic.in.offset;
2857 /* 250 is a guess at ndr rpc overheads */
2858 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
2859 private->tree->session->transport->negotiate.max_xmit) \
2860 - (MIN_SMB_SIZE+32);
2861 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
2862 readsize=MIN(limit-offset, readsize);
2864 DEBUG(5,("Issuing direct read\n"));
2865 /* reduce the cached read (if any). nread is unsigned */
2866 if (io_frag->generic.out.nread > offset_inc) {
2867 io_frag->generic.out.nread-=offset_inc;
2868 /* don't make nread buffer look too big */
2869 if (io_frag->generic.out.nread > readsize)
2870 io_frag->generic.out.nread = readsize;
2871 } else {
2872 io_frag->generic.out.nread=0;
2874 /* adjust the data pointer so we read to the right place */
2875 io_frag->generic.out.data+=offset_inc;
2876 io_frag->generic.in.offset=offset;
2877 io_frag->generic.in.maxcnt=readsize;
2878 /* we don't mind mincnt being smaller if this is the last frag,
2879 but then we can already handle it being bigger but not reached...
2880 The spell would be:
2881 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
2883 io_frag->generic.in.mincnt=readsize;
2884 fragment->fragments=fragments;
2885 fragment->io_frag=io_frag;
2886 #warning attach to send_fn handler
2887 /* what if someone attaches to us? Our send_fn is called from our
2888 chained handler which will be before their handler and io will
2889 already be freed. We need to keep a reference to the io and the data
2890 but we don't know where it came from in order to take a reference.
2891 We need therefore to tackle calling of send_fn AFTER all other handlers */
2893 /* Calculate next offset (in advance) */
2894 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
2896 /* if we are (going to be) the last fragment and we are in VALIDATE
2897 mode, see if we can do a bulk validate now.
2898 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
2899 don't do a validate on a receive validate read
2901 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
2902 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
2903 ssize_t length=private->cache_validatesize;
2904 declare_checksum(digest);
2906 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
2907 length, (unsigned long long) offset));
2908 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
2909 /* no point in doing it if md5'd length < current out.nread
2910 remember: out.data contains this requests cached response
2911 if validate succeeds */
2912 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
2913 /* upgrade the read, allocate the proxy_read struct here
2914 and fill in the extras, no more out-of-band stuff */
2915 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
2916 dump_data (5, digest, sizeof(digest));
2918 r=talloc_zero(io_frag, struct proxy_Read);
2919 memcpy(r->in.digest.digest, digest, sizeof(digest));
2920 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
2921 io_frag->generic.in.maxcnt = length;
2922 r->in.mincnt=io_frag->generic.in.mincnt;
2923 /* the proxy send function will calculate the checksum based on *data */
2924 } else {
2925 /* try bulk read */
2926 if (f->oplock) {
2927 DEBUG(5,("%s: *** faking bulkd read\n\n",__LOCATION__));
2928 r=talloc_zero(io_frag, struct proxy_Read);
2929 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;//| PROXY_USE_ZLIB;
2930 io_frag->generic.in.maxcnt = MIN(f->metadata->info_data.size, private->cache_validatesize);
/* NOTE(review): the next line is immediately overwritten by the one
   after it (dead store) — one of the two assignments is presumably
   wrong; confirm whether mincnt was meant to track maxcnt here */
2931 r->in.mincnt=io_frag->generic.in.maxcnt;
2932 r->in.mincnt=io_frag->generic.in.mincnt;
2934 /* not enough in cache to make it worthwhile anymore */
2935 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x len=%lld\n",
2936 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0,
2937 (unsigned long long)length));
2938 //cache_handle_novalidate(f);
2939 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
2940 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
2942 } else {
2943 if (f->cache && f->cache->status & CACHE_VALIDATE) {
2944 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
2945 (long long) next_offset,
2946 (long long) limit));
2950 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
2951 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
2952 io_frag->generic.in.maxcnt));
/* r is NULL for a plain read, non-NULL when promoted to VALIDATE */
2953 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
2954 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
2955 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
2956 fragment->c_req=c_req;
2957 DLIST_ADD(fragments->fragments, fragment);
2958 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2959 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2960 DEBUG(5,("Frag response chained\n"));
2961 /* normally we would only install the chain_handler if we wanted async
2962 response, but as it is the async_read_fragment handler that calls send_fn
2963 based on fragments->async, instead of async_chain_handler, we don't
2964 need to worry about this call completing async'ly while we are
2965 waiting on the other attached calls. Otherwise we would not attach
2966 the async_chain_handler (via async_read_handler) because of the wait
2967 below */
2968 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
2969 void* req=NULL;
2970 /* call async_chain_hander not read handler so that folk can't
2971 attach to it, till we solve the problem above */
2972 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
2974 offset = next_offset;
2976 DEBUG(5,("Next fragment\n"));
2979 /* do we still need a final fragment? Issue a read */
2981 DEBUG(5,("No frags left to read\n"));
2984 /* issue new round of read-aheads */
2985 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
2986 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
2987 DEBUG(5,("== Done Read aheads\n"));
2989 /* If we have fragments but we are not called async, we must sync-wait on them */
2990 /* did we map the entire request to pending reads? */
2991 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2992 struct async_read_fragment *fragment;
2993 DEBUG(5,("Sync waiting\n"));
2994 /* fragment get's free'd during the chain_handler so we start at
2995 the top each time */
2996 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
2997 /* Any fragments async handled while we sync-wait on one
2998 will remove themselves from the list and not get sync waited */
2999 sync_chain_handler(fragment->c_req);
3000 /* if we have a non-ok result AND we know we have all the responses
3001 up to extent, then we could quit the loop early and change the
3002 fragments->async to true so the final irrelevant responses would
3003 come async and we could send our response now - but we don't
3004 track that detail until we have cache-maps that we can use to
3005 track the responded fragments and combine responsed linear extents
3006 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
3008 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
3009 return fragments->status;
3012 DEBUG(5,("Async returning\n"));
3013 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
3014 return NT_STATUS_OK;
3018 a handler to de-fragment async write replies back to one request.
3019 Can cope with out-of-order async responses by waiting for all responses
3020 on an NT_STATUS_OK case so that nwritten is properly adjusted
/* io1 is the master write request, io2 the fragment that just completed.
   Each fragment's nwritten is folded into the master nwritten, tracking
   the first partial write so holes are never acknowledged. When the last
   fragment is removed from the list the result is cached and, for async
   requests, the send_fn is invoked. */
3022 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3024 struct smbcli_request *c_req = async->c_req;
3025 struct ntvfs_request *req = async->req;
3026 struct proxy_file *f=async->f;
3027 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
3028 /* this is the io against which the fragment is to be applied */
3029 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
3030 /* this is the io for the write that issued the callback */
3031 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
3032 struct async_write_fragments* fragments=fragment->fragments;
3033 ssize_t extent=0;
3035 /* if request is not already received by a chained handler, read it */
3036 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
3037 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
3039 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
3040 get_friendly_nt_error_msg(status)));
3042 fragment->status = status;
3044 DLIST_REMOVE(fragments->fragments, fragment);
3046 /* did this one fail? */
3047 if (! NT_STATUS_IS_OK(fragment->status)) {
/* only the first failure is recorded as the collective status */
3048 if (NT_STATUS_IS_OK(fragments->status)) {
3049 fragments->status=fragment->status;
3051 } else {
3052 /* No fragments have yet failed, keep collecting responses */
3053 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
3055 /* we broke up the write so it could all be written. If only some has
3056 been written of this block, and then some of then next block,
3057 it could leave unwritten holes! We will only acknowledge up to the
3058 first partial write, and let the client deal with it.
3059 If server can return NT_STATUS_OK for a partial write so can we */
3060 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
3061 DEBUG(4,("Fragmented write only partially successful\n"));
3063 /* Shrink the master nwritten */
3064 if ( ! fragments->partial ||
3065 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
3066 io->generic.out.nwritten = extent - io->generic.in.offset;
3068 /* stop any further successes from extended the partial write */
3069 fragments->partial=true;
3070 } else {
3071 /* only grow the master nwritten if we haven't logged a partial write */
3072 if (! fragments->partial &&
3073 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
3074 io->generic.out.nwritten = extent - io->generic.in.offset;
3079 /* if this was the last fragment, clean up */
3080 if (! fragments->fragments) {
3081 DEBUG(5,("Async write re-fragmented with %d of %d\n",
3082 io->generic.out.nwritten,
3083 io->generic.in.count));
3084 if (NT_STATUS_IS_OK(fragments->status)) {
/* mirror the successful write into the local cache and grow the
   known file size if we wrote past it */
3085 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
3086 io->generic.in.offset);
3087 if (f->metadata->info_data.size < io->generic.in.offset+io->generic.in.count) {
3088 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3091 if (fragments->async) {
3092 req->async_states->status=fragments->status;
3093 #warning its not good freeing early if other pending requests have io allocated against this request which will now be freed
3094 req->async_states->send_fn(req);
3095 DEBUG(5,("Async response sent\n"));
3096 } else {
3097 DEBUG(5,("Fragments SYNC return\n"));
3101 return status;
3105 a handler for async write replies
3107 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3109 struct smbcli_request *c_req = async->c_req;
3110 struct ntvfs_request *req = async->req;
3111 struct proxy_file *f=async->f;
3112 union smb_write *io=async->parms;
3114 if (c_req)
3115 status = smb_raw_write_recv(c_req, async->parms);
3117 cache_handle_save(f, io->generic.in.data,
3118 io->generic.out.nwritten,
3119 io->generic.in.offset);
3121 return status;
3125 write to a file
/* Main NTVFS write entry point. Sync callers get a direct (possibly
   large-write) path; async callers get either a single async write or, when
   the payload exceeds the negotiated max_xmit, a fan-out of fragment writes
   re-assembled by async_write_fragment. Successful writes are mirrored into
   the local cache. */
3127 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
3128 struct ntvfs_request *req, union smb_write *io)
3130 struct proxy_private *private = ntvfs->private_data;
3131 struct smbcli_request *c_req;
3132 struct proxy_file *f;
3134 SETUP_PID;
3136 if (io->generic.level != RAW_WRITE_GENERIC &&
3137 private->map_generic) {
3138 return ntvfs_map_write(ntvfs, req, io);
3140 SETUP_FILE_HERE(f);
3142 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
3143 #warning ERROR get rid of this
/* synchronous path: the caller cannot go async, so write inline */
3144 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3145 NTSTATUS status;
3146 if (PROXY_REMOTE_SERVER(private)) {
3147 /* Do a proxy write */
3148 status=proxy_smb_raw_write(ntvfs, io, f);
3149 } else if (io->generic.in.count >
3150 private->tree->session->transport->negotiate.max_xmit) {
3152 /* smbcli_write can deal with large writes, which are bigger than
3153 tree->session->transport->negotiate.max_xmit */
3154 ssize_t size=smbcli_write(private->tree,
3155 io->generic.in.file.fnum,
3156 io->generic.in.wmode,
3157 io->generic.in.data,
3158 io->generic.in.offset,
3159 io->generic.in.count);
/* NOTE(review): `size > 0` alone makes the first comparison redundant
   and accepts short writes as NT_STATUS_OK — confirm that is intended */
3161 if (size==io->generic.in.count || size > 0) {
3162 io->generic.out.nwritten=size;
3163 status=NT_STATUS_OK;
3164 } else {
3165 status=NT_STATUS_UNSUCCESSFUL;
3167 } else {
3168 status=smb_raw_write(private->tree, io);
3171 /* Save write in cache */
3172 if (NT_STATUS_IS_OK(status)) {
3173 cache_handle_save(f, io->generic.in.data,
3174 io->generic.out.nwritten,
3175 io->generic.in.offset);
/* extend the locally known file size if we wrote past it */
3176 if (f->metadata->info_data.size <
3177 io->generic.in.offset+io->generic.in.count) {
3178 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3182 return status;
3185 /* smb_raw_write_send can't deal with large writes, which are bigger than
3186 tree->session->transport->negotiate.max_xmit so we have to break it up
3187 trying to preserve the async nature of the call as much as possible */
3188 if (PROXY_REMOTE_SERVER(private)) {
3189 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
3190 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3191 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3192 } else if (io->generic.in.count <=
3193 private->tree->session->transport->negotiate.max_xmit) {
3194 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
3195 c_req = smb_raw_write_send(private->tree, io);
3196 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3197 } else {
/* fan-out path: split the oversized write into max_xmit-sized fragments */
3198 ssize_t remaining = io->generic.in.count;
3199 #warning Need an audit of these magin numbers MIN_SMB_SIZE+32
3200 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
3201 int done = 0;
3202 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
3204 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
3205 __FUNCTION__, io->generic.in.count,
3206 private->tree->session->transport->negotiate.max_xmit));
3208 fragments->io = io;
3209 io->generic.out.nwritten=0;
3210 io->generic.out.remaining=0;
3212 do {
3213 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
3214 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
3215 ssize_t size = MIN(block, remaining);
3217 fragment->fragments = fragments;
3218 fragment->io_frag = io_frag;
3220 io_frag->generic.level = io->generic.level;
3221 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
3222 io_frag->generic.in.wmode = io->generic.in.wmode;
3223 io_frag->generic.in.count = size;
3224 io_frag->generic.in.offset = io->generic.in.offset + done;
3225 io_frag->generic.in.data = io->generic.in.data + done;
3227 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
3228 if (! c_req) {
3229 /* let pending requests clean-up when ready */
3230 fragments->status=NT_STATUS_UNSUCCESSFUL;
/* detach fragments from the request so in-flight callbacks can still
   reach it after we fail this request */
3231 talloc_steal(NULL, fragments);
3232 DEBUG(3,("Can't send request fragment\n"));
3233 return NT_STATUS_UNSUCCESSFUL;
3236 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
3237 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
3238 fragment->c_req=c_req;
3239 DLIST_ADD(fragments->fragments, fragment);
3241 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3242 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
3243 DEBUG(5,("Frag response chained\n"));
3245 remaining -= size;
3246 done += size;
3247 } while(remaining > 0);
3249 /* this strategy has the callback chain attached to each c_req, so we
3250 don't use the ASYNC_RECV_TAIL* to install a general one */
3253 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
3257 a handler for async seek replies
3259 static void async_seek(struct smbcli_request *c_req)
3261 struct async_info *async = c_req->async.private;
3262 struct ntvfs_request *req = async->req;
3263 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
3264 talloc_free(async);
3265 req->async_states->send_fn(req);
/*
  seek in a file

  Forwarded verbatim to the remote server.  Runs synchronously when the
  request may not go async, otherwise the reply is collected by async_seek.
  NOTE: SETUP_PID_AND_FILE / ASYNC_RECV_TAIL are macros defined earlier in
  this file and expand against the local names private/req/c_req/io.
*/
static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req,
			   union smb_seek *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID_AND_FILE;

	/* synchronous path: block on the remote call */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_seek(private->tree, io);
	}

	/* async path: send now, async_seek completes the request later */
	c_req = smb_raw_seek_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_seek);
}
/*
  flush a file

  RAW_FLUSH_FLUSH flushes one file; RAW_FLUSH_ALL uses the wildcard fnum
  0xFFFF to flush every file on the connection; RAW_FLUSH_SMB2 cannot be
  carried over this SMB1 proxy connection and is rejected.
*/
static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req,
			    union smb_flush *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;
	switch (io->generic.level) {
	case RAW_FLUSH_FLUSH:
		SETUP_FILE;
		break;
	case RAW_FLUSH_ALL:
		/* 0xFFFF is the protocol's "all files" wildcard fnum */
		io->generic.in.file.fnum = 0xFFFF;
		break;
	case RAW_FLUSH_SMB2:
		return NT_STATUS_INVALID_LEVEL;
	}

	/* synchronous path */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_flush(private->tree, io);
	}

	/* async path: SIMPLE_ASYNC_TAIL wires up the generic completion */
	c_req = smb_raw_flush_send(private->tree, io);

	SIMPLE_ASYNC_TAIL;
}
/*
  close a file

  Cloned (shared) handles are reference counted via f->metadata->count and
  only the last close is sent to the remote server.  For read-only
  (can_clone) files we do not wait for the remote close reply.
*/
static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_close *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	union smb_close io2;
	bool can_clone;

	SETUP_PID;

	if (io->generic.level != RAW_CLOSE_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_close(ntvfs, req, io);
	}
	SETUP_FILE_HERE(f);
	/* we free the backend data before we use this value, so save it */
	can_clone=f->can_clone;
	/* Note, we aren't free-ing f, or it's h here. Should we?
	   even if file-close fails, we'll remove it from the list,
	   what else would we do? Maybe we should not remove until
	   after the proxied call completes? */
	DLIST_REMOVE(private->files, f);

	/* Don't send the close on cloned handles unless we are the last one */
	if (f->metadata && --(f->metadata->count)) {
		DEBUG(5,("%s: Fake close of %d, %d left\n",__FUNCTION__,f->fnum, f->metadata->count));
		return NT_STATUS_OK;
	}

	DEBUG(5,("%s: Real close of %d\n",__FUNCTION__, f->fnum));
	/* only close the cache if we aren't keeping references */
	//cache_close(f->cache);

	/* possibly samba can't do RAW_CLOSE_SEND yet; retry with the
	   plain RAW_CLOSE_CLOSE level before giving up */
	if (! (c_req = smb_raw_close_send(private->tree, io))) {
		if (io->generic.level == RAW_CLOSE_GENERIC) {
			ZERO_STRUCT(io2);
			io2.close.level = RAW_CLOSE_CLOSE;
			io2.close.in.file = io->generic.in.file;
			io2.close.in.write_time = io->generic.in.write_time;
			io = &io2;
		}
		c_req = smb_raw_close_send(private->tree, io);
		/* destroy handle */
		ntvfs_handle_remove_backend_data(f->h, ntvfs);
		/* NOTE(review): if this second send also fails, c_req stays
		   NULL and the recv paths below would dereference it — TODO
		   confirm smb_raw_close_send can't fail twice here */
	}

	/* If it is read-only, don't bother waiting for the result */
	if (can_clone) {
		DEBUG(5,("%s: not waiting for close response fnum=%d\n",__FUNCTION__,f->fnum));
		return NT_STATUS_OK;
	}

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smbcli_request_simple_recv(c_req);
	}

	DEBUG(0,("%s\n",__LOCATION__));
	SIMPLE_ASYNC_TAIL;
}
/*
  exit - closing files open by the pid

  Forwarded to the remote session; sync or async depending on what the
  caller allows.
*/
static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* synchronous path */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_exit(private->tree->session);
	}

	/* async path */
	c_req = smb_raw_exit_send(private->tree->session);

	SIMPLE_ASYNC_TAIL;
}
3404 logoff - closing files open by the user
3406 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
3407 struct ntvfs_request *req)
3409 /* we can't do this right in the proxy backend .... */
3410 return NT_STATUS_OK;
3414 setup for an async call - nothing to do yet
3416 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
3417 struct ntvfs_request *req,
3418 void *private)
3420 return NT_STATUS_OK;
3424 cancel an async call
3426 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
3427 struct ntvfs_request *req)
3429 struct proxy_private *private = ntvfs->private_data;
3430 struct async_info *a;
3432 /* find the matching request */
3433 for (a=private->pending;a;a=a->next) {
3434 if (a->req == req) {
3435 break;
3439 if (a == NULL) {
3440 return NT_STATUS_INVALID_PARAMETER;
3443 return smb_raw_ntcancel(a->c_req);
/*
  lock a byte range

  Non-generic levels are first mapped through ntvfs_map_lock when generic
  mapping is enabled; otherwise the lock is forwarded to the server,
  sync or async as permitted.
*/
static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, union smb_lock *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (io->generic.level != RAW_LOCK_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_lock(ntvfs, req, io);
	}
	SETUP_FILE;

	/* synchronous path */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_lock(private->tree, io);
	}

	/* async path */
	c_req = smb_raw_lock_send(private->tree, io);
	SIMPLE_ASYNC_TAIL;
}
/*
  set info on a open file

  Straight pass-through to the remote server, sync or async.
*/
static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req,
				  union smb_setfileinfo *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID_AND_FILE;

	/* synchronous path */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_setfileinfo(private->tree, io);
	}

	/* async path */
	c_req = smb_raw_setfileinfo_send(private->tree, io);

	SIMPLE_ASYNC_TAIL;
}
3493 a handler for async fsinfo replies
3495 static void async_fsinfo(struct smbcli_request *c_req)
3497 struct async_info *async = c_req->async.private;
3498 struct ntvfs_request *req = async->req;
3499 union smb_fsinfo *fs = async->parms;
3500 struct proxy_private *private = async->proxy;
3502 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, fs);
3504 if (NT_STATUS_IS_OK(req->async_states->status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3505 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3506 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3507 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3508 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3509 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3510 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3514 talloc_free(async);
3515 req->async_states->send_fn(req);
/*
  return filesystem space info

  Attribute-level queries are answered from the per-connection
  fs_attribute_info cache when available; the RAW_QFS_PROXY_INFO probe is
  answered locally; everything else is forwarded to the server (and the
  attribute cache is filled on the way back — see also async_fsinfo).
*/
static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_fsinfo *fs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	DEBUG(5,("%s: level %x\n",__LOCATION__,fs->generic.level));
	/* this value is easy to cache */
	if ((fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
	     fs->generic.level == RAW_QFS_ATTRIBUTE_INFO) &&
	    private->fs_attribute_info) {
		DEBUG(5,("%s: using cached fsinfo\n",__LOCATION__));
		fs->attribute_info.out.fs_attr=private->fs_attribute_info->fs_attr;
		fs->attribute_info.out.max_file_component_length=private->fs_attribute_info->max_file_component_length;
		fs->attribute_info.out.fs_type=talloc_smb_wire_string_dup(req, &(private->fs_attribute_info->fs_type));
		return NT_STATUS_OK;
	}

	/* QFS Proxy: answered locally, advertises proxy protocol version */
	if (fs->generic.level == RAW_QFS_PROXY_INFO) {
		fs->proxy_info.out.major_version=1;
		fs->proxy_info.out.minor_version=0;
		fs->proxy_info.out.capability=0;
		return NT_STATUS_OK;
	}

	/* synchronous path: forward, then fill the attribute cache if
	   this was a cacheable attribute-level reply */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		NTSTATUS status = smb_raw_fsinfo(private->tree, req, fs);
		if (NT_STATUS_IS_OK(status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
		    fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
			/* only cache once; the && exploits short-circuit to
			   attempt the allocation only when not yet cached */
			if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
				DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
				private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
				private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
				private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
			}
		}
		return status;
	}

	/* async path: async_fsinfo fills the cache on completion */
	c_req = smb_raw_fsinfo_send(private->tree, req, fs);

	ASYNC_RECV_TAIL(fs, async_fsinfo);
}
3568 return print queue info
3570 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
3571 struct ntvfs_request *req, union smb_lpq *lpq)
3573 return NT_STATUS_NOT_SUPPORTED;
/*
  find_first / find_next caching.
  For now, cache based on directory,search_attributes,search_pattern,ea stuff
  Consider in response:
   * search id
   * search count
   * end of search
   * ea stuff
*/

/*
  deep-copy one search result entry onto mem_ctx so it can outlive the
  wire buffer it was parsed from.  The name fields are the only pointers
  that need duplicating; sws_dup presumably copies an smb_wire_string and
  returns non-zero on failure — TODO confirm against its definition.
  Returns NULL on allocation failure or unknown data_level.
*/
static union smb_search_data *smb_search_data_dup(void* mem_ctx, const union smb_search_data *file, enum smb_search_data_level data_level) {
	union smb_search_data *result;
	struct smb_wire_string *name; /* NOTE(review): appears unused */

	result=talloc_zero(mem_ctx, union smb_search_data);
	if (! result) {
		return result;
	}

	/* shallow copy first, then fix up the pointer members per level */
	*result = *file;

	switch(data_level) {
	case RAW_SEARCH_DATA_SEARCH:
		/* NOTE(review): strdup'd onto mem_ctx, not result — the name's
		   lifetime is tied to the parent, not the item; verify intended */
		if (! (result->search.name=talloc_strdup(mem_ctx, file->search.name))) goto error;
		break;
	case RAW_SEARCH_DATA_STANDARD:
		if (sws_dup(result, result->standard.name, file->standard.name)) goto error;
		break;
	case RAW_SEARCH_DATA_EA_SIZE:
		if (sws_dup(result, result->ea_size.name, file->ea_size.name)) goto error;
		break;
	case RAW_SEARCH_DATA_EA_LIST:
		if (sws_dup(result, result->ea_list.name, file->ea_list.name)) goto error;
		break;
	case RAW_SEARCH_DATA_DIRECTORY_INFO:
		if (sws_dup(result, result->directory_info.name, file->directory_info.name)) goto error;
		break;
	case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
		if (sws_dup(result, result->full_directory_info.name, file->full_directory_info.name)) goto error;
		break;
	case RAW_SEARCH_DATA_NAME_INFO:
		if (sws_dup(result, result->name_info.name, file->name_info.name)) goto error;
		break;
	case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
		/* both-directory entries carry a long and a short name */
		if (sws_dup(result, result->both_directory_info.name, file->both_directory_info.name)) goto error;
		if (sws_dup(result, result->both_directory_info.short_name, file->both_directory_info.short_name)) goto error;
		break;
	case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
		if (sws_dup(result, result->id_full_directory_info.name, file->id_full_directory_info.name)) goto error;
		break;
	case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
		if (sws_dup(result, result->id_both_directory_info.name, file->id_both_directory_info.name)) goto error;
		if (sws_dup(result, result->id_both_directory_info.short_name, file->id_both_directory_info.short_name)) goto error;
		break;
	case RAW_SEARCH_DATA_UNIX_INFO:
		/* unix_info.name is a plain char*, not a wire string */
		if (! (result->unix_info.name=talloc_strdup(mem_ctx, file->unix_info.name))) goto error;
		break;
	case RAW_SEARCH_DATA_UNIX_INFO2:
		if (sws_dup(result, result->unix_info2.name, file->unix_info2.name)) goto error;
		break;
	default:
		DEBUG(5,("%s: Error can't dup an unknown file data type: %x\n", __LOCATION__, data_level));
		goto error;
	}
	return result;
error:
	talloc_free(result);
	return NULL;
}
/* callback function for search first/next

   Wraps the client's callback: each entry the server returns is first
   copied into the search cache (if one is attached to this handle), then
   passed to the real callback.  If copying fails the whole cache is
   poisoned and detached, since a cache with missing entries is useless. */
static bool find_callback(void *private, const union smb_search_data *file)
{
	struct search_state *state = (struct search_state *)private;
	struct search_handle *search_handle = state->search_handle;
	bool status;

	/* if we have a cache, copy this data */
	if (search_handle->cache) {
		struct search_cache_item *item = talloc_zero(search_handle->cache, struct search_cache_item);
		DEBUG(5,("%s: Copy %p to cache %p\n", __LOCATION__, item, search_handle->cache));
		if (item) {
			item->data_level=search_handle->data_level;
			item->file = smb_search_data_dup(item, file, item->data_level);
			if (! item->file) {
				talloc_free(item);
				item=NULL;
			}
		}
		if (item) {
			/* optimization to save enumerating the entire list each time, to find the end.
			   the cached last_item is very short lived, it doesn't matter if something has
			   been added since, as long as it hasn't been removed */
			if (state->last_item) {
				DLIST_ADD_END(state->last_item, item, struct search_cache_item*);
			} else {
				DLIST_ADD_END(search_handle->cache->items, item, struct search_cache_item*);
			}
			state->last_item=item;
			state->all_count++;
		} else {
			DEBUG(5,("%s: Could not add name to search cache %p, invalidating cache\n", __LOCATION__, search_handle->cache));
			/* dear me, the whole cache will be invalid if we miss data */
			search_handle->cache->status=SEARCH_CACHE_DEAD;
			/* remove from the list of caches to use */
			DLIST_REMOVE(search_handle->cache->proxy->search_caches, search_handle->cache);
			/* Make it feel unwanted: drop both talloc references so the
			   cache can actually be freed */
			talloc_unlink(private, search_handle->cache);
			talloc_unlink(search_handle, search_handle->cache);
			//if (talloc_unlink(search_handle, search_handle->cache)==0) {
			//talloc_free(search_handle->cache);
			//}
			/* stop us using it for this search too */
			search_handle->cache=NULL;
		}
	}

	/* hand the entry to the real consumer; count only accepted entries */
	status=state->callback(state->private, file);
	if (status) {
		state->count++;
	}
	return status;
}
/*
  list files in a directory matching a wildcard pattern

  Only RAW_SEARCH_TRANS2 searches are cached/proxied; everything else is
  passed straight through.  A complete cached enumeration matching the
  search key is served locally; otherwise the search is forwarded with
  find_callback interposed so the results populate a new cache.
*/
static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
				   struct ntvfs_request *req, union smb_search_first *io,
				   void *search_private,
				   bool (*callback)(void *, const union smb_search_data *))
{
	struct proxy_private *private = ntvfs->private_data;
	struct search_state *state;
	struct search_cache *search_cache=NULL;
	struct search_cache_key search_cache_key={0};
	struct ntvfs_handle *h=NULL;
	struct search_handle *s;
	uint16_t max_count;
	NTSTATUS status;

	SETUP_PID;

	/* caching disabled: straight pass-through */
	if (! private->enabled_proxy_search) {
		return smb_raw_search_first(private->tree, req, io, search_private, callback);
	}
	switch (io->generic.level) {
	/* case RAW_SEARCH_DATA_SEARCH:
		search_cache_key.search_attrib=io->search_first.in.search_attrib;
		search_cache_key.pattern=io->search_first.in.pattern;
		max_count = io->search_first.in.max_count;
		search_cache = find_search_cache(private->search_cache, &search_cache_key);
		break;*/
	case RAW_SEARCH_TRANS2:
		/* NOTE(review): 80 is a magic clamp on per-call results */
		io->t2ffirst.in.max_count=MIN(io->t2ffirst.in.max_count,80);
		max_count = io->t2ffirst.in.max_count;

		/* the key identifies a compatible cached enumeration */
		search_cache_key.level=io->generic.level;
		search_cache_key.data_level=io->generic.data_level;
		search_cache_key.search_attrib=io->t2ffirst.in.search_attrib;
		search_cache_key.pattern=io->t2ffirst.in.pattern;
		search_cache_key.flags=io->t2ffirst.in.flags;
		search_cache_key.storage_type=io->t2ffirst.in.storage_type;
		/* try and find a search cache that is complete */
		search_cache = find_search_cache(private->search_caches, &search_cache_key);

		/* do handle mapping for TRANS2 */
		status = ntvfs_handle_new(ntvfs, req, &h);
		NT_STATUS_NOT_OK_RETURN(status);

		DEBUG(5,("%s: RAW_SEARCH_TRANS2 %s limit %d, cache=%p level=%x\n",__LOCATION__, search_cache_key.pattern, max_count, search_cache, search_cache_key.data_level));
		break;
	default: /* won't cache or proxy this */
		return smb_raw_search_first(private->tree, req, io, search_private, callback);
	}

	/* finish setting up mapped handle */
	if (h) {
		s = talloc_zero(h, struct search_handle);
		NT_STATUS_HAVE_NO_MEMORY(s);
		s->proxy=private;
		talloc_set_destructor(s, search_handle_destructor);
		s->h=h;
		s->level=io->generic.level;
		s->data_level=io->generic.data_level;
		status = ntvfs_handle_set_backend_data(s->h, private->ntvfs, s);
		NT_STATUS_NOT_OK_RETURN(status);
		DLIST_ADD(private->search_handles, s);
		DEBUG(5,("%s: map handle create %d\n",__LOCATION__, smbsrv_fnum(h)));
	}

	/* satisfy from cache */
	if (search_cache) {
		struct search_cache_item* item=search_cache->items;
		uint16_t count=0;

		/* stop cache going away while we are using it */
		s->cache = talloc_reference(s, search_cache);
		DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
		/* Don't offer over the limit, but only count those that were accepted */
		DLIST_FIND(search_cache->items, item, !(count < max_count && callback(search_private, item->file) && ++count) );
		io->t2ffirst.out.count=count;
		s->resume_item=item;
		/* just because callback didn't accept any doesn't mean we are finished */
		if (item == NULL) {
			/* currently only caching for t2ffirst */
			io->t2ffirst.out.end_of_search = true;
			DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
		} else {
			/* count the rest */
			io->t2ffirst.out.end_of_search = false;
			DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
			DLIST_FOR_EACH(item, item, count++);
			DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
		}

		/* client asked for the search to be closed? */
		if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
		    io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE)
		{
			/* destroy handle */
			ntvfs_handle_remove_backend_data(h, ntvfs);
			io->t2ffirst.out.handle=0;
		} else {
			/* now map handle */
			io->t2ffirst.out.handle=smbsrv_fnum(h);
		}
		return NT_STATUS_OK;
	}

	state = talloc_zero(req, struct search_state);
	NT_STATUS_HAVE_NO_MEMORY(state);

	/* if there isn't a matching cache already being generated by another search,
	   start one, unless FLAG_TRANS2_FIND_BACKUP_INTENT which is always live */
	if (!(io->t2ffirst.in.flags & FLAG_TRANS2_FIND_BACKUP_INTENT) &&
	    find_partial_search_cache(private->search_caches, &search_cache_key) == NULL) {
		/* need to opendir the folder being searched so we can get a notification */
		struct search_cache *search_cache=NULL;

		search_cache=new_search_cache(private, &search_cache_key);
		/* Stop cache going away while we are using it */
		if (search_cache) {
			s->cache=talloc_reference(s, search_cache);
		}
	}

	/* stop the handle going away while we are using it */
	state->search_handle=talloc_reference(state, s);
	state->private=search_private;
	state->callback=callback;

	status=smb_raw_search_first(private->tree, req, io, state, find_callback);
//	if (! NT_STATUS_IS_OK(status)) {
//		return (status);
//	}
	if (! NT_STATUS_IS_OK(status)) {
		/* on failure tear down the cache and the mapped handle */
		if (s->cache) {
			DLIST_REMOVE(private->search_caches, s->cache);
			talloc_unlink(private, s->cache);
			talloc_unlink(s, s->cache);
			//if (talloc_unlink(s, s->cache)==0) {
			//talloc_free(s->cache);
			//}
			s->cache=NULL;
		}
		s->h=NULL;
		ntvfs_handle_remove_backend_data(h, ntvfs);
		return (status);
	}

//	DEBUG(1,("%s: %p; %s\n",__LOCATION__,io,get_friendly_nt_error_msg (status)));
	DEBUG(5,("%s: max %d, got %d, copied %d; %s\n",__LOCATION__,io->t2ffirst.out.count,state->all_count, state->count,get_friendly_nt_error_msg (status)));

#warning check NT_STATUS_IS_OK ?
	if (io->t2ffirst.out.end_of_search) {
		/* cache might have gone away if problem filling */
		if (s->cache) {
			DEBUG(5,("B\n"));
			s->cache->status = SEARCH_CACHE_COMPLETE;
			DEBUG(5,("%s: Cache %p filled in first go!\n",__LOCATION__, s->cache));
		}
	}
	if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
	    io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) {
		DEBUG(5,("%s: Closing search\n",__LOCATION__));
		/* destroy partial cache */
		if (s->cache && (io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
		    ! io->t2ffirst.out.end_of_search) {
			DEBUG(5,("%s: Destroying cache %p\n",__LOCATION__, s->cache));
			/* cache is no good now! */
			DLIST_REMOVE(private->search_caches, s->cache);
			talloc_unlink(private, s->cache);
			talloc_unlink(s, s->cache);
			//if (talloc_unlink(s, s->cache)==0) {
			//talloc_free(s->cache);
			//}
			s->cache=NULL;
		}
		if (s->cache) {
			s->cache->status=SEARCH_CACHE_COMPLETE;
		}
		/* Need to deal with the case when the client would not take them all but we still cache them
		if (state->count < io->t2ffirst.out.count && io->t2ffirst.out.end_of_search) {
			io->t2ffirst.out.end_of_search = false;
			//s->resume_item = state->last_item;
		}*/
		/* destroy handle */
		DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
		ntvfs_handle_remove_backend_data(h, ntvfs);
		io->t2ffirst.out.handle=0;
	} else {
		/* keep the server's real handle, hand the client our mapped one */
		s->handle = io->t2ffirst.out.handle;
		io->t2ffirst.out.handle=smbsrv_fnum(h);
	}
	io->t2ffirst.out.count=state->count;
	return status;
}
/* Like DLIST_FIND, but leave (item) pointing at the element AFTER the
   first match — or NULL when there is no match or the match is the tail. */
#define DLIST_FIND_NEXT(start, item, test) do {\
	DLIST_FIND(start, item, test); \
	if (item) (item)=(item)->next; \
} while(0)

/* talloc_free every element of a list, advancing the cursor before each
   free so we never read a freed node. */
#define DLIST_TALLOC_FREE(list) do {\
	while(list) { \
		void *tmp=(list); \
		(list)=(list)->next; \
		talloc_free(tmp); \
	} \
} while(0)
/* continue a search

   For RAW_SEARCH_TRANS2 the client handle is mapped back to our
   search_handle.  If a COMPLETE cache is attached we resume from the
   cached position (located via resume key or last_name, per data level)
   and serve entirely locally.  Otherwise we pass through, still filling
   the cache via find_callback. */
static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req, union smb_search_next *io,
				  void *search_private,
				  bool (*callback)(void *, const union smb_search_data *))
{
	struct proxy_private *private = ntvfs->private_data;
	struct search_state *state;
	struct ntvfs_handle *h=NULL;
	struct search_handle *s;
	const struct search_cache *search_cache=NULL;
	struct search_cache_item *start_at=NULL;
	uint16_t max_count;
	NTSTATUS status;

	SETUP_PID;

	if (! private->enabled_proxy_search) {
		return smb_raw_search_next(private->tree, req, io, search_private, callback);
	}
	switch (io->generic.level) {
	case RAW_SEARCH_TRANS2:
		io->t2fnext.in.max_count=MIN(io->t2fnext.in.max_count,80);
		max_count = io->t2fnext.in.max_count;

		h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->t2fnext.in.handle), struct ntvfs_handle);
		if (! h) return NT_STATUS_INVALID_HANDLE;
		/* convert handle into search_cache */
		s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
		if (! s) return NT_STATUS_INVALID_HANDLE;
		search_cache=s->cache;
		DEBUG(5,("%s: RAW_SEARCH_TRANS2 find_next h=%d [real %d] count %d, cache=%p\n",__LOCATION__, io->t2fnext.in.handle, s->handle, max_count, search_cache));
		/* un-map: talk to the server with its own handle */
		io->t2fnext.in.handle=s->handle;
		if (! search_cache) {
			break;
		}

		/* warning if: uint16_t flags or const char *last_name have changed, abort cache */
		/* skip up to resume key */
		/* TODO: resume key may be PRIOR to where we left off... in which case
		   we need to avoid duplicating values */
		if (search_cache /*&& search_cache->status == SEARCH_CACHE_COMPLETE*/) {
			DEBUG(5,("%s: seek resume position\n",__LOCATION__));
			/* work out where in the cache to continue from */
			switch (io->generic.data_level) {
			case RAW_SEARCH_DATA_STANDARD:
			case RAW_SEARCH_DATA_EA_SIZE:
			case RAW_SEARCH_DATA_EA_LIST:
				/* have a resume key? */
				DEBUG(5,("%s: type %x seek on %x\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.resume_key));
				DLIST_FIND_NEXT(search_cache->items, start_at, io->t2fnext.in.resume_key == start_at->file->standard.resume_key);
				break;
			case RAW_SEARCH_DATA_DIRECTORY_INFO: /* TODO: maybe these should be strcasecmp for some filesystems */
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->full_directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_NAME_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->name_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->both_directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_full_directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_both_directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_UNIX_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info.name)==0);
				break;
			case RAW_SEARCH_DATA_UNIX_INFO2:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info2.name.s)==0);
				break;
			default:
				if (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CONTINUE) {
					start_at = s->resume_item;
				} else {
					DEBUG(5,("%s: HELP! How can we resume?\n",__LOCATION__));
					start_at = s->resume_item;
				}
			}
			DEBUG(5,("%s: Start at %p\n",__LOCATION__,start_at));
		}
		break;
	}

	if (! search_cache) {
		DEBUG(5,("%s: No cache, pass-through\n",__LOCATION__));
		return smb_raw_search_next(private->tree, req, io, search_private, callback);
	}
//#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference((ctx),(ptr))
//surely should be
//#define talloc_reference(ctx, ptr) _talloc_reference((ctx),(ptr))?(ptr):(NULL) to preserve the type of ptr

	/* satisfy from cache */
	if (search_cache->status == SEARCH_CACHE_COMPLETE) {
		struct search_cache_item* item;
		uint16_t count=0;
		DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));

		if (! start_at) {
			start_at = search_cache->items;
		}

		/* offer at most max_count, counting only accepted entries */
		DLIST_FIND(start_at, item, !(count < max_count && callback(search_private, item->file) && ++count) );
		io->t2fnext.out.count=count;
		s->resume_item=item;
		if (item == NULL) {
			DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
			io->t2fnext.out.end_of_search = true;
		} else {
			DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
			io->t2fnext.out.end_of_search = false;
			/* count the rest */
			DLIST_FOR_EACH(item, item, count++);
			DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
		}

		/* is it the end? */
		if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
		    io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
		{
			/* destroy handle */
			DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
			ntvfs_handle_remove_backend_data(h, ntvfs);
		}

		return NT_STATUS_OK;
	}

	/* pass-through and fill-cache */
	if (start_at) {
		/* risk of duplicate data */
		DEBUG(5,("\n\n\nCache-populating search has resumed but NOT where we left off!\n\n\n-d"));
		/* free everything from start_at onwards through start_at-> next*/
		/* cut from the list */
		/* NOTE(review): if start_at is the list head, ->prev may be the
		   tail sentinel of the Samba dlist — TODO confirm this can't
		   corrupt the list head */
		start_at->prev->next=NULL;
		start_at->prev=NULL;
		/* now how to free a list? */
		DLIST_TALLOC_FREE(start_at);
	}
	state = talloc_zero(req, struct search_state);
	NT_STATUS_HAVE_NO_MEMORY(state);

	/* stop the handle going away while we are using it */
	state->search_handle=talloc_reference(state, s);
	state->private=search_private;
	state->callback=callback;

	status = smb_raw_search_next(private->tree, req, io, state, find_callback);
	if (! NT_STATUS_IS_OK(status)) {
		/* failure: drop the cache and the mapped handle */
		if (s->cache) {
			DLIST_REMOVE(private->search_caches, s->cache);
			talloc_unlink(private, s->cache);
			talloc_unlink(s, s->cache);
			//if (talloc_unlink(s, s->cache)==0) {
			//talloc_free(s->cache);
			//}
			s->cache=NULL;
		}
		s->h=NULL;
		ntvfs_handle_remove_backend_data(h, ntvfs);
		return (status);
	}

	DEBUG(5,("%s: max %d, got %d, copied %d; %s\n",__LOCATION__,io->t2fnext.out.count,state->all_count, state->count,get_friendly_nt_error_msg (status)));

	/* if closing, then close */
	if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
	    io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
	{
		if (s->cache && (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
		    ! io->t2fnext.out.end_of_search) {
			/* partial cache is useless */
			DLIST_REMOVE(private->search_caches, s->cache);
			talloc_unlink(private, s->cache);
			talloc_unlink(s, s->cache);
			//if (talloc_unlink(s, s->cache)==0) {
			//talloc_free(s->cache);
			//}
			s->cache=NULL;
		}
		if (s->cache) {
			s->cache->status=SEARCH_CACHE_COMPLETE;
			/* Need to deal with the case when the client would not take them all but we still cache them
			if (state->count < io->t2fnext.out.count && io->t2fnext.out.end_of_search) {
				io->t2fnext.out.end_of_search = false;
			}*/
		}
		/* destroy handle */
		DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
		ntvfs_handle_remove_backend_data(h, ntvfs);
	}
	io->t2fnext.out.count=state->count;

	return status;
}
4112 /* close a search */
4113 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
4114 struct ntvfs_request *req, union smb_search_close *io)
4116 struct proxy_private *private = ntvfs->private_data;
4117 struct ntvfs_handle *h=NULL;
4118 struct search_handle *s;
4119 NTSTATUS status;
4121 SETUP_PID;
4123 if (! private->enabled_proxy_search) {
4124 return smb_raw_search_close(private->tree, io);
4126 switch (io->generic.level) {
4127 case RAW_SEARCH_TRANS2:
4128 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->findclose.in.handle), struct ntvfs_handle);
4129 if (! h) return NT_STATUS_INVALID_HANDLE;
4130 /* convert handle into search_cache */
4131 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
4132 if (! s) return NT_STATUS_INVALID_HANDLE;
4133 io->findclose.in.handle=s->handle;
4134 default:
4135 return smb_raw_search_close(private->tree, io);
4138 if (! s->cache) {
4139 status = smb_raw_search_close(private->tree, io);
4140 } else {
4141 if (s->cache->status != SEARCH_CACHE_COMPLETE) {
4142 /* cache is useless */
4143 DLIST_REMOVE(private->search_caches, s->cache);
4144 talloc_unlink(private, s->cache);
4145 talloc_unlink(s, s->cache);
4146 //if (talloc_unlink(s, s->cache)==0) {
4147 //talloc_free(s->cache);
4150 status = NT_STATUS_OK;
4153 s->h=NULL;
4154 ntvfs_handle_remove_backend_data(h, ntvfs);
4155 /* s MAY also be gone at this point, if h was free'd, unless there were
4156 pending responses, in which case they see s->h is NULL as a sign to stop */
4157 return status;
4161 a handler for async trans2 replies
4163 static void async_trans2(struct smbcli_request *c_req)
4165 struct async_info *async = c_req->async.private;
4166 struct ntvfs_request *req = async->req;
4167 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
4168 talloc_free(async);
4169 req->async_states->send_fn(req);
/* raw trans2

   Passes a raw trans2 request through to the remote server, synchronously
   or asynchronously depending on the caller's async capability. */
static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req,
			     struct smb_trans2 *trans2)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	/* when trans2 is being mapped to specific ntvfs calls, raw
	   passthrough is disabled */
	if (private->map_trans2) {
		return NT_STATUS_NOT_IMPLEMENTED;
	}

	SETUP_PID;
#warning we should be mapping file handles here

	/* synchronous path: block until the reply arrives */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_trans2(private->tree, req, trans2);
	}

	c_req = smb_raw_trans2_send(private->tree, trans2);

	/* macro completes this function, wiring async_trans2 as the reply handler */
	ASYNC_RECV_TAIL(trans2, async_trans2);
}
4197 /* SMBtrans - not used on file shares */
4198 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
4199 struct ntvfs_request *req,
4200 struct smb_trans2 *trans2)
4202 return NT_STATUS_ACCESS_DENIED;
4206 a handler for async change notify replies
4208 static void async_changenotify(struct smbcli_request *c_req)
4210 struct async_info *async = c_req->async.private;
4211 struct ntvfs_request *req = async->req;
4212 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
4213 talloc_free(async);
4214 req->async_states->send_fn(req);
/* change notify request - always async

   Forwards an NTTRANS change-notify to the remote server with the
   transport timeout temporarily disabled, since notify requests may
   legitimately wait forever. */
static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req,
			     union smb_notify *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	int saved_timeout = private->transport->options.request_timeout;
	struct proxy_file *f;

	/* only the NTTRANS flavour of change-notify is proxied */
	if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
		return NT_STATUS_NOT_IMPLEMENTED;
	}

	SETUP_PID;

	/* map the client-side ntvfs handle onto the remote fnum */
	f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
	if (!f) return NT_STATUS_INVALID_HANDLE;
	io->nttrans.in.file.fnum = f->fnum;

	/* this request doesn't make sense unless its async */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* we must not timeout on notify requests - they wait
	   forever */
	private->transport->options.request_timeout = 0;

	c_req = smb_raw_changenotify_send(private->tree, io);

	/* restore the normal timeout for every subsequent request */
	private->transport->options.request_timeout = saved_timeout;

	/* macro completes this function, wiring async_changenotify as the reply handler */
	ASYNC_RECV_TAIL(io, async_changenotify);
}
4254 * A hander for converting from rpc struct replies to ntioctl
4256 static NTSTATUS proxy_rpclite_map_async_send(
4257 struct ntvfs_module_context *ntvfs,
4258 struct ntvfs_request *req,
4259 void *io1, void *io2, NTSTATUS status)
4261 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
4262 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
4263 void* r=rpclite_send->struct_ptr;
4264 struct ndr_push* push;
4265 const struct ndr_interface_call* call=rpclite_send->call;
4266 enum ndr_err_code ndr_err;
4267 DATA_BLOB ndr;
4269 talloc_free(rpclite_send);
4271 DEBUG(5,("%s: converting r=%p back to ntiocl\n",__FUNCTION__, r));
4272 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4273 NT_STATUS_HAVE_NO_MEMORY(push);
4275 if (0) {
4276 push->flags |= LIBNDR_FLAG_BIGENDIAN;
4279 ndr_err = call->ndr_push(push, NDR_OUT, r);
4280 status=ndr_map_error2ntstatus(ndr_err);
4282 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4283 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
4284 nt_errstr(status)));
4285 return status;
4288 ndr=ndr_push_blob(push);
4289 //if (ndr.length > io->ntioctl.in.max_data) {
4290 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
4291 io->ntioctl.in.max_data, ndr.data));
4292 io->ntioctl.out.blob=ndr;
4293 return status;
/*
 * A handler for sending async rpclite Read replies that were mapped to union smb_read.
 *
 * Converts the smb_read result back into the proxy_Read rpc reply:
 * on success it may report a digest match (client can reuse its cache),
 * optionally zlib-compress the payload, or fall back to the raw data.
 * Always returns NT_STATUS_OK - the rpc transport succeeded; the read's
 * own status travels in r->out.result.
 */
static NTSTATUS rpclite_proxy_Read_map_async_send(
	struct ntvfs_module_context *ntvfs,
	struct ntvfs_request *req,
	void *io1, void *io2, NTSTATUS status)
{
	struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
	union smb_read* io=talloc_get_type_abort(io2, union smb_read);

	/* status here is a result of proxy_read, it doesn't reflect the status
	   of the rpc transport or relates calls, just the read operation */
	DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	r->out.result=status;

	if (! NT_STATUS_IS_OK(status)) {
		/* We can't use result as a discriminator in IDL, so nread and flags always exist */
		r->out.nread=0;
		r->out.flags=0;
	} else {
		ssize_t size=io->readx.out.nread;
		r->out.flags=0;
		r->out.nread=io->readx.out.nread;

		/* if the client supplied a digest, check whether the data we
		   read matches what it already has cached */
		if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
			declare_checksum(digest);
			checksum_block(digest, io->readx.out.data, io->readx.out.nread);

			DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
			dump_data (5, digest, sizeof(digest));
			DEBUG(5,("Cached digest\n"));
			dump_data (5, r->in.digest.digest, sizeof(digest));

			if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
				/* match: no payload needed, client reuses its cache */
				r->out.flags=PROXY_USE_CACHE;
				DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
					 (long long)r->out.nread));
				if (r->in.flags & PROXY_VALIDATE) {
					r->out.flags |= PROXY_VALIDATE;
					DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
						 (long long)r->out.nread, (long long) io->readx.out.nread));
				}
				goto done;
			}
			DEBUG(5,("Cache does not match\n"));
		}

		if (r->in.flags & PROXY_VALIDATE) {
			/* validate failed, shrink read to mincnt - so we don't fill link */
			r->out.nread=MIN(r->out.nread, r->in.mincnt);
			size=r->out.nread;
			DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
				 r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
		}

		/* try to compress in place; on success the compressed bytes
		   overwrite the read buffer */
		if (r->in.flags & PROXY_USE_ZLIB) {
			if (compress_block(io->readx.out.data, &size) ) {
				r->out.flags|=PROXY_USE_ZLIB;
				r->out.response.compress.count=size;
				r->out.response.compress.data=io->readx.out.data;
				DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
					 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
				goto done;
			}
		}

		DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
		r->out.response.generic.count=io->readx.out.nread;
		r->out.response.generic.data=io->readx.out.data;
	}

done:

	/* Or should we return NT_STATUS_OK ?*/
	DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));

	/* the rpc transport succeeded even if the operation did not */
	return NT_STATUS_OK;
}
/*
 * RPC implementation of Read: unpack the proxy_Read rpc request into a
 * normal smb_read and dispatch it through proxy_read, with
 * rpclite_proxy_Read_map_async_send repacking the answer.
 */
static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
				   struct ntvfs_request *req, struct proxy_Read *r)
{
	struct proxy_private *private = ntvfs->private_data;
	union smb_read* io=talloc(req, union smb_read);
	NTSTATUS status;
	struct proxy_file *f;
	struct ntvfs_handle *h;

	NT_STATUS_HAVE_NO_MEMORY(io);

	/* if next hop is a proxy just repeat this call also handle VALIDATE check
	   that means have own callback handlers too... */
	SETUP_PID;

	/* presumably resolves r->in.fnum to the local f/h pair - see macro */
	RPCLITE_SETUP_FILE_HERE(f, h);

	DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
		 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
	DEBUG(5,("Anticipated digest\n"));
	dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));

	/* If the remote end is a proxy, just fixup file handle and passthrough,
	   but update cache on the way back:
	   if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
	   }
	*/

	/* prepare for response */
	r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
	NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);

	/* a locally-served VALIDATE request gets its own handler */
	if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
		return proxy_validate(ntvfs, req, r, f);
	}

	/* pack up an smb_read request and dispatch here */
	io->readx.level=RAW_READ_READX;
	io->readx.in.file.ntvfs=h;
	io->readx.in.mincnt=r->in.mincnt;
	io->readx.in.maxcnt=r->in.maxcnt;
	io->readx.in.offset=r->in.offset;
	io->readx.in.remaining=r->in.remaining;
	/* and something to hold the answer */
	io->readx.out.data=r->out.response.generic.data;

	/* so we get to pack the io->*.out response */
	status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
	NT_STATUS_NOT_OK_RETURN(status);

	/* so the read will get processed normally */
	return proxy_read(ntvfs, req, io);
}
4434 * A handler for sending async rpclite Write replies
4436 static NTSTATUS rpclite_proxy_Write_map_async_send(
4437 struct ntvfs_module_context *ntvfs,
4438 struct ntvfs_request *req,
4439 void *io1, void *io2, NTSTATUS status)
4441 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
4442 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
4444 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4445 r->out.result=status;
4447 r->out.nwritten=io->writex.out.nwritten;
4448 r->out.remaining=io->writex.out.remaining;
4450 /* the rpc transport succeeded even if the operation did not */
4451 return NT_STATUS_OK;
4455 * RPC implementation of write
4457 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
4458 struct ntvfs_request *req, struct proxy_Write *r)
4460 struct proxy_private *private = ntvfs->private_data;
4461 union smb_write* io=talloc(req, union smb_write);
4462 NTSTATUS status;
4463 struct proxy_file* f;
4464 struct ntvfs_handle *h;
4466 SETUP_PID;
4468 RPCLITE_SETUP_FILE_HERE(f,h);
4470 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
4471 r->in.count, r->in.offset, r->in.fnum));
4473 /* pack up an smb_write request and dispatch here */
4474 io->writex.level=RAW_WRITE_WRITEX;
4475 io->writex.in.file.ntvfs=h;
4476 io->writex.in.offset=r->in.offset;
4477 io->writex.in.wmode=r->in.mode;
4478 io->writex.in.count=r->in.count;
4480 /* and the data */
4481 if (PROXY_USE_ZLIB & r->in.flags) {
4482 ssize_t count=r->in.data.generic.count;
4483 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
4484 &count, r->in.count);
4485 if (count != r->in.count || !io->writex.in.data) {
4486 /* Didn't uncompress properly, but the RPC layer worked */
4487 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
4488 return NT_STATUS_OK;
4490 } else {
4491 io->writex.in.data=r->in.data.generic.data;
4494 /* so we get to pack the io->*.out response */
4495 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
4496 NT_STATUS_NOT_OK_RETURN(status);
4498 /* so the read will get processed normally */
4499 return proxy_write(ntvfs, req, io);
/*
 * RPC amalgamation of getinfo requests: one proxy_GetInfo covers many
 * files, and each file needs several getinfo levels, so the work is
 * tracked as a three-level tree of "fragment" records.
 */
struct proxy_getinfo_fragments;
struct proxy_getinfo_fragmentses;

/* holds one smbcli_request to satisfy part of one proxy_GetInfo request */
struct proxy_getinfo_fragment {
	struct proxy_getinfo_fragment *prev, *next;	/* dlinklist links */
	struct proxy_getinfo_fragments *fragments;	/* owning per-file group */
	union smb_fileinfo *smb_fileinfo;		/* the getinfo level being fetched */
	struct smbcli_request *c_req;			/* in-flight request; NULLed once answered */
	NTSTATUS status;
};

/* holds reference to many fragment smbcli_request that together make up one proxy_GetInfo request */
struct proxy_getinfo_fragments {
	struct proxy_getinfo_fragments *prev, *next;	/* dlinklist links */
	struct proxy_getinfo_fragmentses *fragmentses;	/* owning whole-call state */
	struct proxy_getinfo_fragment *fragments;	/* outstanding fragments for this file */
	uint32_t index;					/* index into r->out.info_data */
};

/* whole-call state tying all per-file fragment groups to one proxy_GetInfo */
struct proxy_getinfo_fragmentses {
	struct proxy_getinfo_fragments *fragments;	/* outstanding per-file groups */
	struct proxy_GetInfo *r;			/* the rpc request being satisfied */
	struct ntvfs_request *req;			/* ntvfs request to complete when done */
	bool async;					/* set once we have gone async */
};
4533 a handler for async write replies
4535 NTSTATUS async_proxy_getinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
4537 struct smbcli_request *c_req = async->c_req;
4538 struct ntvfs_request *req = async->req;
4539 struct proxy_file *f=async->f;
4540 struct proxy_getinfo_fragment *fragment=talloc_get_type_abort(io2, struct proxy_getinfo_fragment);
4541 struct proxy_getinfo_fragments* fragments=fragment->fragments;
4542 struct proxy_getinfo_fragmentses* fragmentses=fragments->fragmentses;
4543 struct proxy_GetInfo *r=talloc_get_type_abort(fragmentses->r, struct proxy_GetInfo);
4544 int c=fragments->index;
4545 struct info_data* d=&(r->out.info_data[c]);
4546 union smb_fileinfo *io=talloc_get_type_abort(io1, union smb_fileinfo);
4548 SMB_ASSERT(c_req == NULL || c_req == fragment->c_req);
4550 if (c_req) {
4551 switch (r->in.info_tags[0].tag_type) {
4552 case TAG_TYPE_FILE_INFO:
4553 status=smb_raw_fileinfo_recv(c_req, r, io);
4554 break;
4555 case TAG_TYPE_PATH_INFO:
4556 status=smb_raw_pathinfo_recv(c_req, r, io);
4557 break;
4558 default:
4559 status=NT_STATUS_INVALID_PARAMETER;
4561 c_req=NULL;
4564 /* stop callback occuring more than once sync'ly */
4565 fragment->c_req=NULL;
4567 DEBUG(5,("%s: async callback level %x %s\n",__FUNCTION__,io->generic.level, get_friendly_nt_error_msg (status)));
4568 switch (io->generic.level) {
4569 case RAW_FILEINFO_ALL_INFO:
4570 case RAW_FILEINFO_ALL_INFORMATION:
4571 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALL_INFO\n",__FUNCTION__));
4572 d->status_RAW_FILEINFO_ALL_INFO=status;
4574 /* don't blindly overwrite BASIC_INFORMATION as we may already have it */
4575 if (1 || NT_STATUS_IS_OK(status)) {
4576 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4577 d->create_time=io->all_info.out.create_time;
4578 d->access_time=io->all_info.out.access_time;
4579 d->write_time=io->all_info.out.write_time;
4580 d->change_time=io->all_info.out.change_time;
4581 d->attrib=io->all_info.out.attrib;
4583 d->alloc_size=io->all_info.out.alloc_size;
4584 d->size=io->all_info.out.size;
4585 dump_data(5, io, sizeof(*io));
4586 d->nlink=io->all_info.out.nlink;
4587 d->delete_pending=io->all_info.out.delete_pending;
4588 d->directory=io->all_info.out.directory;
4589 d->ea_size=io->all_info.out.ea_size;
4590 /* io is sticking around for as long as d is */
4591 d->fname.s=io->all_info.out.fname.s;
4592 d->fname.count=io->all_info.out.fname.private_length;
4593 break;
4594 case RAW_FILEINFO_BASIC_INFO:
4595 case RAW_FILEINFO_BASIC_INFORMATION:
4596 DEBUG(5,("%s: async callback level RAW_FILEINFO_BASIC_INFORMATION\n",__FUNCTION__));
4597 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4598 d->create_time=io->basic_info.out.create_time;
4599 d->access_time=io->basic_info.out.access_time;
4600 d->write_time=io->basic_info.out.write_time;
4601 d->change_time=io->basic_info.out.change_time;
4602 d->attrib=io->basic_info.out.attrib;
4603 break;
4604 case RAW_FILEINFO_COMPRESSION_INFO:
4605 DEBUG(5,("%s: async callback level RAW_FILEINFO_COMPRESSION_INFO\n",__FUNCTION__));
4606 d->status_RAW_FILEINFO_COMPRESSION_INFO = status;
4607 d->compressed_size=io->compression_info.out.compressed_size;
4608 d->format=io->compression_info.out.format;
4609 d->unit_shift=io->compression_info.out.unit_shift;
4610 d->chunk_shift=io->compression_info.out.chunk_shift;
4611 d->cluster_shift=io->compression_info.out.cluster_shift;
4612 break;
4613 case RAW_FILEINFO_INTERNAL_INFORMATION:
4614 DEBUG(5,("%s: async callback level RAW_FILEINFO_INTERNAL_INFORMATION\n",__FUNCTION__));
4615 d->status_RAW_FILEINFO_INTERNAL_INFORMATION=status;
4616 d->file_id=io->internal_information.out.file_id;
4617 break;
4618 case RAW_FILEINFO_ACCESS_INFORMATION:
4619 DEBUG(5,("%s: async callback level RAW_FILEINFO_ACCESS_INFORMATION\n",__FUNCTION__));
4620 d->status_RAW_FILEINFO_ACCESS_INFORMATION=status;
4621 d->access_flags=io->access_information.out.access_flags;
4622 break;
4623 case RAW_FILEINFO_POSITION_INFORMATION:
4624 DEBUG(5,("%s: async callback level RAW_FILEINFO_POSITION_INFORMATION\n",__FUNCTION__));
4625 d->status_RAW_FILEINFO_POSITION_INFORMATION = status;
4626 d->position=io->position_information.out.position;
4627 break;
4628 case RAW_FILEINFO_MODE_INFORMATION:
4629 DEBUG(5,("%s: async callback level RAW_FILEINFO_MODE_INFORMATION\n",__FUNCTION__));
4630 d->status_RAW_FILEINFO_MODE_INFORMATION =status;
4631 d->mode=io->mode_information.out.mode;
4632 break;
4633 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
4634 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALIGNMENT_INFORMATION\n",__FUNCTION__));
4635 d->status_RAW_FILEINFO_ALIGNMENT_INFORMATION=status;
4636 d->alignment_requirement=io->alignment_information.out.alignment_requirement;
4637 break;
4638 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
4639 DEBUG(5,("%s: async callback level RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION\n",__FUNCTION__));
4640 d->status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=status;
4641 d->reparse_tag=io->attribute_tag_information.out.reparse_tag;
4642 d->reparse_attrib=io->attribute_tag_information.out.attrib;
4643 break;
4644 case RAW_FILEINFO_STREAM_INFO: {
4645 uint_t c;
4646 DEBUG(5,("%s: async callback level RAW_FILEINFO_STREAM_INFO %s,\n",__FUNCTION__));
4647 d->status_RAW_FILEINFO_STREAM_INFO=status;
4648 DEBUG(5,("Num Streams %d %s\n",io->stream_info.out.num_streams, get_friendly_nt_error_msg (status)));
4649 if (NT_STATUS_IS_OK(status)) {
4650 d->streams=talloc_zero_array(d, struct info_stream, io->stream_info.out.num_streams);
4651 if (! d->streams) {
4652 d->status_RAW_FILEINFO_STREAM_INFO=NT_STATUS_NO_MEMORY;
4653 } else {
4654 d->num_streams=io->stream_info.out.num_streams;
4655 for(c=0; c < io->stream_info.out.num_streams; c++) {
4656 d->streams[c].size = io->stream_info.out.streams[c].size;
4657 d->streams[c].alloc_size = io->stream_info.out.streams[c].alloc_size;
4658 d->streams[c].stream_name.s=io->stream_info.out.streams[c].stream_name.s;
4659 d->streams[c].stream_name.count=io->stream_info.out.streams[c].stream_name.private_length;
4663 break; }
4664 default:
4665 /* so... where's it from? */
4666 DEBUG(5,("Unexpected read level\n"));
4669 fragment->smb_fileinfo = NULL;
4670 fragment->c_req=NULL;
4672 /* are the fragments complete? */
4673 DLIST_REMOVE(fragments->fragments, fragment);
4674 /* if this index is complete, remove from fragmentses */
4675 if (! fragments->fragments) {
4676 DLIST_REMOVE(fragmentses->fragments, fragments);
4678 /* is that the end? */
4679 if (! fragmentses->fragments && fragmentses->async) {
4680 DEBUG(5,("Thats the end of the fragments, doing send\n"));
4681 /* call the send_fn */
4682 req=fragmentses->req;
4683 req->async_states->status=NT_STATUS_OK;
4684 DEBUG(5,("Fragments async response sending\n"));
4685 req->async_states->send_fn(req);
4687 DEBUG(5,("%s: Thats the end of the callback\n",__FUNCTION__));
4688 return status;
/* Dispatch the getinfo built by SETUP_GETINFO_FRAGMENT as a fileinfo or
   pathinfo request (per the first info_tag), register async_proxy_getinfo
   as the collating reply handler, and reset io for the next fragment.
   NOTE: the default case returns from the *enclosing* function. */
#define FINISH_GETINFO_FRAGMENT(r, io) do { \
	struct smbcli_request *c_req; \
	switch (r->in.info_tags[0].tag_type) { \
	case TAG_TYPE_FILE_INFO: \
		io->all_info.in.file.fnum=r->in.info_tags[0].info_tag.fnum; \
		c_req=smb_raw_fileinfo_send(private->tree, io); \
		break; \
	case TAG_TYPE_PATH_INFO: \
		io->all_info.in.file.path=r->in.info_tags[0].info_tag.path.s; \
		c_req=smb_raw_pathinfo_send(private->tree, io); \
		break; \
	default: \
		return NT_STATUS_INVALID_PARAMETER; \
	} \
	/* Add fragment collator */ \
	fragment->c_req=c_req; \
	/* use the same stateful async handler for them all... */ \
	{ void* req=NULL; /* shadow req so the macros below use the chain handler */ \
	  ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_proxy_getinfo, NT_STATUS_INTERNAL_ERROR); \
	  ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler); \
	} \
	io=NULL; \
} while (0)
/* Allocate one getinfo fragment of the given info LEVEL, link it into the
   current per-file fragments list, and leave the smb_fileinfo in io for
   FINISH_GETINFO_FRAGMENT to dispatch.  Returns (from the enclosing
   function) on allocation failure. */
#define SETUP_GETINFO_FRAGMENT(io, LEVEL) do { \
	fragment=talloc_zero(fragments, struct proxy_getinfo_fragment); \
	NT_STATUS_HAVE_NO_MEMORY(fragment); \
	DLIST_ADD(fragments->fragments, fragment); \
	fragment->fragments=fragments; \
	io=talloc_zero(fragment, union smb_fileinfo); \
	NT_STATUS_HAVE_NO_MEMORY(io); \
	io->generic.level=LEVEL; \
} while (0)
4725 static NTSTATUS rpclite_proxy_Getinfo(struct ntvfs_module_context *ntvfs,
4726 struct ntvfs_request *req, struct proxy_GetInfo *r)
4728 struct proxy_private *private = ntvfs->private_data;
4729 struct smbcli_request *c_req;
4730 union smb_fileinfo *io=NULL;
4731 NTSTATUS status;
4732 struct proxy_file* f;
4733 struct ntvfs_handle *h;
4734 struct proxy_getinfo_fragmentses *fragmentses;
4735 int c;
4737 SETUP_PID;
4739 DEBUG(5,("Opnum: proxy_Getinfo r=%p\n",r));
4741 DEBUG(5,("Convering %d handles for r=%p\n",r->in.count, r));
4742 for(c=0; c < r->in.count; c++) {
4743 if (r->in.info_tags[c].tag_type==TAG_TYPE_FILE_INFO) {
4744 RPCLITE_SETUP_THIS_FILE_HERE(r->in.info_tags[c].info_tag.fnum, f, h);
4748 if (PROXY_REMOTE_SERVER(private)) {
4749 DEBUG(5,("Remote proxy, doing transparent\n"));
4750 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
4751 /* No need to add a receive hander, the ntioctl transport adds
4752 the async chain handler which deals with the send_fn */
4753 // ADD_ASYNC_RECV_TAIL(c_req, r, NULL, f, rpclite_proxy_Getinfo_map_async_send, NT_STATUS_INTERNAL_ERROR);
4755 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4756 DEBUG(5,("%s:Sync waiting for nttrans response\n",__LOCATION__));
4757 return sync_chain_handler(c_req);
4758 } else {
4759 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
4760 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4761 return NT_STATUS_OK;
4765 /* I thought this was done for me for [in,out] */
4766 r->out.info_data=talloc_zero_array(r, struct info_data, r->in.count);
4767 NT_STATUS_HAVE_NO_MEMORY(r->out.info_data);
4768 r->out.count = r->in.count;
4769 r->out.result = NT_STATUS_OK;
4771 fragmentses=talloc_zero(req, struct proxy_getinfo_fragmentses);
4772 fragmentses->r=r;
4773 fragmentses->req=req;
4774 NT_STATUS_HAVE_NO_MEMORY(fragmentses);
4776 #warning, if C is large, we need to do a few at a time according to resource limits
4777 for (c=0; c < r->in.count; c++) {
4778 struct proxy_getinfo_fragments *fragments;
4779 struct proxy_getinfo_fragment *fragment;
4781 fragments=talloc_zero(fragmentses, struct proxy_getinfo_fragments);
4782 NT_STATUS_HAVE_NO_MEMORY(fragments);
4783 DLIST_ADD(fragmentses->fragments, fragments);
4784 fragments->fragmentses=fragmentses;
4785 fragments->index=c;
4787 /* Issue a set of getinfo requests */
4788 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALL_INFO);
4789 FINISH_GETINFO_FRAGMENT(r, io);
4791 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_BASIC_INFORMATION);
4792 FINISH_GETINFO_FRAGMENT(r, io);
4794 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_COMPRESSION_INFO);
4795 FINISH_GETINFO_FRAGMENT(r, io);
4797 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_INTERNAL_INFORMATION);
4798 FINISH_GETINFO_FRAGMENT(r, io);
4800 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ACCESS_INFORMATION);
4801 FINISH_GETINFO_FRAGMENT(r, io);
4803 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_POSITION_INFORMATION);
4804 FINISH_GETINFO_FRAGMENT(r, io);
4806 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_MODE_INFORMATION);
4807 FINISH_GETINFO_FRAGMENT(r, io);
4809 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALIGNMENT_INFORMATION);
4810 FINISH_GETINFO_FRAGMENT(r, io);
4812 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
4813 FINISH_GETINFO_FRAGMENT(r, io);
4815 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_STREAM_INFO);
4816 FINISH_GETINFO_FRAGMENT(r, io);
4819 /* If ! async, wait for all requests to finish */
4821 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4822 struct proxy_getinfo_fragments *fragments;
4823 struct proxy_getinfo_fragment *fragment;
4824 while ((fragments = fragmentses->fragments) &&
4825 (fragment = fragments->fragments) &&
4826 fragment->c_req) {
4827 sync_chain_handler(fragment->c_req);
4828 /* and because the whole fragment / fragments may be gone now... */
4829 continue;
4831 return NT_STATUS_OK; /* see individual failures */
4834 DEBUG(5,("%s: Setting async response\n",__FUNCTION__));
4835 fragmentses->async=true;
4836 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4837 return NT_STATUS_OK;
/* rpclite dispatch table: indexed directly by opnum in proxy_rpclite(),
   so the entry order must match the NDR_PROXY_* opnum values.
   NOTE(review): the handlers take specific request-struct pointers while
   the slot declares void* r - relies on an implicit function-pointer
   conversion; confirm the build treats this as intended. */
#define RPC_PROXY_OPS 3
struct {
	uint32_t opnum;
	NTSTATUS (*handler)(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, void* r);
} rpcproxy_ops[RPC_PROXY_OPS]={
	{NDR_PROXY_READ, rpclite_proxy_Read},
	{NDR_PROXY_WRITE, rpclite_proxy_Write},
	{NDR_PROXY_GETINFO, rpclite_proxy_Getinfo}
};
4852 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
4853 back from rpc struct to ntioctl */
4854 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
4855 struct ntvfs_request *req, union smb_ioctl *io)
4857 struct proxy_private *private = ntvfs->private_data;
4858 DATA_BLOB *request;
4859 struct ndr_syntax_id* syntax_id;
4860 uint32_t opnum;
4861 const struct ndr_interface_table *table;
4862 struct ndr_pull* pull;
4863 void* r;
4864 NTSTATUS status;
4865 struct async_rpclite_send *rpclite_send;
4866 enum ndr_err_code ndr_err;
4868 SETUP_PID;
4870 /* We don't care about io->generic.in.file, ntvfs layer already proved it was valid,
4871 our operations will have the fnum embedded in them anyway */
4872 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,io->ntioctl.in.blob.length));
4873 /* unpack the NDR */
4874 request=&io->ntioctl.in.blob;
4876 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4877 NT_STATUS_HAVE_NO_MEMORY(pull);
4878 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
4879 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
4881 /* the blob is 4-aligned because it was memcpy'd */
4882 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
4883 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
4885 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
4886 status=ndr_map_error2ntstatus(ndr_err);
4887 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4888 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
4889 return status;
4892 /* now find the struct ndr_interface_table * for this syntax_id */
4893 table=ndr_table_by_uuid(&syntax_id->uuid);
4894 if (! table) ndr_table_init();
4895 table=ndr_table_by_uuid(&syntax_id->uuid);
4897 if (! table) {
4898 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
4899 return NT_STATUS_NO_GUID_TRANSLATION;
4902 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
4903 status=ndr_map_error2ntstatus(ndr_err);
4904 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4905 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
4906 return status;
4908 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
4910 DEBUG(10,("rpc request data:\n"));
4911 dump_data(10, pull->data, pull->data_size);
4913 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
4914 table->calls[opnum].name);
4915 NT_STATUS_HAVE_NO_MEMORY(r);
4917 memset(r, 0, table->calls[opnum].struct_size);
4919 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
4920 status=ndr_map_error2ntstatus(ndr_err);
4921 DEBUG(5,("%s opnum %d pulled r=%p status %s\n",__FUNCTION__,opnum,r,get_friendly_nt_error_msg (status)));
4922 NT_STATUS_NOT_OK_RETURN(status);
4924 rpclite_send=talloc(req, struct async_rpclite_send);
4925 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
4926 rpclite_send->call=&table->calls[opnum];
4927 rpclite_send->struct_ptr=r;
4928 /* need to push conversion function to convert from r to io */
4929 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
4930 NT_STATUS_NOT_OK_RETURN(status);
4932 /* Magically despatch the call based on syntax_id, table and opnum.
4933 But there is no table of handlers.... so until then*/
4934 if (0==strcasecmp(table->name,"rpcproxy")) {
4935 if (opnum >= RPC_PROXY_OPS) {
4936 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
4937 return NT_STATUS_PROCEDURE_NOT_FOUND;
4939 status = rpcproxy_ops[opnum].handler(ntvfs, req, r);
4940 } else {
4941 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
4942 GUID_string(debug_ctx(),&syntax_id->uuid)));
4943 return NT_STATUS_NO_GUID_TRANSLATION;
4946 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
4947 the handler status is in r->out.result */
4948 DEBUG(5,("%s now map_async_finish: status=%s async=%d\n", __FUNCTION__,
4949 get_friendly_nt_error_msg (status), req->async_states->state & NTVFS_ASYNC_STATE_ASYNC));
4951 return ntvfs_map_async_finish(req, status);
/* unpack the ntioctl to make some rpc_struct

   Receives the ntioctl wrapper reply (if still in flight), then
   NDR-pulls the wrapped NDR_OUT payload into the caller's rpc struct r.
   Returns the pull status; transport failures other than a timeout are
   collapsed to NT_STATUS_UNSUCCESSFUL. */
NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
	struct proxy_private *proxy=async->proxy;
	struct smbcli_request *c_req = async->c_req;
	void* r=io1;	/* the rpc reply struct to fill in */
	struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
	union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
	const struct ndr_interface_call *calls=info->calls;
	enum ndr_err_code ndr_err;
	DATA_BLOB *response;
	struct ndr_pull* pull;

	DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
	DEBUG(5,("%s op %s ntioctl: %s\n",
		 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
	NT_STATUS_NOT_OK_RETURN(status);

	if (c_req) {
		DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
		status = smb_raw_ioctl_recv(c_req, io, io);
#define SESSION_INFO proxy->remote_server, proxy->remote_share
		/* This status is the ntioctl wrapper status */
		if (! NT_STATUS_IS_OK(status)) {
			DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
				 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
			/* pass timeouts through; anything else becomes a generic failure */
			if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
			return NT_STATUS_UNSUCCESSFUL;
		}
	}

	dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);

	/* unmarshall the wrapped reply blob into r */
	response=&io->ntioctl.out.blob;
	pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
	/* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */

	NT_STATUS_HAVE_NO_MEMORY(pull);

	ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
#warning can we free pull here?
	status=ndr_map_error2ntstatus(ndr_err);

	DEBUG(5,("END %s op status %s\n",
		 __FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}
5004 send an ntioctl request based on a NDR encoding.
/* NDR-push the interface syntax id, the opnum and the NDR_IN form of r into
   a blob, wrap it in an FSCTL_UFOPROXY_RPCLITE ntioctl on the proxy's
   nttrans handle, and send it.  ntioctl_rpc_unmap is queued as the receive
   handler so the reply is pulled back into r.
   Returns the in-flight request, or NULL on any push/send failure.
   NOTE(review): the io/push allocations are parented on r, so early NULL
   returns rely on the caller eventually freeing r — verify callers do. */
5006 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
5007 struct smbcli_tree *tree,
5008 struct ntvfs_module_context *ntvfs,
5009 const struct ndr_interface_table *table,
5010 uint32_t opnum,
5011 void *r)
5013 struct proxy_private *private = ntvfs->private_data;
5014 struct smbcli_request * c_req;
5015 struct ndr_push *push;
5016 NTSTATUS status;
5017 DATA_BLOB request;
5018 enum ndr_err_code ndr_err;
5019 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
5022 /* setup for a ndr_push_* call, we can't free push until the message
5023 actually hits the wire */
5024 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
5025 if (!push) return NULL;
5027 /* first push interface table identifiers */
5028 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
5029 status=ndr_map_error2ntstatus(ndr_err);
5031 if (! NT_STATUS_IS_OK(status)) return NULL;
/* then the operation number, so the peer can pick the right call */
5033 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
5034 status=ndr_map_error2ntstatus(ndr_err);
5035 if (! NT_STATUS_IS_OK(status)) return NULL;
/* disabled debugging aid: force big-endian marshalling */
5037 if (0) {
5038 push->flags |= LIBNDR_FLAG_BIGENDIAN;
5041 /* push the structure into a blob */
5042 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
5043 status=ndr_map_error2ntstatus(ndr_err);
5044 if (!NT_STATUS_IS_OK(status)) {
5045 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
5046 nt_errstr(status)));
5047 return NULL;
5050 /* retrieve the blob */
5051 request = ndr_push_blob(push);
/* wrap the blob in our private RPC-lite fsctl on the nttrans handle */
5053 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
5054 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
5055 io->ntioctl.in.file.fnum=private->nttrans_fnum;
5056 io->ntioctl.in.fsctl=false;
5057 io->ntioctl.in.filter=0;
5058 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
5059 io->ntioctl.in.blob=request;
5061 DEBUG(10,("smbcli_request packet:\n"));
5062 dump_data(10, request.data, request.length);
5064 c_req = smb_raw_ioctl_send(tree, io);
5066 if (! c_req) {
5067 return NULL;
5070 dump_data(10, c_req->out.data, c_req->out.data_size);
/* queue the unmap callback; info records everything it needs to decode
   the reply back into r */
5072 { void* req=NULL;
5073 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
5074 info->io=io;
5075 info->table=table;
5076 info->opnum=opnum;
5077 info->calls=&table->calls[opnum];
5078 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
5081 return c_req;
5085 client helpers, mapping between proxy RPC calls and smbcli_* calls.
5089 * If the sync_chain_handler is called directly it unplugs the async handler
5090 which (as well as preventing loops) will also avoid req->send_fn being
5091 called - which is also nice! */
/* Walk and run the async_info_map callback chain attached to c_req,
   synchronously.  Each callback receives the status of the previous one;
   the final status is returned.  Unplugs c_req->async.fn first so that a
   callback calling smb_*_recv does not re-enter us, and (per the comment
   below) so req->send_fn is never invoked from the sync path. */
5092 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
5094 struct async_info *async=NULL;
5095 /* the first callback which will actually receive the c_req response */
5096 struct async_info_map *async_map;
5097 NTSTATUS status=NT_STATUS_OK;
5098 struct async_info_map** chain;
5100 DEBUG(5,("%s\n",__FUNCTION__));
5101 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
5103 /* If there is a handler installed, it is using async_info to chain */
5104 if (c_req->async.fn) {
5105 /* not safe to talloc_free async if send_fn has been called for the request
5106 against which async was allocated, so steal it (and free below) or neither */
5107 async = talloc_get_type_abort(c_req->async.private, struct async_info);
5108 talloc_steal(NULL, async);
5109 chain=&async->chain;
5110 async_map = talloc_get_type_abort(*chain, struct async_info_map);
5111 } else {
/* no handler: async.private holds the chain head directly */
5112 chain=(struct async_info_map**)&c_req->async.private;
5113 async_map = talloc_get_type_abort(*chain, struct async_info_map);
5116 /* unplug c_req->async.fn as if a callback handler calls smb_*_recv
5117 in order to receive the response, smbcli_transport_finish_recv will
5118 call us again and then call the c-req->async.fn
5119 Perhaps we should merely call smbcli_request_receive() IF
5120 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
5121 help multi-part replies... except all parts are receive before
5122 callback if a handler WAS set */
5123 c_req->async.fn=NULL;
5125 /* Should we raise an error? Should we simple_recv? */
5126 while(async_map) {
5127 /* remove this one from the list before we call. We do this in case
5128 some callbacks free their async_map but also so that callbacks
5129 can navigate the async_map chain to add additional callbacks to
5130 the end - e.g. so that tag-along reads can call send_fn after
5131 the send_fn of the request they tagged along to, thus preserving
5132 the async response order - which may be a waste of time? */
5133 DLIST_REMOVE(*chain, async_map);
5135 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
5136 if (async_map->fn) {
5137 status=async_map->fn(async_map->async,
5138 async_map->parms1, async_map->parms2, status);
5140 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
5141 /* Note: the callback may have added to the chain */
5142 #warning Async_maps have a null talloc_context, it is unclear who should own them
5143 /* it can't be c_req as it stops us chaining more than one, maybe it
5144 should be req but there isn't always a req. However sync_chain_handler
5145 will always free it if called */
5146 DEBUG(6,("Will free async map %p\n",async_map));
5147 #warning put me back
5148 talloc_free(async_map);
5149 DEBUG(6,("Free'd async_map\n"));
/* advance to the next entry, if the callback left or added one */
5150 if (*chain)
5151 async_map=talloc_get_type_abort(*chain, struct async_info_map);
5152 else
5153 async_map=NULL;
5154 DEBUG(6,("Switch to async_map %p\n",async_map));
5156 /* The first callback will have read c_req, thus talloc_free'ing it,
5157 so we don't let the other callbacks get hurt playing with it */
5158 if (async_map && async_map->async)
5159 async_map->async->c_req=NULL;
/* async was stolen above (or is NULL); safe to free now the chain is done */
5162 talloc_free(async);
5164 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
5165 return status;
5168 /* If the async handler is called, then the send_fn is called */
/* Transport-level async completion: run the callback chain via
   sync_chain_handler, then signal the original ntvfs request (if any)
   by setting its status and invoking send_fn. */
5169 static void async_chain_handler(struct smbcli_request *c_req)
5171 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
5172 struct ntvfs_request *req = async->req;
5173 NTSTATUS status;
/* guard: this entry point must only run after the reply has arrived */
5175 if (c_req->state <= SMBCLI_REQUEST_RECV) {
5176 /* Looks like async handlers has been called sync'ly */
/* NOTE(review): smb_panic takes a plain string; the "%p" here is never
   formatted and will be printed literally — confirm and drop or format */
5177 smb_panic("async_chain_handler called asyncly on req %p\n");
5180 status=sync_chain_handler(c_req);
5182 /* Should we insist that a chain'd handler does this?
5183 Which makes it hard to intercept the data by adding handlers
5184 before the send_fn handler sends it... */
5185 if (req) {
5186 DEBUG(5,("%s send_fn on req=%p\n",__FUNCTION__,req));
5187 req->async_states->status=status;
5188 req->async_states->send_fn(req);
5192 /* unpack the rpc struct to make some smb_write */
/* Async callback: translate a completed proxy_Write RPC (io2) back into the
   smb_write union (io1) the client is waiting on.  Returns the transport
   status if it failed, else the wrapped r->out.result. */
5193 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
5194 void* io1, void* io2, NTSTATUS status)
5196 union smb_write* io =talloc_get_type(io1, union smb_write);
5197 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
5199 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
5200 get_friendly_nt_error_msg (status)));
5201 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
5202 NT_STATUS_NOT_OK_RETURN(status);
/* transport ok; now surface the result of the remote write itself */
5204 status=r->out.result;
5205 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
5206 NT_STATUS_NOT_OK_RETURN(status);
5208 io->generic.out.remaining = r->out.remaining;
5209 io->generic.out.nwritten = r->out.nwritten;
5211 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
5212 get_friendly_nt_error_msg (status)));
5213 return status;
5216 /* upgrade from smb to NDR and then send.
5217 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
/* Send a write.  Against a remote proxy peer the smb_write is upgraded to a
   proxy_Write RPC (with optional zlib compression of the payload); against a
   plain server it degrades to an ordinary smb_raw_write_send.
   Returns the in-flight request or NULL on allocation/send failure. */
5218 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
5219 union smb_write *io,
5220 struct proxy_file *f)
5222 struct proxy_private *private = ntvfs->private_data;
5223 struct smbcli_tree *tree=private->tree;
5225 if (PROXY_REMOTE_SERVER(private)) {
5226 struct smbcli_request *c_req;
5227 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
5228 ssize_t size;
5230 if (! r) return NULL;
5232 size=io->generic.in.count;
5233 /* upgrade the write */
5234 r->in.fnum = io->generic.in.file.fnum;
5235 r->in.offset = io->generic.in.offset;
5236 r->in.count = io->generic.in.count;
5237 r->in.mode = io->generic.in.wmode;
5238 // r->in.remaining = io->generic.in.remaining;
5239 #warning remove this
5240 /* prepare to lie */
/* out fields pre-filled with optimistic values; the real reply overwrites
   them in async_proxy_smb_raw_write_rpc (see the #warning above) */
5241 r->out.nwritten=r->in.count;
5242 r->out.remaining=0;
5244 /* try to compress */
5245 #warning compress!
/* compress_block_talloc returns NULL if compression is not worthwhile;
   then the raw data is sent instead */
5246 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
5247 if (r->in.data.compress.data) {
5248 r->in.data.compress.count=size;
5249 r->in.flags = PROXY_USE_ZLIB;
5250 } else {
5251 r->in.flags = 0;
5252 /* we'll honour const, honest gov */
5253 r->in.data.generic.data=discard_const(io->generic.in.data);
5254 r->in.data.generic.count=io->generic.in.count;
5257 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5258 ntvfs,
5259 &ndr_table_rpcproxy,
5260 NDR_PROXY_WRITE, r);
5261 if (! c_req) return NULL;
5263 /* yeah, filthy abuse of f */
5264 { void* req=NULL;
5265 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
5268 return c_req;
5269 } else {
5270 return smb_raw_write_send(tree, io);
5274 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
5275 union smb_write *io,
5276 struct proxy_file *f)
5278 struct proxy_private *proxy = ntvfs->private_data;
5279 struct smbcli_tree *tree=proxy->tree;
5281 if (PROXY_REMOTE_SERVER(proxy)) {
5282 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
5283 return sync_chain_handler(c_req);
5284 } else {
5285 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
5286 return smb_raw_write_recv(c_req, io);
5290 /* unpack the rpc struct to make some smb_read response */
5291 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
5292 void* io1, void* io2, NTSTATUS status)
5294 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
5295 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
5296 struct proxy_file *f = async->f;
5297 struct proxy_private *private=async->proxy;
5299 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
5300 get_friendly_nt_error_msg(status)));
5301 NT_STATUS_NOT_OK_RETURN(status);
5303 status=r->out.result;
5304 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
5305 get_friendly_nt_error_msg(status)));
5306 NT_STATUS_NOT_OK_RETURN(status);
5308 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
5309 io->generic.out.compaction_mode = 0;
5311 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5312 /* Use the io we already setup!
5313 if out.flags & PROXY_VALIDATE, we may need to validate more in
5314 cache then r->out.nread would suggest, see io->generic.out.nread */
5315 if (r->out.flags & PROXY_VALIDATE)
5316 io->generic.out.nread=io->generic.in.maxcnt;
5317 DEBUG(5,("Using cached data: size=%lld\n",
5318 (long long) io->generic.out.nread));
5319 return status;
5322 if (r->in.flags & PROXY_VALIDATE) {
5323 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
5324 /* turn off validate on this file */
5325 //cache_handle_novalidate(f);
5326 #warning turn off validate on this file - do an nread<maxcnt later
5329 if (r->in.flags & PROXY_USE_CACHE) {
5330 DEBUG(5,("Cached data did not match\n"));
5333 io->generic.out.nread = r->out.nread;
5335 /* we may need to uncompress */
5336 if (r->out.flags & PROXY_USE_ZLIB) {
5337 ssize_t size=r->out.response.compress.count;
5338 DEBUG(5,("%s: uncompress, %lld wanted %lld or %lld\n",__LOCATION__,
5339 (long long int)size,
5340 (long long int)io->generic.in.maxcnt,
5341 (long long int)io->generic.in.mincnt));
5342 if (size > io->generic.in.mincnt) {
5343 /* we did a bulk read for the cache */
5344 uint8_t *data=talloc_size(io, io->generic.in.maxcnt);
5345 DEBUG(5,("%s: bulk uncompress to %p\n",__LOCATION__,data));
5346 if (! uncompress_block_to(data,
5347 r->out.response.compress.data, &size,
5348 io->generic.in.maxcnt) ||
5349 size != r->out.nread) {
5350 status=NT_STATUS_INVALID_USER_BUFFER;
5351 } else {
5352 DEBUG(5,("%s: uncompressed\n",__LOCATION__));
5353 /* copy as much as they can take */
5354 io->generic.out.nread=MIN(io->generic.in.mincnt, size);
5355 memcpy(io->generic.out.data, data, io->generic.out.nread);
5356 /* copy the rest to the cache */
5357 cache_handle_save(f, data,
5358 size,
5359 io->generic.in.offset);
5361 } else if (! uncompress_block_to(io->generic.out.data,
5362 r->out.response.compress.data, &size,
5363 io->generic.in.maxcnt) ||
5364 size != r->out.nread) {
5365 io->generic.out.nread=size;
5366 status=NT_STATUS_INVALID_USER_BUFFER;
5368 } else if (io->generic.out.data != r->out.response.generic.data) {
5369 //Assert(r->out.nread == r->out.generic.out.count);
5370 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
5372 if (r->out.cache_name.s && r->out.cache_name.count && f && f->cache) {
5373 int result;
5374 setenv("WAFS_CACHE_REMOTE_NAME",r->out.cache_name.s,1);
5375 setenv("WAFS_CACHE_LOCAL_NAME",f->cache->cache_name,1);
5376 setenv("WAFS_REMOTE_SERVER",private->remote_server,1);
5377 DEBUG(5,("WAFS_CACHE_REMOTE_NAME=%s [cache_name]\nWAFS_CACHE_LOCAL_NAME=%s\nWAFS_REMOTE_SERVER=%s\n\n",getenv("WAFS_CACHE_REMOTE_NAME"),getenv("WAFS_CACHE_LOCAL_NAME"),getenv("WAFS_REMOTE_SERVER")));
5378 DEBUG(5,("%s running cache transfer command: %s\n",__LOCATION__,getenv("WAFS_CACHE_REMOTE_NAME")));
5379 system(getenv("WAFS_CACHE_TRANSFER"));
5380 DEBUG(5,("%s cache transfer command result %d\n",__LOCATION__,result));
5381 // now set cache to make whole local file valid
5382 cache_validated(f->cache, cache_len(f->cache));
5385 return status;
5388 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
5389 data has been pre-read into io->generic.out.data and can be used for
5390 proxy<->proxy optimized reads */
/* Send a read.  Against a remote proxy peer the smb_read is upgraded to a
   proxy_Read RPC (r may be pre-populated by the caller, e.g. with
   PROXY_VALIDATE set); pre-read cache data in io->generic.out is turned
   into a digest so the peer can answer "use your cache" cheaply.  Against a
   plain server it degrades to smb_raw_read_send.
   Returns the in-flight request or NULL on failure. */
5391 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
5392 union smb_read *io,
5393 struct proxy_file *f,
5394 struct proxy_Read *r)
5396 struct proxy_private *private = ntvfs->private_data;
5397 #warning we are using out.nread as a out-of-band parameter
5398 if (PROXY_REMOTE_SERVER(private)) {
5400 struct smbcli_request *c_req;
5401 if (! r) {
5402 r=talloc_zero(io, struct proxy_Read);
5403 if (! r) return NULL;
5404 r->in.mincnt = io->generic.in.mincnt;
5408 r->in.fnum = io->generic.in.file.fnum;
5409 r->in.read_for_execute=io->generic.in.read_for_execute;
5410 r->in.offset = io->generic.in.offset;
5411 r->in.maxcnt = io->generic.in.maxcnt;
5412 r->in.remaining = io->generic.in.remaining;
5413 r->in.flags |= PROXY_USE_ZLIB;
5414 if (! (r->in.flags & PROXY_VALIDATE) &&
5415 io->generic.out.data && io->generic.out.nread > 0) {
5416 /* maybe we should limit digest size to MIN(nread, maxcnt) to
5417 permit the caller to provider a larger nread as part of
5418 a split read */
/* hash the pre-read data so the peer can compare against its copy */
5419 checksum_block(r->in.digest.digest, io->generic.out.data,
5420 io->generic.out.nread);
5422 if (io->generic.out.nread > r->in.maxcnt) {
5423 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
5424 } else {
5425 r->in.mincnt = io->generic.out.nread;
5426 r->in.maxcnt = io->generic.out.nread;
5427 r->in.flags |= PROXY_USE_CACHE;
5428 /* PROXY_VALIDATE will have been set by caller */
5432 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5433 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
5434 dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
5437 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5438 ntvfs,
5439 &ndr_table_rpcproxy,
5440 NDR_PROXY_READ, r);
5441 if (! c_req) return NULL;
/* queue the callback that unpacks the proxy_Read reply into io */
5443 { void* req=NULL;
5444 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
5447 return c_req;
5448 } else {
5449 return smb_raw_read_send(private->tree, io);
5453 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
5454 union smb_read *io,
5455 struct proxy_file *f)
5457 struct proxy_private *proxy = ntvfs->private_data;
5458 struct smbcli_tree *tree=proxy->tree;
5460 if (PROXY_REMOTE_SERVER(proxy)) {
5461 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
5462 return sync_chain_handler(c_req);
5463 } else {
5464 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
5465 return smb_raw_read_recv(c_req, io);
5471 initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
/* Module entry point: fill in the full ntvfs_ops dispatch table for the
   "proxy" disk backend and register it, checking the critical-sizes
   version so we refuse to load against a mismatched samba build. */
5473 NTSTATUS ntvfs_proxy_init(void)
5475 NTSTATUS ret;
5476 struct ntvfs_ops ops;
5477 NTVFS_CURRENT_CRITICAL_SIZES(vers);
5479 ZERO_STRUCT(ops);
5481 /* fill in the name and type */
5482 ops.name = "proxy";
5483 ops.type = NTVFS_DISK;
5485 /* fill in all the operations */
5486 ops.connect = proxy_connect;
5487 ops.disconnect = proxy_disconnect;
5488 ops.unlink = proxy_unlink;
5489 ops.chkpath = proxy_chkpath;
5490 ops.qpathinfo = proxy_qpathinfo;
5491 ops.setpathinfo = proxy_setpathinfo;
5492 ops.open = proxy_open;
5493 ops.mkdir = proxy_mkdir;
5494 ops.rmdir = proxy_rmdir;
5495 ops.rename = proxy_rename;
5496 ops.copy = proxy_copy;
5497 ops.ioctl = proxy_ioctl;
5498 ops.read = proxy_read;
5499 ops.write = proxy_write;
5500 ops.seek = proxy_seek;
5501 ops.flush = proxy_flush;
5502 ops.close = proxy_close;
5503 ops.exit = proxy_exit;
5504 ops.lock = proxy_lock;
5505 ops.setfileinfo = proxy_setfileinfo;
5506 ops.qfileinfo = proxy_qfileinfo;
5507 ops.fsinfo = proxy_fsinfo;
5508 ops.lpq = proxy_lpq;
5509 ops.search_first = proxy_search_first;
5510 ops.search_next = proxy_search_next;
5511 ops.search_close = proxy_search_close;
5512 ops.trans = proxy_trans;
5513 ops.logoff = proxy_logoff;
5514 ops.async_setup = proxy_async_setup;
5515 ops.cancel = proxy_cancel;
5516 ops.notify = proxy_notify;
5517 ops.trans2 = proxy_trans2;
5519 /* register ourselves with the NTVFS subsystem. We register
5520 under the name 'proxy'. */
5521 ret = ntvfs_register(&ops, &vers);
5523 if (!NT_STATUS_IS_OK(ret)) {
5524 DEBUG(0,("Failed to register PROXY backend!\n"));
5527 return ret;