Use ntvfs session close to close downstream session
[Samba/vfs_proxy.git] / source4 / ntvfs / proxy / vfs_proxy.c
blob5ecc4c81808fe0c410d54bfe41694e10a11a1488
1 /*
2 Unix SMB/PROXY implementation.
4 CIFS PROXY NTVFS filesystem backend
6 Copyright (C) Andrew Tridgell 2003
7 Copyright (C) James J Myers 2003 <myersjj@samba.org>
8 Copyright (C) Sam Liddicott <sam@liddicott.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>.
24 this implements a CIFS->CIFS NTVFS filesystem caching proxy.
25 TODO:
26 New read-ahead
27 Delete cache
28 Share cache states between processes
29 Update to latest samba
30 limit dirmons etc
31 mapi delegated creds
/* abort via smb_panic so talloc failures produce a samba backtrace */
#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx

/* "file:line function" string talloc'd on the debug context, for DEBUG output */
#define __LOCATION__ (talloc_asprintf(debug_ctx(),"%s:%d %s",__FILE__,__LINE__,__FUNCTION__))

/* upper bound on data carried by a single proxy ntioctl (0x2000000 = 32MB) */
#define PROXY_NTIOCTL_MAXDATA 0x2000000
39 #include "includes.h"
40 #include "libcli/raw/libcliraw.h"
41 #include "libcli/smb_composite/smb_composite.h"
42 #include "auth/auth.h"
43 #include "auth/credentials/credentials.h"
44 #include "ntvfs/ntvfs.h"
45 #include "../lib/util/dlinklist.h"
46 #include "param/param.h"
47 #include "libcli/resolve/resolve.h"
48 #include "libcli/libcli.h"
49 #include "libcli/raw/ioctl.h"
50 #include "librpc/gen_ndr/ndr_misc.h"
51 #include "librpc/gen_ndr/ndr_proxy.h"
52 #include "librpc/ndr/ndr_table.h"
53 #include "lib/cache/cache.h"
54 #include "lib/compression/zlib.h"
55 #include "libcli/raw/raw_proto.h"
56 #include "librpc/gen_ndr/proxy.h"
57 #include "smb_server/smb_server.h"
/* case-insensitive comparison helpers for wire filenames */
#define fstrcmp(a,b) strcasecmp((a),(b))
#define fstrncmp(a,b,len) strncasecmp((a),(b),(len))

/* Copy the cached file attributes (src) into a reply structure (dest);
   delete_pending is forced to 0 in the copy */
#define LOAD_CACHE_FILE_DATA(dest, src) do { \
	dest.create_time=src.create_time; \
	dest.access_time=src.access_time; \
	dest.write_time=src.write_time; \
	dest.change_time=src.change_time; \
	dest.attrib=src.attrib; \
	dest.alloc_size=src.alloc_size; \
	dest.size=src.size; \
	dest.file_type=src.file_type; \
	dest.ipc_state=src.ipc_state; \
	dest.is_directory=src.is_directory; \
	dest.delete_pending=0; \
	} while(0)
76 /* taken from #include "librpc/gen_ndr/proxy.h" */
77 struct proxy_file_info_data {
78 /* first three are from ntcreatex */
79 uint16_t file_type;
80 uint16_t ipc_state;
81 uint8_t is_directory;
82 NTSTATUS status_RAW_FILEINFO_BASIC_INFORMATION;
83 uint32_t attrib; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
84 NTTIME create_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
85 NTTIME access_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
86 NTTIME write_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
87 NTTIME change_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
88 NTSTATUS status_RAW_FILEINFO_ALL_INFO;
89 uint32_t ea_size; /* RAW_FILEINFO_ALL_INFO */
90 uint64_t alloc_size; /* RAW_FILEINFO_ALL_INFO */
91 uint64_t size; /* RAW_FILEINFO_ALL_INFO */
92 uint32_t nlink; /* RAW_FILEINFO_ALL_INFO */
93 struct sws fname; /* RAW_FILEINFO_ALL_INFO */
94 uint8_t delete_pending; /* RAW_FILEINFO_ALL_INFO */
95 uint8_t directory; /* RAW_FILEINFO_ALL_INFO */
96 NTSTATUS status_RAW_FILEINFO_COMPRESSION_INFO;
97 uint64_t compressed_size; /* RAW_FILEINFO_COMPRESSION_INFO */
98 uint16_t format; /* RAW_FILEINFO_COMPRESSION_INFO */
99 uint8_t unit_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
100 uint8_t chunk_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
101 uint8_t cluster_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
102 NTSTATUS status_RAW_FILEINFO_INTERNAL_INFORMATION;
103 uint64_t file_id; /* RAW_FILEINFO_INTERNAL_INFORMATION */
104 NTSTATUS status_RAW_FILEINFO_ACCESS_INFORMATION;
105 uint32_t access_flags; /* RAW_FILEINFO_ACCESS_INFORMATION */
106 NTSTATUS status_RAW_FILEINFO_POSITION_INFORMATION;
107 uint64_t position; /* RAW_FILEINFO_POSITION_INFORMATION */
108 NTSTATUS status_RAW_FILEINFO_MODE_INFORMATION;
109 uint32_t mode; /* RAW_FILEINFO_MODE_INFORMATION */
110 NTSTATUS status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
111 uint32_t alignment_requirement; /* RAW_FILEINFO_ALIGNMENT_INFORMATION */
112 NTSTATUS status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
113 uint32_t reparse_tag; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
114 uint32_t reparse_attrib; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
115 NTSTATUS status_RAW_FILEINFO_STREAM_INFO;
116 uint32_t num_streams; /* RAW_FILEINFO_STREAM_INFO */
117 struct info_stream *streams; /* RAW_FILEINFO_STREAM_INFO */
/* flags for file_metadata->valid marking which groups of info_data are usable */
#define valid_RAW_FILEINFO_BASIC_INFORMATION 1
#define valid_RAW_FILEINFO_ALL_INFO 2
/* NOTE(review): in this otherwise power-of-two series, 3 and 4 are not
   distinct bits (3 == 1|2, and 4 collides with nothing but breaks the
   progression); if "valid" is tested bitwise these overlap BASIC|ALL —
   confirm the intended values before relying on them */
#define valid_RAW_FILEINFO_COMPRESSION_INFO 3
#define valid_RAW_FILEINFO_INTERNAL_INFORMATION 4
#define valid_RAW_FILEINFO_STANDARD_INFO 8
#define valid_RAW_FILEINFO_ACCESS_INFORMATION 16
#define valid_RAW_FILEINFO_POSITION_INFORMATION 32
#define valid_RAW_FILEINFO_MODE_INFORMATION 64
#define valid_RAW_FILEINFO_ALIGNMENT_INFORMATION 128
#define valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION 256
#define valid_RAW_FILEINFO_STREAM_INFO 512
132 struct file_metadata {
133 int count;
134 int valid;
135 struct proxy_file_info_data info_data;
/* One open file proxied to the downstream server, linked into
   proxy_private->files (moved to ->closed_files after close) */
struct proxy_file {
	struct proxy_file *prev, *next;
	struct proxy_private* proxy;
	uint16_t fnum;               /* the downstream server's fnum */
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	/* filename might not be a char*, but if so, _size includes null */
	void* filename;
	int filename_size;
	int readahead_pending;
	/* *_OPLOCK_RETURN values */
	int oplock;
	/* read-only, shareable normal file open, can be cloned by similar opens */
	bool can_clone;
	/* If we have an oplock, then the file is NOT bigger than size, which lets
	   us optimize reads */
	struct file_metadata *metadata;
};
157 struct proxy_private;
159 struct search_handle {
160 struct search_handle *prev, *next;
161 struct proxy_private *proxy;
162 struct ntvfs_handle *h;
163 uint16_t handle;
164 union {
165 struct smb_search_id id;
166 uint32_t resume_key;
167 } resume_index;
168 struct search_cache_item *resume_item;
169 enum smb_search_level level;
170 enum smb_search_data_level data_level;
171 /* search cache (if any) being used */
172 struct search_cache *cache;
175 struct search_cache_item {
176 struct search_cache_item *prev, *next;
177 enum smb_search_data_level data_level;
178 struct cache_file_entry *cache;
179 union smb_search_data *file;
180 struct file_metadata *metadata;
182 enum search_cache_status {
183 SEARCH_CACHE_INCOMPLETE,
184 SEARCH_CACHE_COMPLETE,
185 SEARCH_CACHE_DEAD
188 struct fdirmon;
189 typedef void(fdirmon_callback_fn)(void* data, struct fdirmon* fdirmon);
190 //NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS)
192 struct fdirmon {
193 struct fdirmon *prev, *next;
194 struct search_cache_item *items;
196 struct proxy_private *proxy;
198 union smb_notify *notify_io;
199 struct smbcli_request *notify_req;
200 uint16_t dir_fnum;
201 char* dir;
202 struct fdirmon_callback {
203 struct fdirmon_callback *prev, *next;
204 fdirmon_callback_fn *fn;
205 void* data;
206 } *callbacks;
209 struct search_cache {
210 struct search_cache *prev, *next;
211 struct search_cache_item *items;
213 struct proxy_private *proxy;
214 enum search_cache_status status;
216 struct fdirmon* dirmon;
217 char* dir;
219 struct search_cache_key {
220 enum smb_search_level level;
221 enum smb_search_data_level data_level;
222 uint16_t search_attrib;
223 const char *pattern;
224 /* these only for trans2 */
225 uint16_t flags;
226 uint32_t storage_type;
227 } key;
229 struct search_state {
230 struct search_handle *search_handle;
231 void* private;
232 smbcli_search_callback callback;
233 struct search_cache_item *last_item;
234 uint16_t count; /* count how many client receives */
235 uint16_t all_count; /* count how many we receive */
238 struct fs_attribute_info {
239 uint32_t fs_attr;
240 uint32_t max_file_component_length;
241 struct smb_wire_string fs_type;
/* this is stored in ntvfs_private */
/* Per-tree-connect state: the downstream smbcli connection, open-file and
   search bookkeeping, plus cache/feature configuration read from the share
   definition at connect time */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	struct proxy_file *closed_files;
	struct fdirmon *dirmons;
	struct search_cache *search_caches; /* cache's of find-first data */
	struct search_handle *search_handles; /* cache's of find-first data */
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead; /* default read-ahead window size */
	int cache_readaheadblock; /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	struct fs_attribute_info *fs_attribute_info;
	int readahead_spare; /* amount of pending non-user generated requests */
	bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
	bool fake_valid; /* useful for testing, smbclient never asks for oplock */
	uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
	bool enabled_cache_info;
	bool enabled_proxy_search;
	bool enabled_open_clone;
	bool enabled_extra_protocol;
	bool enabled_qpathinfo;
};
struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev; /* links in proxy_private->pending */
	struct proxy_private *proxy;
	struct ntvfs_request *req;      /* the upstream (client) request */
	struct smbcli_request *c_req;   /* the downstream request in flight */
	struct proxy_file *f;
	struct async_info_map *chain;   /* chained handlers, see async_chain_handler */
	void *parms;
};
289 /* used to chain async callbacks */
290 struct async_info_map {
291 struct async_info_map *next, *prev;
292 NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
293 void *parms1;
294 void *parms2;
295 struct async_info *async;
/* context needed to unmap an ntioctl-tunnelled RPC reply back into "io" */
struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};
/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};
311 #define CHECK_UPSTREAM_CLOSED do { \
312 if (c_req && c_req->state == SMBCLI_REQUEST_ERROR && c_req->status == NT_STATUS_NET_WRITE_FAULT) { \
313 req->async_states->state|=NTVFS_ASYNC_STATE_CLOSE; \
315 } while(0)
317 #define CHECK_UPSTREAM_OPEN do { \
318 if (! private->transport->socket->sock) { \
319 req->async_states->state|=NTVFS_ASYNC_STATE_CLOSE; \
320 return NT_STATUS_CONNECTION_DISCONNECTED; \
322 } while(0)
/* Propagate the client's smbpid onto the downstream session before issuing
   a request, and bail out early if the upstream transport has gone away.
   Requires locals: private, req */
#define SETUP_PID do { \
	private->tree->session->pid = req->smbpid; \
	CHECK_UPSTREAM_OPEN; \
} while(0)

/* rpclite variant: map the wire fnum carried in r->in.fnum */
#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
	RPCLITE_SETUP_THIS_FILE_HERE(r->in.fnum, f, h); \
} while (0)
/* Translate the client's wire FNUM into our ntvfs handle (h) and backend
   proxy_file (f), rewriting FNUM to the downstream server's fnum.  On an
   unknown handle the rpclite result carries NT_STATUS_INVALID_HANDLE and
   the enclosing function returns NT_STATUS_OK.
   Requires locals: private, req, ntvfs, r */
#define RPCLITE_SETUP_THIS_FILE_HERE(FNUM, f, h) do { \
	if ((h = ntvfs_find_handle(private->ntvfs, req, FNUM)) && \
	    (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
		FNUM = f->fnum; \
	} else { \
		r->out.result = NT_STATUS_INVALID_HANDLE; \
		return NT_STATUS_OK; \
	} \
} while (0)
/* Look up the backend proxy_file behind io->generic.in.file.ntvfs and
   rewrite the request to carry the downstream server's fnum */
#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

/* as SETUP_FILE_HERE but with a throwaway local */
#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

/* common preamble for file operations: pid then file mapping */
#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)
/* share-option names used to configure this backend */
#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

/* default read-ahead window size */
#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* KB: scaled by 1024 at connect, i.e. 256K */

#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

#define PROXY_FAKE_VALID "proxy:fake-valid"
#define PROXY_FAKE_VALID_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true

/* is the remote server a proxy? true only for a disk share on a server
   that negotiated CAP_COMPRESSION, where we hold the nttrans handle and
   the extra protocol has not been disabled */
#define PROXY_REMOTE_SERVER(private) \
((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
&& (strcmp("A:",private->tree->device)==0) \
&& (private->nttrans_fnum!=0) \
&& (private->enabled_extra_protocol))
406 /* A few forward declarations */
407 static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
408 static void async_chain_handler(struct smbcli_request *c_req);
409 static void async_read_handler(struct smbcli_request *c_req);
410 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
411 struct ntvfs_request *req, union smb_ioctl *io);
413 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
414 struct smbcli_tree *tree,
415 struct ntvfs_module_context *ntvfs,
416 const struct ndr_interface_table *table,
417 uint32_t opnum, void *r);
418 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
419 union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
420 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
421 union smb_read *io, struct proxy_file *f);
422 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
423 union smb_write *io, struct proxy_file *f);
424 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
425 union smb_write *io, struct proxy_file *f);
426 static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);
428 struct smb_wire_string talloc_smb_wire_string_dup(void* mem_ctx, const struct smb_wire_string* string)
430 struct smb_wire_string result;
431 result.private_length=string->private_length;
432 result.s=talloc_strndup(mem_ctx, string->s, string->private_length);
433 DEBUG(5,("%s: %s\n",__FUNCTION__, string->s));
434 return result;
/* Duplicate smb_wire_string src into dest; evaluates true on allocation
   failure (dest.s left NULL although src.s was set).
   NOTE(review): the mem_ctx parameter is ignored — the duplicate is made
   on the NULL talloc context; confirm whether passing mem_ctx through to
   talloc_smb_wire_string_dup was intended */
#define sws_dup(mem_ctx, dest, src) (\
	dest=talloc_smb_wire_string_dup(NULL, &(src)), \
	(dest.s==NULL && src.s!=NULL))
/* These needs replacing with something more canonical perhaps */
/* Return the portion of path before the last '\' separator, talloc'd on
   mem_ctx; an empty string when path contains no separator */
static char* talloc_dirname(void* mem_ctx, const char* path) {
	const char* sep = strrchr(path, '\\');

	if (sep == NULL) {
		return talloc_strdup(mem_ctx, "");
	}
	return talloc_strndup(mem_ctx, path, (sep - path));
}
453 a handler for oplock break events from the server - these need to be passed
454 along to the client
456 static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
458 struct proxy_private *private = p_private;
459 NTSTATUS status;
460 struct ntvfs_handle *h = NULL;
461 struct proxy_file *f;
462 bool result=true;
464 /* because we clone handles, there may be more than one match */
465 for (f=private->files; f; f=f->next) {
466 if (f->fnum != fnum) continue;
467 h = f->h;
469 if (level==OPLOCK_BREAK_TO_LEVEL_II) {
470 f->oplock=LEVEL_II_OPLOCK_RETURN;
471 } else {
472 /* If we don't have an oplock, then we can't rely on the cache */
473 cache_handle_stale(f);
474 f->oplock=NO_OPLOCK_RETURN;
477 DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
478 status = ntvfs_send_oplock_break(private->ntvfs, h, level);
479 if (!NT_STATUS_IS_OK(status)) result=false;
481 if (!h) {
482 DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
484 return result;
487 /* need to pass error upstream and then close? */
488 static void transport_dead(struct smbcli_transport *transport, NTSTATUS status, void* p_private) {
489 struct proxy_private *private = p_private;
490 struct async_info *a;
492 /* first cleanup pending requests */
493 if (transport->pending_recv) {
494 struct smbcli_request *req = transport->pending_recv;
495 req->state = SMBCLI_REQUEST_ERROR;
496 req->status = status;
497 DLIST_REMOVE(transport->pending_recv, req);
498 if (req->async.fn) {
499 req->async.fn(req);
502 // smbsrv_terminate_connection(private->ntvfs,"Upstream hates us");
506 get file handle from clients fnum, (from ntvfs/ipc/vfs_ipc.c at metze suggestion)
508 static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
509 struct ntvfs_request *req,
510 uint16_t fnum)
512 DATA_BLOB key;
513 uint16_t _fnum;
516 * the fnum is already in host byteorder
517 * but ntvfs_handle_search_by_wire_key() expects
518 * network byteorder
520 SSVAL(&_fnum, 0, fnum);
521 key = data_blob_const(&_fnum, 2);
523 return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
527 connect to a share - used when a tree_connect operation comes in.
529 static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
530 struct ntvfs_request *req, const char *sharename)
532 NTSTATUS status;
533 struct proxy_private *private;
534 const char *host, *user, *pass, *domain, *remote_share;
535 struct smb_composite_connect io;
536 struct composite_context *creq;
537 struct share_config *scfg = ntvfs->ctx->config;
538 int nttrans_fnum;
540 struct cli_credentials *credentials;
541 bool machine_account;
543 /* Here we need to determine which server to connect to.
544 * For now we use parametric options, type proxy.
545 * Later we will use security=server and auth_server.c.
547 host = share_string_option(scfg, PROXY_SERVER, NULL);
548 user = share_string_option(scfg, PROXY_USER, NULL);
549 pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
550 domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
551 remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
552 if (!remote_share) {
553 remote_share = sharename;
556 machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);
558 private = talloc_zero(ntvfs, struct proxy_private);
559 if (!private) {
560 return NT_STATUS_NO_MEMORY;
563 ntvfs->private_data = private;
565 if (!host) {
566 DEBUG(1,("PROXY backend: You must supply server\n"));
567 return NT_STATUS_INVALID_PARAMETER;
570 if (user && pass) {
571 DEBUG(5, ("PROXY backend: Using specified password\n"));
572 credentials = cli_credentials_init(private);
573 if (!credentials) {
574 return NT_STATUS_NO_MEMORY;
576 cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
577 cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
578 if (domain) {
579 cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
581 cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
582 } else if (machine_account) {
583 DEBUG(5, ("PROXY backend: Using machine account\n"));
584 credentials = cli_credentials_init(private);
585 cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
586 if (domain) {
587 cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
589 status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
590 if (!NT_STATUS_IS_OK(status)) {
591 return status;
593 } else if (req->session_info->credentials) {
594 DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
595 credentials = req->session_info->credentials;
596 } else {
597 DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
598 return NT_STATUS_INVALID_PARAMETER;
601 /* connect to the server, using the smbd event context */
602 io.in.dest_host = host;
603 io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
604 io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
605 io.in.called_name = host;
606 io.in.credentials = credentials;
607 io.in.fallback_to_anonymous = false;
608 io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
609 io.in.service = remote_share;
610 io.in.service_type = "?????";
611 io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
612 io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
613 lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
614 lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);
616 creq = smb_composite_connect_send(&io, private,
617 lp_resolve_context(ntvfs->ctx->lp_ctx),
618 ntvfs->ctx->event_ctx);
619 status = smb_composite_connect_recv(creq, private);
620 NT_STATUS_NOT_OK_RETURN(status);
622 private->tree = io.out.tree;
624 private->transport = private->tree->session->transport;
625 SETUP_PID;
626 private->ntvfs = ntvfs;
628 ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
629 NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
630 ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
631 NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);
633 /* we need to receive oplock break requests from the server */
634 smbcli_oplock_handler(private->transport, oplock_handler, private);
636 /* we also want to know when the transport goes bad */
637 private->transport->transport_dead.handler = transport_dead;
638 private->transport->transport_dead.private = private;
640 private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);
642 private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);
644 private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);
646 if (strcmp("A:",private->tree->device)==0) {
647 private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
648 private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
649 private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
650 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
651 private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
652 private->fake_valid = share_bool_option(scfg, PROXY_FAKE_VALID, PROXY_FAKE_VALID_DEFAULT);
653 private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
654 private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
655 private->enabled_cache_info=true;
656 private->enabled_proxy_search=true;
657 private->enabled_open_clone=true;
658 private->enabled_extra_protocol=true;
659 private->enabled_qpathinfo=true;
661 DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
662 remote_share, private->tree->device,private->tree->fs_type,
663 (private->cache_enabled)?"enabled":"disabled",
664 private->cache_readahead));
665 } else {
666 private->cache_enabled = false;
667 DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
668 remote_share, private->tree->device,private->tree->fs_type));
671 private->remote_server = strlower_talloc(private, host);
672 private->remote_share = strlower_talloc(private, remote_share);
674 /* some proxy operations will not be performed on files, so open a handle
675 now that we can use for such things. We won't bother to close it on
676 shutdown, as the remote server ought to be able to close it for us
677 and we might be shutting down because the remote server went away and
678 so we don't want to delay further */
679 nttrans_fnum=smbcli_nt_create_full(private->tree, "\\",
681 SEC_FILE_READ_DATA,
682 FILE_ATTRIBUTE_NORMAL,
683 NTCREATEX_SHARE_ACCESS_MASK,
684 NTCREATEX_DISP_OPEN,
685 NTCREATEX_OPTIONS_DIRECTORY,
686 NTCREATEX_IMPERSONATION_IMPERSONATION);
687 if (nttrans_fnum < 0) {
688 DEBUG(5,("Could not open handle for ntioctl %d\n",private->nttrans_fnum));
689 //return NT_STATUS_UNSUCCESSFUL;
691 private->nttrans_fnum=nttrans_fnum;
692 DEBUG(5,("Got nttrans handle %d\n",private->nttrans_fnum));
694 return NT_STATUS_OK;
698 disconnect from a share
700 static void async_search_cache_notify(void *data, struct fdirmon *dirmon);
701 static void dirmon_remove_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data);
702 static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
704 struct proxy_private *private = ntvfs->private_data;
705 struct async_info *a, *an;
706 struct search_cache *s;
708 /* first clean up caches because they have a pending request that
709 they will try and clean up later and fail during talloc_free */
710 for (s=private->search_caches; s; s=s->next) {
711 if (s->dirmon) {
712 dirmon_remove_callback (s->dirmon, async_search_cache_notify, s);
713 s->dirmon=NULL;
717 /* first cleanup pending requests */
718 for (a=private->pending; a; a = an) {
719 an = a->next;
720 smbcli_request_destroy(a->c_req);
721 talloc_free(a);
724 talloc_free(private);
725 ntvfs->private_data = NULL;
727 return NT_STATUS_OK;
731 destroy an async info structure
733 static int async_info_destructor(struct async_info *async)
735 DLIST_REMOVE(async->proxy->pending, async);
736 return 0;
740 a handler for simple async replies
741 this handler can only be used for functions that don't return any
742 parameters (those that just return a status code)
744 static void async_simple(struct smbcli_request *c_req)
746 struct async_info *async = c_req->async.private;
747 struct ntvfs_request *req = async->req;
748 req->async_states->status = smbcli_request_simple_recv(c_req);
749 talloc_free(async);
750 req->async_states->send_fn(req);
/* hopefully this will optimize away */
/* compile-time check that "check" is assignable to a declaration of "type";
   the t=t silences unused-variable warnings */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)

/* save some typing for the simple functions */
/* Install async_fn as the downstream receive handler for c_req without
   marking the ntvfs request async; evaluates to "error" when c_req or its
   async state could not be set up.  Requires locals: c_req, req, private */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain); \
	if (! c_req->async.private) return (error); \
	MAKE_SYNC_ERROR_ASYNC(c_req, error); \
} while(0)
767 #define ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain) do { \
768 TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
770 struct async_info *async; \
771 async = talloc(req, struct async_info); \
772 if (async) { \
773 async->parms = io; \
774 async->req = req; \
775 async->f = file; \
776 async->proxy = private; \
777 async->c_req = c_req; \
778 async->chain = achain; \
779 DLIST_ADD(private->pending, async); \
780 c_req->async.private = async; \
781 talloc_set_destructor(async, async_info_destructor); \
784 c_req->async.fn = async_fn; \
785 } while (0)
787 #define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
788 if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
789 TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
791 struct async_info *async; \
792 async = talloc(req, struct async_info); \
793 if (!async) return NT_STATUS_NO_MEMORY; \
794 async->parms = io; \
795 async->req = req; \
796 async->f = file; \
797 async->proxy = private; \
798 async->c_req = c_req; \
799 DLIST_ADD(private->pending, async); \
800 c_req->async.private = async; \
801 talloc_set_destructor(async, async_info_destructor); \
803 c_req->async.fn = async_fn; \
804 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
805 MAKE_SYNC_ERROR_ASYNC(c_req, NT_STATUS_UNSUCCESSFUL); \
806 return NT_STATUS_OK; \
807 } while (0)
809 static void vasync_timer(struct event_context * ec, struct timed_event *te,
810 struct timeval tv, void *data) {
811 struct smbcli_request *c_req = talloc_get_type_abort(data, struct smbcli_request);
813 DEBUG(5,("Calling async timer on c_req %p with req %p\n",c_req->async.fn, c_req)); \
814 c_req->async.fn(c_req);
817 #define MAKE_SYNC_ERROR_ASYNC(c_req, error) do { \
818 if (c_req && c_req->state >= SMBCLI_REQUEST_DONE) { \
819 /* NOTE: the timer struct is allocated against c_req, so if the c_req */ \
820 /* handler is called manually, the timer will be destroyed with c_req */ \
821 if (! event_add_timed(private->ntvfs->ctx->event_ctx, c_req, \
822 timeval_current_ofs(0, 0), \
823 vasync_timer, \
824 c_req)) return (error); \
825 DEBUG(5,("Queueing async timer on c_req %p with req %p\n",c_req->async.fn, c_req)); \
827 } while(0)
/* like ASYNC_RECV_TAIL_F but with no proxy_file attached */
#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

/* queue a status-only reply through async_simple */
#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
833 /* managers for chained async-callback.
834 The model of async handlers has changed.
835 backend async functions should be of the form:
836 NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
837 And if async->c_req is NULL then an earlier chain has already rec'd the
838 request.
839 ADD_ASYNC_RECV_TAIL is used to add chained handlers.
840 The chained handler manager async_chain_handler is installed the usual way
841 and uses the io pointer to point to the first async_map record
842 static void async_chain_handler(struct smbcli_request *c_req).
843 It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed
844 and often desirable.
/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *)
   chained async_info_map will be in c_req->async.private */
/* Install the chain manager on c_req, refusing if some other handler is
   already installed; does not mark the ntvfs request async */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

/* As above but also marks the ntvfs request async and returns NT_STATUS_OK,
   so callers can use this as their final statement */
#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)
862 DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
863 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL,\
864 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
865 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
866 file, file?"file":"null", file?"file":"null", #async_fn)); \
/* Queue async_fn (with its io1/io2/file context) on creq's chained-handler
   list: allocates an async_info_map (and its embedded async_info), fills it
   from the expansion-site variables req/private, then either appends it to
   the existing chain (when async_chain_handler/async_read_handler is already
   installed on creq) or starts a new list in creq->async.private.
   Expands `return (error)' on any failure, so it may only be used inside a
   function whose return type accepts `error'.
   NOTE(review): several blank/brace-only lines of this macro are missing
   from this extract (e.g. the `}' after each error return); the code lines
   below are kept byte-identical to the file. */
868 #define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
869 if (! creq) { \
870 DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no creq\n",__FUNCTION__)); \
871 return (error); \
872 } else { \
873 struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
874 if (! async_map) { \
875 DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map\n",__FUNCTION__)); \
876 return (error); \
878 async_map->async=talloc(async_map, struct async_info); \
879 if (! async_map->async) { \
880 DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map->async\n",__FUNCTION__)); \
881 return (error); \
883 async_map->parms1=io1; \
884 async_map->parms2=io2; \
885 async_map->fn=async_fn; \
886 async_map->async->parms = io1; \
887 async_map->async->req = req; \
888 async_map->async->f = file; \
889 async_map->async->proxy = private; \
890 async_map->async->c_req = creq; \
891 /* If async_chain_handler is installed, get the list from param */ \
892 if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
893 struct async_info *i=creq->async.private; \
894 DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
895 } else if (creq->async.fn) { \
896 /* incompatible handler installed */ \
897 DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL incompatible handler already installed\n",__FUNCTION__)); \
898 return (error); \
899 } else { \
900 DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
903 } while(0)
/*
  Receive handler for a directory change-notify reply: the watched directory
  changed (or the notify failed), so report the change to every registered
  callback, unhook the dirmon from the proxy's list, close its directory
  handle (fire-and-forget) and free the dirmon.
  NOTE(review): blank and brace-only lines are missing from this extract;
  the code lines below are kept byte-identical to the file.
*/
905 static void async_dirmon_notify(struct smbcli_request *c_req)
907 struct async_info *async = c_req->async.private;
908 struct ntvfs_request *req = async->req;
909 struct fdirmon *dirmon;
910 struct fdirmon_callback *callback;
911 struct proxy_private *proxy = async->proxy;
912 int f;
914 NTSTATUS status;
916 dirmon = talloc_get_type_abort((void*)async->f, struct fdirmon);
917 DEBUG(5,("%s: dirmon %s invalidated\n",__LOCATION__, dirmon->dir));
919 status = smb_raw_changenotify_recv(c_req, req, async->parms);
920 DEBUG(5,("%s: update status %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));
/* the pending notify has been received; drop the dirmon's reference to it */
922 if (dirmon->notify_req) {
923 talloc_unlink(dirmon, dirmon->notify_req);
924 dirmon->notify_req=NULL;
926 /* Mark closed cached files as invalid if they changed, as they will be
927 assuming cache is valid if a dirmon exists and hasn't invalidated it */
928 for(f=0; f<dirmon->notify_io->nttrans.out.num_changes; f++) {
929 DEBUG(1,("DIRMON: %s changed\n",dirmon->notify_io->nttrans.out.changes[f].name.s));
/* tell every registered consumer (e.g. search caches) about the change */
931 DLIST_FOR_EACH(dirmon->callbacks, callback, callback->fn(callback->data, dirmon));
932 /* So nothing can find it even if there are still in-use references */
933 DLIST_REMOVE(proxy->dirmons, dirmon);
/* 65535 is the invalid-fnum sentinel used throughout this file */
934 if (dirmon->dir_fnum!=65535) {
935 struct smbcli_request *req;
936 union smb_close close_parms;
937 close_parms.close.level = RAW_CLOSE_CLOSE;
938 close_parms.close.in.file.fnum = dirmon->dir_fnum;
939 close_parms.close.in.write_time = 0;
941 /* destructor may be called from a notify response and won't be able
942 to wait on this close response, not that we care anyway */
943 req=smb_raw_close_send(proxy->tree, &close_parms);
945 DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, dirmon->dir_fnum, req));
946 dirmon->dir_fnum=65535;
948 talloc_free(async);
949 talloc_free(dirmon);
/*
  Find (or create) a dirmon watching the directory part of `path' — or the
  whole of `path' when dir_only is false.  On creation: opens the directory,
  issues a change-notify with the request timeout temporarily forced to 0
  (no timeout, so the notify hangs around until triggered or cancelled),
  hooks async_dirmon_notify as the receive tail, and links the dirmon into
  proxy->dirmons.  Returns NULL on any failure.
  NOTE(review): blank and brace-only lines are missing from this extract;
  the code lines below are kept byte-identical to the file.
*/
952 struct fdirmon* get_fdirmon(struct proxy_private *proxy, const char* path, bool dir_only) {
953 const char *file;
954 int pathlen;
/* split path at the last '\\'; no separator means watch the share root */
956 if ((file=strrchr(path,'\\'))) {
957 if (dir_only) {
958 pathlen = file - path;
959 file++;
960 } else {
961 pathlen=strlen(path);
963 } else {
964 file = path;
965 pathlen = 0;
968 struct fdirmon *dirmon;
969 /* see if we have a matching dirmon */
970 DLIST_FIND(proxy->dirmons, dirmon, (strlen(dirmon->dir) == pathlen && fstrncmp(path, dirmon->dir, pathlen)==0));
971 if (! dirmon) {
972 int saved_timeout;
974 DEBUG(5,("%s: allocating new dirmon for %s\n",__FUNCTION__,path));
975 dirmon=talloc_zero(proxy, struct fdirmon);
976 if (! dirmon) {
977 goto error;
979 if (! (dirmon->dir=talloc_strndup(dirmon, path, pathlen))) {
980 goto error;
982 if (! (dirmon->notify_io=talloc_zero(dirmon, union smb_notify))) {
983 goto error;
/* open the directory itself so we have a handle to notify on */
986 dirmon->dir_fnum=smbcli_nt_create_full(proxy->tree, dirmon->dir,
988 SEC_FILE_READ_DATA,
989 FILE_ATTRIBUTE_NORMAL,
990 NTCREATEX_SHARE_ACCESS_MASK,
991 NTCREATEX_DISP_OPEN,
992 NTCREATEX_OPTIONS_DIRECTORY,
993 NTCREATEX_IMPERSONATION_IMPERSONATION);
995 if (dirmon->dir_fnum==65535) {
996 DEBUG(5,("%s: smbcli_nt_create_full %s failed\n",__FUNCTION__, dirmon->dir));
997 goto error;
1000 saved_timeout = proxy->transport->options.request_timeout;
1001 /* request notify changes on cache before we start to fill it */
1002 dirmon->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
1003 dirmon->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
1004 dirmon->notify_io->nttrans.in.file.fnum=dirmon->dir_fnum;
1005 dirmon->notify_io->nttrans.in.recursive=false;
1006 dirmon->notify_io->nttrans.in.buffer_size=10240;
1007 proxy->transport->options.request_timeout = 0;
1008 dirmon->notify_req=smb_raw_changenotify_send(proxy->tree, dirmon->notify_io);
1009 /* Make the request hang around so we can tell if it needs cancelling */
1010 proxy->transport->options.request_timeout = saved_timeout;
1012 if (! dirmon->notify_req) {
1013 goto error;
1014 }else {
/* locals named to satisfy the ASYNC_RECV_TAIL_F_ORPHAN_NE expansion */
1015 struct ntvfs_request *req=NULL;
1016 struct smbcli_request *c_req=dirmon->notify_req;
1017 union smb_notify *io=dirmon->notify_io;
1018 struct proxy_private *private=proxy;
1020 talloc_reference(dirmon, dirmon->notify_req);
1021 ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_dirmon_notify,
1022 (void*) dirmon, c_req->async.private);
1023 DLIST_ADD(private->dirmons, dirmon);
1027 return dirmon;
1028 error:
1029 DEBUG(3,("%s: failed to allocate dirmon\n",__FUNCTION__));
1030 talloc_free(dirmon);
1031 return NULL;
1034 static bool dirmon_add_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data) {
1035 struct fdirmon_callback *callback=talloc_zero(dirmon, struct fdirmon_callback);
1036 if (! callback) {
1037 return false;
1039 callback->data=data;
1040 callback->fn=fn;
1041 DLIST_ADD(dirmon->callbacks, callback);
1042 return true;
/* Detach the callback entry matching fn+data from the dirmon's callback
   list so it will no longer be invoked on directory changes.
   NOTE(review): the tail of this function (closing braces, and possibly a
   break/free after the removal) is missing from this extract; the visible
   code lines are kept byte-identical. */
1045 static void dirmon_remove_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data) {
1046 struct fdirmon_callback *callback;
1048 for(callback=dirmon->callbacks; callback; callback=callback->next) {
1049 if (callback->data==data && callback->fn==fn) {
1050 DLIST_REMOVE(dirmon->callbacks, callback);
1055 /* try and unify cache open function interface with this macro */
/* Open-by-file-id NTCREATEX requests (fname holds a 64-bit id) go to
   cache_fileid_open; everything else goes to cache_filename_open. */
1056 #define cache_open(cache_context, f, io, oplock, readahead_window) \
1057 (io->generic.level == RAW_OPEN_NTCREATEX && \
1058 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID)\
1059 ?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window))\
1060 :(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
1062 struct search_cache* find_partial_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
1063 struct search_cache* result;
1064 DLIST_FIND(search_cache, result,
1065 (result->key.level == search_cache_key->level) &&
1066 (result->key.data_level == search_cache_key->data_level) &&
1067 (result->key.search_attrib == search_cache_key->search_attrib) &&
1068 (result->key.flags == search_cache_key->flags) &&
1069 (result->key.storage_type == search_cache_key->storage_type) &&
1070 (fstrcmp(result->key.pattern, search_cache_key->pattern) == 0));
1071 DEBUG(5,("%s: found %p\n",__LOCATION__,result));
1072 return result;
1074 struct search_cache* find_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
1075 struct search_cache* result = find_partial_search_cache(search_cache, search_cache_key);
1076 if (result && result->status == SEARCH_CACHE_COMPLETE) {
1077 DEBUG(5,("%s: found complete %p\n",__LOCATION__,result));
1078 return result;
1080 DEBUG(5,("%s: found INCOMPLETE %p\n",__LOCATION__,result));
1081 return NULL;
/* Recover the wire-level fnum for an ntvfs handle: push it into a local
   buffer with smbsrv_push_fnum, then read it back little-endian via SVAL. */
1084 uint16_t smbsrv_fnum(struct ntvfs_handle *h) {
1085 uint16_t fnum;
1086 smbsrv_push_fnum((uint8_t *)&fnum, 0, h);
1087 return SVAL(&fnum, 0);
/*
  Dirmon callback: the watched directory changed, so this search cache can
  no longer be trusted.  Mark it dead, unhook it from the proxy's list and
  drop the proxy's reference (in-use references keep the memory alive).
*/
1090 static void async_search_cache_notify(void *data, struct fdirmon *dirmon) {
1091 struct search_cache *s=talloc_get_type_abort(data, struct search_cache);
1093 DEBUG(5,("%s: cache notify %p,%s/%s\n",__LOCATION__,s, s->dir, s->key.pattern));
1094 s->dirmon=NULL;
1095 /* dispose of the search_cache */
1096 s->status=SEARCH_CACHE_DEAD;
1097 /* So nothing can find it even if there are still in-use references */
1098 DLIST_REMOVE(s->proxy->search_caches, s);
1099 /* free it */
1100 //talloc_steal(async, search_cache);
1101 talloc_unlink(s->proxy, s);
1105 destroy a search handle
/* talloc destructor for a search handle: unlink it from the proxy's
   search_handles list so nothing dereferences it after free. */
1107 static int search_handle_destructor(struct search_handle *s)
1109 DLIST_REMOVE(s->proxy->search_handles, s);
1110 DEBUG(5,("%s: handle destructor %p\n",__LOCATION__,s));
1111 return 0;
1113 static int search_cache_destructor(struct search_cache *s)
1115 NTSTATUS status;
1117 DLIST_REMOVE(s->proxy->search_caches, s);
1118 DEBUG(5,("%s: cache destructor %p,%s/%s\n",__LOCATION__,s, s->dir, s->key.pattern));
1119 if (s->dirmon) {
1120 dirmon_remove_callback(s->dirmon, async_search_cache_notify, s);
1121 s->dirmon=NULL;
1123 return 0;
/*
  Allocate a search cache for `key', watching the searched directory via a
  dirmon so async_search_cache_notify invalidates the cache when the
  directory changes.  The key's pattern is duplicated so the cache owns it.
  Returns NULL on any failure.
  NOTE(review): blank and brace-only lines are missing from this extract;
  the code lines below are kept byte-identical to the file.
*/
1126 struct search_cache* new_search_cache(struct proxy_private *private, struct search_cache_key* key) {
1127 /* need to opendir the folder being searched so we can get a notification */
1128 struct search_cache *search_cache=NULL;
1130 search_cache=talloc_zero(private, struct search_cache);
1131 DEBUG(5,("%s: Start new cache %p for %s\n",__LOCATION__, search_cache, key->pattern));
1132 if (! search_cache) {
1133 return NULL;
1135 search_cache->proxy=private;
1136 if (! (search_cache->dir=talloc_dirname(search_cache, key->pattern))) {
1137 goto error;
1139 search_cache->key=*key;
1140 /* make private copy of pattern now that we need it AND have something to own it */
1141 if (! (search_cache->key.pattern=talloc_strdup(search_cache, search_cache->key.pattern))) {
1142 goto error;
1145 search_cache->dirmon=get_fdirmon(private, search_cache->dir, true);
1146 if (! search_cache->dirmon) {
1147 goto error;
1149 /* The destructor will close the handle */
1150 talloc_set_destructor(search_cache, search_cache_destructor);
1152 DEBUG(5,("%s: Start new cache %p, dir_fnum %p\n",__LOCATION__, search_cache, search_cache->dirmon));
1154 if (! dirmon_add_callback(search_cache->dirmon, async_search_cache_notify, search_cache)) {
1155 goto error;
1156 } else {
1157 DLIST_ADD_END(private->search_caches, search_cache, struct search_cache*);
1160 return search_cache;
1161 error:
1162 talloc_free(search_cache);
1163 return NULL;
1167 delete a file - the dirtype specifies the file types to include in the search.
1168 The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
/*
  Delete a file on the downstream server: synchronously when the front end
  forbids async, otherwise send and let SIMPLE_ASYNC_TAIL (which uses the
  c_req/private locals) install the generic async completion.
*/
1170 static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
1171 struct ntvfs_request *req, union smb_unlink *unl)
1173 struct proxy_private *private = ntvfs->private_data;
1174 struct smbcli_request *c_req;
1176 SETUP_PID;
1178 /* see if the front end will allow us to perform this
1179 function asynchronously. */
1180 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1181 return smb_raw_unlink(private->tree, unl);
1184 c_req = smb_raw_unlink_send(private->tree, unl);
1186 SIMPLE_ASYNC_TAIL;
1190 a handler for async ioctl replies
/* Completion handler for an async ioctl: collect the downstream reply into
   the saved parms, free the bridge record and wake the ntvfs request. */
1192 static void async_ioctl(struct smbcli_request *c_req)
1194 struct async_info *async = c_req->async.private;
1195 struct ntvfs_request *req = async->req;
1196 req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
1197 talloc_free(async);
1198 req->async_states->send_fn(req);
1202 ioctl interface
/*
  Forward an ioctl downstream, except for the FSCTL_UFOPROXY_RPCLITE
  nt-ioctl which is the proxy's own RPC channel and is handled locally by
  proxy_rpclite.  Sync or async per the front end's MAY_ASYNC flag.
*/
1204 static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
1205 struct ntvfs_request *req, union smb_ioctl *io)
1207 struct proxy_private *private = ntvfs->private_data;
1208 struct smbcli_request *c_req;
1210 if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
1211 && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
1212 return proxy_rpclite(ntvfs, req, io);
1215 SETUP_PID_AND_FILE;
1217 /* see if the front end will allow us to perform this
1218 function asynchronously. */
1219 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1220 return smb_raw_ioctl(private->tree, req, io);
1223 c_req = smb_raw_ioctl_send(private->tree, io);
1225 ASYNC_RECV_TAIL(io, async_ioctl);
1229 check if a directory exists
/*
  Check that a path is a directory by forwarding the chkpath downstream;
  sync or async per the front end's MAY_ASYNC flag.
*/
1231 static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
1232 struct ntvfs_request *req, union smb_chkpath *cp)
1234 struct proxy_private *private = ntvfs->private_data;
1235 struct smbcli_request *c_req;
1237 SETUP_PID;
1239 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1240 return smb_raw_chkpath(private->tree, cp);
1243 c_req = smb_raw_chkpath_send(private->tree, cp);
1245 SIMPLE_ASYNC_TAIL;
/*
  Try to satisfy a per-file lookup for `path' from the directory search
  caches: split path into directory + file, then scan each cache of that
  directory (complete or partial) for an item whose long or short
  both_directory_info name matches.  On a hit returns true with
  *search_cache and *item set; on a miss returns false (outputs hold the
  last probed values, i.e. NULL cache).
  NOTE(review): blank and brace-only lines are missing from this extract;
  the code lines below are kept byte-identical to the file.
*/
1248 static bool find_search_cache_item(const char* path,
1249 struct search_cache **search_cache,
1250 struct search_cache_item **item) {
1251 struct search_cache *s=*search_cache;
1252 struct search_cache_item *i=*item;
1253 const char* file;
1254 int dir_len;
1256 /* see if we can satisfy from a directory cache */
1257 DEBUG(5,("%s: Looking for pathinfo: '%s'\n",__LOCATION__,path));
1258 if ((file=strrchr(path,'\\'))) {
1259 dir_len = file - path;
1260 /* point past the \ */
1261 file++;
1262 } else {
1263 file = path;
1264 dir_len = 0;
1266 /* convert empty path to . so we can find it in the cache */
1267 if (! *file) {
1268 file=".";
1270 DEBUG(5,("%s: Path='%s' File='%s'\n",__LOCATION__,path, file));
1272 /* Note we don't care if the cache is partial, as long as it has a hit */
1273 while(s) {
1274 /* One day we may support all directory levels */
1275 DLIST_FIND(s, s, (s->key.data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
1276 strlen(s->dir)==dir_len &&
1277 fstrncmp(s->dir, path, dir_len)==0));
1278 if (! s) {
1279 break;
1281 DEBUG(5,("%s: found cache %p\n",__LOCATION__,s));
1282 /* search s for io->generic.in.file.path */
1283 DLIST_FIND(s->items, i, (i->data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
1284 ((i->file->both_directory_info.name.s &&
1285 fstrcmp(i->file->both_directory_info.name.s, file) ==0) ||
1286 (i->file->both_directory_info.short_name.s &&
1287 fstrcmp(i->file->both_directory_info.short_name.s, file)==0)
1288 )));
1289 DEBUG(5,("%s: found cache %p item %p\n",__LOCATION__,s, i));
1290 if (i) {
1291 *item=i;
1292 *search_cache=s;
1293 return true;
/* this cache missed; continue scanning later caches of the same dir */
1295 s=s->next;
1296 DEBUG(5,("%s: continue search at %p\n",__LOCATION__,s));
1298 *item=i;
1299 *search_cache=s;
1300 return false;
/*
  Copy every info section that the remote PROXY_GETINFO reply reports as OK
  into the local metadata cache, setting the matching valid_* bits so
  proxy_cache_info can later serve requests from it.  Assumes the reply
  carries exactly one result in r->out.info_data[0].
  NOTE(review): the stray ';;' below and the odd nested comment in the
  BASIC_INFORMATION condition are pre-existing; left untouched here.
  Blank/brace-only lines are missing from this extract; code lines are
  kept byte-identical to the file.
*/
1303 static void proxy_set_cache_info(struct file_metadata *metadata, struct proxy_GetInfo *r) {
1304 /* only set this if it was responded... I think they all are responded... */
1305 metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION;
1306 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION) /*||
1307 /*NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)*/) {
1308 metadata->info_data.create_time=r->out.info_data[0].create_time;
1309 metadata->info_data.access_time =r->out.info_data[0].access_time;
1310 metadata->info_data.write_time=r->out.info_data[0].write_time;
1311 metadata->info_data.change_time=r->out.info_data[0].change_time;
1312 metadata->info_data.attrib=r->out.info_data[0].attrib;
1313 metadata->valid|=valid_RAW_FILEINFO_BASIC_INFORMATION;
1315 metadata->info_data.status_RAW_FILEINFO_ALL_INFO=r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO;
1316 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)) {
1317 metadata->info_data.ea_size=r->out.info_data[0].ea_size;
1318 metadata->info_data.alloc_size=r->out.info_data[0].alloc_size;
1319 metadata->info_data.size=r->out.info_data[0].size;
1320 metadata->info_data.nlink=r->out.info_data[0].nlink;
1321 /* Are we duping this right? Would talloc_reference be ok? */
1322 //f->metadata->info_data.fname=
1323 metadata->info_data.fname.s=talloc_memdup(metadata, r->out.info_data[0].fname.s, r->out.info_data[0].fname.count);
1324 metadata->info_data.fname.count=r->out.info_data[0].fname.count;
1325 metadata->info_data.delete_pending=r->out.info_data[0].delete_pending;
1326 metadata->info_data.directory=r->out.info_data[0].directory;
1327 metadata->valid|=valid_RAW_FILEINFO_ALL_INFO | valid_RAW_FILEINFO_STANDARD_INFO;;
1329 metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO=r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO;
1330 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO)) {
1331 metadata->info_data.compressed_size=r->out.info_data[0].compressed_size;
1332 metadata->info_data.format=r->out.info_data[0].format;
1333 metadata->info_data.unit_shift=r->out.info_data[0].unit_shift;
1334 metadata->info_data.chunk_shift=r->out.info_data[0].chunk_shift;
1335 metadata->info_data.cluster_shift=r->out.info_data[0].cluster_shift;
1336 metadata->valid|=valid_RAW_FILEINFO_COMPRESSION_INFO;
1338 metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION;
1339 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION)) {
1340 metadata->info_data.file_id=r->out.info_data[0].file_id;
1341 metadata->valid|=valid_RAW_FILEINFO_INTERNAL_INFORMATION;
1343 metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION;
1344 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION)) {
1345 metadata->info_data.access_flags=r->out.info_data[0].access_flags;
1346 metadata->valid|=valid_RAW_FILEINFO_ACCESS_INFORMATION;
1348 metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION;
1349 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION)) {
1350 metadata->info_data.position=r->out.info_data[0].position;
1351 metadata->valid|=valid_RAW_FILEINFO_POSITION_INFORMATION;
1353 metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION;
1354 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION)) {
1355 metadata->info_data.mode=r->out.info_data[0].mode;
1356 metadata->valid|=valid_RAW_FILEINFO_MODE_INFORMATION;
1358 metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
1359 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION)) {
1360 metadata->info_data.alignment_requirement=r->out.info_data[0].alignment_requirement;
1361 metadata->valid|=valid_RAW_FILEINFO_ALIGNMENT_INFORMATION;
1363 metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
1364 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION)) {
1365 metadata->info_data.reparse_tag=r->out.info_data[0].reparse_tag;
1366 metadata->info_data.reparse_attrib=r->out.info_data[0].reparse_attrib;
1367 metadata->valid|=valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
1369 metadata->info_data.status_RAW_FILEINFO_STREAM_INFO=r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO;
1370 if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO)) {
1371 metadata->info_data.num_streams=r->out.info_data[0].num_streams;
1372 talloc_free(metadata->info_data.streams);
1373 metadata->info_data.streams=talloc_steal(metadata, r->out.info_data[0].streams);
1374 metadata->valid|=valid_RAW_FILEINFO_STREAM_INFO;
1377 /* satisfy a file-info request from cache */
1378 NTSTATUS proxy_cache_info(union smb_fileinfo *io, struct file_metadata *metadata, bool *valid)
1380 #define SET_VALID(FLAG) do { \
1381 if (valid) *valid=!!(metadata->valid & valid_ ## FLAG); \
1382 DEBUG(5,("%s check %s=%d (%x)\n",__FUNCTION__, #FLAG, !!(metadata->valid & valid_ ## FLAG), metadata->valid)); \
1383 } while(0)
1384 /* and now serve the request from the cache */
1385 switch(io->generic.level) {
1386 case RAW_FILEINFO_BASIC_INFORMATION:
1387 SET_VALID(RAW_FILEINFO_BASIC_INFORMATION);
1388 io->basic_info.out.create_time=metadata->info_data.create_time;
1389 io->basic_info.out.access_time=metadata->info_data.access_time;
1390 io->basic_info.out.write_time=metadata->info_data.write_time;
1391 io->basic_info.out.change_time=metadata->info_data.change_time;
1392 io->basic_info.out.attrib=metadata->info_data.attrib;
1393 return metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION;
1394 case RAW_FILEINFO_ALL_INFO:
1395 SET_VALID(RAW_FILEINFO_ALL_INFO);
1396 io->all_info.out.create_time=metadata->info_data.create_time;
1397 io->all_info.out.access_time=metadata->info_data.access_time;
1398 io->all_info.out.write_time=metadata->info_data.write_time;
1399 io->all_info.out.change_time=metadata->info_data.change_time;
1400 io->all_info.out.attrib=metadata->info_data.attrib;
1401 io->all_info.out.alloc_size=metadata->info_data.alloc_size;
1402 io->all_info.out.size=metadata->info_data.size;
1403 io->all_info.out.directory=metadata->info_data.directory;
1404 io->all_info.out.nlink=metadata->info_data.nlink;
1405 io->all_info.out.delete_pending=metadata->info_data.delete_pending;
1406 io->all_info.out.fname.s=metadata->info_data.fname.s;
1407 io->all_info.out.fname.private_length=metadata->info_data.fname.count;
1408 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1409 case RAW_FILEINFO_STANDARD_INFO:
1410 case RAW_FILEINFO_STANDARD_INFORMATION:
1411 SET_VALID(RAW_FILEINFO_ALL_INFO);
1412 io->standard_info.out.alloc_size=metadata->info_data.alloc_size;
1413 io->standard_info.out.size=metadata->info_data.size;
1414 io->standard_info.out.directory=metadata->info_data.directory;
1415 io->standard_info.out.nlink=metadata->info_data.nlink; /* may be wrong */
1416 io->standard_info.out.delete_pending=metadata->info_data.delete_pending;
1417 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1418 case RAW_FILEINFO_EA_INFO:
1419 case RAW_FILEINFO_EA_INFORMATION:
1420 SET_VALID(RAW_FILEINFO_ALL_INFO);
1421 io->ea_info.out.ea_size=metadata->info_data.ea_size;
1422 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1423 case RAW_FILEINFO_COMPRESSION_INFO:
1424 SET_VALID(RAW_FILEINFO_COMPRESSION_INFO);
1425 io->compression_info.out.compressed_size=metadata->info_data.compressed_size;
1426 io->compression_info.out.format=metadata->info_data.format;
1427 io->compression_info.out.unit_shift=metadata->info_data.unit_shift;
1428 io->compression_info.out.chunk_shift=metadata->info_data.chunk_shift;
1429 io->compression_info.out.cluster_shift=metadata->info_data.cluster_shift;
1430 return metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO;
1431 case RAW_FILEINFO_INTERNAL_INFORMATION:
1432 SET_VALID(RAW_FILEINFO_INTERNAL_INFORMATION);
1433 io->internal_information.out.file_id=metadata->info_data.file_id;
1434 return metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION;
1435 case RAW_FILEINFO_ACCESS_INFORMATION:
1436 SET_VALID(RAW_FILEINFO_ACCESS_INFORMATION);
1437 io->access_information.out.access_flags=metadata->info_data.access_flags;
1438 return metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION;
1439 case RAW_FILEINFO_POSITION_INFORMATION:
1440 SET_VALID(RAW_FILEINFO_POSITION_INFORMATION);
1441 io->position_information.out.position=metadata->info_data.position;
1442 return metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION;
1443 case RAW_FILEINFO_MODE_INFORMATION:
1444 SET_VALID(RAW_FILEINFO_MODE_INFORMATION);
1445 io->mode_information.out.mode=metadata->info_data.mode;
1446 return metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION;
1447 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1448 SET_VALID(RAW_FILEINFO_ALIGNMENT_INFORMATION);
1449 io->alignment_information.out.alignment_requirement=metadata->info_data.alignment_requirement;
1450 return metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
1451 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1452 SET_VALID(RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
1453 io->attribute_tag_information.out.reparse_tag=metadata->info_data.reparse_tag;
1454 io->attribute_tag_information.out.attrib=metadata->info_data.reparse_attrib;
1455 return metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
1456 case RAW_FILEINFO_STREAM_INFO:
1457 case RAW_FILEINFO_STREAM_INFORMATION:
1458 SET_VALID(RAW_FILEINFO_STREAM_INFO);
1459 io->stream_info.out.num_streams=metadata->info_data.num_streams;
1460 if (metadata->info_data.num_streams > 0) {
1461 io->stream_info.out.streams = talloc_zero_array(io, struct stream_struct, metadata->info_data.num_streams);
1462 int c;
1463 if (! io->stream_info.out.streams) {
1464 if (*valid) *valid=false;
1465 io->stream_info.out.num_streams=0;
1466 return NT_STATUS_NO_MEMORY;
1468 for (c=0; c<io->stream_info.out.num_streams; c++) {
1469 io->stream_info.out.streams[c].size = metadata->info_data.streams[c].size;
1470 io->stream_info.out.streams[c].alloc_size = metadata->info_data.streams[c].alloc_size;
1471 io->stream_info.out.streams[c].stream_name.s = talloc_reference(io, metadata->info_data.streams[c].stream_name.s);
1472 io->stream_info.out.streams[c].stream_name.private_length = metadata->info_data.streams[c].stream_name.count;
1474 } else {
1475 io->stream_info.out.streams=NULL;
1477 return metadata->info_data.status_RAW_FILEINFO_STREAM_INFO;
1478 default:
1479 DEBUG(5,("%s: Unknown request\n",__FUNCTION__));
1480 if (valid) *valid=false;
1481 return NT_STATUS_INTERNAL_ERROR;
1486 a handler for async qpathinfo replies
/* Completion handler for an async pathinfo: collect the downstream reply
   into the saved parms, free the bridge record and wake the ntvfs request. */
1488 static void async_qpathinfo(struct smbcli_request *c_req)
1490 struct async_info *async = c_req->async.private;
1491 struct ntvfs_request *req = async->req;
1492 req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
1493 talloc_free(async);
1494 req->async_states->send_fn(req);
1497 static NTSTATUS async_proxy_qpathinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1499 struct proxy_private *private = async->proxy;
1500 struct smbcli_request *c_req = async->c_req;
1501 struct ntvfs_request *req = async->req;
1502 struct proxy_file *f = talloc_get_type_abort(async->f, struct proxy_file);
1503 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1504 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1506 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1507 req->async_states->status=status;
1509 /* It's good to check for over-all status but we need to check status of each sub-message */
1510 NT_STATUS_NOT_OK_RETURN(status);
1512 /* populate the cache, and then fill the request from the cache */
1513 /* Assuming that r->count.in == 1 */
1514 SMB_ASSERT(r->out.count==1);
1515 DEBUG(5,("%s: Combined status of meta request: %s\n",__LOCATION__, get_friendly_nt_error_msg (r->out.info_data[0].status)));
1516 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1518 DEBUG(5,("%s: will set cache %p item=%p metadata=%p %p\n",__LOCATION__, f, f?f->metadata:NULL, r));
1519 proxy_set_cache_info(f->metadata, r);
1521 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1522 DEBUG(5,("%s: set final response of original request to: %s\n",__LOCATION__, get_friendly_nt_error_msg (req->async_states->status)));
1524 return req->async_states->status;
1527 static void async_qpathinfo_notify(void* data, struct fdirmon* dirmon) {
1528 struct proxy_file* file=data;
1530 DEBUG(5,("%s: qpathinfo cache %s destroyed\n",__LOCATION__,file->filename));
1531 DLIST_REMOVE(file->proxy->closed_files, file);
1532 talloc_free(file);
1536 return info on a pathname
/*
  Return info on a pathname.  First try the cache of recently closed files;
  for upgradeable info levels against a remote proxy server, promote the
  request to a single rpclite PROXY_GETINFO round trip whose reply fills
  the metadata cache (async_proxy_qpathinfo) and answers the client.
  Otherwise fall through to a plain downstream pathinfo, sync or async per
  the front end's MAY_ASYNC flag.
  NOTE(review): blank and brace-only lines are missing from this extract;
  the code lines below are kept byte-identical to the file.
*/
1538 static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
1539 struct ntvfs_request *req, union smb_fileinfo *io)
1541 struct proxy_private *private = ntvfs->private_data;
1542 struct smbcli_request *c_req;
1543 struct proxy_file *f=NULL;
1544 const char* path;
1546 SETUP_PID;
1548 /* Look for closed files */
1549 if (private->enabled_qpathinfo) {
1550 int len=strlen(io->generic.in.file.path)+1;
1551 DEBUG(5,("%s: Looking for cached metadata for: %s\n",__LOCATION__,io->generic.in.file.path));
1552 DLIST_FIND(private->closed_files, f,
1553 (len==f->filename_size && fstrncmp(io->generic.in.file.path, f->filename, f->filename_size)==0));
1554 if (f) {
1555 /* stop cache going away while we are using it */
1556 talloc_reference(req, f);
1559 /* upgrade the request */
1560 switch(io->generic.level) {
1561 case RAW_FILEINFO_STANDARD_INFO:
1562 case RAW_FILEINFO_STANDARD_INFORMATION:
1563 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1564 case RAW_FILEINFO_ALL_INFO:
1565 case RAW_FILEINFO_COMPRESSION_INFO:
1566 case RAW_FILEINFO_INTERNAL_INFORMATION:
1567 case RAW_FILEINFO_ACCESS_INFORMATION:
1568 case RAW_FILEINFO_POSITION_INFORMATION:
1569 case RAW_FILEINFO_MODE_INFORMATION:
1570 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1571 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1572 case RAW_FILEINFO_STREAM_INFO:
1573 case RAW_FILEINFO_STREAM_INFORMATION:
1574 case RAW_FILEINFO_EA_INFO:
1575 case RAW_FILEINFO_EA_INFORMATION:
1576 DEBUG(5,("%s: item is %p\n",__FUNCTION__, f));
1577 if (f && f->metadata) {
1578 NTSTATUS status;
1579 bool valid;
1580 DEBUG(5,("%s: Using cached metadata %x (item=%p)\n",__FUNCTION__, f->metadata->valid, f));
1581 status=proxy_cache_info(io, f->metadata, &valid);
1582 if (valid) return status;
1583 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1585 /* construct an item to hold the cache if we need to */
1586 if (! f && private->enabled_cache_info && PROXY_REMOTE_SERVER(private) && (f=talloc_zero(private, struct proxy_file))) {
1587 struct fdirmon* dirmon;
/* watch the parent directory so the cache entry is invalidated on change */
1588 dirmon=get_fdirmon(private, io->generic.in.file.path, true);
1589 if (f && dirmon) {
1590 f->proxy=private;
1591 dirmon_add_callback(dirmon, async_qpathinfo_notify, f);
1593 f->filename=talloc_strdup(f, io->generic.in.file.path);
1594 f->filename_size=strlen(f->filename)+1;
1595 f->metadata=talloc_zero(f, struct file_metadata);
1596 /* should not really add unless we succeeded */
1597 DLIST_ADD(private->closed_files, f);
1598 } else {
1599 talloc_free(f);
1600 f=NULL;
1603 if (f && f->metadata && private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1604 struct proxy_GetInfo *r;
1605 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1607 r=talloc_zero(req, struct proxy_GetInfo);
1608 NT_STATUS_HAVE_NO_MEMORY(r);
1610 r->in.count=1;
1611 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1612 r->in.info_tags[0].tag_type=TAG_TYPE_PATH_INFO;
1613 /* 1+ to get the null */
1614 r->in.info_tags[0].info_tag.path.count=1+strlen(io->generic.in.file.path);
1615 r->in.info_tags[0].info_tag.path.s=io->generic.in.file.path;
1616 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1617 /* the callback handler will populate the cache and respond from the cache */
1618 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qpathinfo, NT_STATUS_INTERNAL_ERROR);
1620 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1621 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1622 return sync_chain_handler(c_req);
1623 } else {
1624 void* f=NULL;
1625 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1626 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1627 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1628 return NT_STATUS_OK;
1633 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1634 return smb_raw_pathinfo(private->tree, req, io);
1637 c_req = smb_raw_pathinfo_send(private->tree, io);
1639 ASYNC_RECV_TAIL(io, async_qpathinfo);
1643 a handler for async qfileinfo replies
/* Completion handler for an async fileinfo: collect the downstream reply
   into the saved parms, free the bridge record and wake the ntvfs request. */
1645 static void async_qfileinfo(struct smbcli_request *c_req)
1647 struct async_info *async = c_req->async.private;
1648 struct ntvfs_request *req = async->req;
1649 req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1650 talloc_free(async);
1651 req->async_states->send_fn(req);
/*
  Chained handler for the rpclite PROXY_GETINFO reply used to upgrade a
  qfileinfo: fill the open file's metadata cache (io2 / r) from the reply,
  then answer the original fileinfo request (io1 / io) from the cache.
  Returns early with the failing status if the transport or the (single)
  sub-message failed.
  NOTE(review): blank and brace-only lines are missing from this extract;
  the code lines below are kept byte-identical to the file.
*/
1654 static NTSTATUS async_proxy_qfileinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1656 struct proxy_private *private = async->proxy;
1657 struct smbcli_request *c_req = async->c_req;
1658 struct ntvfs_request *req = async->req;
1659 struct proxy_file *f = async->f;
1660 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1661 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1663 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1664 req->async_states->status=status;
1666 NT_STATUS_NOT_OK_RETURN(status);
1668 /* populate the cache, and then fill the request from the cache */
1669 /* Assuming that r->count.in == 1 */
1670 SMB_ASSERT(r->out.count==1);
1671 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1673 proxy_set_cache_info(f->metadata, r);
1675 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1677 return req->async_states->status;
1681 query info on a open file
1683 static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
1684 struct ntvfs_request *req, union smb_fileinfo *io)
1686 struct proxy_private *private = ntvfs->private_data;
1687 struct smbcli_request *c_req;
1688 struct proxy_file *f;
1689 bool valid=false;
1690 NTSTATUS status;
1692 SETUP_PID;
1694 SETUP_FILE_HERE(f);
1696 /* upgrade the request */
1697 switch(io->generic.level) {
1698 case RAW_FILEINFO_STANDARD_INFO:
1699 case RAW_FILEINFO_STANDARD_INFORMATION:
1700 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1701 case RAW_FILEINFO_ALL_INFO:
1702 case RAW_FILEINFO_COMPRESSION_INFO:
1703 case RAW_FILEINFO_INTERNAL_INFORMATION:
1704 case RAW_FILEINFO_ACCESS_INFORMATION:
1705 case RAW_FILEINFO_POSITION_INFORMATION:
1706 case RAW_FILEINFO_MODE_INFORMATION:
1707 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1708 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1709 case RAW_FILEINFO_STREAM_INFO:
1710 case RAW_FILEINFO_STREAM_INFORMATION:
1711 case RAW_FILEINFO_EA_INFO:
1712 case RAW_FILEINFO_EA_INFORMATION:
1713 DEBUG(5,("%s: oplock is %d\n",__FUNCTION__, f->oplock));
1714 if (f->oplock) {
1715 DEBUG(5,("%s: %p Using cached metadata %x (fnum=%d)\n",__FUNCTION__, f, f->metadata->valid, f->fnum));
1716 status=proxy_cache_info(io, f->metadata, &valid);
1717 if (valid) return status;
1718 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1720 if (private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1721 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1722 struct proxy_GetInfo *r=talloc_zero(req, struct proxy_GetInfo);
1723 NT_STATUS_HAVE_NO_MEMORY(r);
1724 r->in.count=1;
1725 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1726 r->in.info_tags[0].tag_type=TAG_TYPE_FILE_INFO;
1727 r->in.info_tags[0].info_tag.fnum=io->generic.in.file.fnum;
1728 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1729 /* the callback handler will populate the cache and respond from the cache */
1730 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qfileinfo, NT_STATUS_INTERNAL_ERROR);
1732 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1733 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1734 return sync_chain_handler(c_req);
1735 } else {
1736 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1737 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1738 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1739 return NT_STATUS_OK;
1744 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1745 return smb_raw_fileinfo(private->tree, req, io);
1748 c_req = smb_raw_fileinfo_send(private->tree, io);
1750 ASYNC_RECV_TAIL(io, async_qfileinfo);
1754 set info on a pathname
1756 static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
1757 struct ntvfs_request *req, union smb_setfileinfo *st)
1759 struct proxy_private *private = ntvfs->private_data;
1760 struct smbcli_request *c_req;
1762 SETUP_PID;
1764 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1765 return smb_raw_setpathinfo(private->tree, st);
1768 c_req = smb_raw_setpathinfo_send(private->tree, st);
1770 SIMPLE_ASYNC_TAIL;
/*
  a handler for async open replies.
  Receives the open result, binds the backend handle, seeds the metadata
  cache from the open response, and attaches (or creates) a file data cache.
  On any failure the status is left in req->async_states->status and we
  jump straight to sending the response.
*/
static void async_open(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct proxy_private *proxy = async->proxy;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	union smb_open *io = async->parms;
	union smb_handle *file;

	talloc_free(async);
	req->async_states->status = smb_raw_open_recv(c_req, req, io);
	SMB_OPEN_OUT_FILE(io, file);
	f->fnum = file->fnum;
	file->ntvfs = NULL;
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
	if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
	file->ntvfs = f->h;
	DLIST_ADD(proxy->files, f);

	f->oplock=io->generic.out.oplock_level;

	/* seed the metadata cache from the open response */
	LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
	f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
	DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));

	if (proxy->cache_enabled) {
		struct search_cache_item *item=NULL;
		struct search_cache *s=proxy->search_caches;
		/* If we are still monitoring the file for changes we can
		   retain the previous cache state, [if it is more recent that the monitor]! */
		/* yeah yeah what if there is more than one.... :-( */
		/* open-by-file-id opens carry an id, not a name, so they can't be
		   matched against the name-keyed search cache */
		if (! (io->generic.level == RAW_OPEN_NTCREATEX &&
		       io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) &&
		    find_search_cache_item(SMB_OPEN_IN_FILE(io), &s, &item) && item->cache) {
			DEBUG(5,("%s: Using cached file cache\n",__LOCATION__));
			f->cache=talloc_reference(f, item->cache);
			cache_beopen(f->cache);
			if (item->metadata) {
				/* deep-copy the cached metadata, including the
				   per-stream array, into this handle's metadata */
				*(f->metadata)=*(item->metadata);
				f->metadata->info_data.fname.s=talloc_strdup(f, item->metadata->info_data.fname.s);
				f->metadata->info_data.fname.count=item->metadata->info_data.fname.count;

				f->metadata->info_data.streams=talloc_zero_array(f, struct info_stream, f->metadata->info_data.num_streams);
				if (f->metadata->info_data.streams) {
					int c;
					for(c=0; c < f->metadata->info_data.num_streams; c++) {
						f->metadata->info_data.streams[c].size = item->metadata->info_data.streams[c].size;
						f->metadata->info_data.streams[c].alloc_size = item->metadata->info_data.streams[c].alloc_size;
						f->metadata->info_data.streams[c].stream_name.s= talloc_strdup(f, item->metadata->info_data.streams[c].stream_name.s);
						f->metadata->info_data.streams[c].stream_name.count=item->metadata->info_data.streams[c].stream_name.count;
					}
				}
				f->metadata->count=1;
			}
		} else {
			/* no usable cached state: open a fresh file cache */
			f->cache=cache_open(proxy->cache, f, io, f->oplock, proxy->cache_readahead);
			if (proxy->fake_valid) {
				cache_handle_validated(f, cache_handle_len(f));
			}
			if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
			if (item) {
				/* remember this cache on the search-cache item for future opens */
				item->cache = talloc_reference(item, f->cache);
				item->metadata=talloc_reference(item, f->metadata);
				DEBUG(5,("%s: Caching file cache for later\n",__LOCATION__));
			} else {
				DEBUG(5,("%s: NOT Caching file cache for later\n",__LOCATION__));
			}
		}
	}

failed:
	req->async_states->send_fn(req);
}
/*
  open a file.
  Read-only opens of a file we already hold a read-only handle for are
  answered locally by "cloning" the existing handle (sharing its fnum,
  metadata cache and file cache). Otherwise the open is forwarded to the
  server, optionally requesting oplocks so that metadata can be cached.
*/
static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
			   struct ntvfs_request *req, union smb_open *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	struct ntvfs_handle *h;
	struct proxy_file *f, *clone;
	NTSTATUS status;
	void *filename;
	int filename_size;
	uint16_t fnum; /* NOTE(review): appears unused in this function - confirm */

	SETUP_PID;

	if (io->generic.level != RAW_OPEN_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_open(ntvfs, req, io);
	}

	status = ntvfs_handle_new(ntvfs, req, &h);
#warning should we free this handle if the open fails?
	NT_STATUS_NOT_OK_RETURN(status);

	f = talloc_zero(h, struct proxy_file);
	NT_STATUS_HAVE_NO_MEMORY(f);
	f->proxy=private;

	/* If the file is being opened read only and we already have a read-only
	   handle for this file, then just clone and ref-count the handle */
	/* First calculate the filename key */
	if (io->generic.level == RAW_OPEN_NTCREATEX &&
	    io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) {
		/* open-by-id: the key is the 64-bit file id, not a name */
		filename_size=sizeof(uint64_t);
		filename=io->generic.in.fname;
	} else {
		filename=SMB_OPEN_IN_FILE(io);
		filename_size=strlen(filename)+1;
	}
	f->filename=talloc_memdup(f, filename, filename_size);
	f->filename_size=filename_size;
	f->h = h;
	/* only read-shared, non-directory opens that can't create or truncate
	   are safe to answer from an existing handle */
	f->can_clone= (io->generic.in.access_mask & NTCREATEX_SHARE_ACCESS_MASK) == NTCREATEX_SHARE_ACCESS_READ &&
	    (io->generic.in.impersonation == NTCREATEX_IMPERSONATION_IMPERSONATION) &&
	    (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY) == 0 &&
	    (io->generic.in.open_disposition != NTCREATEX_DISP_CREATE) &&
	    (io->generic.in.open_disposition != NTCREATEX_DISP_SUPERSEDE);
	/* see if we have a matching open file */
	clone=NULL;
	if (f->can_clone) for (clone=private->files; clone; clone=clone->next) {
		if (clone->can_clone && filename_size == clone->filename_size &&
		    memcmp(filename, clone->filename, filename_size)==0) {
			break;
		}
	}

	/* if clone is not null, then we found a match */
	if (private->enabled_open_clone && clone) {
		union smb_handle *file;

		DEBUG(5,("%s: clone handle %d\n",__FUNCTION__,clone->fnum));
		SMB_OPEN_OUT_FILE(io, file);
		f->fnum = clone->fnum;
		file->ntvfs = NULL;
		status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
		NT_STATUS_NOT_OK_RETURN(status);
		file->ntvfs = f->h;
		DLIST_ADD(private->files, f);
		/* but be sure to share the same metadata cache */
		f->metadata=talloc_reference(f, clone->metadata);
		f->metadata->count++;
		f->oplock=clone->oplock;
		f->cache=talloc_reference(f, clone->cache);
		/* We don't need to reduce the oplocks for both files if we are read-only */
		/* if (clone->oplock==EXCLUSIVE_OPLOCK_RETURN ||
		    clone->oplock==BATCH_OPLOCK_RETURN) {
			DEBUG(5,("%s: Breaking clone oplock from %d\n",__LOCATION__, clone->oplock));
			clone->oplock==LEVEL_II_OPLOCK_RETURN;
			status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_LEVEL_II);
			//if (!NT_STATUS_IS_OK(status)) result=false;
		} else if (clone->oplock==LEVEL_II_OPLOCK_RETURN) {
			DEBUG(5,("%s: Breaking clone oplock from %d, cache no longer valid\n",__LOCATION__, clone->oplock));
			cache_handle_stale(f);
			clone->oplock=NO_OPLOCK_RETURN;
			status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_NONE);
			//if (!NT_STATUS_IS_OK(status)) result=false;
		} */
		f->oplock=clone->oplock;
		/* and fake the rest of the response struct */
		io->generic.out.oplock_level=f->oplock;
		io->generic.out.create_action=NTCREATEX_ACTION_EXISTED;
		io->generic.out.create_time=f->metadata->info_data.create_time;
		io->generic.out.access_time=f->metadata->info_data.access_time;
		io->generic.out.write_time=f->metadata->info_data.write_time;
		io->generic.out.change_time=f->metadata->info_data.change_time;
		io->generic.out.attrib=f->metadata->info_data.attrib;
		io->generic.out.alloc_size=f->metadata->info_data.alloc_size;
		io->generic.out.size=f->metadata->info_data.size;
		io->generic.out.file_type=f->metadata->info_data.file_type;
		io->generic.out.ipc_state=f->metadata->info_data.ipc_state;
		io->generic.out.is_directory=f->metadata->info_data.is_directory;
		/* optional return values matching SMB2 tagged
		   values in the call */
		//io->generic.out.maximal_access;
		return NT_STATUS_OK;
	}
	f->metadata=talloc_zero(f, struct file_metadata);
	NT_STATUS_HAVE_NO_MEMORY(f->metadata);
	f->metadata->count=1;

	/* if oplocks aren't requested, optionally override and request them */
	if (! (io->generic.in.flags & (OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK))
	    && private->fake_oplock) {
		io->generic.in.flags |= OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK;
	}

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		union smb_handle *file;

		/* synchronous open: mirror the work done by async_open */
		status = smb_raw_open(private->tree, req, io);
		NT_STATUS_NOT_OK_RETURN(status);

		SMB_OPEN_OUT_FILE(io, file);
		f->fnum = file->fnum;
		file->ntvfs = NULL;
		status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
		NT_STATUS_NOT_OK_RETURN(status);
		file->ntvfs = f->h;
		DLIST_ADD(private->files, f);

		f->oplock=io->generic.out.oplock_level;

		LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
		DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
		f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;

		if (private->cache_enabled) {
			f->cache=cache_open(private->cache, f, io, f->oplock, private->cache_readahead);
			if (private->fake_valid) {
				cache_handle_validated(f, cache_handle_len(f));
			}
			if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
		}

		return NT_STATUS_OK;
	}

	c_req = smb_raw_open_send(private->tree, io);

	ASYNC_RECV_TAIL_F(io, async_open, f);
}
2008 create a directory
2010 static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
2011 struct ntvfs_request *req, union smb_mkdir *md)
2013 struct proxy_private *private = ntvfs->private_data;
2014 struct smbcli_request *c_req;
2016 SETUP_PID;
2018 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2019 return smb_raw_mkdir(private->tree, md);
2022 c_req = smb_raw_mkdir_send(private->tree, md);
2024 SIMPLE_ASYNC_TAIL;
2028 remove a directory
2030 static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
2031 struct ntvfs_request *req, struct smb_rmdir *rd)
2033 struct proxy_private *private = ntvfs->private_data;
2034 struct smbcli_request *c_req;
2036 SETUP_PID;
2038 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2039 return smb_raw_rmdir(private->tree, rd);
2041 c_req = smb_raw_rmdir_send(private->tree, rd);
2043 SIMPLE_ASYNC_TAIL;
2047 rename a set of files
2049 static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
2050 struct ntvfs_request *req, union smb_rename *ren)
2052 struct proxy_private *private = ntvfs->private_data;
2053 struct smbcli_request *c_req;
2055 SETUP_PID;
2057 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2058 return smb_raw_rename(private->tree, ren);
2061 c_req = smb_raw_rename_send(private->tree, ren);
2063 SIMPLE_ASYNC_TAIL;
2067 copy a set of files
2069 static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
2070 struct ntvfs_request *req, struct smb_copy *cp)
2072 return NT_STATUS_NOT_SUPPORTED;
/* A distinct wrapper around async_chain_handler, defined separately only
   so that pending read calls can be recognised by comparing
   c_req->private.fn == async_read_handler */
static void async_read_handler(struct smbcli_request *c_req)
{
	async_chain_handler(c_req);
}
2082 NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2084 struct proxy_private *private = async->proxy;
2085 struct smbcli_request *c_req = async->c_req;
2086 struct proxy_file *f = async->f;
2087 union smb_read *io = async->parms;
2089 /* if request is not already received by a chained handler, read it */
2090 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2092 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2093 f->readahead_pending, private->readahead_spare));
2095 f->readahead_pending--;
2096 private->readahead_spare++;
2098 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2099 f->readahead_pending, private->readahead_spare));
2101 return status;
/*
  a handler for async read replies - speculative read-aheads.
  It merely saves in the cache. The async chain handler will call send_fn if
  there is one, or if sync_chain_handler is used the send_fn is called by
  the ntvfs back end.
*/
NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct smbcli_request *c_req = async->c_req;
	struct proxy_file *f = async->f;
	union smb_read *io = async->parms;

	/* if request is not already received by a chained handler, read it */
	if (c_req) status=smb_raw_read_recv(c_req, async->parms);

	DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
		get_friendly_nt_error_msg(status)));

	NT_STATUS_NOT_OK_RETURN(status);

	/* if it was a validate read we don't want to save anything unless it failed.
	   Until we use Proxy_read structs we can't tell, so guess */
	if (io->generic.out.nread == io->generic.in.maxcnt &&
	    io->generic.in.mincnt < io->generic.in.maxcnt) {
		/* looks like a validate read, just move the validate pointer, the
		   original read-request has already been satisfied from cache */
		DEBUG(3,("%s megavalidate suceeded, validate to %lld\n",__FUNCTION__,
			io->generic.in.offset + io->generic.out.nread));
		cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
	} else {
		/* a genuine read-ahead: store the returned data in the cache */
		DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
		cache_handle_save(f, io->generic.out.data,
				  io->generic.out.nread,
				  io->generic.in.offset);
	}

	DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}
/* handler for fragmented reads.
   A large client read may have been split into several downstream reads
   ("fragments"); each completion copies its window of data into the master
   io and, when the last fragment finishes, the master response is sent.
   io1 is the master smb_read, io2 the async_read_fragment being completed. */
NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct proxy_private *private = async->proxy;
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = async->f;
	struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
	/* this is the io against which the fragment is to be applied */
	union smb_read *io = talloc_get_type_abort(io1, union smb_read);
	/* this is the io for the read that issued the callback */
	union smb_read *io_frag = fragment->io_frag; /* async->parms; */
	struct async_read_fragments* fragments=fragment->fragments;

	/* if request is not already received by a chained handler, read it */
#warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
	if (c_req) status=smb_raw_read_recv(c_req, io_frag);

	DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
		get_friendly_nt_error_msg(status)));

	fragment->status = status;

	/* remove fragment from fragments */
	DLIST_REMOVE(fragments->fragments, fragment);

#warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
	/* in which case if we will want to collate all responses and return a valid read
	   for the leading NT_STATUS_OK fragments */

	/* did this one fail, inducing a general fragments failure? */
	if (!NT_STATUS_IS_OK(fragment->status)) {
		/* preserve the status of the fragment with the smallest offset
		   when we can work out how */
		if (NT_STATUS_IS_OK(fragments->status)) {
			fragments->status=fragment->status;
		}

		cache_handle_novalidate(f);
		DEBUG(5,("** Devalidated proxy due to read failure\n"));
	} else {
		/* No fragments have yet failed, keep collecting responses */
		ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
		/* Find memcpy window, copy data from the io_frag to the io */
		off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
		/* used to use mincnt */
		off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
		off_t end_offset=MIN(io_extent, extent);
		/* ASSERT(start_offset <= end_offset) */
		/* ASSERT(start_offset <= io_extent) */
		if (start_offset >= io_extent) {
			DEBUG(3,("useless read-ahead tagged on to: %s",__LOCATION__));
		} else {
			uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
			uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
			/* src == dst in cases where we did not latch onto someone elses
			   read, but are handling our own */
			if (src != dst)
				memcpy(dst, src, end_offset - start_offset);
		}

		/* There should be a better way to detect, but it needs the proxy rpc struct
		   not ths smb_read struct */
		if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
			/* short read: the cache can no longer be trusted as validated */
			DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
				(long long) io_frag->generic.out.nread,
				(long long) io_frag->generic.in.mincnt,
				(long long) io_frag->generic.in.maxcnt));
			cache_handle_novalidate(f);
		}

		/* We broke up the original read. If not enough of this sub-read has
		   been read, and then some of then next block, it could leave holes!
		   We will only acknowledge up to the first partial read, and treat
		   it as a small read. If server can return NT_STATUS_OK for a partial
		   read so can we, so we preserve the response.
		   "enough" is all of it (maxcnt), except on the last block, when it has to
		   be enough to fill io->generic.in.mincnt. We know it is the last block
		   if nread is small but we could fill io->generic.in.mincnt */
		if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
		    end_offset < io->generic.in.offset + io->generic.in.mincnt) {
			DEBUG(4,("Fragmented read only partially successful\n"));

			/* Shrink the master nread (or grow to this size if we are first partial */
			if (! fragments->partial ||
			    (io->generic.in.offset + io->generic.out.nread) > extent) {
				io->generic.out.nread = extent - io->generic.in.offset;
			}

			/* stop any further successes from extending the partial read */
			fragments->partial=true;
		} else {
			/* only grow the master nwritten if we haven't logged a partial write */
			if (! fragments->partial &&
			    (io->generic.in.offset + io->generic.out.nread) < extent ) {
				io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
			}
		}
	}

	/* Was it the last fragment, or do we know enough to send a response? */
	if (! fragments->fragments) {
		DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
			io->generic.out.nread, io->generic.in.mincnt,
			get_friendly_nt_error_msg(fragments->status)));
		if (fragments->async) {
			req->async_states->status=fragments->status;
			DEBUG(5,("Fragments async response sending\n"));
#warning its not good freeing early if other pending requests have io allocated against this request which will now be freed
			/* esp. as they may be attached to by other reads. Maybe attachees should be taking reference, but how will they
			   know the top level they need to take reference too.. */
#warning should really queue a sender here, not call it */
			req->async_states->send_fn(req);
			DEBUG(5,("Async response sent\n"));
		} else {
			DEBUG(5,("Fragments SYNC return\n"));
		}
	}

	/* because a c_req may be shared by many req, chained handlers must return
	   a status pertaining to the general validity of this specific c_req, not
	   to their own private processing of the c_req for the benefit of their req
	   which is returned in fragments->status
	*/
	return status;
}
/* Issue read-ahead X bytes where X is the window size calculation based on
   server_latency * server_session_bandwidth
   where latency is the idle (link) latency and bandwidth is less than or equal_to
   to actual bandwidth available to the server.
   Read-ahead should honour locked areas in whatever way is neccessary (who knows?)
   read_ahead is defined here and not in the cache engine because it requires too
   much knowledge of private structures
*/
/* The concept is buggy unless we can tell the next proxy that these are
   read-aheads, otherwise chained proxy setups will each read-ahead of the
   read-ahead which can put a larger load on the final server.
   Also we probably need to distinguish between
   * cache-less read-ahead
   * cache-revalidating read-ahead
*/
NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
		    union smb_read *io, ssize_t as_read)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_tree *tree = private->tree; /* NOTE(review): appears unused below - confirm */
	struct cache_file_entry *cache;
	off_t next_position; /* this read offset+length+window */
	off_t end_position; /* position we read-ahead to */
	off_t cache_populated;
	off_t read_position, new_extent;

	/* read-ahead only makes sense against a remote proxy server with
	   an enabled cache on this handle */
	if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("A\n"));
	if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("B\n"));
	cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
	DEBUG(5,("C\n"));
	/* don't read-ahead if we are in bulk validate mode */
	if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
	DEBUG(5,("D\n"));
	/* if we can't trust what we read-ahead anyway then don't bother although
	 * if delta-reads are enabled we can do so in order to get something to
	 * delta against */
	DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
		(long long int)(cache_len(cache)),
		(long long int)(cache->readahead_extent),
		(long long int)(as_read),
		cache->readahead_window,private->cache_readahead));
	if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
		DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
			cache->status));
		return NT_STATUS_UNSUCCESSFUL;
	}

	/* as_read is the mincnt bytes of a request being made or the
	   out.nread of completed sync requests
	   Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
	   then this may often NOT be the case if readahead_window < requestsize; so we will
	   get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
	   all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
	   this and have failed sparse writes adjust the cache->readahead_extent back to actual size */

	/* predict the file pointers next position */
	next_position=io->generic.in.offset + as_read;

	/* if we know how big the file is, don't read beyond */
	if (f->oplock && next_position > f->metadata->info_data.size) {
		next_position = f->metadata->info_data.size;
	}
	DEBUG(5,("Next position: %lld (%lld + %lld)\n",
		(long long int)next_position,
		(long long int)io->generic.in.offset,
		(long long int)as_read));
	/* calculate the limit of the validated or requested cache */
	cache_populated=MAX(cache->validated_extent, cache->readahead_extent);

	/* will the new read take us beyond the current extent without gaps? */
	if (cache_populated < io->generic.in.offset) {
		/* this read-ahead is a read-behind-pointer */
		new_extent=cache_populated;
	} else {
		new_extent=MAX(next_position, cache_populated);
	}

	/* as far as we can tell new_extent is the smallest offset that doesn't
	   have a pending read request on. Of course if we got a short read then
	   we will have a cache-gap which we can't handle and need to read from
	   a shrunk readahead_extent, which we don't currently handle */
	read_position=new_extent;

	/* of course if we know how big the remote file is we should limit at that */
	/* we should also mark-out which read-ahead requests are pending so that we
	 * don't repeat them while they are in-transit. */
	/* we can't really use next_position until we can have caches with holes
	   UNLESS next_position < new_extent, because a next_position well before
	   new_extent is no reason to extend it further, we only want to extended
	   with read-aheads if we have cause to suppose the read-ahead data will
	   be wanted, i.e. the next_position is near new_extent.
	   So we can't justify reading beyond window+next_position, but if
	   next_position is leaving gaps, we use new_extent instead */
	end_position=MIN(new_extent, next_position) + cache->readahead_window;
	if (f->oplock) {
		end_position=MIN(end_position, f->metadata->info_data.size);
	}
	DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
		(long long int)read_position,
		(long long int)(next_position + cache->readahead_window),
		cache->readahead_window,
		(long long int)end_position,
		private->readahead_spare));
	/* do we even need to read? */
	if (! (read_position < end_position)) return NT_STATUS_OK;

	/* readahead_spare is for the whole session (mid/tid?) and may need sharing
	   out over files and other tree-connects or something */
	while (read_position < end_position &&
	       private->readahead_spare > 0) {
		struct smbcli_request *c_req = NULL;
		ssize_t read_remaining = end_position - read_position;
		/* cap each block at the negotiated transport size and the
		   configured read-ahead block size */
		ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
			MIN(read_remaining, private->cache_readaheadblock));
		void *req = NULL; /* for the ASYNC_REC_TAIL_F_ORPHAN macro */
		uint8_t* data;
		union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);

		if (! io_copy)
			return NT_STATUS_NO_MEMORY;

#warning we are ignoring read_for_execute as far as the cache goes
		io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
		io_copy->generic.in.offset=read_position;
		io_copy->generic.in.mincnt=read_block;
		io_copy->generic.in.maxcnt=read_block;
		/* what is generic.in.remaining for? */
		io_copy->generic.in.remaining = MIN(65535,read_remaining);
		io_copy->generic.out.nread=0;

#warning someone must own io_copy, tree, maybe?
		data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
		DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
		if (! data) {
			talloc_free(io_copy);
			return NT_STATUS_NO_MEMORY;
		}
		io_copy->generic.out.data=data;

		/* are we able to pull anything from the cache to validate this read-ahead?
		   NOTE: there is no point in reading ahead merely to re-validate the
		   cache if we don't have oplocks and can't save it....
		   ... or maybe there is if we think a read will come that can be matched
		   up to this reponse while it is still on the wire */
#warning so we need to distinguish between pipe-line read-ahead and revalidation
		if (/*(cache->status & CACHE_READ)!=0 && */
		    cache_len(cache) >
		    (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
		    cache->validated_extent <
		    (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
			ssize_t pre_fill;

			pre_fill = cache_raw_read(cache, data,
				io_copy->generic.in.offset,
				io_copy->generic.in.maxcnt);
			DEBUG(5,("Data read into %p %d\n",data, pre_fill));
			if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
				io_copy->generic.out.nread=pre_fill;
				read_block=pre_fill;
			}
		}

		c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);

		if (c_req) {
			/* account for the quota consumed by this in-flight read */
			private->readahead_spare--;
			f->readahead_pending++;
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
			if (cache->readahead_extent < read_position+read_block)
				cache->readahead_extent=read_position+read_block;
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
			/* so we can decrease read-ahead counter for this session */
			ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
			ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);

			/* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
			talloc_steal(c_req->async.private, c_req);
			talloc_steal(c_req->async.private, io_copy);
			read_position+=read_block;
		} else {
			DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
			talloc_free(io_copy);
			break;
		}
	}

	DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
	return NT_STATUS_OK;
}
/* State for a multi-fragment "validate" read: proxy_validate() issues a
   sequence of part-reads (proxy_validate_parts) whose completions arrive in
   async_proxy_validate_parts(); an MD5 of all data read is accumulated and
   finally compared against the client-supplied digest in
   proxy_validate_complete(). */
2463 struct proxy_validate_parts_parts {
/* the proxy_Read request being validated (in.digest holds expected MD5) */
2464 struct proxy_Read* r;
/* the originating ntvfs request; owns this struct via talloc */
2465 struct ntvfs_request *req;
2466 struct proxy_file *f;
/* list of in-flight part-read fragments and their collective status */
2467 struct async_read_fragments *fragments;
/* next file offset to issue a part-read for */
2468 off_t offset;
/* bytes still to be requested (counts down from r->in.maxcnt) */
2469 ssize_t remaining;
/* set once all parts have been read (or an early error terminated the loop) */
2470 bool complete;
/* running MD5 digest/context over the data read so far */
2471 declare_checksum(digest);
2472 struct MD5Context context;
/* forward declarations: these three functions form a mutually-recursive
   read loop (issue parts -> async callback -> issue more parts -> complete) */
2475 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
2476 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
2477 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2478 struct proxy_validate_parts_parts *parts);
2480 /* this will be the new struct proxy_Read based read function, for now
2481 it just deals with non-cached based validate to a regular server */
/* Validate a client-cached extent: read r->in.maxcnt bytes starting at
   r->in.offset from the server (in fragments) and MD5 them, so that
   proxy_validate_complete() can compare against r->in.digest.
   Returns NT_STATUS_INVALID_HANDLE when f is NULL; otherwise either
   completes synchronously (early completion path) or marks the request
   ASYNC and finishes in the fragment callbacks. */
2482 static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
2483 struct ntvfs_request *req,
2484 struct proxy_Read *r,
2485 struct proxy_file *f)
/* NOTE(review): 'private' is set but not used in this function */
2487 struct proxy_private *private = ntvfs->private_data;
2488 struct proxy_validate_parts_parts *parts;
2489 struct async_read_fragments *fragments;
2490 NTSTATUS status;
2492 if (!f) return NT_STATUS_INVALID_HANDLE;
2494 DEBUG(5,("%s: fnum=%d **** %lld bytes \n\n\n\n",__LOCATION__,f->fnum,(long long int)r->in.maxcnt));
/* parts is owned by req, fragments by parts, so all state is freed with req */
2496 parts = talloc_zero(req, struct proxy_validate_parts_parts);
2497 DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
2498 NT_STATUS_HAVE_NO_MEMORY(parts);
2500 fragments = talloc_zero(parts, struct async_read_fragments);
2501 NT_STATUS_HAVE_NO_MEMORY(fragments);
2503 parts->fragments=fragments;
2505 parts->r=r;
2506 parts->f=f;
2507 parts->req=req;
2508 /* processed offset */
2509 parts->offset=r->in.offset;
2510 parts->remaining=r->in.maxcnt;
2511 fragments->async=true;
2513 MD5Init (&parts->context);
2515 /* start a read-loop which will continue in the callback until it is
2516 all done */
2517 status=proxy_validate_parts(ntvfs, parts);
2518 if (parts->complete) {
2519 /* Make sure we are not async */
2520 DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
2521 return proxy_validate_complete(parts);
2524 /* Assert if status!=NT_STATUS_OK then parts->complete==true */
2525 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
2526 DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
2527 return status;
/* Finish a validate read: finalise the MD5 over everything read, compare
   it with the digest the client sent, and fill in r->out accordingly.
   On a digest match the client's cache is declared valid (PROXY_USE_CACHE |
   PROXY_VALIDATE, no data need be returned); on mismatch the freshly read
   data is returned, zlib-compressed when the client offered PROXY_USE_ZLIB.
   If we are completing asynchronously this also invokes the ntvfs send_fn. */
2530 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
2532 NTSTATUS status;
2533 struct proxy_Read* r=parts->r;
2534 struct proxy_file *f=parts->f;
2536 DEBUG(5,("%s: %d/%d bytes \n\n\n\n",__LOCATION__,r->out.nread,r->in.maxcnt));
2538 MD5Final(parts->digest, &parts->context);
/* overall status is the collective status of all read fragments */
2540 status = parts->fragments->status;
2541 r->out.result = status;
2542 r->out.response.generic.count=r->out.nread;
2543 r->out.cache_name.count=0;
2545 DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg (status),
2546 r->out.response.generic.count));
2548 DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
2549 dump_data (5, r->in.digest.digest, sizeof(parts->digest));
2550 DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
2551 dump_data (5, parts->digest, sizeof(parts->digest));
2553 if (NT_STATUS_IS_OK(status) &&
2554 (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
2555 r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
2556 DEBUG(5,("======= VALIDATED FINE \n\n\n"));
2557 } else {
2558 if (r->in.flags & PROXY_USE_ZLIB) {
2559 ssize_t size = r->out.response.generic.count;
2560 DEBUG(5,("======= VALIDATED WRONG; compress size %d \n\n\n",size));
/* compress_block compresses in place; on success switch the response
   to the compressed representation (same buffer, new count) */
2561 if (compress_block(r->out.response.generic.data, &size) ) {
2562 r->out.flags|=PROXY_USE_ZLIB;
2563 r->out.response.compress.count=size;
2564 r->out.response.compress.data=r->out.response.generic.data;
2565 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2566 __LOCATION__,r->out.nread,size,size*100/r->out.nread));
2569 /* return cache filename as a ghastly hack for now */
2570 r->out.cache_name.s=f->cache->cache_name;
2571 r->out.cache_name.count=strlen(r->out.cache_name.s)+1;
2572 DEBUG(5,("%s: writing cache name: %s\n",__LOCATION__, f->cache->cache_name));
2573 /* todo: what about tiny files, buffer to small, don't validate tiny files <1K */
2576 /* assert: this must only be true if we are in a callback */
2577 if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
2578 /* we are async complete, we need to call the sendfn */
2579 parts->req->async_states->status=status;
2580 DEBUG(5,("Fragments async response sending\n"));
2582 parts->req->async_states->send_fn(parts->req);
2583 return NT_STATUS_OK;
2585 return status;
/* Completion handler for one validate part-read fragment.
   io1 is the proxy_validate_parts_parts state, io2 the finished fragment.
   Receives the read result, saves it to the local cache, copies any bytes
   that overlap the client's response window into r->out.data, folds the
   data into the running MD5, then either issues the next part via
   proxy_validate_parts() or finishes via proxy_validate_complete(). */
2588 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2590 struct smbcli_request *c_req = async->c_req;
2591 struct ntvfs_request *req = async->req;
2592 struct proxy_file *f = async->f;
2593 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2594 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2595 /* this is the io against which the fragment is to be applied */
2596 struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
2597 struct proxy_Read* r=parts->r;
2598 /* this is the io for the read that issued the callback */
2599 union smb_read *io_frag = fragment->io_frag;
2600 struct async_read_fragments* fragments=fragment->fragments;
2602 /* if request is not already received by a chained handler, read it */
2603 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2604 DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2605 DEBUG(5,("\n\n%s: parts=%p c_req=%p io_frag=%p read %lld\n",__LOCATION__,parts, c_req, io_frag,(long long int)io_frag->generic.out.nread));
2607 fragment->status=status;
2609 if (NT_STATUS_IS_OK(status)) {
2610 /* TODO: If we are not sequentially "next" the queue until we can do it */
2611 /* log this data in r->out.generic.data */
2612 /* Find memcpy window, copy data from the io_frag to the io */
2614 /* Also write validate to cache */
2615 if (f && f->cache) {
2616 cache_save(f->cache, io_frag->generic.out.data, io_frag->generic.out.nread, io_frag->generic.in.offset);
2619 /* extent is the last byte we (don't) read for this frag */
2620 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2621 /* start_offset is the file offset we first care about */
2622 off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
2623 /* Don't want to go past mincnt cos we don't have the buffer */
2624 off_t io_extent=r->in.offset + r->in.mincnt;
2625 off_t end_offset=MIN(io_extent, extent);
2627 /* ASSERT(start_offset <= end_offset) */
2628 /* ASSERT(start_offset <= io_extent) */
2629 /* Don't copy beyond buffer */
2630 if (! (start_offset >= io_extent)) {
2631 uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
2632 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2633 /* src == dst in cases where we did not latch onto someone elses
2634 read, but are handling our own */
2635 if (src != dst)
2636 memcpy(dst, src, end_offset - start_offset);
2637 r->out.nread=end_offset - r->in.offset;
2638 DEBUG(5,("%s: nread %lld ++++++++++++++++++\n", __LOCATION__,(long long int)r->out.nread));
/* fold this fragment's data into the running MD5 (sequential order is
   assumed here; see the TODO above about out-of-order fragments) */
2641 MD5Update(&parts->context, io_frag->generic.out.data,
2642 io_frag->generic.out.nread);
2644 parts->fragments->status=status;
/* kick off the next part-read; sets parts->complete when nothing remains */
2645 status=proxy_validate_parts(ntvfs, parts);
2646 } else {
2647 parts->fragments->status=status;
2650 DLIST_REMOVE(fragments->fragments, fragment);
2651 /* this will free the io_frag too */
2652 talloc_free(fragment);
2654 if (parts->complete || NT_STATUS_IS_ERR(status)) {
2655 /* this will call sendfn, the chain handler won't know... but
2656 should have no more handlers queued */
2657 return proxy_validate_complete(parts);
2660 return NT_STATUS_OK;
2663 /* continue a read loop, possibly from a callback */
/* Issue the next part-read of a validate loop. Fragment size is capped by
   the transport's negotiated max_xmit (minus SMB header overhead) and by
   parts->remaining. Sets parts->complete and returns without issuing a
   request once parts->offset has covered the whole requested extent.
   The issued request is chained to async_proxy_validate_parts(). */
2664 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2665 struct proxy_validate_parts_parts *parts)
2667 struct proxy_private *private = ntvfs->private_data;
2668 union smb_read *io_frag;
2669 struct async_read_fragment *fragment;
2670 struct smbcli_request *c_req = NULL;
2671 ssize_t size=private->tree->session->transport->negotiate.max_xmit \
2672 - (MIN_SMB_SIZE+32);
2674 /* Have we already read enough? */
2675 if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
2676 parts->complete=true;
2677 return NT_STATUS_OK;
2680 size=MIN(size, parts->remaining);
/* fragment owns io_frag which owns the data buffer, so freeing the
   fragment (in the callback) releases everything for this part */
2682 fragment=talloc_zero(parts->fragments, struct async_read_fragment);
2683 NT_STATUS_HAVE_NO_MEMORY(fragment);
2685 io_frag = talloc_zero(fragment, union smb_read);
2686 NT_STATUS_HAVE_NO_MEMORY(io_frag);
2688 io_frag->generic.out.data = talloc_size(io_frag, size);
2689 NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);
2691 io_frag->generic.level = RAW_READ_GENERIC;
2692 io_frag->generic.in.file.fnum = parts->r->in.fnum;
2693 io_frag->generic.in.offset = parts->offset;
2694 io_frag->generic.in.mincnt = size;
2695 io_frag->generic.in.maxcnt = size;
2696 io_frag->generic.in.remaining = 0;
2697 #warning maybe true is more permissive?
2698 io_frag->generic.in.read_for_execute = false;
2700 DEBUG(5,("%s: issue part read offset=%lld, size=%lld,%lld\n",__LOCATION__,
2701 (long long int)io_frag->generic.in.offset,
2702 (long long int)io_frag->generic.in.mincnt,
2703 (long long int)io_frag->generic.in.maxcnt));
2705 //c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
2706 c_req = smb_raw_read_send(private->tree, io_frag);
2707 NT_STATUS_HAVE_NO_MEMORY(c_req);
/* advance the loop state before the callback can fire */
2709 parts->offset+=size;
2710 parts->remaining-=size;
2711 fragment->c_req = c_req;
2712 fragment->io_frag = io_frag;
2713 fragment->fragments=parts->fragments;
2714 DLIST_ADD(parts->fragments->fragments, fragment);
2716 { void* req=NULL;
2717 ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
2718 ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
2721 DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__LOCATION__,parts, c_req, io_frag));
2723 return NT_STATUS_OK;
2727 read from a file
/* Main NTVFS read entry point for the proxy backend.
   Strategy, in order:
     1. map non-generic levels via ntvfs_map_read when configured;
     2. with an oplock and known file size, clamp or satisfy the read
        locally without contacting the server;
     3. try the local cache (cache_smb_raw_read) — return straight away if
        enough validated bytes are available;
     4. cover the remaining [offset, limit) extent with fragments: first by
        attaching to matching in-flight read-aheads, then by issuing new
        reads (possibly promoted to a bulk "validate" proxy_Read);
     5. issue a fresh round of read-aheads;
     6. if the caller may not go async, synchronously drain the fragments,
        otherwise mark the request ASYNC and return. */
2729 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
2730 struct ntvfs_request *req, union smb_read *io)
2732 struct proxy_private *private = ntvfs->private_data;
2733 struct smbcli_request *c_req;
2734 struct proxy_file *f;
2735 struct async_read_fragments *fragments=NULL;
2736 /* how much of read-from-cache is certainly valid */
2737 ssize_t valid=0;
2738 off_t offset=io->generic.in.offset+valid;
2739 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
2741 SETUP_PID;
2743 if (io->generic.level != RAW_READ_GENERIC &&
2744 private->map_generic) {
2745 return ntvfs_map_read(ntvfs, req, io);
2748 SETUP_FILE_HERE(f);
2750 DEBUG(3,("\n%s() fnum=%d offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
2751 io->generic.in.file.fnum,
2752 io->generic.in.offset,
2753 io->generic.in.mincnt,
2754 io->generic.in.maxcnt));
2756 io->generic.out.nread=0;
2758 /* if we have oplocks and know the files size, don't even ask the server
2759 for more */
2760 if (f->oplock) {
2761 if (io->generic.in.offset >= f->metadata->info_data.size) {
2762 io->generic.in.mincnt=0;
2763 io->generic.in.maxcnt=0;
2764 io->generic.out.nread=0;
2765 DEBUG(5,("Reading beyond known length %lld; return 0\n",(long long)f->metadata->info_data.size));
2766 return NT_STATUS_OK;
2767 } else {
2768 io->generic.in.mincnt=MIN(io->generic.in.mincnt,
2769 f->metadata->info_data.size - io->generic.in.offset);
2770 io->generic.in.maxcnt=MIN(io->generic.in.maxcnt,
2771 f->metadata->info_data.size - io->generic.in.offset);
2773 DEBUG(5,("Oplock and known size, limiting read to %lld (s=%d)\n",
2774 f->metadata->info_data.size, io->generic.in.mincnt));
2778 /* attempt to read from cache. if nread becomes non-zero then we
2779 have cache to validate. Instead of returning "valid" value, cache_read
2780 should probably return an async_read_fragment structure */
2782 if (private->cache_enabled) {
2783 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
2785 if (NT_STATUS_IS_OK(status)) {
2786 /* if we read enough valid data, return it */
2787 if (valid > 0 && valid>=io->generic.in.mincnt) {
2788 /* valid will not be bigger than maxcnt */
2789 io->generic.out.nread=valid;
2790 DEBUG(1,("Read from cache offset=%d size=%d\n",
2791 (int)(io->generic.in.offset),
2792 (int)(io->generic.out.nread)) );
2793 return status;
2796 DEBUG(5,("Cache read status: %s\n",get_friendly_nt_error_msg (status)));
2799 fragments=talloc_zero(req, struct async_read_fragments);
2800 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
2801 /* See if there are pending reads that would satisfy this request
2802 We have a validated read up to io->generic.out.nread. Anything between
2803 this and mincnt MUST be read, but we could first try and attach to
2804 any pending read-ahead on the same file.
2805 If those read-aheads fail we will re-issue a regular read from the
2806 callback handler and hope it hasn't taken too long. */
2808 /* offset is the extentof the file from which we still need to find
2809 matching read-requests. */
2810 offset=io->generic.in.offset+valid;
2811 /* limit is the byte beyond the last byte for which we need a request.
2812 This used to be mincnt, but is now maxcnt to cope with validate reads.
2813 Maybe we can switch back to mincnt when proxy_read struct is used
2814 instead of smb_read.
2816 limit=io->generic.in.offset+io->generic.in.maxcnt;
2818 while (offset < limit) {
2819 /* Should look for the read-ahead with offset <= in.offset+out.nread
2820 with the longest span, but there is only likely to be one anyway so
2821 just take the first */
2822 struct async_info* pending=private->pending;
2823 union smb_read *readahead_io=NULL;
2824 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
/* scan in-flight requests for a read-ahead on the same fnum whose span
   covers the current offset */
2825 while(pending) {
2826 if (pending->c_req->async.fn == async_read_handler) {
2827 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
2828 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
2830 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
2831 readahead_io->generic.in.offset <= offset &&
2832 readahead_io->generic.in.offset +
2833 readahead_io->generic.in.mincnt > offset) break;
2835 readahead_io=NULL;
2836 pending=pending->next;
2838 /* ASSERT(readahead_io == pending->c_req->async.params) */
2839 if (pending && readahead_io) {
2840 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2841 fragment->fragments=fragments;
2842 fragment->io_frag=readahead_io;
2843 fragment->c_req = pending->c_req;
2844 /* we found one, so attach to it. We DO need a talloc_reference
2845 because the original send_fn might be called before ALL chained
2846 handlers, and our handler will call its own send_fn first. ugh.
2847 Maybe we need to seperate reverse-mapping callbacks with data users? */
2848 /* Note: the read-ahead io is passed as io, and our req io is
2849 in io_frag->io */
2850 //talloc_reference(req, pending->req);
2851 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
2852 readahead_io->generic.in.offset,
2853 readahead_io->generic.in.mincnt));
2854 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
2855 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2856 DEBUG(5,("Attached OK\n"));
2857 #warning we don't want to return if we fail to attach, just break
2858 DLIST_ADD(fragments->fragments, fragment);
2859 /* updated offset for which we have reads */
2860 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
2861 } else {
2862 /* there are no pending reads to fill this so issue one up to
2863 the maximum supported read size. We could see when the next
2864 pending read is (if any) and only read up till there... later...
2865 Issue a fragment request for what is left, clone io.
2866 In the case that there were no fragments this will be the orginal read
2867 but with a cloned io struct */
2868 off_t next_offset;
2869 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
2870 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2871 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
2872 ssize_t offset_inc=offset-io_frag->generic.in.offset;
2873 /* 250 is a guess at ndr rpc overheads */
2874 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
2875 private->tree->session->transport->negotiate.max_xmit) \
2876 - (MIN_SMB_SIZE+32);
2877 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
2878 readsize=MIN(limit-offset, readsize);
2880 DEBUG(5,("Issuing direct read\n"));
2881 /* reduce the cached read (if any). nread is unsigned */
2882 if (io_frag->generic.out.nread > offset_inc) {
2883 io_frag->generic.out.nread-=offset_inc;
2884 /* don't make nread buffer look too big */
2885 if (io_frag->generic.out.nread > readsize)
2886 io_frag->generic.out.nread = readsize;
2887 } else {
2888 io_frag->generic.out.nread=0;
2890 /* adjust the data pointer so we read to the right place */
2891 io_frag->generic.out.data+=offset_inc;
2892 io_frag->generic.in.offset=offset;
2893 io_frag->generic.in.maxcnt=readsize;
2894 /* we don't mind mincnt being smaller if this is the last frag,
2895 but then we can already handle it being bigger but not reached...
2896 The spell would be:
2897 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
2899 io_frag->generic.in.mincnt=readsize;
2900 fragment->fragments=fragments;
2901 fragment->io_frag=io_frag;
2902 #warning attach to send_fn handler
2903 /* what if someone attaches to us? Our send_fn is called from our
2904 chained handler which will be before their handler and io will
2905 already be freed. We need to keep a reference to the io and the data
2906 but we don't know where it came from in order to take a reference.
2907 We need therefore to tackle calling of send_fn AFTER all other handlers */
2909 /* Calculate next offset (in advance) */
2910 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
2912 /* if we are (going to be) the last fragment and we are in VALIDATE
2913 mode, see if we can do a bulk validate now.
2914 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
2915 don't do a validate on a receive validate read
2917 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
2918 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
2919 ssize_t length=private->cache_validatesize;
2920 declare_checksum(digest);
2922 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
2923 length, (unsigned long long) offset));
2924 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
2925 /* no point in doing it if md5'd length < current out.nread
2926 remember: out.data contains this requests cached response
2927 if validate succeeds */
2928 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
2929 /* upgrade the read, allocate the proxy_read struct here
2930 and fill in the extras, no more out-of-band stuff */
2931 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
2932 dump_data (5, digest, sizeof(digest));
2934 r=talloc_zero(io_frag, struct proxy_Read);
2935 memcpy(r->in.digest.digest, digest, sizeof(digest));
2936 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
2937 io_frag->generic.in.maxcnt = length;
2938 r->in.mincnt=io_frag->generic.in.mincnt;
2939 /* the proxy send function will calculate the checksum based on *data */
2940 } else {
2941 /* try bulk read */
2942 if (f->oplock) {
2943 DEBUG(5,("%s: *** faking bulkd read\n\n",__LOCATION__));
2944 r=talloc_zero(io_frag, struct proxy_Read);
2945 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;//| PROXY_USE_ZLIB;
2946 io_frag->generic.in.maxcnt = MIN(f->metadata->info_data.size, private->cache_validatesize);
/* NOTE(review): suspected bug — r->in.mincnt is assigned maxcnt and then
   immediately overwritten with mincnt on the next line; one of these two
   assignments is surely redundant or wrong. Confirm intended value. */
2947 r->in.mincnt=io_frag->generic.in.maxcnt;
2948 r->in.mincnt=io_frag->generic.in.mincnt;
2950 /* not enough in cache to make it worthwhile anymore */
2951 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x len=%lld\n",
2952 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0,
2953 (unsigned long long)length));
2954 //cache_handle_novalidate(f);
2955 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
2956 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
2958 } else {
2959 if (f->cache && f->cache->status & CACHE_VALIDATE) {
2960 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
2961 (long long) next_offset,
2962 (long long) limit));
2966 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
2967 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
2968 io_frag->generic.in.maxcnt));
2969 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
2970 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
2971 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
2972 fragment->c_req=c_req;
2973 DLIST_ADD(fragments->fragments, fragment);
2974 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2975 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2976 DEBUG(5,("Frag response chained\n"));
2977 /* normally we would only install the chain_handler if we wanted async
2978 response, but as it is the async_read_fragment handler that calls send_fn
2979 based on fragments->async, instead of async_chain_handler, we don't
2980 need to worry about this call completing async'ly while we are
2981 waiting on the other attached calls. Otherwise we would not attach
2982 the async_chain_handler (via async_read_handler) because of the wait
2983 below */
2984 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
2985 void* req=NULL;
2986 /* call async_chain_hander not read handler so that folk can't
2987 attach to it, till we solve the problem above */
2988 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
2990 offset = next_offset;
2992 DEBUG(5,("Next fragment\n"));
2995 /* do we still need a final fragment? Issue a read */
2997 DEBUG(5,("No frags left to read\n"));
3000 /* issue new round of read-aheads */
3001 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
3002 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
3003 DEBUG(5,("== Done Read aheads\n"));
3005 /* If we have fragments but we are not called async, we must sync-wait on them */
3006 /* did we map the entire request to pending reads? */
3007 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3008 struct async_read_fragment *fragment;
3009 DEBUG(5,("Sync waiting\n"));
3010 /* fragment get's free'd during the chain_handler so we start at
3011 the top each time */
3012 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
3013 /* Any fragments async handled while we sync-wait on one
3014 will remove themselves from the list and not get sync waited */
3015 sync_chain_handler(fragment->c_req);
3016 /* if we have a non-ok result AND we know we have all the responses
3017 up to extent, then we could quit the loop early and change the
3018 fragments->async to true so the final irrelevant responses would
3019 come async and we could send our response now - but we don't
3020 track that detail until we have cache-maps that we can use to
3021 track the responded fragments and combine responsed linear extents
3022 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
3024 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
3025 return fragments->status;
3028 DEBUG(5,("Async returning\n"));
3029 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
3030 return NT_STATUS_OK;
3034 a handler to de-fragment async write replies back to one request.
3035 Can cope with out-of-order async responses by waiting for all responses
3036 on an NT_STATUS_OK case so that nwritten is properly adjusted
/* io1 is the master write the client issued; io2 is the completed
   fragment. Aggregates per-fragment nwritten into the master io,
   acknowledging only up to the first partial write (so no unwritten
   holes are reported as written). When the last fragment arrives, the
   data is saved to the local cache and, for async callers, send_fn fires. */
3038 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3040 struct smbcli_request *c_req = async->c_req;
3041 struct ntvfs_request *req = async->req;
3042 struct proxy_file *f=async->f;
3043 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
3044 /* this is the io against which the fragment is to be applied */
3045 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
3046 /* this is the io for the write that issued the callback */
3047 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
3048 struct async_write_fragments* fragments=fragment->fragments;
3049 ssize_t extent=0;
3051 /* if request is not already received by a chained handler, read it */
3052 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
3053 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
3055 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
3056 get_friendly_nt_error_msg(status)));
3058 fragment->status = status;
3060 DLIST_REMOVE(fragments->fragments, fragment);
3062 /* did this one fail? */
3063 if (! NT_STATUS_IS_OK(fragment->status)) {
/* remember the first failure; later successes must not mask it */
3064 if (NT_STATUS_IS_OK(fragments->status)) {
3065 fragments->status=fragment->status;
3067 } else {
3068 /* No fragments have yet failed, keep collecting responses */
3069 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
3071 /* we broke up the write so it could all be written. If only some has
3072 been written of this block, and then some of then next block,
3073 it could leave unwritten holes! We will only acknowledge up to the
3074 first partial write, and let the client deal with it.
3075 If server can return NT_STATUS_OK for a partial write so can we */
3076 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
3077 DEBUG(4,("Fragmented write only partially successful\n"));
3079 /* Shrink the master nwritten */
3080 if ( ! fragments->partial ||
3081 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
3082 io->generic.out.nwritten = extent - io->generic.in.offset;
3084 /* stop any further successes from extended the partial write */
3085 fragments->partial=true;
3086 } else {
3087 /* only grow the master nwritten if we haven't logged a partial write */
3088 if (! fragments->partial &&
3089 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
3090 io->generic.out.nwritten = extent - io->generic.in.offset;
3095 /* if this was the last fragment, clean up */
3096 if (! fragments->fragments) {
3097 DEBUG(5,("Async write re-fragmented with %d of %d\n",
3098 io->generic.out.nwritten,
3099 io->generic.in.count));
3100 if (NT_STATUS_IS_OK(fragments->status)) {
/* mirror the (fully successful) write into the local cache and grow
   our notion of the file size if the write extended it */
3101 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
3102 io->generic.in.offset);
3103 if (f->metadata->info_data.size < io->generic.in.offset+io->generic.in.count) {
3104 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3107 if (fragments->async) {
3108 req->async_states->status=fragments->status;
3109 #warning its not good freeing early if other pending requests have io allocated against this request which will now be freed
3110 req->async_states->send_fn(req);
3111 DEBUG(5,("Async response sent\n"));
3112 } else {
3113 DEBUG(5,("Fragments SYNC return\n"));
3117 return status;
3121 a handler for async write replies
/* Receives a single (non-fragmented) write reply and mirrors the written
   bytes into the local cache. async->parms is the union smb_write for the
   request. Returns the receive status unchanged. */
3123 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3125 struct smbcli_request *c_req = async->c_req;
/* NOTE(review): 'req' is set but never used in this handler */
3126 struct ntvfs_request *req = async->req;
3127 struct proxy_file *f=async->f;
3128 union smb_write *io=async->parms;
3130 if (c_req)
3131 status = smb_raw_write_recv(c_req, async->parms);
/* save even on error paths: nwritten reflects what the server accepted */
3133 cache_handle_save(f, io->generic.in.data,
3134 io->generic.out.nwritten,
3135 io->generic.in.offset);
3137 return status;
3141 write to a file
/* Main NTVFS write entry point for the proxy backend.
   Sync path (caller may not go async): proxy write for remote servers,
   smbcli_write for over-max_xmit writes, else plain smb_raw_write; the
   result is mirrored into the cache. Async path: a single async send when
   the write fits in max_xmit (or a proxy write), otherwise the write is
   split into max_xmit-sized fragments re-assembled by
   async_write_fragment(). */
3143 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
3144 struct ntvfs_request *req, union smb_write *io)
3146 struct proxy_private *private = ntvfs->private_data;
3147 struct smbcli_request *c_req;
3148 struct proxy_file *f;
3150 SETUP_PID;
3152 if (io->generic.level != RAW_WRITE_GENERIC &&
3153 private->map_generic) {
3154 return ntvfs_map_write(ntvfs, req, io);
3156 SETUP_FILE_HERE(f);
3158 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
3159 #warning ERROR get rid of this
3160 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3161 NTSTATUS status;
3162 if (PROXY_REMOTE_SERVER(private)) {
3163 /* Do a proxy write */
3164 status=proxy_smb_raw_write(ntvfs, io, f);
3165 } else if (io->generic.in.count >
3166 private->tree->session->transport->negotiate.max_xmit) {
3168 /* smbcli_write can deal with large writes, which are bigger than
3169 tree->session->transport->negotiate.max_xmit */
3170 ssize_t size=smbcli_write(private->tree,
3171 io->generic.in.file.fnum,
3172 io->generic.in.wmode,
3173 io->generic.in.data,
3174 io->generic.in.offset,
3175 io->generic.in.count);
/* NOTE(review): 'size > 0' already subsumes the full-count check, so a
   short write is also treated as success here (partial write => OK) */
3177 if (size==io->generic.in.count || size > 0) {
3178 io->generic.out.nwritten=size;
3179 status=NT_STATUS_OK;
3180 } else {
3181 status=NT_STATUS_UNSUCCESSFUL;
3183 } else {
3184 status=smb_raw_write(private->tree, io);
3187 /* Save write in cache */
3188 if (NT_STATUS_IS_OK(status)) {
3189 cache_handle_save(f, io->generic.in.data,
3190 io->generic.out.nwritten,
3191 io->generic.in.offset);
3192 if (f->metadata->info_data.size <
3193 io->generic.in.offset+io->generic.in.count) {
3194 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3198 return status;
3201 /* smb_raw_write_send can't deal with large writes, which are bigger than
3202 tree->session->transport->negotiate.max_xmit so we have to break it up
3203 trying to preserve the async nature of the call as much as possible */
3204 if (PROXY_REMOTE_SERVER(private)) {
3205 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
3206 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3207 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3208 } else if (io->generic.in.count <=
3209 private->tree->session->transport->negotiate.max_xmit) {
3210 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
3211 c_req = smb_raw_write_send(private->tree, io);
3212 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3213 } else {
3214 ssize_t remaining = io->generic.in.count;
3215 #warning Need an audit of these magin numbers MIN_SMB_SIZE+32
3216 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
3217 int done = 0;
3218 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
3220 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
3221 __FUNCTION__, io->generic.in.count,
3222 private->tree->session->transport->negotiate.max_xmit));
3224 fragments->io = io;
3225 io->generic.out.nwritten=0;
3226 io->generic.out.remaining=0;
/* carve the client write into block-sized fragments; each fragment
   shares the master io's data buffer at its own offset */
3228 do {
3229 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
3230 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
3231 ssize_t size = MIN(block, remaining);
3233 fragment->fragments = fragments;
3234 fragment->io_frag = io_frag;
3236 io_frag->generic.level = io->generic.level;
3237 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
3238 io_frag->generic.in.wmode = io->generic.in.wmode;
3239 io_frag->generic.in.count = size;
3240 io_frag->generic.in.offset = io->generic.in.offset + done;
3241 io_frag->generic.in.data = io->generic.in.data + done;
3243 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
3244 if (! c_req) {
3245 /* let pending requests clean-up when ready */
3246 fragments->status=NT_STATUS_UNSUCCESSFUL;
3247 talloc_steal(NULL, fragments);
3248 DEBUG(3,("Can't send request fragment\n"));
3249 return NT_STATUS_UNSUCCESSFUL;
3252 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
3253 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
3254 fragment->c_req=c_req;
3255 DLIST_ADD(fragments->fragments, fragment);
3257 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3258 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
3259 DEBUG(5,("Frag response chained\n"));
3261 remaining -= size;
3262 done += size;
3263 } while(remaining > 0);
3265 /* this strategy has the callback chain attached to each c_req, so we
3266 don't use the ASYNC_RECV_TAIL* to install a general one */
3269 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
3273 a handler for async seek replies
3275 static void async_seek(struct smbcli_request *c_req)
3277 struct async_info *async = c_req->async.private;
3278 struct ntvfs_request *req = async->req;
3279 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
3280 talloc_free(async);
3281 req->async_states->send_fn(req);
3285 seek in a file
3287 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
3288 struct ntvfs_request *req,
3289 union smb_seek *io)
3291 struct proxy_private *private = ntvfs->private_data;
3292 struct smbcli_request *c_req;
3294 SETUP_PID_AND_FILE;
3296 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3297 return smb_raw_seek(private->tree, io);
3300 c_req = smb_raw_seek_send(private->tree, io);
3302 ASYNC_RECV_TAIL(io, async_seek);
3306 flush a file
3308 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
3309 struct ntvfs_request *req,
3310 union smb_flush *io)
3312 struct proxy_private *private = ntvfs->private_data;
3313 struct smbcli_request *c_req;
3315 SETUP_PID;
3316 switch (io->generic.level) {
3317 case RAW_FLUSH_FLUSH:
3318 SETUP_FILE;
3319 break;
3320 case RAW_FLUSH_ALL:
3321 io->generic.in.file.fnum = 0xFFFF;
3322 break;
3323 case RAW_FLUSH_SMB2:
3324 return NT_STATUS_INVALID_LEVEL;
3327 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3328 return smb_raw_flush(private->tree, io);
3331 c_req = smb_raw_flush_send(private->tree, io);
3333 SIMPLE_ASYNC_TAIL;
/*
  close a file

  Cloned (shared-metadata) handles only send the real close when the last
  reference goes away; read-only handles don't wait for the reply at all.
*/
static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_close *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	union smb_close io2;	/* fallback request if RAW_CLOSE_GENERIC can't be sent */
	bool can_clone;

	SETUP_PID;

	/* let the generic mapping layer normalise non-generic levels first */
	if (io->generic.level != RAW_CLOSE_GENERIC &&
	    private->map_generic) {
		return ntvfs_map_close(ntvfs, req, io);
	}
	SETUP_FILE_HERE(f);
	/* we free the backend data before we use this value, so save it */
	can_clone=f->can_clone;
	/* Note, we aren't free-ing f, or it's h here. Should we?
	   even if file-close fails, we'll remove it from the list,
	   what else would we do? Maybe we should not remove until
	   after the proxied call completes? */
	DLIST_REMOVE(private->files, f);

	/* Don't send the close on cloned handles unless we are the last one */
	if (f->metadata && --(f->metadata->count)) {
		DEBUG(5,("%s: Fake close of %d, %d left\n",__FUNCTION__,f->fnum, f->metadata->count));
		/* NOTE(review): f->h backend data is not removed on this path —
		   presumably cleaned up elsewhere; confirm no handle leak */
		return NT_STATUS_OK;
	}
	DEBUG(5,("%s: Real close of %d\n",__FUNCTION__, f->fnum));
	/* only close the cache if we aren't keeping references */
	//cache_close(f->cache);

	/* possibly samba can't do RAW_CLOSE_SEND yet */
	if (! (c_req = smb_raw_close_send(private->tree, io))) {
		/* downgrade a generic close to a plain RAW_CLOSE_CLOSE and retry */
		if (io->generic.level == RAW_CLOSE_GENERIC) {
			ZERO_STRUCT(io2);
			io2.close.level = RAW_CLOSE_CLOSE;
			io2.close.in.file = io->generic.in.file;
			io2.close.in.write_time = io->generic.in.write_time;
			io = &io2;
		}
		c_req = smb_raw_close_send(private->tree, io);
		/* destroy handle */
		ntvfs_handle_remove_backend_data(f->h, ntvfs);
		/* NOTE(review): the retry's c_req is not checked for NULL before
		   the recv/async tail below — verify smb_raw_close_send can't
		   fail twice in practice */
	}

	/* If it is read-only, don't bother waiting for the result */
	if (can_clone) {
		DEBUG(5,("%s: not waiting for close response fnum=%d\n",__FUNCTION__,f->fnum));
		return NT_STATUS_OK;
	}

	/* synchronous callers block on the downstream reply */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smbcli_request_simple_recv(c_req);
	}
	DEBUG(0,("%s\n",__LOCATION__));
	SIMPLE_ASYNC_TAIL;
}
3400 exit - closing files open by the pid
3402 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
3403 struct ntvfs_request *req)
3405 struct proxy_private *private = ntvfs->private_data;
3406 struct smbcli_request *c_req;
3408 SETUP_PID;
3410 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3411 return smb_raw_exit(private->tree->session);
3414 c_req = smb_raw_exit_send(private->tree->session);
3416 SIMPLE_ASYNC_TAIL;
/*
  logoff - closing files open by the user
*/
static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req)
{
	/* we can't do this right in the proxy backend .... */
	/* NOTE(review): the downstream session is deliberately left alone here;
	   teardown is presumably driven by the connection/session close path —
	   confirm against the ntvfs session-close handling */
	return NT_STATUS_OK;
}
/*
  setup for an async call - nothing to do yet
*/
static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req,
				  void *private)
{
	/* no per-request async state is needed by this backend */
	return NT_STATUS_OK;
}
3440 cancel an async call
3442 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
3443 struct ntvfs_request *req)
3445 struct proxy_private *private = ntvfs->private_data;
3446 struct async_info *a;
3448 /* find the matching request */
3449 for (a=private->pending;a;a=a->next) {
3450 if (a->req == req) {
3451 break;
3455 if (a == NULL) {
3456 return NT_STATUS_INVALID_PARAMETER;
3459 return smb_raw_ntcancel(a->c_req);
3463 lock a byte range
3465 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
3466 struct ntvfs_request *req, union smb_lock *io)
3468 struct proxy_private *private = ntvfs->private_data;
3469 struct smbcli_request *c_req;
3471 SETUP_PID;
3473 if (io->generic.level != RAW_LOCK_GENERIC &&
3474 private->map_generic) {
3475 return ntvfs_map_lock(ntvfs, req, io);
3477 SETUP_FILE;
3479 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3480 return smb_raw_lock(private->tree, io);
3483 c_req = smb_raw_lock_send(private->tree, io);
3484 SIMPLE_ASYNC_TAIL;
3488 set info on a open file
3490 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
3491 struct ntvfs_request *req,
3492 union smb_setfileinfo *io)
3494 struct proxy_private *private = ntvfs->private_data;
3495 struct smbcli_request *c_req;
3497 SETUP_PID_AND_FILE;
3499 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3500 return smb_raw_setfileinfo(private->tree, io);
3502 c_req = smb_raw_setfileinfo_send(private->tree, io);
3504 SIMPLE_ASYNC_TAIL;
/*
  a handler for async fsinfo replies

  On success for the (stable) attribute-info levels, populate the one-shot
  per-connection cache so proxy_fsinfo() can answer locally next time.
*/
static void async_fsinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	union smb_fsinfo *fs = async->parms;
	struct proxy_private *private = async->proxy;

	req->async_states->status = smb_raw_fsinfo_recv(c_req, req, fs);

	if (NT_STATUS_IS_OK(req->async_states->status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
							   fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
		/* only fill the cache once; the assignment inside the condition
		   allocates it and skips the body on allocation failure */
		if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
			DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
			private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
			private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
			/* duplicate onto 'private' so the string outlives this request */
			private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
		}
	}

	talloc_free(async);
	req->async_states->send_fn(req);
}
/*
  return filesystem space info

  Serves attribute-info levels from a per-connection cache when possible,
  answers RAW_QFS_PROXY_INFO locally, otherwise forwards downstream
  (caching the attribute-info reply on the synchronous path; the async
  path caches in async_fsinfo).
*/
static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_fsinfo *fs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	DEBUG(5,("%s: level %x\n",__LOCATION__,fs->generic.level));
	/* this value is easy to cache */
	if ((fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
	     fs->generic.level == RAW_QFS_ATTRIBUTE_INFO) &&
	    private->fs_attribute_info) {
		DEBUG(5,("%s: using cached fsinfo\n",__LOCATION__));
		fs->attribute_info.out.fs_attr=private->fs_attribute_info->fs_attr;
		fs->attribute_info.out.max_file_component_length=private->fs_attribute_info->max_file_component_length;
		/* dup onto req so the answer lives exactly as long as the request */
		fs->attribute_info.out.fs_type=talloc_smb_wire_string_dup(req, &(private->fs_attribute_info->fs_type));
		return NT_STATUS_OK;
	}

	/* QFS Proxy: answered locally, never forwarded */
	if (fs->generic.level == RAW_QFS_PROXY_INFO) {
		fs->proxy_info.out.major_version=1;
		fs->proxy_info.out.minor_version=0;
		fs->proxy_info.out.capability=0;
		return NT_STATUS_OK;
	}

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		NTSTATUS status = smb_raw_fsinfo(private->tree, req, fs);
		/* mirror of the caching logic in async_fsinfo(): fill the
		   one-shot cache on first successful attribute-info reply */
		if (NT_STATUS_IS_OK(status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
						fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
			if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
				DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
				private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
				private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
				private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
			}
		}
		return status;
	}
	c_req = smb_raw_fsinfo_send(private->tree, req, fs);

	ASYNC_RECV_TAIL(fs, async_fsinfo);
}
/*
  return print queue info - not supported by this backend
*/
static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
			  struct ntvfs_request *req, union smb_lpq *lpq)
{
	return NT_STATUS_NOT_SUPPORTED;
}
/*
  find_first / find_next caching.
  For now, cache based on directory,search_attributes,search_pattern,ea stuff
  Consider in response:
   * search id
   * search count
   * end of search
   * ea stuff
*/

/* Deep-copy one search result entry onto mem_ctx so it outlives the
   transient receive buffer.  Starts with a shallow struct copy, then
   re-duplicates the level-specific name pointer(s).  Returns NULL on
   allocation failure or an unrecognised data_level. */
static union smb_search_data *smb_search_data_dup(void* mem_ctx, const union smb_search_data *file, enum smb_search_data_level data_level) {
	union smb_search_data *result;
	struct smb_wire_string *name;	/* NOTE(review): looks unused here — possibly referenced by the sws_dup macro; confirm before removing */

	result=talloc_zero(mem_ctx, union smb_search_data);
	if (! result) {
		return result;
	}

	/* shallow copy first; pointer members are fixed up per level below */
	*result = *file;

	switch(data_level) {
	case RAW_SEARCH_DATA_SEARCH:
		if (! (result->search.name=talloc_strdup(mem_ctx, file->search.name))) goto error;
		break;
	case RAW_SEARCH_DATA_STANDARD:
		if (sws_dup(result, result->standard.name, file->standard.name)) goto error;
		break;
	case RAW_SEARCH_DATA_EA_SIZE:
		if (sws_dup(result, result->ea_size.name, file->ea_size.name)) goto error;
		break;
	case RAW_SEARCH_DATA_EA_LIST:
		if (sws_dup(result, result->ea_list.name, file->ea_list.name)) goto error;
		break;
	case RAW_SEARCH_DATA_DIRECTORY_INFO:
		if (sws_dup(result, result->directory_info.name, file->directory_info.name)) goto error;
		break;
	case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
		if (sws_dup(result, result->full_directory_info.name, file->full_directory_info.name)) goto error;
		break;
	case RAW_SEARCH_DATA_NAME_INFO:
		if (sws_dup(result, result->name_info.name, file->name_info.name)) goto error;
		break;
	case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
		/* this level carries two strings: long and 8.3 short name */
		if (sws_dup(result, result->both_directory_info.name, file->both_directory_info.name)) goto error;
		if (sws_dup(result, result->both_directory_info.short_name, file->both_directory_info.short_name)) goto error;
		break;
	case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
		if (sws_dup(result, result->id_full_directory_info.name, file->id_full_directory_info.name)) goto error;
		break;
	case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
		if (sws_dup(result, result->id_both_directory_info.name, file->id_both_directory_info.name)) goto error;
		if (sws_dup(result, result->id_both_directory_info.short_name, file->id_both_directory_info.short_name)) goto error;
		break;
	case RAW_SEARCH_DATA_UNIX_INFO:
		if (! (result->unix_info.name=talloc_strdup(mem_ctx, file->unix_info.name))) goto error;
		break;
	case RAW_SEARCH_DATA_UNIX_INFO2:
		if (sws_dup(result, result->unix_info2.name, file->unix_info2.name)) goto error;
		break;
	default:
		DEBUG(5,("%s: Error can't dup an unknown file data type: %x\n", __LOCATION__, data_level));
		goto error;
	}
	return result;
error:
	talloc_free(result);
	return NULL;
}
/* callback function for search first/next: copies each downstream result
   into the attached search cache (if any) before forwarding it to the
   client's own callback.  A failed copy kills the whole cache, since a
   cache with a hole in it must never be served. */
static bool find_callback(void *private, const union smb_search_data *file)
{
	struct search_state *state = (struct search_state *)private;
	struct search_handle *search_handle = state->search_handle;
	bool status;

	/* if we have a cache, copy this data */
	if (search_handle->cache) {
		struct search_cache_item *item = talloc_zero(search_handle->cache, struct search_cache_item);
		DEBUG(5,("%s: Copy %p to cache %p\n", __LOCATION__, item, search_handle->cache));
		if (item) {
			item->data_level=search_handle->data_level;
			item->file = smb_search_data_dup(item, file, item->data_level);
			if (! item->file) {
				talloc_free(item);
				item=NULL;
			}
		}
		if (item) {
			/* optimization to save enumerating the entire list each time, to find the end.
			   the cached last_item is very short lived, it doesn't matter if something has
			   been added since, as long as it hasn't been removed */
			if (state->last_item) {
				DLIST_ADD_END(state->last_item, item, struct search_cache_item*);
			} else {
				DLIST_ADD_END(search_handle->cache->items, item, struct search_cache_item*);
			}
			state->last_item=item;
			state->all_count++;
		} else {
			DEBUG(5,("%s: Could not add name to search cache %p, invalidating cache\n", __LOCATION__, search_handle->cache));
			/* dear me, the whole cache will be invalid if we miss data */
			search_handle->cache->status=SEARCH_CACHE_DEAD;
			/* remove from the list of caches to use */
			DLIST_REMOVE(search_handle->cache->proxy->search_caches, search_handle->cache);
			/* Make it feel unwanted: drop both talloc references */
			talloc_unlink(private, search_handle->cache);
			talloc_unlink(search_handle, search_handle->cache);
			//if (talloc_unlink(search_handle, search_handle->cache)==0) {
			//talloc_free(search_handle->cache);
			//}
			/* stop us using it for this search too */
			search_handle->cache=NULL;
		}
	}

	/* always forward the entry to the real consumer */
	status=state->callback(state->private, file);
	if (status) {
		state->count++;
	}
	return status;
}
/*
  list files in a directory matching a wildcard pattern

  TRANS2 searches are given a mapped local handle and, where possible,
  either served from a completed search cache or used to populate a new
  one via find_callback().  All other levels are passed straight through.
*/
static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
				   struct ntvfs_request *req, union smb_search_first *io,
				   void *search_private,
				   bool (*callback)(void *, const union smb_search_data *))
{
	struct proxy_private *private = ntvfs->private_data;
	struct search_state *state;
	struct search_cache *search_cache=NULL;
	struct search_cache_key search_cache_key={0};
	struct ntvfs_handle *h=NULL;
	struct search_handle *s;
	uint16_t max_count;
	NTSTATUS status;

	SETUP_PID;

	if (! private->enabled_proxy_search) {
		return smb_raw_search_first(private->tree, req, io, search_private, callback);
	}
	switch (io->generic.level) {
	/* case RAW_SEARCH_DATA_SEARCH:
		search_cache_key.search_attrib=io->search_first.in.search_attrib;
		search_cache_key.pattern=io->search_first.in.pattern;
		max_count = io->search_first.in.max_count;
		search_cache = find_search_cache(private->search_cache, &search_cache_key);
		break;*/
	case RAW_SEARCH_TRANS2:
		/* cap the batch size the client may request */
		io->t2ffirst.in.max_count=MIN(io->t2ffirst.in.max_count,80);
		max_count = io->t2ffirst.in.max_count;

		search_cache_key.level=io->generic.level;
		search_cache_key.data_level=io->generic.data_level;
		search_cache_key.search_attrib=io->t2ffirst.in.search_attrib;
		search_cache_key.pattern=io->t2ffirst.in.pattern;
		search_cache_key.flags=io->t2ffirst.in.flags;
		search_cache_key.storage_type=io->t2ffirst.in.storage_type;
		/* try and find a search cache that is complete */
		search_cache = find_search_cache(private->search_caches, &search_cache_key);

		/* do handle mapping for TRANS2 */
		status = ntvfs_handle_new(ntvfs, req, &h);
		NT_STATUS_NOT_OK_RETURN(status);

		DEBUG(5,("%s: RAW_SEARCH_TRANS2 %s limit %d, cache=%p level=%x\n",__LOCATION__, search_cache_key.pattern, max_count, search_cache, search_cache_key.data_level));
		break;
	default: /* won't cache or proxy this */
		return smb_raw_search_first(private->tree, req, io, search_private, callback);
	}

	/* finish setting up mapped handle */
	if (h) {
		s = talloc_zero(h, struct search_handle);
		NT_STATUS_HAVE_NO_MEMORY(s);
		s->proxy=private;
		talloc_set_destructor(s, search_handle_destructor);
		s->h=h;
		s->level=io->generic.level;
		s->data_level=io->generic.data_level;
		status = ntvfs_handle_set_backend_data(s->h, private->ntvfs, s);
		NT_STATUS_NOT_OK_RETURN(status);
		DLIST_ADD(private->search_handles, s);
		DEBUG(5,("%s: map handle create %d\n",__LOCATION__, smbsrv_fnum(h)));
	}

	/* satisfy from cache */
	if (search_cache) {
		struct search_cache_item* item=search_cache->items;
		uint16_t count=0;

		/* stop cache going away while we are using it */
		s->cache = talloc_reference(s, search_cache);
		DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
		/* Don't offer over the limit, but only count those that were accepted */
		DLIST_FIND(search_cache->items, item, !(count < max_count && callback(search_private, item->file) && ++count) );
		io->t2ffirst.out.count=count;
		s->resume_item=item;
		/* just because callback didn't accept any doesn't mean we are finished */
		if (item == NULL) {
			/* currently only caching for t2ffirst */
			io->t2ffirst.out.end_of_search = true;
			DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
		} else {
			/* count the rest */
			io->t2ffirst.out.end_of_search = false;
			DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
			DLIST_FOR_EACH(item, item, count++);
			DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
		}

		/* client asked for the search to be closed? */
		if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
		    io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE)
		{
			/* destroy handle */
			ntvfs_handle_remove_backend_data(h, ntvfs);
			io->t2ffirst.out.handle=0;
		} else {
			/* now map handle */
			io->t2ffirst.out.handle=smbsrv_fnum(h);
		}
		return NT_STATUS_OK;
	}

	state = talloc_zero(req, struct search_state);
	NT_STATUS_HAVE_NO_MEMORY(state);

	/* if there isn't a matching cache already being generated by another search,
	   start one, unless FLAG_TRANS2_FIND_BACKUP_INTENT which is always live */
	if (!(io->t2ffirst.in.flags & FLAG_TRANS2_FIND_BACKUP_INTENT) &&
	    find_partial_search_cache(private->search_caches, &search_cache_key) == NULL) {
		/* need to opendir the folder being searched so we can get a notification */
		/* NB: this inner search_cache intentionally shadows the outer one */
		struct search_cache *search_cache=NULL;

		search_cache=new_search_cache(private, &search_cache_key);
		/* Stop cache going away while we are using it */
		if (search_cache) {
			s->cache=talloc_reference(s, search_cache);
		}
	}

	/* stop the handle going away while we are using it */
	state->search_handle=talloc_reference(state, s);
	state->private=search_private;
	state->callback=callback;

	status=smb_raw_search_first(private->tree, req, io, state, find_callback);
//	if (! NT_STATUS_IS_OK(status)) {
//		return (status);
//	}
	if (! NT_STATUS_IS_OK(status)) {
		/* downstream failed: throw away any partial cache and the handle */
		if (s->cache) {
			DLIST_REMOVE(private->search_caches, s->cache);
			talloc_unlink(private, s->cache);
			talloc_unlink(s, s->cache);
			//if (talloc_unlink(s, s->cache)==0) {
			//talloc_free(s->cache);
			//}
			s->cache=NULL;
		}
		s->h=NULL;
		ntvfs_handle_remove_backend_data(h, ntvfs);
		return (status);
	}

//	DEBUG(1,("%s: %p; %s\n",__LOCATION__,io,get_friendly_nt_error_msg (status)));
	DEBUG(5,("%s: max %d, got %d, copied %d; %s\n",__LOCATION__,io->t2ffirst.out.count,state->all_count, state->count,get_friendly_nt_error_msg (status)));

#warning check NT_STATUS_IS_OK ?
	if (io->t2ffirst.out.end_of_search) {
		/* cache might have gone away if problem filling */
		if (s->cache) {
			DEBUG(5,("B\n"));
			s->cache->status = SEARCH_CACHE_COMPLETE;
			DEBUG(5,("%s: Cache %p filled in first go!\n",__LOCATION__, s->cache));
		}
	}
	if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
	    io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) {
		DEBUG(5,("%s: Closing search\n",__LOCATION__));
		/* destroy partial cache */
		if (s->cache && (io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
		    ! io->t2ffirst.out.end_of_search) {
			DEBUG(5,("%s: Destroying cache %p\n",__LOCATION__, s->cache));
			/* cache is no good now! */
			DLIST_REMOVE(private->search_caches, s->cache);
			talloc_unlink(private, s->cache);
			talloc_unlink(s, s->cache);
			//if (talloc_unlink(s, s->cache)==0) {
			//talloc_free(s->cache);
			//}
			s->cache=NULL;
		}
		if (s->cache) {
			s->cache->status=SEARCH_CACHE_COMPLETE;
		}
		/* Need to deal with the case when the client would not take them all but we still cache them
		if (state->count < io->t2ffirst.out.count && io->t2ffirst.out.end_of_search) {
			io->t2ffirst.out.end_of_search = false;
			//s->resume_item = state->last_item;
		}*/
		/* destroy handle */
		DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
		ntvfs_handle_remove_backend_data(h, ntvfs);
		io->t2ffirst.out.handle=0;
	} else {
		/* remember the downstream handle, hand the client our mapped one */
		s->handle = io->t2ffirst.out.handle;
		io->t2ffirst.out.handle=smbsrv_fnum(h);
	}
	io->t2ffirst.out.count=state->count;
	return status;
}
/* like DLIST_FIND, but leave (item) pointing at the element AFTER the
   first match, or NULL if there was no match */
#define DLIST_FIND_NEXT(start, item, test) do {\
	DLIST_FIND(start, item, test); \
	if (item) (item)=(item)->next; \
} while(0)

/* talloc_free every element of a list, consuming (list) in the process */
#define DLIST_TALLOC_FREE(list) do {\
	while(list) { \
		void *tmp=(list); \
		(list)=(list)->next; \
		talloc_free(tmp); \
	} \
} while(0)
/* continue a search

   For TRANS2: map our local handle back to the downstream one, work out
   where the attached cache (if any) should resume from, and either serve
   the continuation entirely from a complete cache or pass through while
   filling it.  Non-TRANS2 levels are passed straight through. */
static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
				  struct ntvfs_request *req, union smb_search_next *io,
				  void *search_private,
				  bool (*callback)(void *, const union smb_search_data *))
{
	struct proxy_private *private = ntvfs->private_data;
	struct search_state *state;
	struct ntvfs_handle *h=NULL;
	struct search_handle *s;
	const struct search_cache *search_cache=NULL;
	struct search_cache_item *start_at=NULL;
	uint16_t max_count;
	NTSTATUS status;

	SETUP_PID;

	if (! private->enabled_proxy_search) {
		return smb_raw_search_next(private->tree, req, io, search_private, callback);
	}
	switch (io->generic.level) {
	case RAW_SEARCH_TRANS2:
		/* cap the batch size the client may request */
		io->t2fnext.in.max_count=MIN(io->t2fnext.in.max_count,80);
		max_count = io->t2fnext.in.max_count;

		h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->t2fnext.in.handle), struct ntvfs_handle);
		if (! h) return NT_STATUS_INVALID_HANDLE;
		/* convert handle into search_cache */
		s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
		if (! s) return NT_STATUS_INVALID_HANDLE;
		search_cache=s->cache;
		DEBUG(5,("%s: RAW_SEARCH_TRANS2 find_next h=%d [real %d] count %d, cache=%p\n",__LOCATION__, io->t2fnext.in.handle, s->handle, max_count, search_cache));
		/* swap in the downstream server's handle */
		io->t2fnext.in.handle=s->handle;
		if (! search_cache) {
			break;
		}

		/* warning if: uint16_t flags or const char *last_name have changed, abort cache */
		/* skip up to resume key */
		/* TODO: resume key may be PRIOR to where we left off... in which case
		   we need to avoid duplicating values */
		if (search_cache /*&& search_cache->status == SEARCH_CACHE_COMPLETE*/) {
			DEBUG(5,("%s: seek resume position\n",__LOCATION__));
			/* work out where in the cache to continue from,
			   matching on whatever resume token the level uses */
			switch (io->generic.data_level) {
			case RAW_SEARCH_DATA_STANDARD:
			case RAW_SEARCH_DATA_EA_SIZE:
			case RAW_SEARCH_DATA_EA_LIST:
				/* have a resume key? */
				DEBUG(5,("%s: type %x seek on %x\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.resume_key));
				DLIST_FIND_NEXT(search_cache->items, start_at, io->t2fnext.in.resume_key == start_at->file->standard.resume_key);
				break;
			case RAW_SEARCH_DATA_DIRECTORY_INFO: /* TODO: maybe these should be strcasecmp for some filesystems */
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->full_directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_NAME_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->name_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->both_directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_full_directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_both_directory_info.name.s)==0);
				break;
			case RAW_SEARCH_DATA_UNIX_INFO:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info.name)==0);
				break;
			case RAW_SEARCH_DATA_UNIX_INFO2:
				DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
				DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info2.name.s)==0);
				break;
			default:
				if (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CONTINUE) {
					start_at = s->resume_item;
				} else {
					DEBUG(5,("%s: HELP! How can we resume?\n",__LOCATION__));
					start_at = s->resume_item;
				}
			}
			DEBUG(5,("%s: Start at %p\n",__LOCATION__,start_at));
		}
		break;
	}

	if (! search_cache) {
		DEBUG(5,("%s: No cache, pass-through\n",__LOCATION__));
		return smb_raw_search_next(private->tree, req, io, search_private, callback);
	}
//#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference((ctx),(ptr))
//surely should be
//#define talloc_reference(ctx, ptr) _talloc_reference((ctx),(ptr))?(ptr):(NULL) to preserve the type of ptr

	/* satisfy from cache */
	if (search_cache->status == SEARCH_CACHE_COMPLETE) {
		struct search_cache_item* item;
		uint16_t count=0;
		DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));

		if (! start_at) {
			start_at = search_cache->items;
		}

		/* Don't offer over the limit; only count accepted entries */
		DLIST_FIND(start_at, item, !(count < max_count && callback(search_private, item->file) && ++count) );
		io->t2fnext.out.count=count;
		s->resume_item=item;
		if (item == NULL) {
			DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
			io->t2fnext.out.end_of_search = true;
		} else {
			DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
			io->t2fnext.out.end_of_search = false;
			/* count the rest */
			DLIST_FOR_EACH(item, item, count++);
			DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
		}
		/* is it the end? */
		if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
		    io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
		{
			/* destroy handle */
			DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
			ntvfs_handle_remove_backend_data(h, ntvfs);
		}

		return NT_STATUS_OK;
	}

	/* pass-through and fill-cache */
	if (start_at) {
		/* risk of duplicate data */
		DEBUG(5,("\n\n\nCache-populating search has resumed but NOT where we left off!\n\n\n-d"));
		/* free everything from start_at onwards through start_at-> next*/
		/* cut from the list */
		/* NOTE(review): if start_at is the list head, ->prev here is the
		   DLIST sentinel/tail link — confirm this can't dereference NULL */
		start_at->prev->next=NULL;
		start_at->prev=NULL;
		/* now how to free a list? */
		DLIST_TALLOC_FREE(start_at);
	}
	state = talloc_zero(req, struct search_state);
	NT_STATUS_HAVE_NO_MEMORY(state);

	state->search_handle=talloc_reference(state, s);
	state->private=search_private;
	state->callback=callback;

	status = smb_raw_search_next(private->tree, req, io, state, find_callback);
	if (! NT_STATUS_IS_OK(status)) {
		/* downstream failed: discard any cache and tear down the handle */
		if (s->cache) {
			DLIST_REMOVE(private->search_caches, s->cache);
			talloc_unlink(private, s->cache);
			talloc_unlink(s, s->cache);
			//if (talloc_unlink(s, s->cache)==0) {
			//talloc_free(s->cache);
			//}
			s->cache=NULL;
		}
		s->h=NULL;
		ntvfs_handle_remove_backend_data(h, ntvfs);
		return (status);
	}

	DEBUG(5,("%s: max %d, got %d, copied %d; %s\n",__LOCATION__,io->t2fnext.out.count,state->all_count, state->count,get_friendly_nt_error_msg (status)));

	/* if closing, then close */
	if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
	    io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
	{
		if (s->cache && (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
		    ! io->t2fnext.out.end_of_search) {
			/* partial cache is useless */
			DLIST_REMOVE(private->search_caches, s->cache);
			talloc_unlink(private, s->cache);
			talloc_unlink(s, s->cache);
			//if (talloc_unlink(s, s->cache)==0) {
			//talloc_free(s->cache);
			//}
			s->cache=NULL;
		}
		if (s->cache) {
			s->cache->status=SEARCH_CACHE_COMPLETE;
			/* Need to deal with the case when the client would not take them all but we still cache them
			if (state->count < io->t2fnext.out.count && io->t2fnext.out.end_of_search) {
				io->t2fnext.out.end_of_search = false;
			}*/
		}
		/* destroy handle */
		DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
		ntvfs_handle_remove_backend_data(h, ntvfs);
	}
	io->t2fnext.out.count=state->count;

	return status;
}
4128 /* close a search */
4129 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
4130 struct ntvfs_request *req, union smb_search_close *io)
4132 struct proxy_private *private = ntvfs->private_data;
4133 struct ntvfs_handle *h=NULL;
4134 struct search_handle *s;
4135 NTSTATUS status;
4137 SETUP_PID;
4139 if (! private->enabled_proxy_search) {
4140 return smb_raw_search_close(private->tree, io);
4142 switch (io->generic.level) {
4143 case RAW_SEARCH_TRANS2:
4144 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->findclose.in.handle), struct ntvfs_handle);
4145 if (! h) return NT_STATUS_INVALID_HANDLE;
4146 /* convert handle into search_cache */
4147 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
4148 if (! s) return NT_STATUS_INVALID_HANDLE;
4149 io->findclose.in.handle=s->handle;
4150 default:
4151 return smb_raw_search_close(private->tree, io);
4154 if (! s->cache) {
4155 status = smb_raw_search_close(private->tree, io);
4156 } else {
4157 if (s->cache->status != SEARCH_CACHE_COMPLETE) {
4158 /* cache is useless */
4159 DLIST_REMOVE(private->search_caches, s->cache);
4160 talloc_unlink(private, s->cache);
4161 talloc_unlink(s, s->cache);
4162 //if (talloc_unlink(s, s->cache)==0) {
4163 //talloc_free(s->cache);
4166 status = NT_STATUS_OK;
4169 s->h=NULL;
4170 ntvfs_handle_remove_backend_data(h, ntvfs);
4171 /* s MAY also be gone at this point, if h was free'd, unless there were
4172 pending responses, in which case they see s->h is NULL as a sign to stop */
4173 return status;
4177 a handler for async trans2 replies
4179 static void async_trans2(struct smbcli_request *c_req)
4181 struct async_info *async = c_req->async.private;
4182 struct ntvfs_request *req = async->req;
4183 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
4184 talloc_free(async);
4185 req->async_states->send_fn(req);
/* raw trans2: pass a client trans2 straight to the downstream server */
static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req,
			     struct smb_trans2 *trans2)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	/* when trans2 mapping is enabled the generic layer handles these calls */
	if (private->map_trans2) {
		return NT_STATUS_NOT_IMPLEMENTED;
	}

	SETUP_PID;
#warning we should be mapping file handles here

	/* synchronous caller: block for the reply */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_trans2(private->tree, req, trans2);
	}

	c_req = smb_raw_trans2_send(private->tree, trans2);

	/* queue async_trans2 as the receive handler; the macro returns */
	ASYNC_RECV_TAIL(trans2, async_trans2);
}
/* SMBtrans - not used on file shares */
static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req,
			    struct smb_trans2 *trans2)
{
	/* SMBtrans is only meaningful on IPC$ pipes; refuse it on a file share */
	return NT_STATUS_ACCESS_DENIED;
}
4222 a handler for async change notify replies
4224 static void async_changenotify(struct smbcli_request *c_req)
4226 struct async_info *async = c_req->async.private;
4227 struct ntvfs_request *req = async->req;
4228 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
4229 talloc_free(async);
4230 req->async_states->send_fn(req);
/* change notify request - always async */
static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req,
			     union smb_notify *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;
	/* remember the transport timeout so it can be restored after send */
	int saved_timeout = private->transport->options.request_timeout;
	struct proxy_file *f;

	if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
		return NT_STATUS_NOT_IMPLEMENTED;
	}

	SETUP_PID;

	/* translate the client's ntvfs handle to the downstream fnum */
	f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
	if (!f) return NT_STATUS_INVALID_HANDLE;
	io->nttrans.in.file.fnum = f->fnum;

	/* this request doesn't make sense unless its async */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* we must not timeout on notify requests - they wait
	   forever */
	private->transport->options.request_timeout = 0;

	c_req = smb_raw_changenotify_send(private->tree, io);

	/* restore the normal timeout for subsequent requests on this transport */
	private->transport->options.request_timeout = saved_timeout;

	/* queue async_changenotify as the receive handler; the macro returns */
	ASYNC_RECV_TAIL(io, async_changenotify);
}
/*
 * A handler for converting from rpc struct replies to ntioctl
 */
static NTSTATUS proxy_rpclite_map_async_send(
	struct ntvfs_module_context *ntvfs,
	struct ntvfs_request *req,
	void *io1, void *io2, NTSTATUS status)
{
	union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
	struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
	void* r=rpclite_send->struct_ptr;
	struct ndr_push* push;
	const struct ndr_interface_call* call=rpclite_send->call;
	enum ndr_err_code ndr_err;
	DATA_BLOB ndr;

	/* we only needed rpclite_send to carry call+struct_ptr this far */
	talloc_free(rpclite_send);

	DEBUG(5,("%s: converting r=%p back to ntiocl\n",__FUNCTION__, r));
	push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
	NT_STATUS_HAVE_NO_MEMORY(push);

	/* debug toggle: flip to 1 to exercise big-endian marshalling */
	if (0) {
		push->flags |= LIBNDR_FLAG_BIGENDIAN;
	}

	/* marshall the rpc OUT parameters back into an NDR blob */
	ndr_err = call->ndr_push(push, NDR_OUT, r);
	status=ndr_map_error2ntstatus(ndr_err);

	if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
			nt_errstr(status)));
		return status;
	}

	ndr=ndr_push_blob(push);
	/* NOTE(review): ndr.length is not checked against
	   io->ntioctl.in.max_data - an oversized reply blob is handed back
	   regardless; confirm the ntioctl transport tolerates this */
	DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
		io->ntioctl.in.max_data, ndr.data));
	io->ntioctl.out.blob=ndr;
	return status;
}
/*
 * A handler for sending async rpclite Read replies that were mapped to union smb_read
 */
static NTSTATUS rpclite_proxy_Read_map_async_send(
	struct ntvfs_module_context *ntvfs,
	struct ntvfs_request *req,
	void *io1, void *io2, NTSTATUS status)
{
	struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
	union smb_read* io=talloc_get_type_abort(io2, union smb_read);

	/* status here is a result of proxy_read, it doesn't reflect the status
	   of the rpc transport or related calls, just the read operation */
	DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	r->out.result=status;

	if (! NT_STATUS_IS_OK(status)) {
		/* We can't use result as a discriminator in IDL, so nread and flags always exist */
		r->out.nread=0;
		r->out.flags=0;
	} else {
		ssize_t size=io->readx.out.nread;
		r->out.flags=0;
		r->out.nread=io->readx.out.nread;

		/* if the caller supplied a digest of its cached copy, compare
		   so we can answer "use your cache" instead of shipping data */
		if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
			declare_checksum(digest);
			checksum_block(digest, io->readx.out.data, io->readx.out.nread);

			DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
			dump_data (5, digest, sizeof(digest));
			DEBUG(5,("Cached digest\n"));
			dump_data (5, r->in.digest.digest, sizeof(digest));

			if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
				r->out.flags=PROXY_USE_CACHE;
				DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
					(long long)r->out.nread));
				if (r->in.flags & PROXY_VALIDATE) {
					r->out.flags |= PROXY_VALIDATE;
					DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
						(long long)r->out.nread, (long long) io->readx.out.nread));
				}
				goto done;
			}
			DEBUG(5,("Cache does not match\n"));
		}

		if (r->in.flags & PROXY_VALIDATE) {
			/* validate failed, shrink read to mincnt - so we don't fill link */
			r->out.nread=MIN(r->out.nread, r->in.mincnt);
			size=r->out.nread;
			DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
				r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
		}

		/* optionally compress the payload in place; fall back to raw
		   data when compression does not pay off */
		if (r->in.flags & PROXY_USE_ZLIB) {
			if (compress_block(io->readx.out.data, &size) ) {
				r->out.flags|=PROXY_USE_ZLIB;
				r->out.response.compress.count=size;
				r->out.response.compress.data=io->readx.out.data;
				DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
					__FUNCTION__,r->out.nread,size,size*100/r->out.nread));
				goto done;
			}
		}

		DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
		r->out.response.generic.count=io->readx.out.nread;
		r->out.response.generic.data=io->readx.out.data;
	}

done:

	/* Or should we return NT_STATUS_OK ?*/
	DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));

	/* the rpc transport succeeded even if the operation did not */
	return NT_STATUS_OK;
}
/*
 * RPC implementation of Read
 */
static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
		struct ntvfs_request *req, struct proxy_Read *r)
{
	struct proxy_private *private = ntvfs->private_data;
	union smb_read* io=talloc(req, union smb_read);
	NTSTATUS status;
	struct proxy_file *f;
	struct ntvfs_handle *h;

	NT_STATUS_HAVE_NO_MEMORY(io);

	/* if next hop is a proxy just repeat this call also handle VALIDATE check
	   that means have own callback handlers too... */
	SETUP_PID;

	/* resolve r->in.fnum to our proxy_file f and ntvfs handle h */
	RPCLITE_SETUP_FILE_HERE(f, h);

	DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
		r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
	DEBUG(5,("Anticipated digest\n"));
	dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));

	/* If the remote end is a proxy, just fixup the file handle and pass
	   through, but update the cache on the way back:
	   if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
	   }
	*/

	/* prepare for response */
	r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
	NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);

	/* local VALIDATE requests take the dedicated validate path */
	if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
		return proxy_validate(ntvfs, req, r, f);
	}

	/* pack up an smb_read request and dispatch here */
	io->readx.level=RAW_READ_READX;
	io->readx.in.file.ntvfs=h;
	io->readx.in.mincnt=r->in.mincnt;
	io->readx.in.maxcnt=r->in.maxcnt;
	io->readx.in.offset=r->in.offset;
	io->readx.in.remaining=r->in.remaining;
	/* and something to hold the answer */
	io->readx.out.data=r->out.response.generic.data;

	/* so we get to pack the io->*.out response */
	status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
	NT_STATUS_NOT_OK_RETURN(status);

	/* so the read will get processed normally */
	return proxy_read(ntvfs, req, io);
}
/*
 * A handler for sending async rpclite Write replies
 */
static NTSTATUS rpclite_proxy_Write_map_async_send(
	struct ntvfs_module_context *ntvfs,
	struct ntvfs_request *req,
	void *io1, void *io2, NTSTATUS status)
{
	struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
	union smb_write* io=talloc_get_type_abort(io2, union smb_write);

	DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
	/* the write operation's own status travels inside the reply struct */
	r->out.result=status;

	r->out.nwritten=io->writex.out.nwritten;
	r->out.remaining=io->writex.out.remaining;

	/* the rpc transport succeeded even if the operation did not */
	return NT_STATUS_OK;
}
4471 * RPC implementation of write
4473 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
4474 struct ntvfs_request *req, struct proxy_Write *r)
4476 struct proxy_private *private = ntvfs->private_data;
4477 union smb_write* io=talloc(req, union smb_write);
4478 NTSTATUS status;
4479 struct proxy_file* f;
4480 struct ntvfs_handle *h;
4482 SETUP_PID;
4484 RPCLITE_SETUP_FILE_HERE(f,h);
4486 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
4487 r->in.count, r->in.offset, r->in.fnum));
4489 /* pack up an smb_write request and dispatch here */
4490 io->writex.level=RAW_WRITE_WRITEX;
4491 io->writex.in.file.ntvfs=h;
4492 io->writex.in.offset=r->in.offset;
4493 io->writex.in.wmode=r->in.mode;
4494 io->writex.in.count=r->in.count;
4496 /* and the data */
4497 if (PROXY_USE_ZLIB & r->in.flags) {
4498 ssize_t count=r->in.data.generic.count;
4499 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
4500 &count, r->in.count);
4501 if (count != r->in.count || !io->writex.in.data) {
4502 /* Didn't uncompress properly, but the RPC layer worked */
4503 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
4504 return NT_STATUS_OK;
4506 } else {
4507 io->writex.in.data=r->in.data.generic.data;
4510 /* so we get to pack the io->*.out response */
4511 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
4512 NT_STATUS_NOT_OK_RETURN(status);
4514 /* so the read will get processed normally */
4515 return proxy_write(ntvfs, req, io);
/*
 * RPC amalgamation of getinfo requests
 */
struct proxy_getinfo_fragments;
struct proxy_getinfo_fragmentses;

/* holds one smbcli_request to satisfy part of one proxy_GetInfo request */
struct proxy_getinfo_fragment {
	struct proxy_getinfo_fragment *prev, *next;
	struct proxy_getinfo_fragments *fragments;	/* owning per-file group */
	union smb_fileinfo *smb_fileinfo;
	struct smbcli_request *c_req;	/* in-flight request; NULLed once answered */
	NTSTATUS status;
};

/* holds reference to many fragment smbcli_request that together make up one proxy_GetInfo request */
struct proxy_getinfo_fragments {
	struct proxy_getinfo_fragments *prev, *next;
	struct proxy_getinfo_fragmentses *fragmentses;	/* owning whole-call state */
	struct proxy_getinfo_fragment *fragments;	/* outstanding fragments for this file */
	uint32_t index;		/* which r->out.info_data[] slot this group fills */
};

/* whole-call collation state for one proxy_GetInfo */
struct proxy_getinfo_fragmentses {
	struct proxy_getinfo_fragments *fragments;	/* outstanding per-file groups */
	struct proxy_GetInfo *r;
	struct ntvfs_request *req;
	bool async;		/* true once an async reply has been promised */
};
4549 a handler for async write replies
4551 NTSTATUS async_proxy_getinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
4553 struct smbcli_request *c_req = async->c_req;
4554 struct ntvfs_request *req = async->req;
4555 struct proxy_file *f=async->f;
4556 struct proxy_getinfo_fragment *fragment=talloc_get_type_abort(io2, struct proxy_getinfo_fragment);
4557 struct proxy_getinfo_fragments* fragments=fragment->fragments;
4558 struct proxy_getinfo_fragmentses* fragmentses=fragments->fragmentses;
4559 struct proxy_GetInfo *r=talloc_get_type_abort(fragmentses->r, struct proxy_GetInfo);
4560 int c=fragments->index;
4561 struct info_data* d=&(r->out.info_data[c]);
4562 union smb_fileinfo *io=talloc_get_type_abort(io1, union smb_fileinfo);
4564 SMB_ASSERT(c_req == NULL || c_req == fragment->c_req);
4566 if (c_req) {
4567 switch (r->in.info_tags[0].tag_type) {
4568 case TAG_TYPE_FILE_INFO:
4569 status=smb_raw_fileinfo_recv(c_req, r, io);
4570 break;
4571 case TAG_TYPE_PATH_INFO:
4572 status=smb_raw_pathinfo_recv(c_req, r, io);
4573 break;
4574 default:
4575 status=NT_STATUS_INVALID_PARAMETER;
4577 c_req=NULL;
4580 /* stop callback occuring more than once sync'ly */
4581 fragment->c_req=NULL;
4583 DEBUG(5,("%s: async callback level %x %s\n",__FUNCTION__,io->generic.level, get_friendly_nt_error_msg (status)));
4584 switch (io->generic.level) {
4585 case RAW_FILEINFO_ALL_INFO:
4586 case RAW_FILEINFO_ALL_INFORMATION:
4587 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALL_INFO\n",__FUNCTION__));
4588 d->status_RAW_FILEINFO_ALL_INFO=status;
4590 /* don't blindly overwrite BASIC_INFORMATION as we may already have it */
4591 if (1 || NT_STATUS_IS_OK(status)) {
4592 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4593 d->create_time=io->all_info.out.create_time;
4594 d->access_time=io->all_info.out.access_time;
4595 d->write_time=io->all_info.out.write_time;
4596 d->change_time=io->all_info.out.change_time;
4597 d->attrib=io->all_info.out.attrib;
4599 d->alloc_size=io->all_info.out.alloc_size;
4600 d->size=io->all_info.out.size;
4601 dump_data(5, io, sizeof(*io));
4602 d->nlink=io->all_info.out.nlink;
4603 d->delete_pending=io->all_info.out.delete_pending;
4604 d->directory=io->all_info.out.directory;
4605 d->ea_size=io->all_info.out.ea_size;
4606 /* io is sticking around for as long as d is */
4607 d->fname.s=io->all_info.out.fname.s;
4608 d->fname.count=io->all_info.out.fname.private_length;
4609 break;
4610 case RAW_FILEINFO_BASIC_INFO:
4611 case RAW_FILEINFO_BASIC_INFORMATION:
4612 DEBUG(5,("%s: async callback level RAW_FILEINFO_BASIC_INFORMATION\n",__FUNCTION__));
4613 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4614 d->create_time=io->basic_info.out.create_time;
4615 d->access_time=io->basic_info.out.access_time;
4616 d->write_time=io->basic_info.out.write_time;
4617 d->change_time=io->basic_info.out.change_time;
4618 d->attrib=io->basic_info.out.attrib;
4619 break;
4620 case RAW_FILEINFO_COMPRESSION_INFO:
4621 DEBUG(5,("%s: async callback level RAW_FILEINFO_COMPRESSION_INFO\n",__FUNCTION__));
4622 d->status_RAW_FILEINFO_COMPRESSION_INFO = status;
4623 d->compressed_size=io->compression_info.out.compressed_size;
4624 d->format=io->compression_info.out.format;
4625 d->unit_shift=io->compression_info.out.unit_shift;
4626 d->chunk_shift=io->compression_info.out.chunk_shift;
4627 d->cluster_shift=io->compression_info.out.cluster_shift;
4628 break;
4629 case RAW_FILEINFO_INTERNAL_INFORMATION:
4630 DEBUG(5,("%s: async callback level RAW_FILEINFO_INTERNAL_INFORMATION\n",__FUNCTION__));
4631 d->status_RAW_FILEINFO_INTERNAL_INFORMATION=status;
4632 d->file_id=io->internal_information.out.file_id;
4633 break;
4634 case RAW_FILEINFO_ACCESS_INFORMATION:
4635 DEBUG(5,("%s: async callback level RAW_FILEINFO_ACCESS_INFORMATION\n",__FUNCTION__));
4636 d->status_RAW_FILEINFO_ACCESS_INFORMATION=status;
4637 d->access_flags=io->access_information.out.access_flags;
4638 break;
4639 case RAW_FILEINFO_POSITION_INFORMATION:
4640 DEBUG(5,("%s: async callback level RAW_FILEINFO_POSITION_INFORMATION\n",__FUNCTION__));
4641 d->status_RAW_FILEINFO_POSITION_INFORMATION = status;
4642 d->position=io->position_information.out.position;
4643 break;
4644 case RAW_FILEINFO_MODE_INFORMATION:
4645 DEBUG(5,("%s: async callback level RAW_FILEINFO_MODE_INFORMATION\n",__FUNCTION__));
4646 d->status_RAW_FILEINFO_MODE_INFORMATION =status;
4647 d->mode=io->mode_information.out.mode;
4648 break;
4649 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
4650 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALIGNMENT_INFORMATION\n",__FUNCTION__));
4651 d->status_RAW_FILEINFO_ALIGNMENT_INFORMATION=status;
4652 d->alignment_requirement=io->alignment_information.out.alignment_requirement;
4653 break;
4654 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
4655 DEBUG(5,("%s: async callback level RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION\n",__FUNCTION__));
4656 d->status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=status;
4657 d->reparse_tag=io->attribute_tag_information.out.reparse_tag;
4658 d->reparse_attrib=io->attribute_tag_information.out.attrib;
4659 break;
4660 case RAW_FILEINFO_STREAM_INFO: {
4661 uint_t c;
4662 DEBUG(5,("%s: async callback level RAW_FILEINFO_STREAM_INFO %s,\n",__FUNCTION__));
4663 d->status_RAW_FILEINFO_STREAM_INFO=status;
4664 DEBUG(5,("Num Streams %d %s\n",io->stream_info.out.num_streams, get_friendly_nt_error_msg (status)));
4665 if (NT_STATUS_IS_OK(status)) {
4666 d->streams=talloc_zero_array(d, struct info_stream, io->stream_info.out.num_streams);
4667 if (! d->streams) {
4668 d->status_RAW_FILEINFO_STREAM_INFO=NT_STATUS_NO_MEMORY;
4669 } else {
4670 d->num_streams=io->stream_info.out.num_streams;
4671 for(c=0; c < io->stream_info.out.num_streams; c++) {
4672 d->streams[c].size = io->stream_info.out.streams[c].size;
4673 d->streams[c].alloc_size = io->stream_info.out.streams[c].alloc_size;
4674 d->streams[c].stream_name.s=io->stream_info.out.streams[c].stream_name.s;
4675 d->streams[c].stream_name.count=io->stream_info.out.streams[c].stream_name.private_length;
4679 break; }
4680 default:
4681 /* so... where's it from? */
4682 DEBUG(5,("Unexpected read level\n"));
4685 fragment->smb_fileinfo = NULL;
4686 fragment->c_req=NULL;
4688 /* are the fragments complete? */
4689 DLIST_REMOVE(fragments->fragments, fragment);
4690 /* if this index is complete, remove from fragmentses */
4691 if (! fragments->fragments) {
4692 DLIST_REMOVE(fragmentses->fragments, fragments);
4694 /* is that the end? */
4695 if (! fragmentses->fragments && fragmentses->async) {
4696 DEBUG(5,("Thats the end of the fragments, doing send\n"));
4697 /* call the send_fn */
4698 req=fragmentses->req;
4699 req->async_states->status=NT_STATUS_OK;
4700 DEBUG(5,("Fragments async response sending\n"));
4701 req->async_states->send_fn(req);
4703 DEBUG(5,("%s: Thats the end of the callback\n",__FUNCTION__));
4704 return status;
/* dispatch one prepared getinfo fragment to the downstream server (file
   or path flavour, chosen by the request's first info tag) and hook up
   the collating async handler; consumes (NULLs) io.
   Expects private, fragment and f in the caller's scope. */
#define FINISH_GETINFO_FRAGMENT(r, io) do { \
	struct smbcli_request *c_req; \
	switch (r->in.info_tags[0].tag_type) { \
	case TAG_TYPE_FILE_INFO: \
		io->all_info.in.file.fnum=r->in.info_tags[0].info_tag.fnum; \
		c_req=smb_raw_fileinfo_send(private->tree, io); \
		break; \
	case TAG_TYPE_PATH_INFO: \
		io->all_info.in.file.path=r->in.info_tags[0].info_tag.path.s; \
		c_req=smb_raw_pathinfo_send(private->tree, io); \
		break; \
	default: \
		return NT_STATUS_INVALID_PARAMETER; \
	} \
	/* Add fragment collator */ \
	fragment->c_req=c_req; \
	/* use the same stateful async handler for them all... */ \
	/* NOTE(review): the inner req=NULL deliberately shadows the outer
	   req so the chain handler is orphaned - confirm against the
	   ASYNC_RECV_TAIL_HANDLER_ORPHAN definition */ \
	{ void* req=NULL; \
		ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_proxy_getinfo, NT_STATUS_INTERNAL_ERROR); \
		ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler); \
	} \
	io=NULL; \
} while (0)
/* allocate a fragment for one getinfo LEVEL, link it into the current
   per-file group and prepare the smb_fileinfo request in io.
   Expects fragments and fragment in the caller's scope. */
#define SETUP_GETINFO_FRAGMENT(io, LEVEL) do { \
	fragment=talloc_zero(fragments, struct proxy_getinfo_fragment); \
	NT_STATUS_HAVE_NO_MEMORY(fragment); \
	DLIST_ADD(fragments->fragments, fragment); \
	fragment->fragments=fragments; \
	io=talloc_zero(fragment, union smb_fileinfo); \
	NT_STATUS_HAVE_NO_MEMORY(io); \
	io->generic.level=LEVEL; \
} while (0)
4741 static NTSTATUS rpclite_proxy_Getinfo(struct ntvfs_module_context *ntvfs,
4742 struct ntvfs_request *req, struct proxy_GetInfo *r)
4744 struct proxy_private *private = ntvfs->private_data;
4745 struct smbcli_request *c_req;
4746 union smb_fileinfo *io=NULL;
4747 NTSTATUS status;
4748 struct proxy_file* f;
4749 struct ntvfs_handle *h;
4750 struct proxy_getinfo_fragmentses *fragmentses;
4751 int c;
4753 SETUP_PID;
4755 DEBUG(5,("Opnum: proxy_Getinfo r=%p\n",r));
4757 DEBUG(5,("Convering %d handles for r=%p\n",r->in.count, r));
4758 for(c=0; c < r->in.count; c++) {
4759 if (r->in.info_tags[c].tag_type==TAG_TYPE_FILE_INFO) {
4760 RPCLITE_SETUP_THIS_FILE_HERE(r->in.info_tags[c].info_tag.fnum, f, h);
4764 if (PROXY_REMOTE_SERVER(private)) {
4765 DEBUG(5,("Remote proxy, doing transparent\n"));
4766 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
4767 /* No need to add a receive hander, the ntioctl transport adds
4768 the async chain handler which deals with the send_fn */
4769 // ADD_ASYNC_RECV_TAIL(c_req, r, NULL, f, rpclite_proxy_Getinfo_map_async_send, NT_STATUS_INTERNAL_ERROR);
4771 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4772 DEBUG(5,("%s:Sync waiting for nttrans response\n",__LOCATION__));
4773 return sync_chain_handler(c_req);
4774 } else {
4775 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
4776 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4777 return NT_STATUS_OK;
4781 /* I thought this was done for me for [in,out] */
4782 r->out.info_data=talloc_zero_array(r, struct info_data, r->in.count);
4783 NT_STATUS_HAVE_NO_MEMORY(r->out.info_data);
4784 r->out.count = r->in.count;
4785 r->out.result = NT_STATUS_OK;
4787 fragmentses=talloc_zero(req, struct proxy_getinfo_fragmentses);
4788 fragmentses->r=r;
4789 fragmentses->req=req;
4790 NT_STATUS_HAVE_NO_MEMORY(fragmentses);
4792 #warning, if C is large, we need to do a few at a time according to resource limits
4793 for (c=0; c < r->in.count; c++) {
4794 struct proxy_getinfo_fragments *fragments;
4795 struct proxy_getinfo_fragment *fragment;
4797 fragments=talloc_zero(fragmentses, struct proxy_getinfo_fragments);
4798 NT_STATUS_HAVE_NO_MEMORY(fragments);
4799 DLIST_ADD(fragmentses->fragments, fragments);
4800 fragments->fragmentses=fragmentses;
4801 fragments->index=c;
4803 /* Issue a set of getinfo requests */
4804 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALL_INFO);
4805 FINISH_GETINFO_FRAGMENT(r, io);
4807 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_BASIC_INFORMATION);
4808 FINISH_GETINFO_FRAGMENT(r, io);
4810 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_COMPRESSION_INFO);
4811 FINISH_GETINFO_FRAGMENT(r, io);
4813 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_INTERNAL_INFORMATION);
4814 FINISH_GETINFO_FRAGMENT(r, io);
4816 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ACCESS_INFORMATION);
4817 FINISH_GETINFO_FRAGMENT(r, io);
4819 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_POSITION_INFORMATION);
4820 FINISH_GETINFO_FRAGMENT(r, io);
4822 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_MODE_INFORMATION);
4823 FINISH_GETINFO_FRAGMENT(r, io);
4825 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALIGNMENT_INFORMATION);
4826 FINISH_GETINFO_FRAGMENT(r, io);
4828 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
4829 FINISH_GETINFO_FRAGMENT(r, io);
4831 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_STREAM_INFO);
4832 FINISH_GETINFO_FRAGMENT(r, io);
4835 /* If ! async, wait for all requests to finish */
4837 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4838 struct proxy_getinfo_fragments *fragments;
4839 struct proxy_getinfo_fragment *fragment;
4840 while ((fragments = fragmentses->fragments) &&
4841 (fragment = fragments->fragments) &&
4842 fragment->c_req) {
4843 sync_chain_handler(fragment->c_req);
4844 /* and because the whole fragment / fragments may be gone now... */
4845 continue;
4847 return NT_STATUS_OK; /* see individual failures */
4850 DEBUG(5,("%s: Setting async response\n",__FUNCTION__));
4851 fragmentses->async=true;
4852 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4853 return NT_STATUS_OK;
/* rpclite dispatch table: maps an rpcproxy opnum to its local handler.
   NOTE(review): the handlers take typed struct pointers but the table
   field is declared with void* r - confirm the initializers compile
   cleanly (or add casts) on stricter compilers */
#define RPC_PROXY_OPS 3
struct {
	uint32_t opnum;
	NTSTATUS (*handler)(struct ntvfs_module_context *ntvfs,
		struct ntvfs_request *req, void* r);
} rpcproxy_ops[RPC_PROXY_OPS]={
	{NDR_PROXY_READ, rpclite_proxy_Read},
	{NDR_PROXY_WRITE, rpclite_proxy_Write},
	{NDR_PROXY_GETINFO, rpclite_proxy_Getinfo}
};
4868 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
4869 back from rpc struct to ntioctl */
4870 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
4871 struct ntvfs_request *req, union smb_ioctl *io)
4873 struct proxy_private *private = ntvfs->private_data;
4874 DATA_BLOB *request;
4875 struct ndr_syntax_id* syntax_id;
4876 uint32_t opnum;
4877 const struct ndr_interface_table *table;
4878 struct ndr_pull* pull;
4879 void* r;
4880 NTSTATUS status;
4881 struct async_rpclite_send *rpclite_send;
4882 enum ndr_err_code ndr_err;
4884 SETUP_PID;
4886 /* We don't care about io->generic.in.file, ntvfs layer already proved it was valid,
4887 our operations will have the fnum embedded in them anyway */
4888 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,io->ntioctl.in.blob.length));
4889 /* unpack the NDR */
4890 request=&io->ntioctl.in.blob;
4892 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4893 NT_STATUS_HAVE_NO_MEMORY(pull);
4894 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
4895 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
4897 /* the blob is 4-aligned because it was memcpy'd */
4898 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
4899 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
4901 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
4902 status=ndr_map_error2ntstatus(ndr_err);
4903 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4904 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
4905 return status;
4908 /* now find the struct ndr_interface_table * for this syntax_id */
4909 table=ndr_table_by_uuid(&syntax_id->uuid);
4910 if (! table) ndr_table_init();
4911 table=ndr_table_by_uuid(&syntax_id->uuid);
4913 if (! table) {
4914 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
4915 return NT_STATUS_NO_GUID_TRANSLATION;
4918 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
4919 status=ndr_map_error2ntstatus(ndr_err);
4920 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4921 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
4922 return status;
4924 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
4926 DEBUG(10,("rpc request data:\n"));
4927 dump_data(10, pull->data, pull->data_size);
4929 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
4930 table->calls[opnum].name);
4931 NT_STATUS_HAVE_NO_MEMORY(r);
4933 memset(r, 0, table->calls[opnum].struct_size);
4935 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
4936 status=ndr_map_error2ntstatus(ndr_err);
4937 DEBUG(5,("%s opnum %d pulled r=%p status %s\n",__FUNCTION__,opnum,r,get_friendly_nt_error_msg (status)));
4938 NT_STATUS_NOT_OK_RETURN(status);
4940 rpclite_send=talloc(req, struct async_rpclite_send);
4941 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
4942 rpclite_send->call=&table->calls[opnum];
4943 rpclite_send->struct_ptr=r;
4944 /* need to push conversion function to convert from r to io */
4945 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
4946 NT_STATUS_NOT_OK_RETURN(status);
4948 /* Magically despatch the call based on syntax_id, table and opnum.
4949 But there is no table of handlers.... so until then*/
4950 if (0==strcasecmp(table->name,"rpcproxy")) {
4951 if (opnum >= RPC_PROXY_OPS) {
4952 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
4953 return NT_STATUS_PROCEDURE_NOT_FOUND;
4955 status = rpcproxy_ops[opnum].handler(ntvfs, req, r);
4956 } else {
4957 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
4958 GUID_string(debug_ctx(),&syntax_id->uuid)));
4959 return NT_STATUS_NO_GUID_TRANSLATION;
4962 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
4963 the handler status is in r->out.result */
4964 DEBUG(5,("%s now map_async_finish: status=%s async=%d\n", __FUNCTION__,
4965 get_friendly_nt_error_msg (status), req->async_states->state & NTVFS_ASYNC_STATE_ASYNC));
4967 return ntvfs_map_async_finish(req, status);
/* unpack the ntioctl to make some rpc_struct: receives the ntioctl
   wrapper reply (if still outstanding) and NDR-pulls the embedded rpc
   OUT parameters into r */
NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
	struct proxy_private *proxy=async->proxy;
	struct smbcli_request *c_req = async->c_req;
	void* r=io1;
	struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
	union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
	const struct ndr_interface_call *calls=info->calls;
	enum ndr_err_code ndr_err;
	DATA_BLOB *response;
	struct ndr_pull* pull;

	DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
	DEBUG(5,("%s op %s ntioctl: %s\n",
		__FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
	NT_STATUS_NOT_OK_RETURN(status);

	if (c_req) {
		DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
		status = smb_raw_ioctl_recv(c_req, io, io);
#define SESSION_INFO proxy->remote_server, proxy->remote_share
		/* This status is the ntioctl wrapper status */
		if (! NT_STATUS_IS_OK(status)) {
			DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
				__FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
			/* timeouts propagate; any other wrapper failure is
			   collapsed to a generic error */
			if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
			return NT_STATUS_UNSUCCESSFUL;
		}
	}

	dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);

	/* parse the NDR reply blob into the caller's rpc struct r */
	response=&io->ntioctl.out.blob;
	pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
	/* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */

	NT_STATUS_HAVE_NO_MEMORY(pull);

	ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
#warning can we free pull here?
	status=ndr_map_error2ntstatus(ndr_err);

	DEBUG(5,("END %s op status %s\n",
		__FUNCTION__, get_friendly_nt_error_msg(status)));
	return status;
}
5020 send an ntioctl request based on a NDR encoding.
5022 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
5023 struct smbcli_tree *tree,
5024 struct ntvfs_module_context *ntvfs,
5025 const struct ndr_interface_table *table,
5026 uint32_t opnum,
5027 void *r)
5029 struct proxy_private *private = ntvfs->private_data;
5030 struct smbcli_request * c_req;
5031 struct ndr_push *push;
5032 NTSTATUS status;
5033 DATA_BLOB request;
5034 enum ndr_err_code ndr_err;
5035 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
5038 /* setup for a ndr_push_* call, we can't free push until the message
5039 actually hits the wire */
5040 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
5041 if (!push) return NULL;
5043 /* first push interface table identifiers */
5044 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
5045 status=ndr_map_error2ntstatus(ndr_err);
5047 if (! NT_STATUS_IS_OK(status)) return NULL;
5049 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
5050 status=ndr_map_error2ntstatus(ndr_err);
5051 if (! NT_STATUS_IS_OK(status)) return NULL;
5053 if (0) {
5054 push->flags |= LIBNDR_FLAG_BIGENDIAN;
5057 /* push the structure into a blob */
5058 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
5059 status=ndr_map_error2ntstatus(ndr_err);
5060 if (!NT_STATUS_IS_OK(status)) {
5061 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
5062 nt_errstr(status)));
5063 return NULL;
5066 /* retrieve the blob */
5067 request = ndr_push_blob(push);
5069 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
5070 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
5071 io->ntioctl.in.file.fnum=private->nttrans_fnum;
5072 io->ntioctl.in.fsctl=false;
5073 io->ntioctl.in.filter=0;
5074 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
5075 io->ntioctl.in.blob=request;
5077 DEBUG(10,("smbcli_request packet:\n"));
5078 dump_data(10, request.data, request.length);
5080 c_req = smb_raw_ioctl_send(tree, io);
5082 if (! c_req) {
5083 return NULL;
5086 dump_data(10, c_req->out.data, c_req->out.data_size);
5088 { void* req=NULL;
5089 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
5090 info->io=io;
5091 info->table=table;
5092 info->opnum=opnum;
5093 info->calls=&table->calls[opnum];
5094 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
5097 return c_req;
/*
  client helpers, mapping between proxy RPC calls and smbcli_* calls.
*/
/* Synchronously drain the async_map callback chain attached to c_req.
   If the sync_chain_handler is called directly it unplugs the async handler
   which (as well as preventing loops) will also avoid req->send_fn being
   called - which is also nice!
   Returns the status produced by the last callback in the chain. */
NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
{
	struct async_info *async=NULL;
	/* the first callback which will actually receive the c_req response */
	struct async_info_map *async_map;
	NTSTATUS status=NT_STATUS_OK;
	struct async_info_map** chain;

	DEBUG(5,("%s\n",__FUNCTION__));
	if (! c_req) return NT_STATUS_UNSUCCESSFUL;

	/* If there is a handler installed, it is using async_info to chain;
	   otherwise async.private holds the chain head directly */
	if (c_req->async.fn) {
		/* not safe to talloc_free async if send_fn has been called for the request
		   against which async was allocated, so steal it (and free below) or neither */
		async = talloc_get_type_abort(c_req->async.private, struct async_info);
		talloc_steal(NULL, async);
		chain=&async->chain;
		async_map = talloc_get_type_abort(*chain, struct async_info_map);
	} else {
		chain=(struct async_info_map**)&c_req->async.private;
		async_map = talloc_get_type_abort(*chain, struct async_info_map);
	}

	/* unplug c_req->async.fn as if a callback handler calls smb_*_recv
	   in order to receive the response, smbcli_transport_finish_recv will
	   call us again and then call the c-req->async.fn
	   Perhaps we should merely call smbcli_request_receive() IF
	   c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
	   help multi-part replies... except all parts are receive before
	   callback if a handler WAS set */
	c_req->async.fn=NULL;

	/* Should we raise an error? Should we simple_recv? */
	while(async_map) {
		/* remove this one from the list before we call. We do this in case
		   some callbacks free their async_map but also so that callbacks
		   can navigate the async_map chain to add additional callbacks to
		   the end - e.g. so that tag-along reads can call send_fn after
		   the send_fn of the request they tagged along to, thus preserving
		   the async response order - which may be a waste of time? */
		DLIST_REMOVE(*chain, async_map);

		DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
		/* each callback receives the previous callback's status and may
		   transform it; a NULL fn slot is simply skipped */
		if (async_map->fn) {
			status=async_map->fn(async_map->async,
					     async_map->parms1, async_map->parms2, status);
		}
		DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
		/* Note: the callback may have added to the chain */
#warning Async_maps have a null talloc_context, it is unclear who should own them
		/* it can't be c_req as it stops us chaining more than one, maybe it
		   should be req but there isn't always a req. However sync_chain_handler
		   will always free it if called */
		DEBUG(6,("Will free async map %p\n",async_map));
#warning put me back
		talloc_free(async_map);
		DEBUG(6,("Free'd async_map\n"));
		/* re-read the chain head: the callback may have grown or emptied it */
		if (*chain)
			async_map=talloc_get_type_abort(*chain, struct async_info_map);
		else
			async_map=NULL;
		DEBUG(6,("Switch to async_map %p\n",async_map));

		/* The first callback will have read c_req, thus talloc_free'ing it,
		   so we don't let the other callbacks get hurt playing with it */
		if (async_map && async_map->async)
			async_map->async->c_req=NULL;
	}

	/* free(NULL) is a no-op when we took the else branch above */
	talloc_free(async);

	DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
	return status;
}
5184 /* If the async handler is called, then the send_fn is called */
5185 static void async_chain_handler(struct smbcli_request *c_req)
5187 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
5188 struct ntvfs_request *req = async->req;
5189 NTSTATUS status;
5191 if (c_req->state <= SMBCLI_REQUEST_RECV) {
5192 /* Looks like async handlers has been called sync'ly */
5193 smb_panic("async_chain_handler called asyncly on req %p\n");
5196 status=sync_chain_handler(c_req);
5198 /* Should we insist that a chain'd handler does this?
5199 Which makes it hard to intercept the data by adding handlers
5200 before the send_fn handler sends it... */
5201 if (req) {
5202 DEBUG(5,("%s send_fn on req=%p\n",__FUNCTION__,req));
5203 req->async_states->status=status;
5204 req->async_states->send_fn(req);
5208 /* unpack the rpc struct to make some smb_write */
5209 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
5210 void* io1, void* io2, NTSTATUS status)
5212 union smb_write* io =talloc_get_type(io1, union smb_write);
5213 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
5215 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
5216 get_friendly_nt_error_msg (status)));
5217 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
5218 NT_STATUS_NOT_OK_RETURN(status);
5220 status=r->out.result;
5221 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
5222 NT_STATUS_NOT_OK_RETURN(status);
5224 io->generic.out.remaining = r->out.remaining;
5225 io->generic.out.nwritten = r->out.nwritten;
5227 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
5228 get_friendly_nt_error_msg (status)));
5229 return status;
5232 /* upgrade from smb to NDR and then send.
5233 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
5234 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
5235 union smb_write *io,
5236 struct proxy_file *f)
5238 struct proxy_private *private = ntvfs->private_data;
5239 struct smbcli_tree *tree=private->tree;
5241 if (PROXY_REMOTE_SERVER(private)) {
5242 struct smbcli_request *c_req;
5243 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
5244 ssize_t size;
5246 if (! r) return NULL;
5248 size=io->generic.in.count;
5249 /* upgrade the write */
5250 r->in.fnum = io->generic.in.file.fnum;
5251 r->in.offset = io->generic.in.offset;
5252 r->in.count = io->generic.in.count;
5253 r->in.mode = io->generic.in.wmode;
5254 // r->in.remaining = io->generic.in.remaining;
5255 #warning remove this
5256 /* prepare to lie */
5257 r->out.nwritten=r->in.count;
5258 r->out.remaining=0;
5260 /* try to compress */
5261 #warning compress!
5262 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
5263 if (r->in.data.compress.data) {
5264 r->in.data.compress.count=size;
5265 r->in.flags = PROXY_USE_ZLIB;
5266 } else {
5267 r->in.flags = 0;
5268 /* we'll honour const, honest gov */
5269 r->in.data.generic.data=discard_const(io->generic.in.data);
5270 r->in.data.generic.count=io->generic.in.count;
5273 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5274 ntvfs,
5275 &ndr_table_rpcproxy,
5276 NDR_PROXY_WRITE, r);
5277 if (! c_req) return NULL;
5279 /* yeah, filthy abuse of f */
5280 { void* req=NULL;
5281 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
5284 return c_req;
5285 } else {
5286 return smb_raw_write_send(tree, io);
5290 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
5291 union smb_write *io,
5292 struct proxy_file *f)
5294 struct proxy_private *proxy = ntvfs->private_data;
5295 struct smbcli_tree *tree=proxy->tree;
5297 if (PROXY_REMOTE_SERVER(proxy)) {
5298 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
5299 return sync_chain_handler(c_req);
5300 } else {
5301 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
5302 return smb_raw_write_recv(c_req, io);
5306 /* unpack the rpc struct to make some smb_read response */
5307 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
5308 void* io1, void* io2, NTSTATUS status)
5310 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
5311 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
5312 struct proxy_file *f = async->f;
5313 struct proxy_private *private=async->proxy;
5315 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
5316 get_friendly_nt_error_msg(status)));
5317 NT_STATUS_NOT_OK_RETURN(status);
5319 status=r->out.result;
5320 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
5321 get_friendly_nt_error_msg(status)));
5322 NT_STATUS_NOT_OK_RETURN(status);
5324 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
5325 io->generic.out.compaction_mode = 0;
5327 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5328 /* Use the io we already setup!
5329 if out.flags & PROXY_VALIDATE, we may need to validate more in
5330 cache then r->out.nread would suggest, see io->generic.out.nread */
5331 if (r->out.flags & PROXY_VALIDATE)
5332 io->generic.out.nread=io->generic.in.maxcnt;
5333 DEBUG(5,("Using cached data: size=%lld\n",
5334 (long long) io->generic.out.nread));
5335 return status;
5338 if (r->in.flags & PROXY_VALIDATE) {
5339 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
5340 /* turn off validate on this file */
5341 //cache_handle_novalidate(f);
5342 #warning turn off validate on this file - do an nread<maxcnt later
5345 if (r->in.flags & PROXY_USE_CACHE) {
5346 DEBUG(5,("Cached data did not match\n"));
5349 io->generic.out.nread = r->out.nread;
5351 /* we may need to uncompress */
5352 if (r->out.flags & PROXY_USE_ZLIB) {
5353 ssize_t size=r->out.response.compress.count;
5354 DEBUG(5,("%s: uncompress, %lld wanted %lld or %lld\n",__LOCATION__,
5355 (long long int)size,
5356 (long long int)io->generic.in.maxcnt,
5357 (long long int)io->generic.in.mincnt));
5358 if (size > io->generic.in.mincnt) {
5359 /* we did a bulk read for the cache */
5360 uint8_t *data=talloc_size(io, io->generic.in.maxcnt);
5361 DEBUG(5,("%s: bulk uncompress to %p\n",__LOCATION__,data));
5362 if (! uncompress_block_to(data,
5363 r->out.response.compress.data, &size,
5364 io->generic.in.maxcnt) ||
5365 size != r->out.nread) {
5366 status=NT_STATUS_INVALID_USER_BUFFER;
5367 } else {
5368 DEBUG(5,("%s: uncompressed\n",__LOCATION__));
5369 /* copy as much as they can take */
5370 io->generic.out.nread=MIN(io->generic.in.mincnt, size);
5371 memcpy(io->generic.out.data, data, io->generic.out.nread);
5372 /* copy the rest to the cache */
5373 cache_handle_save(f, data,
5374 size,
5375 io->generic.in.offset);
5377 } else if (! uncompress_block_to(io->generic.out.data,
5378 r->out.response.compress.data, &size,
5379 io->generic.in.maxcnt) ||
5380 size != r->out.nread) {
5381 io->generic.out.nread=size;
5382 status=NT_STATUS_INVALID_USER_BUFFER;
5384 } else if (io->generic.out.data != r->out.response.generic.data) {
5385 //Assert(r->out.nread == r->out.generic.out.count);
5386 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
5388 if (r->out.cache_name.s && r->out.cache_name.count && f && f->cache) {
5389 int result;
5390 setenv("WAFS_CACHE_REMOTE_NAME",r->out.cache_name.s,1);
5391 setenv("WAFS_CACHE_LOCAL_NAME",f->cache->cache_name,1);
5392 setenv("WAFS_REMOTE_SERVER",private->remote_server,1);
5393 DEBUG(5,("WAFS_CACHE_REMOTE_NAME=%s [cache_name]\nWAFS_CACHE_LOCAL_NAME=%s\nWAFS_REMOTE_SERVER=%s\n\n",getenv("WAFS_CACHE_REMOTE_NAME"),getenv("WAFS_CACHE_LOCAL_NAME"),getenv("WAFS_REMOTE_SERVER")));
5394 DEBUG(5,("%s running cache transfer command: %s\n",__LOCATION__,getenv("WAFS_CACHE_REMOTE_NAME")));
5395 system(getenv("WAFS_CACHE_TRANSFER"));
5396 DEBUG(5,("%s cache transfer command result %d\n",__LOCATION__,result));
5397 // now set cache to make whole local file valid
5398 cache_validated(f->cache, cache_len(f->cache));
5401 return status;
5404 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
5405 data has been pre-read into io->generic.out.data and can be used for
5406 proxy<->proxy optimized reads */
5407 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
5408 union smb_read *io,
5409 struct proxy_file *f,
5410 struct proxy_Read *r)
5412 struct proxy_private *private = ntvfs->private_data;
5413 #warning we are using out.nread as a out-of-band parameter
5414 if (PROXY_REMOTE_SERVER(private)) {
5416 struct smbcli_request *c_req;
5417 if (! r) {
5418 r=talloc_zero(io, struct proxy_Read);
5419 if (! r) return NULL;
5420 r->in.mincnt = io->generic.in.mincnt;
5424 r->in.fnum = io->generic.in.file.fnum;
5425 r->in.read_for_execute=io->generic.in.read_for_execute;
5426 r->in.offset = io->generic.in.offset;
5427 r->in.maxcnt = io->generic.in.maxcnt;
5428 r->in.remaining = io->generic.in.remaining;
5429 r->in.flags |= PROXY_USE_ZLIB;
5430 if (! (r->in.flags & PROXY_VALIDATE) &&
5431 io->generic.out.data && io->generic.out.nread > 0) {
5432 /* maybe we should limit digest size to MIN(nread, maxcnt) to
5433 permit the caller to provider a larger nread as part of
5434 a split read */
5435 checksum_block(r->in.digest.digest, io->generic.out.data,
5436 io->generic.out.nread);
5438 if (io->generic.out.nread > r->in.maxcnt) {
5439 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
5440 } else {
5441 r->in.mincnt = io->generic.out.nread;
5442 r->in.maxcnt = io->generic.out.nread;
5443 r->in.flags |= PROXY_USE_CACHE;
5444 /* PROXY_VALIDATE will have been set by caller */
5448 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5449 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
5450 dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
5453 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5454 ntvfs,
5455 &ndr_table_rpcproxy,
5456 NDR_PROXY_READ, r);
5457 if (! c_req) return NULL;
5459 { void* req=NULL;
5460 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
5463 return c_req;
5464 } else {
5465 return smb_raw_read_send(private->tree, io);
5469 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
5470 union smb_read *io,
5471 struct proxy_file *f)
5473 struct proxy_private *proxy = ntvfs->private_data;
5474 struct smbcli_tree *tree=proxy->tree;
5476 if (PROXY_REMOTE_SERVER(proxy)) {
5477 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
5478 return sync_chain_handler(c_req);
5479 } else {
5480 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
5481 return smb_raw_read_recv(c_req, io);
5487 initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
5489 NTSTATUS ntvfs_proxy_init(void)
5491 NTSTATUS ret;
5492 struct ntvfs_ops ops;
5493 NTVFS_CURRENT_CRITICAL_SIZES(vers);
5495 ZERO_STRUCT(ops);
5497 /* fill in the name and type */
5498 ops.name = "proxy";
5499 ops.type = NTVFS_DISK;
5501 /* fill in all the operations */
5502 ops.connect = proxy_connect;
5503 ops.disconnect = proxy_disconnect;
5504 ops.unlink = proxy_unlink;
5505 ops.chkpath = proxy_chkpath;
5506 ops.qpathinfo = proxy_qpathinfo;
5507 ops.setpathinfo = proxy_setpathinfo;
5508 ops.open = proxy_open;
5509 ops.mkdir = proxy_mkdir;
5510 ops.rmdir = proxy_rmdir;
5511 ops.rename = proxy_rename;
5512 ops.copy = proxy_copy;
5513 ops.ioctl = proxy_ioctl;
5514 ops.read = proxy_read;
5515 ops.write = proxy_write;
5516 ops.seek = proxy_seek;
5517 ops.flush = proxy_flush;
5518 ops.close = proxy_close;
5519 ops.exit = proxy_exit;
5520 ops.lock = proxy_lock;
5521 ops.setfileinfo = proxy_setfileinfo;
5522 ops.qfileinfo = proxy_qfileinfo;
5523 ops.fsinfo = proxy_fsinfo;
5524 ops.lpq = proxy_lpq;
5525 ops.search_first = proxy_search_first;
5526 ops.search_next = proxy_search_next;
5527 ops.search_close = proxy_search_close;
5528 ops.trans = proxy_trans;
5529 ops.logoff = proxy_logoff;
5530 ops.async_setup = proxy_async_setup;
5531 ops.cancel = proxy_cancel;
5532 ops.notify = proxy_notify;
5533 ops.trans2 = proxy_trans2;
5535 /* register ourselves with the NTVFS subsystem. We register
5536 under the name 'proxy'. */
5537 ret = ntvfs_register(&ops, &vers);
5539 if (!NT_STATUS_IS_OK(ret)) {
5540 DEBUG(0,("Failed to register PROXY backend!\n"));
5543 return ret;