Use validate as read-ahead
source4/ntvfs/proxy/vfs_proxy.c
/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.
*/
#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx
#define __LOCATION__ (talloc_asprintf(debug_ctx(),"%s:%d %s",__FILE__,__LINE__,__FUNCTION__))
#define PROXY_NTIOCTL_MAXDATA 0x2000000

#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"
#include "librpc/gen_ndr/proxy.h"
#include "smb_server/smb_server.h"

#define fstrcmp(a,b) strcasecmp((a),(b))
#define fstrncmp(a,b,len) strncasecmp((a),(b),(len))
#define LOAD_CACHE_FILE_DATA(dest, src) do { \
	dest.create_time=src.create_time; \
	dest.access_time=src.access_time; \
	dest.write_time=src.write_time; \
	dest.change_time=src.change_time; \
	dest.attrib=src.attrib; \
	dest.alloc_size=src.alloc_size; \
	dest.size=src.size; \
	dest.file_type=src.file_type; \
	dest.ipc_state=src.ipc_state; \
	dest.is_directory=src.is_directory; \
	dest.delete_pending=0; \
} while(0)
/* taken from librpc/gen_ndr/proxy.h */
struct proxy_file_info_data {
	/* first three are from ntcreatex */
	uint16_t file_type;
	uint16_t ipc_state;
	uint8_t is_directory;
	NTSTATUS status_RAW_FILEINFO_BASIC_INFORMATION;
	uint32_t attrib; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME create_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME access_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME write_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME change_time; /* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_ALL_INFO;
	uint32_t ea_size; /* RAW_FILEINFO_ALL_INFO */
	uint64_t alloc_size; /* RAW_FILEINFO_ALL_INFO */
	uint64_t size; /* RAW_FILEINFO_ALL_INFO */
	uint32_t nlink; /* RAW_FILEINFO_ALL_INFO */
	struct sws fname; /* RAW_FILEINFO_ALL_INFO */
	uint8_t delete_pending; /* RAW_FILEINFO_ALL_INFO */
	uint8_t directory; /* RAW_FILEINFO_ALL_INFO */
	NTSTATUS status_RAW_FILEINFO_COMPRESSION_INFO;
	uint64_t compressed_size; /* RAW_FILEINFO_COMPRESSION_INFO */
	uint16_t format; /* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t unit_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t chunk_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t cluster_shift; /* RAW_FILEINFO_COMPRESSION_INFO */
	NTSTATUS status_RAW_FILEINFO_INTERNAL_INFORMATION;
	uint64_t file_id; /* RAW_FILEINFO_INTERNAL_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_ACCESS_INFORMATION;
	uint32_t access_flags; /* RAW_FILEINFO_ACCESS_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_POSITION_INFORMATION;
	uint64_t position; /* RAW_FILEINFO_POSITION_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_MODE_INFORMATION;
	uint32_t mode; /* RAW_FILEINFO_MODE_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	uint32_t alignment_requirement; /* RAW_FILEINFO_ALIGNMENT_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	uint32_t reparse_tag; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
	uint32_t reparse_attrib; /* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
	NTSTATUS status_RAW_FILEINFO_STREAM_INFO;
	uint32_t num_streams; /* RAW_FILEINFO_STREAM_INFO */
	struct info_stream *streams; /* RAW_FILEINFO_STREAM_INFO */
};
/* bit flags OR-ed into file_metadata.valid: each must be a distinct power of two */
#define valid_RAW_FILEINFO_BASIC_INFORMATION 1
#define valid_RAW_FILEINFO_ALL_INFO 2
#define valid_RAW_FILEINFO_COMPRESSION_INFO 4
#define valid_RAW_FILEINFO_INTERNAL_INFORMATION 8
#define valid_RAW_FILEINFO_STANDARD_INFO 16
#define valid_RAW_FILEINFO_ACCESS_INFORMATION 32
#define valid_RAW_FILEINFO_POSITION_INFORMATION 64
#define valid_RAW_FILEINFO_MODE_INFORMATION 128
#define valid_RAW_FILEINFO_ALIGNMENT_INFORMATION 256
#define valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION 512
#define valid_RAW_FILEINFO_STREAM_INFO 1024
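/* These flags are set as each info level is fetched and tested when a
   request is served from cache, as in proxy_cache_info() below:
       metadata->valid |= valid_RAW_FILEINFO_ALL_INFO;
       ...
       if (metadata->valid & valid_RAW_FILEINFO_ALL_INFO) { ... }
*/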
struct file_metadata {
	int count;
	int valid;
	struct proxy_file_info_data info_data;
};
struct proxy_file {
	struct proxy_file *prev, *next;
	struct proxy_private* proxy;
	uint16_t fnum;
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	/* filename might not be a char*; if it is, filename_size includes the null terminator */
	void* filename;
	int filename_size;
	int readahead_pending;
	/* *_OPLOCK_RETURN values */
	int oplock;
	/* read-only, shareable normal file open, can be cloned by similar opens */
	bool can_clone;
	/* If we have an oplock, then the file is NOT bigger than size, which lets
	   us optimize reads */
	struct file_metadata *metadata;
};
struct proxy_private;

struct search_handle {
	struct search_handle *prev, *next;
	struct proxy_private *proxy;
	struct ntvfs_handle *h;
	uint16_t handle;
	union {
		struct smb_search_id id;
		uint32_t resume_key;
	} resume_index;
	struct search_cache_item *resume_item;
	enum smb_search_level level;
	enum smb_search_data_level data_level;
	/* search cache (if any) being used */
	struct search_cache *cache;
};

struct search_cache_item {
	struct search_cache_item *prev, *next;
	enum smb_search_data_level data_level;
	struct cache_file_entry *cache;
	union smb_search_data *file;
	struct file_metadata *metadata;
};

enum search_cache_status {
	SEARCH_CACHE_INCOMPLETE,
	SEARCH_CACHE_COMPLETE,
	SEARCH_CACHE_DEAD
};
struct fdirmon;
typedef void*(fdirmon_callback_fn)(void* data, struct fdirmon* fdirmon);
//NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS)

struct fdirmon {
	struct fdirmon *prev, *next;
	struct search_cache_item *items;

	struct proxy_private *proxy;

	union smb_notify *notify_io;
	struct smbcli_request *notify_req;
	uint16_t dir_fnum;
	char* dir;
	struct fdirmon_callback {
		struct fdirmon_callback *prev, *next;
		fdirmon_callback_fn *fn;
		void* data;
	} *callbacks;
};

struct search_cache {
	struct search_cache *prev, *next;
	struct search_cache_item *items;

	struct proxy_private *proxy;
	enum search_cache_status status;

	union smb_notify *notify_io;
	struct smbcli_request *notify_req;
	uint16_t dir_fnum;
	char* dir;

	struct search_cache_key {
		enum smb_search_level level;
		enum smb_search_data_level data_level;
		uint16_t search_attrib;
		const char *pattern;
		/* these only for trans2 */
		uint16_t flags;
		uint32_t storage_type;
	} key;
};

struct search_state {
	struct search_handle *search_handle;
	void* private;
	smbcli_search_callback callback;
	struct search_cache_item *last_item;
	uint16_t count;
};

struct fs_attribute_info {
	uint32_t fs_attr;
	uint32_t max_file_component_length;
	struct smb_wire_string fs_type;
};
/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	struct proxy_file *closed_files;
	struct fdirmon *dirmons;
	struct search_cache *search_caches; /* caches of find-first data */
	struct search_handle *search_handles; /* open handles onto those caches */
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead; /* default read-ahead window size */
	int cache_readaheadblock; /* size of each read-ahead request */
	ssize_t cache_validatesize; /* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	struct fs_attribute_info *fs_attribute_info;
	int readahead_spare; /* amount of pending non-user generated requests */
	bool fake_oplock; /* useful for testing, smbclient never asks for oplock */
	bool fake_valid; /* useful for testing, pretend cached data is valid */
	uint16_t nttrans_fnum; /* we need a handle for non-proxy operations */
	bool enabled_cache_info;
	bool enabled_proxy_search;
	bool enabled_open_clone;
	bool enabled_extra_protocol;
	bool enabled_qpathinfo;
};
struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev;
	struct proxy_private *proxy;
	struct ntvfs_request *req;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	struct async_info_map *chain;
	void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
	struct async_info_map *next, *prev;
	NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
	void *parms1;
	void *parms2;
	struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};
#define SETUP_PID private->tree->session->pid = req->smbpid

#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
	RPCLITE_SETUP_THIS_FILE_HERE(r->in.fnum, f, h); \
} while (0)

#define RPCLITE_SETUP_THIS_FILE_HERE(FNUM, f, h) do { \
	if ((h = ntvfs_find_handle(private->ntvfs, req, FNUM)) && \
	    (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
		FNUM = f->fnum; \
	} else { \
		r->out.result = NT_STATUS_INVALID_HANDLE; \
		return NT_STATUS_OK; \
	} \
} while (0)
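/* Note: these rpclite helpers assume the enclosing function has private,
   req and ntvfs in scope plus an rpclite call structure r with in.fnum and
   out.result members; they translate the client's wire fnum into the
   back-end fnum via ntvfs_find_handle() below. */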
#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)
#define PROXY_SERVER "proxy:server"
#define PROXY_USER "proxy:user"
#define PROXY_PASSWORD "proxy:password"
#define PROXY_DOMAIN "proxy:domain"
#define PROXY_SHARE "proxy:share"
#define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account"
#define PROXY_MAP_GENERIC "proxy:map-generic"
#define PROXY_MAP_TRANS2 "proxy:map-trans2"

#define PROXY_CACHE_ENABLED "proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT false

#define PROXY_CACHE_READAHEAD "proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT 32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK "proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT 4096

/* in KB; proxy_connect() multiplies by 1024, so the default is 256KB */
#define PROXY_CACHE_VALIDATE_SIZE "proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256

#define PROXY_FAKE_OPLOCK "proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT false

#define PROXY_FAKE_VALID "proxy:fake-valid"
#define PROXY_FAKE_VALID_DEFAULT false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT "proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT 100

#define PROXY_USE_MACHINE_ACCT_DEFAULT false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT true
#define PROXY_MAP_TRANS2_DEFAULT true

/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	 && (strcmp("A:",(private)->tree->device)==0) \
	 && ((private)->nttrans_fnum!=0) \
	 && ((private)->enabled_extra_protocol))
/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
	struct smbcli_tree *tree,
	struct ntvfs_module_context *ntvfs,
	const struct ndr_interface_table *table,
	uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
						union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
			     union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);

struct smb_wire_string talloc_smb_wire_string_dup(void* mem_ctx, const struct smb_wire_string* string)
{
	struct smb_wire_string result;
	result.private_length=string->private_length;
	result.s=talloc_strndup(mem_ctx, string->s, string->private_length);
	DEBUG(5,("%s: %s\n",__FUNCTION__, string->s));
	return result;
}

/* dup src into dest on mem_ctx; evaluates true on allocation failure */
#define sws_dup(mem_ctx, dest, src) (\
	dest=talloc_smb_wire_string_dup(mem_ctx, &(src)), \
	(dest.s==NULL && src.s!=NULL))
/* This needs replacing with something more canonical perhaps */
static char* talloc_dirname(void* mem_ctx, const char* path) {
	const char* dir;

	if ((dir=strrchr(path,'\\'))) {
		return talloc_strndup(mem_ctx, path, (dir - path));
	} else {
		return talloc_strdup(mem_ctx,"");
	}
}
/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
	struct proxy_private *private = p_private;
	NTSTATUS status;
	struct ntvfs_handle *h = NULL;
	struct proxy_file *f;
	bool result=true;

	/* because we clone handles, there may be more than one match */
	for (f=private->files; f; f=f->next) {
		if (f->fnum != fnum) continue;
		h = f->h;

		if (level==OPLOCK_BREAK_TO_LEVEL_II) {
			f->oplock=LEVEL_II_OPLOCK_RETURN;
		} else {
			/* If we don't have an oplock, then we can't rely on the cache */
			cache_handle_stale(f);
			f->oplock=NO_OPLOCK_RETURN;
		}

		DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
		status = ntvfs_send_oplock_break(private->ntvfs, h, level);
		if (!NT_STATUS_IS_OK(status)) result=false;
	}
	if (!h) {
		DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
	}

	return result;
}
/*
  get the file handle from the client's fnum (from ntvfs/ipc/vfs_ipc.c at
  metze's suggestion)
*/
static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
					      struct ntvfs_request *req,
					      uint16_t fnum)
{
	DATA_BLOB key;
	uint16_t _fnum;

	/*
	 * the fnum is already in host byteorder
	 * but ntvfs_handle_search_by_wire_key() expects
	 * network byteorder
	 */
	SSVAL(&_fnum, 0, fnum);
	key = data_blob_const(&_fnum, 2);

	return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
}
/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, const char *sharename)
{
	NTSTATUS status;
	struct proxy_private *private;
	const char *host, *user, *pass, *domain, *remote_share;
	struct smb_composite_connect io;
	struct composite_context *creq;
	struct share_config *scfg = ntvfs->ctx->config;
	int nttrans_fnum;

	struct cli_credentials *credentials;
	bool machine_account;

	/* Here we need to determine which server to connect to.
	 * For now we use parametric options, type proxy.
	 * Later we will use security=server and auth_server.c.
	 */
	host = share_string_option(scfg, PROXY_SERVER, NULL);
	user = share_string_option(scfg, PROXY_USER, NULL);
	pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
	domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
	remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
	if (!remote_share) {
		remote_share = sharename;
	}

	machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

	private = talloc_zero(ntvfs, struct proxy_private);
	if (!private) {
		return NT_STATUS_NO_MEMORY;
	}

	ntvfs->private_data = private;

	if (!host) {
		DEBUG(1,("PROXY backend: You must supply server\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (user && pass) {
		DEBUG(5, ("PROXY backend: Using specified password\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		DEBUG(5, ("PROXY backend: Using machine account\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (req->session_info->credentials) {
		DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
		credentials = req->session_info->credentials;
	} else {
		DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* connect to the server, using the smbd event context */
	io.in.dest_host = host;
	io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
	io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
	io.in.called_name = host;
	io.in.credentials = credentials;
	io.in.fallback_to_anonymous = false;
	io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
	io.in.service = remote_share;
	io.in.service_type = "?????";
	io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
	io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
	lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
	lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

	creq = smb_composite_connect_send(&io, private,
					  lp_resolve_context(ntvfs->ctx->lp_ctx),
					  ntvfs->ctx->event_ctx);
	status = smb_composite_connect_recv(creq, private);
	NT_STATUS_NOT_OK_RETURN(status);

	private->tree = io.out.tree;

	private->transport = private->tree->session->transport;
	SETUP_PID;
	private->ntvfs = ntvfs;

	ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
	ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

	/* we need to receive oplock break requests from the server */
	smbcli_oplock_handler(private->transport, oplock_handler, private);

	private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

	private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

	private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

	if (strcmp("A:",private->tree->device)==0) {
		private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
		private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
		private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
								 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
		private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
		private->fake_valid = share_bool_option(scfg, PROXY_FAKE_VALID, PROXY_FAKE_VALID_DEFAULT);
		private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
		private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
		private->enabled_cache_info=true;
		private->enabled_proxy_search=true;
		private->enabled_open_clone=true;
		private->enabled_extra_protocol=true;
		private->enabled_qpathinfo=true;

		DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
			 remote_share, private->tree->device,private->tree->fs_type,
			 (private->cache_enabled)?"enabled":"disabled",
			 private->cache_readahead));
	} else {
		private->cache_enabled = false;
		DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
			 remote_share, private->tree->device,private->tree->fs_type));
	}

	private->remote_server = strlower_talloc(private, host);
	private->remote_share = strlower_talloc(private, remote_share);

	/* some proxy operations will not be performed on files, so open a handle
	   now that we can use for such things. We won't bother to close it on
	   shutdown, as the remote server ought to be able to close it for us
	   and we might be shutting down because the remote server went away and
	   so we don't want to delay further */
	nttrans_fnum=smbcli_nt_create_full(private->tree, "\\",
					   0,
					   SEC_FILE_READ_DATA,
					   FILE_ATTRIBUTE_NORMAL,
					   NTCREATEX_SHARE_ACCESS_MASK,
					   NTCREATEX_DISP_OPEN,
					   NTCREATEX_OPTIONS_DIRECTORY,
					   NTCREATEX_IMPERSONATION_IMPERSONATION);
	if (nttrans_fnum < 0) {
		DEBUG(5,("Could not open handle for ntioctl %d\n",nttrans_fnum));
		//return NT_STATUS_UNSUCCESSFUL;
	}
	private->nttrans_fnum=nttrans_fnum;
	DEBUG(5,("Got nttrans handle %d\n",private->nttrans_fnum));

	return NT_STATUS_OK;
}
/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct async_info *a, *an;
	struct search_cache *s;

	/* first clean up the caches, because they have a pending request that
	   they would otherwise try and clean up later and fail during talloc_free */
	for (s=private->search_caches; s; s=s->next) {
		s->notify_req=NULL;
		s->dir_fnum=65535;
	}

	/* then clean up pending requests */
	for (a=private->pending; a; a = an) {
		an = a->next;
		smbcli_request_destroy(a->c_req);
		talloc_free(a);
	}

	talloc_free(private);
	ntvfs->private_data = NULL;

	return NT_STATUS_OK;
}
/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
	DLIST_REMOVE(async->proxy->pending, async);
	return 0;
}

/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smbcli_request_simple_recv(c_req);
	talloc_free(async);
	req->async_states->send_fn(req);
}
/* assigning async_fn to a typed function pointer forces a compile-time
   signature check; hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)

/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain); \
	if (! c_req->async.private) return (error); \
} while(0)

#define ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain) do { \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (async) { \
			async->parms = io; \
			async->req = req; \
			async->f = file; \
			async->proxy = private; \
			async->c_req = c_req; \
			async->chain = achain; \
			DLIST_ADD(private->pending, async); \
			c_req->async.private = async; \
			talloc_set_destructor(async, async_info_destructor); \
		} \
	} \
	c_req->async.fn = async_fn; \
} while (0)

#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
	if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return NT_STATUS_NO_MEMORY; \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while (0)

#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
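/* These tail macros rely on names in the enclosing scope: c_req (the
   outstanding smbcli_request), req (the ntvfs_request), private (the
   proxy_private) and io (the parameter block to be received later).
   proxy_unlink() below is the canonical pattern:
       c_req = smb_raw_unlink_send(private->tree, unl);
       SIMPLE_ASYNC_TAIL;
*/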
/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   If async->c_req is NULL then an earlier link in the chain has already
   received the request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained-handler manager, static void async_chain_handler(struct
   smbcli_request *c_req), is installed the usual way and uses the io
   pointer to point to the first async_map record.
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is
   installed, and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely
   inserted into pending, but the io struct will point to
   (struct async_info_map *); chained async_info_map will be in
   c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)
793 DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
794 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL,\
795 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
796 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
797 file, file?"file":"null", file?"file":"null", #async_fn)); \
799 #define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
800 if (! creq) { \
801 DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no creq\n",__FUNCTION__)); \
802 return (error); \
803 } else { \
804 struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
805 if (! async_map) { \
806 DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map\n",__FUNCTION__)); \
807 return (error); \
809 async_map->async=talloc(async_map, struct async_info); \
810 if (! async_map->async) { \
811 DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map->async\n",__FUNCTION__)); \
812 return (error); \
814 async_map->parms1=io1; \
815 async_map->parms2=io2; \
816 async_map->fn=async_fn; \
817 async_map->async->parms = io1; \
818 async_map->async->req = req; \
819 async_map->async->f = file; \
820 async_map->async->proxy = private; \
821 async_map->async->c_req = creq; \
822 /* If async_chain_handler is installed, get the list from param */ \
823 if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
824 struct async_info *i=creq->async.private; \
825 DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
826 } else if (creq->async.fn) { \
827 /* incompatible handler installed */ \
828 DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL incompatible handler already installed\n",__FUNCTION__)); \
829 return (error); \
830 } else { \
831 DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
834 } while(0)
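/* Illustrative sketch only (compiled out): how a backend call chains a
   handler under this model and then installs the chain manager.  The names
   example_done/example_unlink are hypothetical; see proxy_qpathinfo() below
   for a real caller of ADD_ASYNC_RECV_TAIL. */
#if 0
static NTSTATUS example_done(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	/* if an earlier link in the chain already received the reply,
	   async->c_req is NULL and status carries the result */
	return status;
}

static NTSTATUS example_unlink(struct ntvfs_module_context *ntvfs,
			       struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct proxy_file *f = NULL;
	struct smbcli_request *c_req = smb_raw_unlink_send(private->tree, unl);

	/* queue the chained handler first, then install the chain manager */
	ADD_ASYNC_RECV_TAIL(c_req, unl, NULL, f, example_done, NT_STATUS_NO_MEMORY);
	ASYNC_RECV_TAIL_HANDLER(unl, async_chain_handler);
}
#endif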
static void async_dirmon_notify(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	struct fdirmon *dirmon;
	struct fdirmon_callback *callback;
	struct proxy_private *proxy = async->proxy;

	NTSTATUS status;

	DEBUG(5,("%s: dirmon %p invalidated\n",__LOCATION__, (void*)async->f));

	dirmon = talloc_get_type_abort((void*)async->f, struct fdirmon);

	status = smb_raw_changenotify_recv(c_req, req, async->parms);
	DEBUG(5,("%s: update status %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));

	dirmon->notify_req=NULL;
	DLIST_FOR_EACH(dirmon->callbacks, callback, callback->fn(callback->data, dirmon));
	/* So nothing can find it even if there are still in-use references */
	DLIST_REMOVE(proxy->dirmons, dirmon);
	/* free it */
	//talloc_steal(async, search_cache);
	talloc_free(async);
	talloc_free(dirmon);
}
struct fdirmon* get_fdirmon(struct proxy_private *proxy, const char* path, bool dir_only) {
	const char *file;
	int pathlen;

	if ((file=strrchr(path,'\\'))) {
		if (dir_only) {
			pathlen = file - path;
			file++;
		} else {
			pathlen=strlen(path);
		}
	} else {
		file = path;
		pathlen = 0;
	}

	struct fdirmon *dirmon;
	/* see if we have a matching dirmon */
	DLIST_FIND(proxy->dirmons, dirmon, (strlen(dirmon->dir) == pathlen && fstrncmp(path, dirmon->dir, pathlen)==0));
	if (! dirmon) {
		int saved_timeout;

		dirmon=talloc_zero(proxy, struct fdirmon);
		if (! dirmon) {
			goto error;
		}
		if (! (dirmon->dir=talloc_strndup(dirmon, path, pathlen))) {
			goto error;
		}
		if (! (dirmon->notify_io=talloc_zero(dirmon, union smb_notify))) {
			goto error;
		}

		dirmon->dir_fnum=smbcli_nt_create_full(proxy->tree, dirmon->dir,
						       0,
						       SEC_FILE_READ_DATA,
						       FILE_ATTRIBUTE_NORMAL,
						       NTCREATEX_SHARE_ACCESS_MASK,
						       NTCREATEX_DISP_OPEN,
						       NTCREATEX_OPTIONS_DIRECTORY,
						       NTCREATEX_IMPERSONATION_IMPERSONATION);
		if (dirmon->dir_fnum==65535) {
			goto error;
		}

		saved_timeout = proxy->transport->options.request_timeout;
		/* request notify changes on cache before we start to fill it */
		dirmon->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
		dirmon->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
		dirmon->notify_io->nttrans.in.file.fnum=dirmon->dir_fnum;
		dirmon->notify_io->nttrans.in.recursive=false;
		dirmon->notify_io->nttrans.in.buffer_size=1024;
		proxy->transport->options.request_timeout = 0;
		dirmon->notify_req=smb_raw_changenotify_send(proxy->tree, dirmon->notify_io);
		/* Make the request hang around so we can tell if it needs cancelling */
		talloc_reference(dirmon, dirmon->notify_req);
		proxy->transport->options.request_timeout = saved_timeout;

		if (! dirmon->notify_req) {
			goto error;
		} else {
			struct ntvfs_request *req=NULL;
			struct smbcli_request *c_req=dirmon->notify_req;
			union smb_notify *io=dirmon->notify_io;
			struct proxy_private *private=proxy;
			ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_dirmon_notify,
						    (void*) dirmon, c_req->async.private);
			DLIST_ADD(private->dirmons, dirmon);
		}
	}

	return dirmon;
error:
	talloc_free(dirmon);
	return NULL;
}
bool dirmon_add_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data) {
	struct fdirmon_callback *callback=talloc_zero(dirmon, struct fdirmon_callback);
	if (! callback) {
		return false;
	}
	callback->data=data;
	callback->fn=fn;
	DLIST_ADD(dirmon->callbacks, callback);
	return true;
}
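/* Typical pairing (see proxy_qpathinfo() below): obtain a monitor for the
   file's directory, then register interest in its invalidation:
       dirmon = get_fdirmon(private, path, true);
       dirmon_add_callback(dirmon, async_qpathinfo_notify, f);
*/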
/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	(io->generic.level == RAW_OPEN_NTCREATEX && \
	 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) \
	?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window)) \
	:(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
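/* cache_open dispatches on the open level: open-by-file-id opens the local
   cache by the 64-bit id embedded in fname, anything else by name via
   SMB_OPEN_IN_FILE(io).  cache_fileid_open/cache_filename_open are
   presumably declared via lib/cache/cache.h (included above). */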
struct search_cache* find_partial_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
	struct search_cache* result;
	DLIST_FIND(search_cache, result,
		   (result->key.level == search_cache_key->level) &&
		   (result->key.data_level == search_cache_key->data_level) &&
		   (result->key.search_attrib == search_cache_key->search_attrib) &&
		   (result->key.flags == search_cache_key->flags) &&
		   (result->key.storage_type == search_cache_key->storage_type) &&
		   (fstrcmp(result->key.pattern, search_cache_key->pattern) == 0));
	DEBUG(5,("%s: found %p\n",__LOCATION__,result));
	return result;
}

struct search_cache* find_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
	struct search_cache* result = find_partial_search_cache(search_cache, search_cache_key);
	if (result && result->status == SEARCH_CACHE_COMPLETE) {
		DEBUG(5,("%s: found complete %p\n",__LOCATION__,result));
		return result;
	}
	DEBUG(5,("%s: found INCOMPLETE %p\n",__LOCATION__,result));
	return NULL;
}

uint16_t smbsrv_fnum(struct ntvfs_handle *h) {
	uint16_t fnum;
	smbsrv_push_fnum((uint8_t *)&fnum, 0, h);
	return SVAL(&fnum, 0);
}
static void async_search_cache_notify(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	struct search_cache *search_cache;
	NTSTATUS status;

	DEBUG(5,("%s: search cache %p invalidated\n",__LOCATION__, (void*)async->f));

	search_cache = talloc_get_type_abort((void*)async->f, struct search_cache);

	status = smb_raw_changenotify_recv(c_req, req, async->parms);
	DEBUG(5,("%s: update status %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));

	search_cache->notify_req=NULL;
	/* dispose of the search_cache */
	search_cache->status=SEARCH_CACHE_DEAD;
	/* So nothing can find it even if there are still in-use references */
	DLIST_REMOVE(search_cache->proxy->search_caches, search_cache);
	/* free it */
	//talloc_steal(async, search_cache);
	talloc_free(async);
}
/*
  destroy a search handle
*/
static int search_handle_destructor(struct search_handle *s)
{
	DLIST_REMOVE(s->proxy->search_handles, s);
	DEBUG(5,("%s: handle destructor %p\n",__LOCATION__,s));
	return 0;
}

static int search_cache_destructor(struct search_cache *s)
{
	NTSTATUS status;

	DLIST_REMOVE(s->proxy->search_caches, s);
	DEBUG(5,("%s: cache destructor %p\n",__LOCATION__,s));
	if (s->notify_req) {
		status=smb_raw_ntcancel(s->notify_req);
		s->notify_req=NULL;
		DEBUG(5,("%s: Cancel notification %s\n",__LOCATION__,get_friendly_nt_error_msg (status)));
	}
	if (s->dir_fnum!=65535) {
		struct smbcli_request *req;
		union smb_close close_parms;
		close_parms.close.level = RAW_CLOSE_CLOSE;
		close_parms.close.in.file.fnum = s->dir_fnum;
		close_parms.close.in.write_time = 0;

		/* destructor may be called from a notify response and won't be able
		   to wait on this close response, not that we care anyway */
		req=smb_raw_close_send(s->proxy->tree, &close_parms);

		DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, s->dir_fnum, req));
		s->dir_fnum=65535;
	}
	return 0;
}
struct search_cache* new_search_cache(struct proxy_private *private, struct search_cache_key* key) {
	/* need to opendir the folder being searched so we can get a notification */
	uint16_t dir_fnum=65535;
	struct search_cache *search_cache=NULL;

	search_cache=talloc_zero(private, struct search_cache);
	DEBUG(5,("%s: Start new cache %p for %s\n",__LOCATION__, search_cache, key->pattern));
	if (! search_cache) {
		return NULL;
	}
	search_cache->proxy=private;
	if (! (search_cache->dir=talloc_dirname(search_cache, key->pattern))) {
		goto error;
	}
	if (! (search_cache->notify_io=talloc_zero(search_cache, union smb_notify))) {
		goto error;
	}
	search_cache->key=*key;
	/* make private copy of pattern now that we need it AND have something to own it */
	if (! (search_cache->key.pattern=talloc_strdup(search_cache, search_cache->key.pattern))) {
		goto error;
	}
	dir_fnum=smbcli_nt_create_full(private->tree, search_cache->dir,
				       0,
				       SEC_FILE_READ_DATA,
				       FILE_ATTRIBUTE_NORMAL,
				       NTCREATEX_SHARE_ACCESS_MASK,
				       NTCREATEX_DISP_OPEN,
				       NTCREATEX_OPTIONS_DIRECTORY,
				       NTCREATEX_IMPERSONATION_IMPERSONATION);
	DEBUG(5,("%s: %d=opendir on %s\n",__LOCATION__,dir_fnum, search_cache->dir));
	if (dir_fnum==65535) {
		goto error;
	}
	/* The destructor will close the handle */
	talloc_set_destructor(search_cache, search_cache_destructor);
	search_cache->dir_fnum=dir_fnum;
	DEBUG(5,("%s: Start new cache %p, dir_fnum %d\n",__LOCATION__, search_cache, dir_fnum));

	{
		int saved_timeout = private->transport->options.request_timeout;

		/* request notify changes on cache before we start to fill it */
		search_cache->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
		search_cache->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
		search_cache->notify_io->nttrans.in.file.fnum=dir_fnum;
		search_cache->notify_io->nttrans.in.recursive=false;
		search_cache->notify_io->nttrans.in.buffer_size=1024;
		private->transport->options.request_timeout = 0;
		search_cache->notify_req=smb_raw_changenotify_send(private->tree, search_cache->notify_io);
		/* Make the request hang around so we can tell if it needs cancelling */
		talloc_reference(search_cache, search_cache->notify_req);
		private->transport->options.request_timeout = saved_timeout;
	}

	if (! search_cache->notify_req) {
		goto error;
	} else {
		struct ntvfs_request *req=NULL;
		struct smbcli_request *c_req=search_cache->notify_req;
		union smb_notify *io=search_cache->notify_io;
		ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_search_cache_notify,
					    (void*) search_cache, c_req->async.private);
		DLIST_ADD_END(private->search_caches, search_cache, struct search_cache*);
	}

	return search_cache;
error:
	talloc_free(search_cache);
	return NULL;
}
/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}

	c_req = smb_raw_unlink_send(private->tree, unl);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
	    && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
		return proxy_rpclite(ntvfs, req, io);
	}

	SETUP_PID_AND_FILE;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_ioctl(private->tree, req, io);
	}

	c_req = smb_raw_ioctl_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_ioctl);
}
/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_chkpath *cp)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_chkpath(private->tree, cp);
	}

	c_req = smb_raw_chkpath_send(private->tree, cp);

	SIMPLE_ASYNC_TAIL;
}
static bool find_search_cache_item(const char* path,
				   struct search_cache **search_cache,
				   struct search_cache_item **item) {
	struct search_cache *s=*search_cache;
	struct search_cache_item *i=*item;
	const char* file;
	int dir_len;

	/* see if we can satisfy from a directory cache */
	DEBUG(5,("%s: Looking for pathinfo: '%s'\n",__LOCATION__,path));
	if ((file=strrchr(path,'\\'))) {
		dir_len = file - path;
		/* point past the \ */
		file++;
	} else {
		file = path;
		dir_len = 0;
	}
	/* convert empty path to . so we can find it in the cache */
	if (! *file) {
		file=".";
	}
	DEBUG(5,("%s: Path='%s' File='%s'\n",__LOCATION__,path, file));

	/* Note we don't care if the cache is partial, as long as it has a hit */
	while(s) {
		/* One day we may support all directory levels */
		DLIST_FIND(s, s, (s->key.data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
				  strlen(s->dir)==dir_len &&
				  fstrncmp(s->dir, path, dir_len)==0));
		if (! s) {
			break;
		}
		DEBUG(5,("%s: found cache %p\n",__LOCATION__,s));
		/* search s for io->generic.in.file.path */
		DLIST_FIND(s->items, i, (i->data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
					 ((i->file->both_directory_info.name.s &&
					   fstrcmp(i->file->both_directory_info.name.s, file) ==0) ||
					  (i->file->both_directory_info.short_name.s &&
					   fstrcmp(i->file->both_directory_info.short_name.s, file)==0)
					  )));
		DEBUG(5,("%s: found cache %p item %p\n",__LOCATION__,s, i));
		if (i) {
			*item=i;
			*search_cache=s;
			return true;
		}
		s=s->next;
		DEBUG(5,("%s: continue search at %p\n",__LOCATION__,s));
	}
	*item=i;
	*search_cache=s;
	return false;
}
static void proxy_set_cache_info(struct file_metadata *metadata, struct proxy_GetInfo *r) {
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION) ||
	    NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)) {
		metadata->info_data.create_time=r->out.info_data[0].create_time;
		metadata->info_data.access_time=r->out.info_data[0].access_time;
		metadata->info_data.write_time=r->out.info_data[0].write_time;
		metadata->info_data.change_time=r->out.info_data[0].change_time;
		metadata->info_data.attrib=r->out.info_data[0].attrib;
		metadata->valid|=valid_RAW_FILEINFO_BASIC_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)) {
		metadata->info_data.ea_size=r->out.info_data[0].ea_size;
		metadata->info_data.alloc_size=r->out.info_data[0].alloc_size;
		metadata->info_data.size=r->out.info_data[0].size;
		metadata->info_data.nlink=r->out.info_data[0].nlink;
		/* Are we duping this right? Would talloc_reference be ok? */
		//f->metadata->info_data.fname=
		metadata->info_data.fname.s=talloc_memdup(metadata, r->out.info_data[0].fname.s, r->out.info_data[0].fname.count);
		metadata->info_data.fname.count=r->out.info_data[0].fname.count;
		metadata->info_data.delete_pending=r->out.info_data[0].delete_pending;
		metadata->info_data.directory=r->out.info_data[0].directory;
		metadata->valid|=valid_RAW_FILEINFO_ALL_INFO | valid_RAW_FILEINFO_STANDARD_INFO;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO)) {
		metadata->info_data.compressed_size=r->out.info_data[0].compressed_size;
		metadata->info_data.format=r->out.info_data[0].format;
		metadata->info_data.unit_shift=r->out.info_data[0].unit_shift;
		metadata->info_data.chunk_shift=r->out.info_data[0].chunk_shift;
		metadata->info_data.cluster_shift=r->out.info_data[0].cluster_shift;
		metadata->valid|=valid_RAW_FILEINFO_COMPRESSION_INFO;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION)) {
		metadata->info_data.file_id=r->out.info_data[0].file_id;
		metadata->valid|=valid_RAW_FILEINFO_INTERNAL_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION)) {
		metadata->info_data.access_flags=r->out.info_data[0].access_flags;
		metadata->valid|=valid_RAW_FILEINFO_ACCESS_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION)) {
		metadata->info_data.position=r->out.info_data[0].position;
		metadata->valid|=valid_RAW_FILEINFO_POSITION_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION)) {
		metadata->info_data.mode=r->out.info_data[0].mode;
		metadata->valid|=valid_RAW_FILEINFO_MODE_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION)) {
		metadata->info_data.alignment_requirement=r->out.info_data[0].alignment_requirement;
		metadata->valid|=valid_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION)) {
		metadata->info_data.reparse_tag=r->out.info_data[0].reparse_tag;
		metadata->info_data.reparse_attrib=r->out.info_data[0].reparse_attrib;
		metadata->valid|=valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	}
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO)) {
		metadata->info_data.num_streams=r->out.info_data[0].num_streams;
		talloc_free(metadata->info_data.streams);
		metadata->info_data.streams=talloc_steal(metadata, r->out.info_data[0].streams);
		metadata->valid|=valid_RAW_FILEINFO_STREAM_INFO;
	}
}
/* satisfy a file-info request from cache */
NTSTATUS proxy_cache_info(union smb_fileinfo *io, struct file_metadata *metadata, bool *valid)
{
#define SET_VALID(FLAG) do { \
	if (valid) *valid=!!(metadata->valid & valid_ ## FLAG); \
	DEBUG(5,("%s check %s=%d (%x)\n",__FUNCTION__, #FLAG, !!(metadata->valid & valid_ ## FLAG), metadata->valid)); \
} while(0)
	/* and now serve the request from the cache */
	switch(io->generic.level) {
	case RAW_FILEINFO_BASIC_INFORMATION:
		SET_VALID(RAW_FILEINFO_BASIC_INFORMATION);
		io->basic_info.out.create_time=metadata->info_data.create_time;
		io->basic_info.out.access_time=metadata->info_data.access_time;
		io->basic_info.out.write_time=metadata->info_data.write_time;
		io->basic_info.out.change_time=metadata->info_data.change_time;
		io->basic_info.out.attrib=metadata->info_data.attrib;
		return metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION;
	case RAW_FILEINFO_ALL_INFO:
		SET_VALID(RAW_FILEINFO_ALL_INFO);
		io->all_info.out.create_time=metadata->info_data.create_time;
		io->all_info.out.access_time=metadata->info_data.access_time;
		io->all_info.out.write_time=metadata->info_data.write_time;
		io->all_info.out.change_time=metadata->info_data.change_time;
		io->all_info.out.attrib=metadata->info_data.attrib;
		io->all_info.out.alloc_size=metadata->info_data.alloc_size;
		io->all_info.out.size=metadata->info_data.size;
		io->all_info.out.directory=metadata->info_data.directory;
		io->all_info.out.nlink=metadata->info_data.nlink;
		io->all_info.out.delete_pending=metadata->info_data.delete_pending;
		io->all_info.out.fname.s=metadata->info_data.fname.s;
		io->all_info.out.fname.private_length=metadata->info_data.fname.count;
		return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
	case RAW_FILEINFO_STANDARD_INFO:
	case RAW_FILEINFO_STANDARD_INFORMATION:
		SET_VALID(RAW_FILEINFO_ALL_INFO);
		io->standard_info.out.alloc_size=metadata->info_data.alloc_size;
		io->standard_info.out.size=metadata->info_data.size;
		io->standard_info.out.directory=metadata->info_data.directory;
		io->standard_info.out.nlink=metadata->info_data.nlink; /* may be wrong */
		io->standard_info.out.delete_pending=metadata->info_data.delete_pending;
		return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
	case RAW_FILEINFO_EA_INFO:
	case RAW_FILEINFO_EA_INFORMATION:
		SET_VALID(RAW_FILEINFO_ALL_INFO);
		io->ea_info.out.ea_size=metadata->info_data.ea_size;
		return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
	case RAW_FILEINFO_COMPRESSION_INFO:
		SET_VALID(RAW_FILEINFO_COMPRESSION_INFO);
		io->compression_info.out.compressed_size=metadata->info_data.compressed_size;
		io->compression_info.out.format=metadata->info_data.format;
		io->compression_info.out.unit_shift=metadata->info_data.unit_shift;
		io->compression_info.out.chunk_shift=metadata->info_data.chunk_shift;
		io->compression_info.out.cluster_shift=metadata->info_data.cluster_shift;
		return metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO;
	case RAW_FILEINFO_INTERNAL_INFORMATION:
		SET_VALID(RAW_FILEINFO_INTERNAL_INFORMATION);
		io->internal_information.out.file_id=metadata->info_data.file_id;
		return metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION;
	case RAW_FILEINFO_ACCESS_INFORMATION:
		SET_VALID(RAW_FILEINFO_ACCESS_INFORMATION);
		io->access_information.out.access_flags=metadata->info_data.access_flags;
		return metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION;
	case RAW_FILEINFO_POSITION_INFORMATION:
		SET_VALID(RAW_FILEINFO_POSITION_INFORMATION);
		io->position_information.out.position=metadata->info_data.position;
		return metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION;
	case RAW_FILEINFO_MODE_INFORMATION:
		SET_VALID(RAW_FILEINFO_MODE_INFORMATION);
		io->mode_information.out.mode=metadata->info_data.mode;
		return metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION;
	case RAW_FILEINFO_ALIGNMENT_INFORMATION:
		SET_VALID(RAW_FILEINFO_ALIGNMENT_INFORMATION);
		io->alignment_information.out.alignment_requirement=metadata->info_data.alignment_requirement;
		return metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
		SET_VALID(RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
		io->attribute_tag_information.out.reparse_tag=metadata->info_data.reparse_tag;
		io->attribute_tag_information.out.attrib=metadata->info_data.reparse_attrib;
		return metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	case RAW_FILEINFO_STREAM_INFO:
	case RAW_FILEINFO_STREAM_INFORMATION:
		SET_VALID(RAW_FILEINFO_STREAM_INFO);
		io->stream_info.out.num_streams=metadata->info_data.num_streams;
		if (metadata->info_data.num_streams > 0) {
			int c;
			io->stream_info.out.streams = talloc_zero_array(io, struct stream_struct, metadata->info_data.num_streams);
			if (! io->stream_info.out.streams) {
				if (valid) *valid=false;
				io->stream_info.out.num_streams=0;
				return NT_STATUS_NO_MEMORY;
			}
			for (c=0; c<io->stream_info.out.num_streams; c++) {
				io->stream_info.out.streams[c].size = metadata->info_data.streams[c].size;
				io->stream_info.out.streams[c].alloc_size = metadata->info_data.streams[c].alloc_size;
				io->stream_info.out.streams[c].stream_name.s = talloc_reference(io, metadata->info_data.streams[c].stream_name.s);
				io->stream_info.out.streams[c].stream_name.private_length = metadata->info_data.streams[c].stream_name.count;
			}
		} else {
			io->stream_info.out.streams=NULL;
		}
		return metadata->info_data.status_RAW_FILEINFO_STREAM_INFO;
	default:
		DEBUG(5,("%s: Unknown request\n",__FUNCTION__));
		if (valid) *valid=false;
		return NT_STATUS_INTERNAL_ERROR;
	}
}
/*
  a handler for async qpathinfo replies
*/
static void async_qpathinfo(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}
static NTSTATUS async_proxy_qpathinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
{
	struct proxy_private *private = async->proxy;
	struct smbcli_request *c_req = async->c_req;
	struct ntvfs_request *req = async->req;
	struct proxy_file *f = talloc_get_type_abort(async->f, struct proxy_file);
	union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
	struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);

	if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
	req->async_states->status=status;

	NT_STATUS_NOT_OK_RETURN(status);

	/* populate the cache, and then fill the request from the cache */
	/* Assuming that r->in.count == 1 */
	SMB_ASSERT(r->out.count==1);
	NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);

	DEBUG(5,("%s: will set cache item=%p metadata=%p r=%p\n",__LOCATION__, f, f?f->metadata:NULL, r));
	proxy_set_cache_info(f->metadata, r);

	req->async_states->status=proxy_cache_info(io, f->metadata, NULL);

	return req->async_states->status;
}
static void async_qpathinfo_notify(void* data, struct fdirmon* dirmon) {
	struct proxy_file* file=data;

	DEBUG(5,("%s: qpathinfo cache %s destroyed\n",__LOCATION__,(char*)file->filename));
	DLIST_REMOVE(file->proxy->closed_files, file);
	talloc_free(file);
}
1474 return info on a pathname
1476 static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
1477 struct ntvfs_request *req, union smb_fileinfo *io)
1479 struct proxy_private *private = ntvfs->private_data;
1480 struct smbcli_request *c_req;
1481 struct proxy_file *f=NULL;
1482 const char* path;
1484 SETUP_PID;
1486 /* Look for closed files */
1487 if (private->enabled_qpathinfo) {
1488 int len=strlen(io->generic.in.file.path)+1;
1489 DEBUG(5,("%s: Looking for cached metadata for: %s\n",__LOCATION__,io->generic.in.file.path));
1490 DLIST_FIND(private->closed_files, f,
1491 (len==f->filename_size && fstrncmp(io->generic.in.file.path, f->filename, f->filename_size)==0));
1492 if (f) {
1493 /* stop cache going away while we are using it */
1494 talloc_reference(req, f);
1497 /* upgrade the request */
1498 switch(io->generic.level) {
1499 case RAW_FILEINFO_STANDARD_INFO:
1500 case RAW_FILEINFO_STANDARD_INFORMATION:
1501 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1502 case RAW_FILEINFO_ALL_INFO:
1503 case RAW_FILEINFO_COMPRESSION_INFO:
1504 case RAW_FILEINFO_INTERNAL_INFORMATION:
1505 case RAW_FILEINFO_ACCESS_INFORMATION:
1506 case RAW_FILEINFO_POSITION_INFORMATION:
1507 case RAW_FILEINFO_MODE_INFORMATION:
1508 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1509 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1510 case RAW_FILEINFO_STREAM_INFO:
1511 case RAW_FILEINFO_STREAM_INFORMATION:
1512 case RAW_FILEINFO_EA_INFO:
1513 case RAW_FILEINFO_EA_INFORMATION:
1514 DEBUG(5,("%s: item is %p\n",__FUNCTION__, f));
1515 if (f && f->metadata) {
1516 NTSTATUS status;
1517 bool valid;
1518 DEBUG(5,("%s: Using cached metadata %x (item=%p)\n",__FUNCTION__, f->metadata->valid, f));
1519 status=proxy_cache_info(io, f->metadata, &valid);
1520 if (valid) return status;
1521 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1523 /* construct an item to hold the cache if we need to */
1524 if (! f && private->enabled_cache_info && PROXY_REMOTE_SERVER(private) && (f=talloc_zero(private, struct proxy_file))) {
1525 struct fdirmon* dirmon;
1526 dirmon=get_fdirmon(private, io->generic.in.file.path, true);
1527 f->proxy=private;
1528 dirmon_add_callback(dirmon, async_qpathinfo_notify, f);
1530 f->filename=talloc_strdup(f, io->generic.in.file.path);
1531 f->filename_size=strlen(f->filename)+1;
1532 f->metadata=talloc_zero(f, struct file_metadata);
1533 /* should not really add unless we succeeded */
1534 DLIST_ADD(private->closed_files, f);
1537 if (f && f->metadata && private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1538 struct proxy_GetInfo *r;
1539 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1541 r=talloc_zero(req, struct proxy_GetInfo);
1542 NT_STATUS_HAVE_NO_MEMORY(r);
1544 r->in.count=1;
1545 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
NT_STATUS_HAVE_NO_MEMORY(r->in.info_tags);
1546 r->in.info_tags[0].tag_type=TAG_TYPE_PATH_INFO;
1547 /* 1+ to get the null */
1548 r->in.info_tags[0].info_tag.path.count=1+strlen(io->generic.in.file.path);
1549 r->in.info_tags[0].info_tag.path.s=io->generic.in.file.path;
1550 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1551 /* the callback handler will populate the cache and respond from the cache */
1552 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qpathinfo, NT_STATUS_INTERNAL_ERROR);
1554 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1555 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1556 return sync_chain_handler(c_req);
1557 } else {
1558 void* f=NULL;
1559 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1560 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1561 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1562 return NT_STATUS_OK;
1567 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1568 return smb_raw_pathinfo(private->tree, req, io);
1571 c_req = smb_raw_pathinfo_send(private->tree, io);
1573 ASYNC_RECV_TAIL(io, async_qpathinfo);
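/* The closed-file lookup at the top of proxy_qpathinfo, restated as a
   hypothetical helper for clarity (find_cached_closed_file is not in the
   original source; it assumes, as above, that filename_size counts the
   terminating null):

static struct proxy_file *find_cached_closed_file(struct proxy_private *private,
						  const char *path)
{
	struct proxy_file *f;
	int len=strlen(path)+1;
	DLIST_FIND(private->closed_files, f,
		   (len==f->filename_size &&
		    fstrncmp(path, f->filename, f->filename_size)==0));
	return f;
}
*/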
1577 a handler for async qfileinfo replies
1579 static void async_qfileinfo(struct smbcli_request *c_req)
1581 struct async_info *async = c_req->async.private;
1582 struct ntvfs_request *req = async->req;
1583 req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1584 talloc_free(async);
1585 req->async_states->send_fn(req);
1588 static NTSTATUS async_proxy_qfileinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1590 struct proxy_private *private = async->proxy;
1591 struct smbcli_request *c_req = async->c_req;
1592 struct ntvfs_request *req = async->req;
1593 struct proxy_file *f = async->f;
1594 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1595 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1597 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1598 req->async_states->status=status;
1600 NT_STATUS_NOT_OK_RETURN(status);
1602 /* populate the cache, and then fill the request from the cache */
1603 /* Assuming that r->in.count == 1 */
1604 SMB_ASSERT(r->out.count==1);
1605 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1607 proxy_set_cache_info(f->metadata, r);
1609 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1611 return req->async_states->status;
1615 query info on an open file
1617 static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
1618 struct ntvfs_request *req, union smb_fileinfo *io)
1620 struct proxy_private *private = ntvfs->private_data;
1621 struct smbcli_request *c_req;
1622 struct proxy_file *f;
1623 bool valid=false;
1624 NTSTATUS status;
1626 SETUP_PID;
1628 SETUP_FILE_HERE(f);
1630 /* upgrade the request */
1631 switch(io->generic.level) {
1632 case RAW_FILEINFO_STANDARD_INFO:
1633 case RAW_FILEINFO_STANDARD_INFORMATION:
1634 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1635 case RAW_FILEINFO_ALL_INFO:
1636 case RAW_FILEINFO_COMPRESSION_INFO:
1637 case RAW_FILEINFO_INTERNAL_INFORMATION:
1638 case RAW_FILEINFO_ACCESS_INFORMATION:
1639 case RAW_FILEINFO_POSITION_INFORMATION:
1640 case RAW_FILEINFO_MODE_INFORMATION:
1641 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1642 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1643 case RAW_FILEINFO_STREAM_INFO:
1644 case RAW_FILEINFO_STREAM_INFORMATION:
1645 case RAW_FILEINFO_EA_INFO:
1646 case RAW_FILEINFO_EA_INFORMATION:
1647 DEBUG(5,("%s: oplock is %d\n",__FUNCTION__, f->oplock));
1648 if (f->oplock) {
1649 DEBUG(5,("%s: %p Using cached metadata %x (fnum=%d)\n",__FUNCTION__, f, f->metadata->valid, f->fnum));
1650 status=proxy_cache_info(io, f->metadata, &valid);
1651 if (valid) return status;
1652 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1654 if (private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1655 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1656 struct proxy_GetInfo *r=talloc_zero(req, struct proxy_GetInfo);
1657 NT_STATUS_HAVE_NO_MEMORY(r);
1658 r->in.count=1;
1659 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
NT_STATUS_HAVE_NO_MEMORY(r->in.info_tags);
1660 r->in.info_tags[0].tag_type=TAG_TYPE_FILE_INFO;
1661 r->in.info_tags[0].info_tag.fnum=io->generic.in.file.fnum;
1662 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1663 /* the callback handler will populate the cache and respond from the cache */
1664 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qfileinfo, NT_STATUS_INTERNAL_ERROR);
1666 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1667 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1668 return sync_chain_handler(c_req);
1669 } else {
1670 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1671 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1672 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1673 return NT_STATUS_OK;
1678 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1679 return smb_raw_fileinfo(private->tree, req, io);
1682 c_req = smb_raw_fileinfo_send(private->tree, io);
1684 ASYNC_RECV_TAIL(io, async_qfileinfo);
1688 set info on a pathname
1690 static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
1691 struct ntvfs_request *req, union smb_setfileinfo *st)
1693 struct proxy_private *private = ntvfs->private_data;
1694 struct smbcli_request *c_req;
1696 SETUP_PID;
1698 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1699 return smb_raw_setpathinfo(private->tree, st);
1702 c_req = smb_raw_setpathinfo_send(private->tree, st);
1704 SIMPLE_ASYNC_TAIL;
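/* proxy_setpathinfo shows the passthrough shape shared with mkdir/rmdir/rename
   below. A minimal sketch of that shape (illustrative; smb_raw_op and
   smb_raw_op_send stand in for whichever raw call a backend method wraps):

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_op(private->tree, parms);     // blocking round-trip
	}
	c_req = smb_raw_op_send(private->tree, parms);       // async send
	SIMPLE_ASYNC_TAIL;                                   // recv + send_fn later
*/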
1709 a handler for async open replies
1711 static void async_open(struct smbcli_request *c_req)
1713 struct async_info *async = c_req->async.private;
1714 struct proxy_private *proxy = async->proxy;
1715 struct ntvfs_request *req = async->req;
1716 struct proxy_file *f = async->f;
1717 union smb_open *io = async->parms;
1718 union smb_handle *file;
1720 talloc_free(async);
1721 req->async_states->status = smb_raw_open_recv(c_req, req, io);
1722 SMB_OPEN_OUT_FILE(io, file);
1723 f->fnum = file->fnum;
1724 file->ntvfs = NULL;
1725 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1726 req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
1727 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1728 file->ntvfs = f->h;
1729 DLIST_ADD(proxy->files, f);
1731 f->oplock=io->generic.out.oplock_level;
1733 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1734 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1735 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1737 if (proxy->cache_enabled) {
1738 struct search_cache_item *item=NULL;
1739 struct search_cache *s=proxy->search_caches;
1740 /* If we are still monitoring the file for changes we can
1741 retain the previous cache state */
1742 /* yeah yeah what if there is more than one.... :-( */
1743 if (! (io->generic.level == RAW_OPEN_NTCREATEX &&
1744 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) &&
1745 find_search_cache_item(SMB_OPEN_IN_FILE(io), &s, &item) && item->cache) {
1746 DEBUG(5,("%s: Using cached file cache\n",__LOCATION__));
1747 f->cache=talloc_reference(f, item->cache);
1748 cache_beopen(f->cache);
1749 if (item->metadata) {
1750 *(f->metadata)=*(item->metadata);
1751 f->metadata->info_data.fname.s=talloc_strdup(f, item->metadata->info_data.fname.s);
1752 f->metadata->info_data.fname.count=item->metadata->info_data.fname.count;
1754 f->metadata->info_data.streams=talloc_zero_array(f, struct info_stream, f->metadata->info_data.num_streams);
1755 if (f->metadata->info_data.streams) {
1756 int c;
1757 for(c=0; c < f->metadata->info_data.num_streams; c++) {
1758 f->metadata->info_data.streams[c].size = item->metadata->info_data.streams[c].size;
1759 f->metadata->info_data.streams[c].alloc_size = item->metadata->info_data.streams[c].alloc_size;
1760 f->metadata->info_data.streams[c].stream_name.s= talloc_strdup(f, item->metadata->info_data.streams[c].stream_name.s);
1761 f->metadata->info_data.streams[c].stream_name.count=item->metadata->info_data.streams[c].stream_name.count;
1764 f->metadata->count=1;
1766 } else {
1767 f->cache=cache_open(proxy->cache, f, io, f->oplock, proxy->cache_readahead);
1768 if (proxy->fake_valid) {
1769 cache_handle_validated(f, cache_handle_len(f));
1771 if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
1772 if (item) {
1773 item->cache = talloc_reference(item, f->cache);
1774 item->metadata=talloc_reference(item, f->metadata);
1775 DEBUG(5,("%s: Caching file cache for later\n",__LOCATION__));
1776 } else {
1777 DEBUG(5,("%s: NOT Caching file cache for later\n",__LOCATION__));
1782 failed:
1783 req->async_states->send_fn(req);
1787 open a file
1789 static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
1790 struct ntvfs_request *req, union smb_open *io)
1792 struct proxy_private *private = ntvfs->private_data;
1793 struct smbcli_request *c_req;
1794 struct ntvfs_handle *h;
1795 struct proxy_file *f, *clone;
1796 NTSTATUS status;
1797 void *filename;
1798 int filename_size;
1799 uint16_t fnum;
1801 SETUP_PID;
1803 if (io->generic.level != RAW_OPEN_GENERIC &&
1804 private->map_generic) {
1805 return ntvfs_map_open(ntvfs, req, io);
1808 status = ntvfs_handle_new(ntvfs, req, &h);
1809 #warning should we free this handle if the open fails?
1810 NT_STATUS_NOT_OK_RETURN(status);
1812 f = talloc_zero(h, struct proxy_file);
1813 NT_STATUS_HAVE_NO_MEMORY(f);
1814 f->proxy=private;
1816 /* If the file is being opened read only and we already have a read-only
1817 handle for this file, then just clone and ref-count the handle */
1818 /* First calculate the filename key */
1819 if (io->generic.level == RAW_OPEN_NTCREATEX &&
1820 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) {
1821 filename_size=sizeof(uint64_t);
1822 filename=io->generic.in.fname;
1823 } else {
1824 filename=SMB_OPEN_IN_FILE(io);
1825 filename_size=strlen(filename)+1;
1827 f->filename=talloc_memdup(f, filename, filename_size);
1828 f->filename_size=filename_size;
1829 f->h = h;
1830 f->can_clone= (io->generic.in.access_mask & NTCREATEX_SHARE_ACCESS_MASK) == NTCREATEX_SHARE_ACCESS_READ &&
1831 (io->generic.in.impersonation == NTCREATEX_IMPERSONATION_IMPERSONATION) &&
1832 (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY) == 0 &&
1833 (io->generic.in.open_disposition != NTCREATEX_DISP_CREATE) &&
1834 (io->generic.in.open_disposition != NTCREATEX_DISP_SUPERSEDE);
1835 /* see if we have a matching open file */
1836 clone=NULL;
1837 if (f->can_clone) for (clone=private->files; clone; clone=clone->next) {
1838 if (clone->can_clone && filename_size == clone->filename_size &&
1839 memcmp(filename, clone->filename, filename_size)==0) {
1840 break;
1844 /* if clone is not null, then we found a match */
1845 if (private->enabled_open_clone && clone) {
1846 union smb_handle *file;
1848 DEBUG(5,("%s: clone handle %d\n",__FUNCTION__,clone->fnum));
1849 SMB_OPEN_OUT_FILE(io, file);
1850 f->fnum = clone->fnum;
1851 file->ntvfs = NULL;
1852 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1853 NT_STATUS_NOT_OK_RETURN(status);
1854 file->ntvfs = f->h;
1855 DLIST_ADD(private->files, f);
1856 /* but be sure to share the same metadata cache */
1857 f->metadata=talloc_reference(f, clone->metadata);
1858 f->metadata->count++;
1859 f->oplock=clone->oplock;
1860 f->cache=talloc_reference(f, clone->cache);
1861 /* We don't need to reduce the oplocks for both files if we are read-only */
1862 /* if (clone->oplock==EXCLUSIVE_OPLOCK_RETURN ||
1863 clone->oplock==BATCH_OPLOCK_RETURN) {
1864 DEBUG(5,("%s: Breaking clone oplock from %d\n",__LOCATION__, clone->oplock));
1865 clone->oplock=LEVEL_II_OPLOCK_RETURN;
1866 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_LEVEL_II);
1867 //if (!NT_STATUS_IS_OK(status)) result=false;
1868 } else if (clone->oplock==LEVEL_II_OPLOCK_RETURN) {
1869 DEBUG(5,("%s: Breaking clone oplock from %d, cache no longer valid\n",__LOCATION__, clone->oplock));
1870 cache_handle_stale(f);
1871 clone->oplock=NO_OPLOCK_RETURN;
1872 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_NONE);
1873 //if (!NT_STATUS_IS_OK(status)) result=false;
1876 f->oplock=clone->oplock;
1877 /* and fake the rest of the response struct */
1878 io->generic.out.oplock_level=f->oplock;
1879 io->generic.out.create_action=NTCREATEX_ACTION_EXISTED;
1880 io->generic.out.create_time=f->metadata->info_data.create_time;
1881 io->generic.out.access_time=f->metadata->info_data.access_time;
1882 io->generic.out.write_time=f->metadata->info_data.write_time;
1883 io->generic.out.change_time=f->metadata->info_data.change_time;
1884 io->generic.out.attrib=f->metadata->info_data.attrib;
1885 io->generic.out.alloc_size=f->metadata->info_data.alloc_size;
1886 io->generic.out.size=f->metadata->info_data.size;
1887 io->generic.out.file_type=f->metadata->info_data.file_type;
1888 io->generic.out.ipc_state=f->metadata->info_data.ipc_state;
1889 io->generic.out.is_directory=f->metadata->info_data.is_directory;
1890 /* optional return values matching SMB2 tagged
1891 values in the call */
1892 //io->generic.out.maximal_access;
1893 return NT_STATUS_OK;
1895 f->metadata=talloc_zero(f, struct file_metadata);
1896 NT_STATUS_HAVE_NO_MEMORY(f->metadata);
1897 f->metadata->count=1;
1899 /* if oplocks aren't requested, optionally override and request them */
1900 if (! (io->generic.in.flags & (OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK))
1901 && private->fake_oplock) {
1902 io->generic.in.flags |= OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK;
1905 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1906 union smb_handle *file;
1908 status = smb_raw_open(private->tree, req, io);
1909 NT_STATUS_NOT_OK_RETURN(status);
1911 SMB_OPEN_OUT_FILE(io, file);
1912 f->fnum = file->fnum;
1913 file->ntvfs = NULL;
1914 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1915 NT_STATUS_NOT_OK_RETURN(status);
1916 file->ntvfs = f->h;
1917 DLIST_ADD(private->files, f);
1919 f->oplock=io->generic.out.oplock_level;
1921 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1922 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1923 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1925 if (private->cache_enabled) {
1926 f->cache=cache_open(private->cache, f, io, f->oplock, private->cache_readahead);
1927 if (private->fake_valid) {
1928 cache_handle_validated(f, cache_handle_len(f));
1930 if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
1933 return NT_STATUS_OK;
1936 c_req = smb_raw_open_send(private->tree, io);
1938 ASYNC_RECV_TAIL_F(io, async_open, f);
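/* The clone-matching key built above is either the raw 8-byte file-id (for
   opens by file-id) or the nul-terminated path, so one memcmp covers both.
   A sketch of the match test, using a hypothetical helper name:

static bool same_open_key(const struct proxy_file *clone,
			  const void *filename, int filename_size)
{
	return clone->can_clone &&
	       filename_size == clone->filename_size &&
	       memcmp(filename, clone->filename, filename_size)==0;
}
*/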
1942 create a directory
1944 static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
1945 struct ntvfs_request *req, union smb_mkdir *md)
1947 struct proxy_private *private = ntvfs->private_data;
1948 struct smbcli_request *c_req;
1950 SETUP_PID;
1952 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1953 return smb_raw_mkdir(private->tree, md);
1956 c_req = smb_raw_mkdir_send(private->tree, md);
1958 SIMPLE_ASYNC_TAIL;
1962 remove a directory
1964 static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
1965 struct ntvfs_request *req, struct smb_rmdir *rd)
1967 struct proxy_private *private = ntvfs->private_data;
1968 struct smbcli_request *c_req;
1970 SETUP_PID;
1972 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1973 return smb_raw_rmdir(private->tree, rd);
1975 c_req = smb_raw_rmdir_send(private->tree, rd);
1977 SIMPLE_ASYNC_TAIL;
1981 rename a set of files
1983 static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
1984 struct ntvfs_request *req, union smb_rename *ren)
1986 struct proxy_private *private = ntvfs->private_data;
1987 struct smbcli_request *c_req;
1989 SETUP_PID;
1991 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1992 return smb_raw_rename(private->tree, ren);
1995 c_req = smb_raw_rename_send(private->tree, ren);
1997 SIMPLE_ASYNC_TAIL;
2001 copy a set of files
2003 static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
2004 struct ntvfs_request *req, struct smb_copy *cp)
2006 return NT_STATUS_NOT_SUPPORTED;
2009 /* we only define this separately so we can easily spot read calls in
2010 pending based on ( c_req->async.fn == async_read_handler ) */
2011 static void async_read_handler(struct smbcli_request *c_req)
2013 async_chain_handler(c_req);
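/* A sketch of the scan this enables (the same walk appears later in
   proxy_read): pending requests whose async.fn is async_read_handler are
   in-flight reads that a new request can piggy-back on.

	struct async_info *pending;
	for (pending=private->pending; pending; pending=pending->next) {
		if (pending->c_req->async.fn == async_read_handler) {
			// an in-flight read; a fragment handler can attach here
		}
	}
*/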
2016 NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2018 struct proxy_private *private = async->proxy;
2019 struct smbcli_request *c_req = async->c_req;
2020 struct proxy_file *f = async->f;
2021 union smb_read *io = async->parms;
2023 /* if request is not already received by a chained handler, read it */
2024 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2026 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2027 f->readahead_pending, private->readahead_spare));
2029 f->readahead_pending--;
2030 private->readahead_spare++;
2032 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2033 f->readahead_pending, private->readahead_spare));
2035 return status;
2039 a handler for async read replies - speculative read-aheads.
2040 It merely saves in the cache. The async chain handler will call send_fn if
2041 there is one, or if sync_chain_handler is used the send_fn is called by
2042 the ntvfs back end.
2044 NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2046 struct smbcli_request *c_req = async->c_req;
2047 struct proxy_file *f = async->f;
2048 union smb_read *io = async->parms;
2050 /* if request is not already received by a chained handler, read it */
2051 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2053 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2054 get_friendly_nt_error_msg(status)));
2056 NT_STATUS_NOT_OK_RETURN(status);
2058 /* if it was a validate read we don't need to save anything unless it failed.
2059 Until we use Proxy_read structs we can't tell, so guess */
2060 if (io->generic.out.nread == io->generic.in.maxcnt &&
2061 io->generic.in.mincnt < io->generic.in.maxcnt) {
2062 /* looks like a validate read, just move the validate pointer, the
2063 original read-request has already been satisfied from cache */
2064 DEBUG(3,("%s megavalidate suceeded, validate to %lld\n",__FUNCTION__,
2065 io->generic.in.offset + io->generic.out.nread));
2066 cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
2067 } else {
2068 DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
2069 cache_handle_save(f, io->generic.out.data,
2070 io->generic.out.nread,
2071 io->generic.in.offset);
2074 DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2075 return status;
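/* The guess above, worked through with illustrative numbers: a mega-validate
   is issued with mincnt below maxcnt, so a read with mincnt=4096 maxcnt=65536
   that returns nread=65536 is treated as a validate and only advances the
   validated pointer, while a plain read (mincnt==maxcnt) or a short read is
   saved into the cache. The same test as a hypothetical predicate:

static bool looks_like_validate_read(const union smb_read *io)
{
	return io->generic.out.nread == io->generic.in.maxcnt &&
	       io->generic.in.mincnt < io->generic.in.maxcnt;
}
*/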
2078 /* handler for fragmented reads */
2079 NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2081 struct proxy_private *private = async->proxy;
2082 struct smbcli_request *c_req = async->c_req;
2083 struct ntvfs_request *req = async->req;
2084 struct proxy_file *f = async->f;
2085 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2086 /* this is the io against which the fragment is to be applied */
2087 union smb_read *io = talloc_get_type_abort(io1, union smb_read);
2088 /* this is the io for the read that issued the callback */
2089 union smb_read *io_frag = fragment->io_frag; /* async->parms; */
2090 struct async_read_fragments* fragments=fragment->fragments;
2092 /* if request is not already received by a chained handler, read it */
2093 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
2094 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2096 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2097 get_friendly_nt_error_msg(status)));
2099 fragment->status = status;
2101 /* remove fragment from fragments */
2102 DLIST_REMOVE(fragments->fragments, fragment);
2104 #warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
2105 /* in which case we will want to collate all responses and return a valid read
2106 for the leading NT_STATUS_OK fragments */
2108 /* did this one fail, inducing a general fragments failure? */
2109 if (!NT_STATUS_IS_OK(fragment->status)) {
2110 /* preserve the status of the fragment with the smallest offset
2111 when we can work out how */
2112 if (NT_STATUS_IS_OK(fragments->status)) {
2113 fragments->status=fragment->status;
2116 cache_handle_novalidate(f);
2117 DEBUG(5,("** Devalidated proxy due to read failure\n"));
2118 } else {
2119 /* No fragments have yet failed, keep collecting responses */
2120 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2121 /* Find memcpy window, copy data from the io_frag to the io */
2122 off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
2123 /* used to use mincnt */
2124 off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
2125 off_t end_offset=MIN(io_extent, extent);
2126 /* ASSERT(start_offset <= end_offset) */
2127 /* ASSERT(start_offset <= io_extent) */
2128 if (start_offset >= io_extent) {
2129 DEBUG(3,("useless read-ahead tagged on to: %s",__LOCATION__));
2130 } else {
2131 uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
2132 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2133 /* src == dst in cases where we did not latch onto someone else's
2134 read, but are handling our own */
2135 if (src != dst)
2136 memcpy(dst, src, end_offset - start_offset);
2139 /* There should be a better way to detect, but it needs the proxy rpc struct
2140 not the smb_read struct */
2141 if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
2142 DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
2143 (long long) io_frag->generic.out.nread,
2144 (long long) io_frag->generic.in.mincnt,
2145 (long long) io_frag->generic.in.maxcnt));
2146 cache_handle_novalidate(f);
2149 /* We broke up the original read. If not enough of this sub-read has
2150 been read, and then some of the next block, it could leave holes!
2151 We will only acknowledge up to the first partial read, and treat
2152 it as a small read. If server can return NT_STATUS_OK for a partial
2153 read so can we, so we preserve the response.
2154 "enough" is all of it (maxcnt), except on the last block, when it has to
2155 be enough to fill io->generic.in.mincnt. We know it is the last block
2156 if nread is small but we could fill io->generic.in.mincnt */
2157 if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
2158 end_offset < io->generic.in.offset + io->generic.in.mincnt) {
2159 DEBUG(4,("Fragmented read only partially successful\n"));
2161 /* Shrink the master nread (or grow to this size if we are the first partial) */
2162 if (! fragments->partial ||
2163 (io->generic.in.offset + io->generic.out.nread) > extent) {
2164 io->generic.out.nread = extent - io->generic.in.offset;
2167 /* stop any further successes from extending the partial read */
2168 fragments->partial=true;
2169 } else {
2170 /* only grow the master nread if we haven't logged a partial read */
2171 if (! fragments->partial &&
2172 (io->generic.in.offset + io->generic.out.nread) < extent ) {
2173 io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
2178 /* Was it the last fragment, or do we know enough to send a response? */
2179 if (! fragments->fragments) {
2180 DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
2181 io->generic.out.nread, io->generic.in.mincnt,
2182 get_friendly_nt_error_msg(fragments->status)));
2183 if (fragments->async) {
2184 req->async_states->status=fragments->status;
2185 DEBUG(5,("Fragments async response sending\n"));
2186 #warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
2187 /* esp. as they may be attached to by other reads. Maybe attachees should be taking a reference, but how will they
2188 know the top level they need to take a reference to.. */
2189 #warning should really queue a sender here, not call it
2190 req->async_states->send_fn(req);
2191 DEBUG(5,("Async response sent\n"));
2192 } else {
2193 DEBUG(5,("Fragments SYNC return\n"));
2197 /* because a c_req may be shared by many req, chained handlers must return
2198 a status pertaining to the general validity of this specific c_req, not
2199 to their own private processing of the c_req for the benefit of their req
2200 which is returned in fragments->status
2202 return status;
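/* The copy window above, worked through with illustrative numbers: for a
   master read at offset=0 maxcnt=65536 and a fragment at offset=32768 with
   nread=16384, extent=49152, start_offset=32768, io_extent=65536 and
   end_offset=49152, so 16384 bytes land at io->generic.out.data+32768. Had
   the fragment started at 65536, start_offset >= io_extent would flag it as
   a useless read-ahead and nothing would be copied. */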
2205 /* Issue read-ahead X bytes where X is the window size calculation based on
2206 server_latency * server_session_bandwidth
2207 where latency is the idle (link) latency and bandwidth is less than or equal
2208 to the actual bandwidth available to the server.
2209 Read-ahead should honour locked areas in whatever way is necessary (who knows?)
2210 read_ahead is defined here and not in the cache engine because it requires too
2211 much knowledge of private structures
2212 */
2213 /* The concept is buggy unless we can tell the next proxy that these are
2214 read-aheads, otherwise chained proxy setups will each read-ahead of the
2215 read-ahead which can put a larger load on the final server.
2216 Also we probably need to distinguish between
2217 * cache-less read-ahead
2218 * cache-revalidating read-ahead
2219 */
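/* A back-of-envelope for that window (illustrative numbers only): with 20ms
   of idle link latency and 10MB/s of usable bandwidth the pipe holds about
   0.020 * 10485760 ~= 200KB, so the read-ahead window needs to be roughly
   that size to keep the link busy.

	// hypothetical sizing helper, not in the original source
	static size_t readahead_window(unsigned latency_ms, size_t bytes_per_sec)
	{
		return (latency_ms * bytes_per_sec) / 1000;
	}
*/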
2220 NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
2221 union smb_read *io, ssize_t as_read)
2223 struct proxy_private *private = ntvfs->private_data;
2224 struct smbcli_tree *tree = private->tree;
2225 struct cache_file_entry *cache;
2226 off_t next_position; /* this read offset+length+window */
2227 off_t end_position; /* position we read-ahead to */
2228 off_t cache_populated;
2229 off_t read_position, new_extent;
2231 if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
2232 DEBUG(5,("A\n"));
2233 if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
2234 DEBUG(5,("B\n"));
2235 cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
2236 DEBUG(5,("C\n"));
2237 /* don't read-ahead if we are in bulk validate mode */
2238 if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
2239 DEBUG(5,("D\n"));
2240 /* if we can't trust what we read-ahead anyway then don't bother although
2241 * if delta-reads are enabled we can do so in order to get something to
2242 * delta against */
2243 DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
2244 (long long int)(cache_len(cache)),
2245 (long long int)(cache->readahead_extent),
2246 (long long int)(as_read),
2247 cache->readahead_window,private->cache_readahead));
2248 if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
2249 DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
2250 cache->status));
2251 return NT_STATUS_UNSUCCESSFUL;
2254 /* as_read is the mincnt bytes of a request being made or the
2255 out.nread of completed sync requests
2256 Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
2257 then this may often NOT be the case if readahead_window < requestsize; so we will
2258 get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
2259 all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
2260 this and have failed sparse writes adjust the cache->readahead_extent back to actual size */
2262 /* predict the file pointers next position */
2263 next_position=io->generic.in.offset + as_read;
2265 /* if we know how big the file is, don't read beyond */
2266 if (f->oplock && next_position > f->metadata->info_data.size) {
2267 next_position = f->metadata->info_data.size;
2269 DEBUG(5,("Next position: %lld (%lld + %lld)\n",
2270 (long long int)next_position,
2271 (long long int)io->generic.in.offset,
2272 (long long int)as_read));
2273 /* calculate the limit of the validated or requested cache */
2274 cache_populated=MAX(cache->validated_extent, cache->readahead_extent);
2276 /* will the new read take us beyond the current extent without gaps? */
2277 if (cache_populated < io->generic.in.offset) {
2278 /* this read-ahead is a read-behind-pointer */
2279 new_extent=cache_populated;
2280 } else {
2281 new_extent=MAX(next_position, cache_populated);
2284 /* as far as we can tell new_extent is the smallest offset that doesn't
2285 have a pending read request on it. Of course if we got a short read then
2286 we will have a cache-gap which we can't handle and need to read from
2287 a shrunk readahead_extent, which we don't currently handle */
2288 read_position=new_extent;
2290 /* of course if we know how big the remote file is we should limit at that */
2291 /* we should also mark-out which read-ahead requests are pending so that we
2292 * don't repeat them while they are in-transit. */
2293 /* we can't really use next_position until we can have caches with holes
2294 UNLESS next_position < new_extent, because a next_position well before
2295 new_extent is no reason to extend it further, we only want to extend
2296 with read-aheads if we have cause to suppose the read-ahead data will
2297 be wanted, i.e. the next_position is near new_extent.
2298 So we can't justify reading beyond window+next_position, but if
2299 next_position is leaving gaps, we use new_extent instead */
2300 end_position=MIN(new_extent, next_position) + cache->readahead_window;
2301 if (f->oplock) {
2302 end_position=MIN(end_position, f->metadata->info_data.size);
2304 DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
2305 (long long int)read_position,
2306 (long long int)(next_position + cache->readahead_window),
2307 cache->readahead_window,
2308 (long long int)end_position,
2309 private->readahead_spare));
2310 /* do we even need to read? */
2311 if (! (read_position < end_position)) return NT_STATUS_OK;
2313 /* readahead_spare is for the whole session (mid/tid?) and may need sharing
2314 out over files and other tree-connects or something */
2315 while (read_position < end_position &&
2316 private->readahead_spare > 0) {
2317 struct smbcli_request *c_req = NULL;
2318 ssize_t read_remaining = end_position - read_position;
2319 ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
2320 MIN(read_remaining, private->cache_readaheadblock));
2321 void *req = NULL; /* for the ASYNC_REC_TAIL_F_ORPHAN macro */
2322 uint8_t* data;
2323 union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);
2325 if (! io_copy)
2326 return NT_STATUS_NO_MEMORY;
2328 #warning we are ignoring read_for_execute as far as the cache goes
2329 io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
2330 io_copy->generic.in.offset=read_position;
2331 io_copy->generic.in.mincnt=read_block;
2332 io_copy->generic.in.maxcnt=read_block;
2333 /* what is generic.in.remaining for? */
2334 io_copy->generic.in.remaining = MIN(65535,read_remaining);
2335 io_copy->generic.out.nread=0;
2337 #warning someone must own io_copy, tree, maybe?
2338 data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
2339 DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
2340 if (! data) {
2341 talloc_free(io_copy);
2342 return NT_STATUS_NO_MEMORY;
2344 io_copy->generic.out.data=data;
2346 /* are we able to pull anything from the cache to validate this read-ahead?
2347 NOTE: there is no point in reading ahead merely to re-validate the
2348 cache if we don't have oplocks and can't save it....
2349 ... or maybe there is if we think a read will come that can be matched
2350 up to this response while it is still on the wire */
2351 #warning so we need to distinguish between pipe-line read-ahead and revalidation
2352 if (/*(cache->status & CACHE_READ)!=0 && */
2353 cache_len(cache) >
2354 (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
2355 cache->validated_extent <
2356 (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
2357 ssize_t pre_fill;
2359 pre_fill = cache_raw_read(cache, data,
2360 io_copy->generic.in.offset,
2361 io_copy->generic.in.maxcnt);
2362 DEBUG(5,("Data read into %p %d\n",data, pre_fill));
2363 if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
2364 io_copy->generic.out.nread=pre_fill;
2365 read_block=pre_fill;
2369 c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
2371 if (c_req) {
2372 private->readahead_spare--;
2373 f->readahead_pending++;
2374 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
2375 if (cache->readahead_extent < read_position+read_block)
2376 cache->readahead_extent=read_position+read_block;
2377 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2378 /* so we can decrease read-ahead counter for this session */
2379 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
2380 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);
2382 /* Make these be owned by the async struct so they are freed when the callback ends or is cancelled */
2383 talloc_steal(c_req->async.private, c_req);
2384 talloc_steal(c_req->async.private, io_copy);
2385 read_position+=read_block;
2386 } else {
2387 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
2388 talloc_free(io_copy);
2389 break;
2393 DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
2394 return NT_STATUS_OK;
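/* The extent arithmetic above, worked through with illustrative numbers: a
   read of as_read=16384 at offset=65536 against validated/readahead extents
   of 81920 gives next_position=81920, cache_populated=81920 and so
   new_extent=81920; with readahead_window=131072 the loop reads from 81920
   up to end_position=212992 in cache_readaheadblock-sized pieces, stopping
   early if readahead_spare runs out. */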
2397 struct proxy_validate_parts_parts {
2398 struct proxy_Read* r;
2399 struct ntvfs_request *req;
2400 struct proxy_file *f;
2401 struct async_read_fragments *fragments;
2402 off_t offset;
2403 ssize_t remaining;
2404 bool complete;
2405 declare_checksum(digest);
2406 struct MD5Context context;
2409 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
2410 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
2411 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2412 struct proxy_validate_parts_parts *parts);
2414 /* this will be the new struct proxy_Read based read function, for now
2415 it just deals with non-cache-based validate against a regular server */
2416 static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
2417 struct ntvfs_request *req,
2418 struct proxy_Read *r,
2419 struct proxy_file *f)
2421 struct proxy_private *private = ntvfs->private_data;
2422 struct proxy_validate_parts_parts *parts;
2423 struct async_read_fragments *fragments;
2424 NTSTATUS status;
2426 if (!f) return NT_STATUS_INVALID_HANDLE;
2428 DEBUG(5,("%s: fnum=%d **** %lld bytes \n\n\n\n",__LOCATION__,f->fnum,(long long int)r->in.maxcnt));
2430 parts = talloc_zero(req, struct proxy_validate_parts_parts);
2431 DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
2432 NT_STATUS_HAVE_NO_MEMORY(parts);
2434 fragments = talloc_zero(parts, struct async_read_fragments);
2435 NT_STATUS_HAVE_NO_MEMORY(fragments);
2437 parts->fragments=fragments;
2439 parts->r=r;
2440 parts->f=f;
2441 parts->req=req;
2442 /* processed offset */
2443 parts->offset=r->in.offset;
2444 parts->remaining=r->in.maxcnt;
2445 fragments->async=true;
2447 MD5Init (&parts->context);
2449 /* start a read-loop which will continue in the callback until it is
2450 all done */
2451 status=proxy_validate_parts(ntvfs, parts);
2452 if (parts->complete) {
2453 /* Make sure we are not async */
2454 DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
2455 return proxy_validate_complete(parts);
2458 /* Assert if status!=NT_STATUS_OK then parts->complete==true */
2459 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
2460 DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
2461 return status;
2464 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
2466 NTSTATUS status;
2467 struct proxy_Read* r=parts->r;
2469 DEBUG(5,("%s: %d/%d bytes \n\n\n\n",__LOCATION__,r->out.nread,r->in.maxcnt));
2471 MD5Final(parts->digest, &parts->context);
2473 status = parts->fragments->status;
2474 r->out.result = status;
2475 r->out.response.generic.count=r->out.nread;
2477 DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg (status),
2478 r->out.response.generic.count));
2480 DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
2481 dump_data (5, r->in.digest.digest, sizeof(parts->digest));
2482 DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
2483 dump_data (5, parts->digest, sizeof(parts->digest));
2485 if (NT_STATUS_IS_OK(status) &&
2486 (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
2487 r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
2488 DEBUG(5,("======= VALIDATED FINE \n\n\n"));
2489 } else if (r->in.flags & PROXY_USE_ZLIB) {
2490 ssize_t size = r->out.response.generic.count;
2491 DEBUG(5,("======= VALIDATED WRONG; compress size %d \n\n\n",size));
2492 if (compress_block(r->out.response.generic.data, &size) ) {
2493 r->out.flags|=PROXY_USE_ZLIB;
2494 r->out.response.compress.count=size;
2495 r->out.response.compress.data=r->out.response.generic.data;
2496 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2497 __LOCATION__,r->out.nread,size,size*100/r->out.nread));
2501 /* assert: this must only be true if we are in a callback */
2502 if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
2503 /* we are async complete, we need to call the sendfn */
2504 parts->req->async_states->status=status;
2505 DEBUG(5,("Fragments async response sending\n"));
2507 parts->req->async_states->send_fn(parts->req);
2508 return NT_STATUS_OK;
2510 return status;
2513 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2515 struct smbcli_request *c_req = async->c_req;
2516 struct ntvfs_request *req = async->req;
2517 struct proxy_file *f = async->f;
2518 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2519 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2520 /* this is the io against which the fragment is to be applied */
2521 struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
2522 struct proxy_Read* r=parts->r;
2523 /* this is the io for the read that issued the callback */
2524 union smb_read *io_frag = fragment->io_frag;
2525 struct async_read_fragments* fragments=fragment->fragments;
2527 /* if request is not already received by a chained handler, read it */
2528 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2529 DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2530 DEBUG(5,("\n\n%s: parts=%p c_req=%p io_frag=%p read %lld\n",__LOCATION__,parts, c_req, io_frag,(long long int)io_frag->generic.out.nread));
2532 fragment->status=status;
2534 if (NT_STATUS_IS_OK(status)) {
2535 /* TODO: If we are not sequentially "next", queue until we can do it */
2536 /* log this data in r->out.generic.data */
2537 /* Find memcpy window, copy data from the io_frag to the io */
2539 /* extent is the last byte we (don't) read for this frag */
2540 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2541 /* start_offset is the file offset we first care about */
2542 off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
2543 /* Don't want to go past mincnt cos we don't have the buffer */
2544 off_t io_extent=r->in.offset + r->in.mincnt;
2545 off_t end_offset=MIN(io_extent, extent);
2547 /* ASSERT(start_offset <= end_offset) */
2548 /* ASSERT(start_offset <= io_extent) */
2549 /* Don't copy beyond buffer */
2550 if (! (start_offset >= io_extent)) {
2551 uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
2552 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2553 /* src == dst in cases where we did not latch onto someone else's
2554 read, but are handling our own */
2555 if (src != dst)
2556 memcpy(dst, src, end_offset - start_offset);
2557 r->out.nread=end_offset - r->in.offset;
2558 DEBUG(5,("%s: nread %lld ++++++++++++++++++\n", __LOCATION__,(long long int)r->out.nread));
2561 MD5Update(&parts->context, io_frag->generic.out.data,
2562 io_frag->generic.out.nread);
2564 parts->fragments->status=status;
2565 status=proxy_validate_parts(ntvfs, parts);
2566 } else {
2567 parts->fragments->status=status;
2570 DLIST_REMOVE(fragments->fragments, fragment);
2571 /* this will free the io_frag too */
2572 talloc_free(fragment);
2574 if (parts->complete || NT_STATUS_IS_ERR(status)) {
2575 /* this will call sendfn, the chain handler won't know... but
2576 should have no more handlers queued */
2577 return proxy_validate_complete(parts);
2580 return NT_STATUS_OK;
2583 /* continue a read loop, possibly from a callback */
2584 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2585 struct proxy_validate_parts_parts *parts)
2587 struct proxy_private *private = ntvfs->private_data;
2588 union smb_read *io_frag;
2589 struct async_read_fragment *fragment;
2590 struct smbcli_request *c_req = NULL;
2591 ssize_t size=private->tree->session->transport->negotiate.max_xmit \
2592 - (MIN_SMB_SIZE+32);
2594 /* Have we already read enough? */
2595 if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
2596 parts->complete=true;
2597 return NT_STATUS_OK;
2600 size=MIN(size, parts->remaining);
2602 fragment=talloc_zero(parts->fragments, struct async_read_fragment);
2603 NT_STATUS_HAVE_NO_MEMORY(fragment);
2605 io_frag = talloc_zero(fragment, union smb_read);
2606 NT_STATUS_HAVE_NO_MEMORY(io_frag);
2608 io_frag->generic.out.data = talloc_size(io_frag, size);
2609 NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);
2611 io_frag->generic.level = RAW_READ_GENERIC;
2612 io_frag->generic.in.file.fnum = parts->r->in.fnum;
2613 io_frag->generic.in.offset = parts->offset;
2614 io_frag->generic.in.mincnt = size;
2615 io_frag->generic.in.maxcnt = size;
2616 io_frag->generic.in.remaining = 0;
2617 #warning maybe true is more permissive?
2618 io_frag->generic.in.read_for_execute = false;
2620 DEBUG(5,("%s: issue part read offset=%lld, size=%lld,%lld\n",__LOCATION__,
2621 (long long int)io_frag->generic.in.offset,
2622 (long long int)io_frag->generic.in.mincnt,
2623 (long long int)io_frag->generic.in.maxcnt));
2625 //c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
2626 c_req = smb_raw_read_send(private->tree, io_frag);
2627 NT_STATUS_HAVE_NO_MEMORY(c_req);
2629 parts->offset+=size;
2630 parts->remaining-=size;
2631 fragment->c_req = c_req;
2632 fragment->io_frag = io_frag;
2633 fragment->fragments=parts->fragments;
2634 DLIST_ADD(parts->fragments->fragments, fragment);
2636 { void* req=NULL;
2637 ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
2638 ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
2641 DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__LOCATION__,parts, c_req, io_frag));
2643 return NT_STATUS_OK;
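/* Fragment sizing above, worked through (illustrative; assumes MIN_SMB_SIZE
   is 35 as in the Samba headers and a negotiated max_xmit of 61440): each
   fragment carries at most 61440-(35+32)=61373 bytes, so a 200000-byte
   validate is issued as reads of 61373, 61373, 61373 and 15881 bytes, each
   fed into the MD5 context as its callback fires. */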
2647 read from a file
2649 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
2650 struct ntvfs_request *req, union smb_read *io)
2652 struct proxy_private *private = ntvfs->private_data;
2653 struct smbcli_request *c_req;
2654 struct proxy_file *f;
2655 struct async_read_fragments *fragments=NULL;
2656 /* how much of read-from-cache is certainly valid */
2657 ssize_t valid=0;
2658 off_t offset=io->generic.in.offset+valid;
2659 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
2661 SETUP_PID;
2663 if (io->generic.level != RAW_READ_GENERIC &&
2664 private->map_generic) {
2665 return ntvfs_map_read(ntvfs, req, io);
2668 SETUP_FILE_HERE(f);
2670 DEBUG(3,("\n%s() fnum=%d offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
2671 io->generic.in.file.fnum,
2672 io->generic.in.offset,
2673 io->generic.in.mincnt,
2674 io->generic.in.maxcnt));
2676 io->generic.out.nread=0;
2678 /* if we have oplocks and know the file's size, don't even ask the server
2679 for more */
2680 if (f->oplock) {
2681 if (io->generic.in.offset >= f->metadata->info_data.size) {
2682 io->generic.in.mincnt=0;
2683 io->generic.in.maxcnt=0;
2684 io->generic.out.nread=0;
2685 DEBUG(5,("Reading beyond known length %lld; return 0\n",(long long)f->metadata->info_data.size));
2686 return NT_STATUS_OK;
2687 } else {
2688 io->generic.in.mincnt=MIN(io->generic.in.mincnt,
2689 f->metadata->info_data.size - io->generic.in.offset);
2690 io->generic.in.maxcnt=MIN(io->generic.in.maxcnt,
2691 f->metadata->info_data.size - io->generic.in.offset);
2693 DEBUG(5,("Oplock and known size, limiting read to %lld (s=%d)\n",
2694 f->metadata->info_data.size, io->generic.in.mincnt));
2698 /* attempt to read from cache. if nread becomes non-zero then we
2699 have cache to validate. Instead of returning "valid" value, cache_read
2700 should probably return an async_read_fragment structure */
2702 if (private->cache_enabled) {
2703 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
2705 if (NT_STATUS_IS_OK(status)) {
2706 /* if we read enough valid data, return it */
2707 if (valid > 0 && valid>=io->generic.in.mincnt) {
2708 /* valid will not be bigger than maxcnt */
2709 io->generic.out.nread=valid;
2710 DEBUG(1,("Read from cache offset=%d size=%d\n",
2711 (int)(io->generic.in.offset),
2712 (int)(io->generic.out.nread)) );
2713 return status;
2716 DEBUG(5,("Cache read status: %s\n",get_friendly_nt_error_msg (status)));
2719 fragments=talloc_zero(req, struct async_read_fragments);
NT_STATUS_HAVE_NO_MEMORY(fragments);
2720 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
2721 /* See if there are pending reads that would satisfy this request
2722 We have a validated read up to io->generic.out.nread. Anything between
2723 this and mincnt MUST be read, but we could first try and attach to
2724 any pending read-ahead on the same file.
2725 If those read-aheads fail we will re-issue a regular read from the
2726 callback handler and hope it hasn't taken too long. */
2728 /* offset is the extent of the file from which we still need to find
2729 matching read-requests. */
2730 offset=io->generic.in.offset+valid;
2731 /* limit is the byte beyond the last byte for which we need a request.
2732 This used to be mincnt, but is now maxcnt to cope with validate reads.
2733 Maybe we can switch back to mincnt when proxy_read struct is used
2734 instead of smb_read.
2736 limit=io->generic.in.offset+io->generic.in.maxcnt;
2738 while (offset < limit) {
2739 /* Should look for the read-ahead with offset <= in.offset+out.nread
2740 with the longest span, but there is only likely to be one anyway so
2741 just take the first */
2742 struct async_info* pending=private->pending;
2743 union smb_read *readahead_io=NULL;
2744 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
2745 while(pending) {
2746 if (pending->c_req->async.fn == async_read_handler) {
2747 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
2748 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
2750 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
2751 readahead_io->generic.in.offset <= offset &&
2752 readahead_io->generic.in.offset +
2753 readahead_io->generic.in.mincnt > offset) break;
2755 readahead_io=NULL;
2756 pending=pending->next;
2758 /* ASSERT(readahead_io == pending->c_req->async.private->parms) */
2759 if (pending && readahead_io) {
2760 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2761 fragment->fragments=fragments;
2762 fragment->io_frag=readahead_io;
2763 fragment->c_req = pending->c_req;
2764 /* we found one, so attach to it. We DO need a talloc_reference
2765 because the original send_fn might be called before ALL chained
2766 handlers, and our handler will call its own send_fn first. ugh.
2767 Maybe we need to separate reverse-mapping callbacks from data users? */
2768 /* Note: the read-ahead io is passed as io, and our req io is
2769 in io_frag->io */
2770 //talloc_reference(req, pending->req);
2771 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
2772 readahead_io->generic.in.offset,
2773 readahead_io->generic.in.mincnt));
2774 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
2775 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2776 DEBUG(5,("Attached OK\n"));
2777 #warning we don't want to return if we fail to attach, just break
2778 DLIST_ADD(fragments->fragments, fragment);
2779 /* updated offset for which we have reads */
2780 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
2781 } else {
2782 /* there are no pending reads to fill this so issue one up to
2783 the maximum supported read size. We could see when the next
2784 pending read is (if any) and only read up till there... later...
2785 Issue a fragment request for what is left, clone io.
2786 In the case that there were no fragments this will be the original read
2787 but with a cloned io struct */
2788 off_t next_offset;
2789 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
2790 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2791 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
2792 ssize_t offset_inc=offset-io_frag->generic.in.offset;
2793 /* 250 is a guess at ndr rpc overheads */
2794 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
2795 private->tree->session->transport->negotiate.max_xmit) \
2796 - (MIN_SMB_SIZE+32);
2797 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
2798 readsize=MIN(limit-offset, readsize);
2800 DEBUG(5,("Issuing direct read\n"));
2801 /* reduce the cached read (if any). nread is unsigned */
2802 if (io_frag->generic.out.nread > offset_inc) {
2803 io_frag->generic.out.nread-=offset_inc;
2804 /* don't make nread buffer look too big */
2805 if (io_frag->generic.out.nread > readsize)
2806 io_frag->generic.out.nread = readsize;
2807 } else {
2808 io_frag->generic.out.nread=0;
2810 /* adjust the data pointer so we read to the right place */
2811 io_frag->generic.out.data+=offset_inc;
2812 io_frag->generic.in.offset=offset;
2813 io_frag->generic.in.maxcnt=readsize;
2814 /* we don't mind mincnt being smaller if this is the last frag,
2815 but then we can already handle it being bigger but not reached...
2816 The spell would be:
2817 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt);
2819 io_frag->generic.in.mincnt=readsize;
2820 fragment->fragments=fragments;
2821 fragment->io_frag=io_frag;
2822 #warning attach to send_fn handler
2823 /* what if someone attaches to us? Our send_fn is called from our
2824 chained handler which will be before their handler and io will
2825 already be freed. We need to keep a reference to the io and the data
2826 but we don't know where it came from in order to take a reference.
2827 We therefore need to tackle calling send_fn AFTER all other handlers */
2829 /* Calculate next offset (in advance) */
2830 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
2832 /* if we are (going to be) the last fragment and we are in VALIDATE
2833 mode, see if we can do a bulk validate now.
2834 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
2835 don't do a validate on a receive validate read
2837 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
2838 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
2839 ssize_t length=private->cache_validatesize;
2840 declare_checksum(digest);
2842 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
2843 length, (unsigned long long) offset));
2844 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
2845 /* no point in doing it if md5'd length < current out.nread
2846 remember: out.data contains this request's cached response
2847 if validate succeeds */
2848 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
2849 /* upgrade the read, allocate the proxy_read struct here
2850 and fill in the extras, no more out-of-band stuff */
2851 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
2852 dump_data (5, digest, sizeof(digest));
2854 r=talloc_zero(io_frag, struct proxy_Read);
2855 memcpy(r->in.digest.digest, digest, sizeof(digest));
2856 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
2857 io_frag->generic.in.maxcnt = length;
2858 r->in.mincnt=io_frag->generic.in.mincnt;
2859 /* the proxy send function will calculate the checksum based on *data */
2860 } else {
2861 /* try bulk read */
2862 if (f->oplock) {
2863 DEBUG(5,("%s: *** faking bulkd read\n\n",__LOCATION__));
2864 r=talloc_zero(io_frag, struct proxy_Read);
2865 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE | PROXY_USE_ZLIB;
2866 io_frag->generic.in.maxcnt = MIN(f->metadata->info_data.size, private->cache_validatesize);
2867 r->in.mincnt=io_frag->generic.in.maxcnt;
2869 /* not enough in cache to make it worthwhile anymore */
2870 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x len=%lld\n",
2871 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0,
2872 (unsigned long long)length));
2873 //cache_handle_novalidate(f);
2874 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
2875 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
2877 } else {
2878 if (f->cache && f->cache->status & CACHE_VALIDATE) {
2879 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
2880 (long long) next_offset,
2881 (long long) limit));
2885 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
2886 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
2887 io_frag->generic.in.maxcnt));
2888 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
2889 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
2890 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
2891 fragment->c_req=c_req;
2892 DLIST_ADD(fragments->fragments, fragment);
2893 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2894 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2895 DEBUG(5,("Frag response chained\n"));
2896 /* normally we would only install the chain_handler if we wanted an async
2897 response, but since it is the async_read_fragment handler (not
2898 async_chain_handler) that calls send_fn based on fragments->async, we don't
2899 need to worry about this call completing asynchronously while we are
2900 waiting on the other attached calls. Otherwise we would not attach
2901 the async_chain_handler (via async_read_handler) because of the wait
2902 below */
2903 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
2904 void* req=NULL;
2905 /* call async_chain_handler, not the read handler, so that folk can't
2906 attach to it, until we solve the problem above */
2907 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
2909 offset = next_offset;
2911 DEBUG(5,("Next fragment\n"));
2914 /* do we still need a final fragment? Issue a read */
2916 DEBUG(5,("No frags left to read\n"));
2919 /* issue new round of read-aheads */
2920 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
2921 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
2922 DEBUG(5,("== Done Read aheads\n"));
2924 /* If we have fragments but we are not called async, we must sync-wait on them */
2925 /* did we map the entire request to pending reads? */
2926 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2927 struct async_read_fragment *fragment;
2928 DEBUG(5,("Sync waiting\n"));
2929 /* fragment gets freed during the chain_handler, so we start at
2930 the top each time */
2931 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
2932 /* Any fragments async handled while we sync-wait on one
2933 will remove themselves from the list and not get sync waited */
2934 sync_chain_handler(fragment->c_req);
2935 /* if we have a non-ok result AND we know we have all the responses
2936 up to extent, then we could quit the loop early and change the
2937 fragments->async to true so the final irrelevant responses would
2938 come async and we could send our response now - but we don't
2939 track that detail until we have cache-maps that we can use to
2940 track the responded fragments and combine responded linear extents
2941 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
2943 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
2944 return fragments->status;
2947 DEBUG(5,("Async returning\n"));
2948 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
2949 return NT_STATUS_OK;
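/* Aside (illustration only): the sync-wait loop above must restart from the
   list head after every sync_chain_handler() call, because each handler
   unlinks (and may free) its own fragment, invalidating any saved cursor.
   A minimal sketch of that pattern; 'node' and 'handle_and_unlink' are
   hypothetical stand-ins for the fragment list and chain handler: */
#if 0
struct node { struct node *prev, *next; };

static void drain(struct node **list,
                  void (*handle_and_unlink)(struct node **l, struct node *n))
{
	/* never hold a cursor across the callback: it may free any node,
	   so re-read the head each iteration until the list is empty */
	while (*list) {
		handle_and_unlink(list, *list);
	}
}
#endif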
2953 a handler to de-fragment async write replies back to one request.
2954 Can cope with out-of-order async responses by waiting for all responses
2955 in the NT_STATUS_OK case so that nwritten is properly adjusted
2957 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2959 struct smbcli_request *c_req = async->c_req;
2960 struct ntvfs_request *req = async->req;
2961 struct proxy_file *f=async->f;
2962 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
2963 /* this is the io against which the fragment is to be applied */
2964 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
2965 /* this is the io for the write that issued the callback */
2966 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
2967 struct async_write_fragments* fragments=fragment->fragments;
2968 ssize_t extent=0;
2970 /* if request is not already received by a chained handler, read it */
2971 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
2972 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
2974 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
2975 get_friendly_nt_error_msg(status)));
2977 fragment->status = status;
2979 DLIST_REMOVE(fragments->fragments, fragment);
2981 /* did this one fail? */
2982 if (! NT_STATUS_IS_OK(fragment->status)) {
2983 if (NT_STATUS_IS_OK(fragments->status)) {
2984 fragments->status=fragment->status;
2986 } else {
2987 /* No fragments have yet failed, keep collecting responses */
2988 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
2990 /* we broke up the write so it could all be written. If only some of
2991 this block has been written, and then some of the next block,
2992 it could leave unwritten holes! We will only acknowledge up to the
2993 first partial write, and let the client deal with it.
2994 If the server can return NT_STATUS_OK for a partial write, so can we */
2995 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
2996 DEBUG(4,("Fragmented write only partially successful\n"));
2998 /* Shrink the master nwritten */
2999 if ( ! fragments->partial ||
3000 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
3001 io->generic.out.nwritten = extent - io->generic.in.offset;
3003 /* stop any further successes from extending the partial write */
3004 fragments->partial=true;
3005 } else {
3006 /* only grow the master nwritten if we haven't logged a partial write */
3007 if (! fragments->partial &&
3008 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
3009 io->generic.out.nwritten = extent - io->generic.in.offset;
3014 /* if this was the last fragment, clean up */
3015 if (! fragments->fragments) {
3016 DEBUG(5,("Async write de-fragmented with %d of %d\n",
3017 io->generic.out.nwritten,
3018 io->generic.in.count));
3019 if (NT_STATUS_IS_OK(fragments->status)) {
3020 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
3021 io->generic.in.offset);
3022 if (f->metadata->info_data.size < io->generic.in.offset+io->generic.in.count) {
3023 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3026 if (fragments->async) {
3027 req->async_states->status=fragments->status;
3028 #warning it is not good freeing early if other pending requests have io allocated against this request which will now be freed
3029 req->async_states->send_fn(req);
3030 DEBUG(5,("Async response sent\n"));
3031 } else {
3032 DEBUG(5,("Fragments SYNC return\n"));
3036 return status;
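/* Aside (illustration only): the nwritten/extent bookkeeping above can be
   stated in isolation. This sketch mirrors the two rules - never acknowledge
   past the first short fragment, and only grow the acknowledgement while no
   hole has been seen - using hypothetical types in place of union smb_write: */
#if 0
struct master_write {
	unsigned long long offset;   /* offset of the original client write */
	unsigned long long nwritten; /* bytes acknowledged so far */
	int partial;                 /* a short fragment has capped the ack */
};

static void apply_fragment(struct master_write *m,
                           unsigned long long frag_offset,
                           unsigned long long frag_nwritten,
                           unsigned long long frag_count)
{
	unsigned long long extent = frag_offset + frag_nwritten;

	if (frag_nwritten != frag_count) {
		/* short write: clamp the ack so it cannot cover a hole */
		if (!m->partial || extent < m->offset + m->nwritten) {
			m->nwritten = extent - m->offset;
		}
		m->partial = 1;
	} else if (!m->partial && extent > m->offset + m->nwritten) {
		/* complete fragment: extend the ack while no hole is known */
		m->nwritten = extent - m->offset;
	}
}
#endif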
3040 a handler for async write replies
3042 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3044 struct smbcli_request *c_req = async->c_req;
3045 struct ntvfs_request *req = async->req;
3046 struct proxy_file *f=async->f;
3047 union smb_write *io=async->parms;
3049 if (c_req)
3050 status = smb_raw_write_recv(c_req, async->parms);
3052 cache_handle_save(f, io->generic.in.data,
3053 io->generic.out.nwritten,
3054 io->generic.in.offset);
3056 return status;
3060 write to a file
3062 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
3063 struct ntvfs_request *req, union smb_write *io)
3065 struct proxy_private *private = ntvfs->private_data;
3066 struct smbcli_request *c_req;
3067 struct proxy_file *f;
3069 SETUP_PID;
3071 if (io->generic.level != RAW_WRITE_GENERIC &&
3072 private->map_generic) {
3073 return ntvfs_map_write(ntvfs, req, io);
3075 SETUP_FILE_HERE(f);
3077 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
3078 #warning ERROR get rid of this
3079 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3080 NTSTATUS status;
3081 if (PROXY_REMOTE_SERVER(private)) {
3082 /* Do a proxy write */
3083 status=proxy_smb_raw_write(ntvfs, io, f);
3084 } else if (io->generic.in.count >
3085 private->tree->session->transport->negotiate.max_xmit) {
3087 /* smbcli_write can deal with large writes, which are bigger than
3088 tree->session->transport->negotiate.max_xmit */
3089 ssize_t size=smbcli_write(private->tree,
3090 io->generic.in.file.fnum,
3091 io->generic.in.wmode,
3092 io->generic.in.data,
3093 io->generic.in.offset,
3094 io->generic.in.count);
3096 if (size==io->generic.in.count || size > 0) {
3097 io->generic.out.nwritten=size;
3098 status=NT_STATUS_OK;
3099 } else {
3100 status=NT_STATUS_UNSUCCESSFUL;
3102 } else {
3103 status=smb_raw_write(private->tree, io);
3106 /* Save write in cache */
3107 if (NT_STATUS_IS_OK(status)) {
3108 cache_handle_save(f, io->generic.in.data,
3109 io->generic.out.nwritten,
3110 io->generic.in.offset);
3111 if (f->metadata->info_data.size <
3112 io->generic.in.offset+io->generic.in.count) {
3113 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3117 return status;
3120 /* smb_raw_write_send can't deal with large writes, which are bigger than
3121 tree->session->transport->negotiate.max_xmit so we have to break it up
3122 trying to preserve the async nature of the call as much as possible */
3123 if (PROXY_REMOTE_SERVER(private)) {
3124 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
3125 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3126 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3127 } else if (io->generic.in.count <=
3128 private->tree->session->transport->negotiate.max_xmit) {
3129 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
3130 c_req = smb_raw_write_send(private->tree, io);
3131 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3132 } else {
3133 ssize_t remaining = io->generic.in.count;
3134 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
3135 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
3136 int done = 0;
3137 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
3139 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
3140 __FUNCTION__, io->generic.in.count,
3141 private->tree->session->transport->negotiate.max_xmit));
3143 fragments->io = io;
3144 io->generic.out.nwritten=0;
3145 io->generic.out.remaining=0;
3147 do {
3148 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
3149 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
3150 ssize_t size = MIN(block, remaining);
3152 fragment->fragments = fragments;
3153 fragment->io_frag = io_frag;
3155 io_frag->generic.level = io->generic.level;
3156 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
3157 io_frag->generic.in.wmode = io->generic.in.wmode;
3158 io_frag->generic.in.count = size;
3159 io_frag->generic.in.offset = io->generic.in.offset + done;
3160 io_frag->generic.in.data = io->generic.in.data + done;
3162 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
3163 if (! c_req) {
3164 /* let pending requests clean-up when ready */
3165 fragments->status=NT_STATUS_UNSUCCESSFUL;
3166 talloc_steal(NULL, fragments);
3167 DEBUG(3,("Can't send request fragment\n"));
3168 return NT_STATUS_UNSUCCESSFUL;
3171 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
3172 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
3173 fragment->c_req=c_req;
3174 DLIST_ADD(fragments->fragments, fragment);
3176 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3177 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
3178 DEBUG(5,("Frag response chained\n"));
3180 remaining -= size;
3181 done += size;
3182 } while(remaining > 0);
3184 /* this strategy has the callback chain attached to each c_req, so we
3185 don't use the ASYNC_RECV_TAIL* to install a general one */
3188 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
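/* Aside (illustration only): the offset/done/remaining arithmetic of the
   fragmentation loop above, reduced to a standalone helper. 'send_frag' is a
   hypothetical stand-in for proxy_smb_raw_write_send() and 'block' for
   max_xmit minus the per-packet overhead: */
#if 0
#include <stddef.h>

static int split_write(const unsigned char *data, size_t count, size_t offset,
                       size_t block,
                       int (*send_frag)(const unsigned char *d, size_t n,
                                        size_t off))
{
	size_t done = 0;

	while (done < count) {
		size_t size = count - done;
		if (size > block) size = block;
		if (!send_frag(data + done, size, offset + done)) {
			return -1; /* fragments already in flight complete on their own */
		}
		done += size;
	}
	return 0;
}
#endif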
3192 a handler for async seek replies
3194 static void async_seek(struct smbcli_request *c_req)
3196 struct async_info *async = c_req->async.private;
3197 struct ntvfs_request *req = async->req;
3198 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
3199 talloc_free(async);
3200 req->async_states->send_fn(req);
3204 seek in a file
3206 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
3207 struct ntvfs_request *req,
3208 union smb_seek *io)
3210 struct proxy_private *private = ntvfs->private_data;
3211 struct smbcli_request *c_req;
3213 SETUP_PID_AND_FILE;
3215 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3216 return smb_raw_seek(private->tree, io);
3219 c_req = smb_raw_seek_send(private->tree, io);
3221 ASYNC_RECV_TAIL(io, async_seek);
3225 flush a file
3227 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
3228 struct ntvfs_request *req,
3229 union smb_flush *io)
3231 struct proxy_private *private = ntvfs->private_data;
3232 struct smbcli_request *c_req;
3234 SETUP_PID;
3235 switch (io->generic.level) {
3236 case RAW_FLUSH_FLUSH:
3237 SETUP_FILE;
3238 break;
3239 case RAW_FLUSH_ALL:
3240 io->generic.in.file.fnum = 0xFFFF;
3241 break;
3242 case RAW_FLUSH_SMB2:
3243 return NT_STATUS_INVALID_LEVEL;
3246 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3247 return smb_raw_flush(private->tree, io);
3250 c_req = smb_raw_flush_send(private->tree, io);
3252 SIMPLE_ASYNC_TAIL;
3256 close a file
3258 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
3259 struct ntvfs_request *req, union smb_close *io)
3261 struct proxy_private *private = ntvfs->private_data;
3262 struct smbcli_request *c_req;
3263 struct proxy_file *f;
3264 union smb_close io2;
3266 SETUP_PID;
3268 if (io->generic.level != RAW_CLOSE_GENERIC &&
3269 private->map_generic) {
3270 return ntvfs_map_close(ntvfs, req, io);
3272 SETUP_FILE_HERE(f);
3273 /* Note, we aren't freeing f, or its h, here. Should we?
3274 even if file-close fails, we'll remove it from the list,
3275 what else would we do? Maybe we should not remove until
3276 after the proxied call completes? */
3277 DLIST_REMOVE(private->files, f);
3279 /* Don't send the close on cloned handles unless we are the last one */
3280 if (f->metadata && --(f->metadata->count)) {
3281 DEBUG(5,("%s: Fake close of %d, %d left\n",__FUNCTION__,f->fnum, f->metadata->count));
3282 return NT_STATUS_OK;
3284 DEBUG(5,("%s: Real close of %d\n",__FUNCTION__, f->fnum));
3285 /* only close the cache if we aren't keeping references */
3286 //cache_close(f->cache);
3288 /* possibly samba can't do RAW_CLOSE_SEND yet */
3289 if (! (c_req = smb_raw_close_send(private->tree, io))) {
3290 if (io->generic.level == RAW_CLOSE_GENERIC) {
3291 ZERO_STRUCT(io2);
3292 io2.close.level = RAW_CLOSE_CLOSE;
3293 io2.close.in.file = io->generic.in.file;
3294 io2.close.in.write_time = io->generic.in.write_time;
3295 io = &io2;
3297 c_req = smb_raw_close_send(private->tree, io);
3298 /* destroy handle */
3299 ntvfs_handle_remove_backend_data(f->h, ntvfs);
3302 /* If it is read-only, don't bother waiting for the result */
3303 if (f->can_clone) {
3304 DEBUG(5,("%s: not waiting for close response fnum=%d\n",__FUNCTION__,f->fnum));
3305 return NT_STATUS_OK;
3308 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3309 return smbcli_request_simple_recv(c_req);
3312 SIMPLE_ASYNC_TAIL;
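/* Aside (illustration only): the clone accounting above in miniature - all
   clones of a handle share one metadata block, and only the close that drops
   the count to zero goes to the wire. Hypothetical types: */
#if 0
struct shared_meta { int count; /* open clones of one remote handle */ };

/* returns 1 when the caller must send the real close to the server */
static int close_clone(struct shared_meta *m)
{
	if (m && --m->count > 0) {
		return 0;  /* other clones remain: fake the close locally */
	}
	return 1;          /* last reference: close the remote handle */
}
#endif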
3316 exit - closing files open by the pid
3318 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
3319 struct ntvfs_request *req)
3321 struct proxy_private *private = ntvfs->private_data;
3322 struct smbcli_request *c_req;
3324 SETUP_PID;
3326 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3327 return smb_raw_exit(private->tree->session);
3330 c_req = smb_raw_exit_send(private->tree->session);
3332 SIMPLE_ASYNC_TAIL;
3336 logoff - closing files open by the user
3338 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
3339 struct ntvfs_request *req)
3341 /* we can't do this right in the proxy backend .... */
3342 return NT_STATUS_OK;
3346 setup for an async call - nothing to do yet
3348 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
3349 struct ntvfs_request *req,
3350 void *private)
3352 return NT_STATUS_OK;
3356 cancel an async call
3358 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
3359 struct ntvfs_request *req)
3361 struct proxy_private *private = ntvfs->private_data;
3362 struct async_info *a;
3364 /* find the matching request */
3365 for (a=private->pending;a;a=a->next) {
3366 if (a->req == req) {
3367 break;
3371 if (a == NULL) {
3372 return NT_STATUS_INVALID_PARAMETER;
3375 return smb_raw_ntcancel(a->c_req);
3379 lock a byte range
3381 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
3382 struct ntvfs_request *req, union smb_lock *io)
3384 struct proxy_private *private = ntvfs->private_data;
3385 struct smbcli_request *c_req;
3387 SETUP_PID;
3389 if (io->generic.level != RAW_LOCK_GENERIC &&
3390 private->map_generic) {
3391 return ntvfs_map_lock(ntvfs, req, io);
3393 SETUP_FILE;
3395 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3396 return smb_raw_lock(private->tree, io);
3399 c_req = smb_raw_lock_send(private->tree, io);
3400 SIMPLE_ASYNC_TAIL;
3404 set info on a open file
3406 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
3407 struct ntvfs_request *req,
3408 union smb_setfileinfo *io)
3410 struct proxy_private *private = ntvfs->private_data;
3411 struct smbcli_request *c_req;
3413 SETUP_PID_AND_FILE;
3415 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3416 return smb_raw_setfileinfo(private->tree, io);
3418 c_req = smb_raw_setfileinfo_send(private->tree, io);
3420 SIMPLE_ASYNC_TAIL;
3425 a handler for async fsinfo replies
3427 static void async_fsinfo(struct smbcli_request *c_req)
3429 struct async_info *async = c_req->async.private;
3430 struct ntvfs_request *req = async->req;
3431 union smb_fsinfo *fs = async->parms;
3432 struct proxy_private *private = async->proxy;
3434 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, fs);
3436 if (NT_STATUS_IS_OK(req->async_states->status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3437 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3438 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3439 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3440 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3441 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3442 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3446 talloc_free(async);
3447 req->async_states->send_fn(req);
3451 return filesystem space info
3453 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
3454 struct ntvfs_request *req, union smb_fsinfo *fs)
3456 struct proxy_private *private = ntvfs->private_data;
3457 struct smbcli_request *c_req;
3459 SETUP_PID;
3461 DEBUG(5,("%s: level %x\n",__LOCATION__,fs->generic.level));
3462 /* this value is easy to cache */
3463 if ((fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3464 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO) &&
3465 private->fs_attribute_info) {
3466 DEBUG(5,("%s: using cached fsinfo\n",__LOCATION__));
3467 fs->attribute_info.out.fs_attr=private->fs_attribute_info->fs_attr;
3468 fs->attribute_info.out.max_file_component_length=private->fs_attribute_info->max_file_component_length;
3469 fs->attribute_info.out.fs_type=talloc_smb_wire_string_dup(req, &(private->fs_attribute_info->fs_type));
3470 return NT_STATUS_OK;
3473 /* QFS Proxy */
3474 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
3475 fs->proxy_info.out.major_version=1;
3476 fs->proxy_info.out.minor_version=0;
3477 fs->proxy_info.out.capability=0;
3478 return NT_STATUS_OK;
3481 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3482 NTSTATUS status = smb_raw_fsinfo(private->tree, req, fs);
3483 if (NT_STATUS_IS_OK(status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3484 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3485 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3486 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3487 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3488 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3489 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3492 return status;
3494 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
3496 ASYNC_RECV_TAIL(fs, async_fsinfo);
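/* Aside (illustration only): RAW_QFS_ATTRIBUTE_INFO describes the remote
   filesystem and does not change for the life of the connection, so the
   first successful answer can be served forever after, as both the sync and
   async paths above do. The memoization shape, with hypothetical types in
   place of proxy_private: */
#if 0
struct fs_attrs { unsigned fs_attr; unsigned max_component_len; };

struct conn {
	struct fs_attrs *cached; /* NULL until the first successful query */
	struct fs_attrs storage;
	int (*remote_query)(struct conn *c, struct fs_attrs *out);
};

static int get_fs_attrs(struct conn *c, struct fs_attrs *out)
{
	if (c->cached) {
		*out = *c->cached;   /* served locally, no round trip */
		return 0;
	}
	if (c->remote_query(c, &c->storage) != 0) {
		return -1;           /* failures are not cached */
	}
	c->cached = &c->storage;
	*out = c->storage;
	return 0;
}
#endif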
3500 return print queue info
3502 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
3503 struct ntvfs_request *req, union smb_lpq *lpq)
3505 return NT_STATUS_NOT_SUPPORTED;
3509 find_first / find_next caching.
3510 For now, cache based on directory, search_attributes, search_pattern, ea stuff
3511 Consider in response:
3512 * search id
3513 * search count
3514 * end of search
3515 * ea stuff
3518 static union smb_search_data *smb_search_data_dup(void* mem_ctx, const union smb_search_data *file, enum smb_search_data_level data_level) {
3519 union smb_search_data *result;
3520 struct smb_wire_string *name;
3522 result=talloc_zero(mem_ctx, union smb_search_data);
3523 if (! result) {
3524 return result;
3527 *result = *file;
3529 switch(data_level) {
3530 case RAW_SEARCH_DATA_SEARCH:
3531 if (! (result->search.name=talloc_strdup(mem_ctx, file->search.name))) goto error;
3532 break;
3533 case RAW_SEARCH_DATA_STANDARD:
3534 if (sws_dup(result, result->standard.name, file->standard.name)) goto error;
3535 break;
3536 case RAW_SEARCH_DATA_EA_SIZE:
3537 if (sws_dup(result, result->ea_size.name, file->ea_size.name)) goto error;
3538 break;
3539 case RAW_SEARCH_DATA_EA_LIST:
3540 if (sws_dup(result, result->ea_list.name, file->ea_list.name)) goto error;
3541 break;
3542 case RAW_SEARCH_DATA_DIRECTORY_INFO:
3543 if (sws_dup(result, result->directory_info.name, file->directory_info.name)) goto error;
3544 break;
3545 case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
3546 if (sws_dup(result, result->full_directory_info.name, file->full_directory_info.name)) goto error;
3547 break;
3548 case RAW_SEARCH_DATA_NAME_INFO:
3549 if (sws_dup(result, result->name_info.name, file->name_info.name)) goto error;
3550 break;
3551 case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
3552 if (sws_dup(result, result->both_directory_info.name, file->both_directory_info.name)) goto error;
3553 if (sws_dup(result, result->both_directory_info.short_name, file->both_directory_info.short_name)) goto error;
3554 break;
3555 case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
3556 if (sws_dup(result, result->id_full_directory_info.name, file->id_full_directory_info.name)) goto error;
3557 break;
3558 case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
3559 if (sws_dup(result, result->id_both_directory_info.name, file->id_both_directory_info.name)) goto error;
3560 if (sws_dup(result, result->id_both_directory_info.short_name, file->id_both_directory_info.short_name)) goto error;
3561 break;
3562 case RAW_SEARCH_DATA_UNIX_INFO:
3563 if (! (result->unix_info.name=talloc_strdup(mem_ctx, file->unix_info.name))) goto error;
3564 break;
3565 case RAW_SEARCH_DATA_UNIX_INFO2:
3566 if (sws_dup(result, result->unix_info2.name, file->unix_info2.name)) goto error;
3567 break;
3568 default:
3569 DEBUG(5,("%s: Error can't dup an unknown file data type: %x\n", __LOCATION__, data_level));
3570 goto error;
3572 return result;
3573 error:
3574 talloc_free(result);
3575 return NULL;
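/* Aside (illustration only): the hazard smb_search_data_dup() is guarding
   against - '*result = *file' copies embedded pointers, so each tag must
   re-own its own strings or the copy silently aliases the original. A reduced
   sketch with plain malloc/strdup (the talloc tree in the real code makes the
   error path a single free): */
#if 0
#include <stdlib.h>
#include <string.h>

enum tag { TAG_A, TAG_B };
union rec {
	struct { char *name; } a;
	struct { char *name; char *short_name; } b;
};

static union rec *rec_dup(const union rec *src, enum tag t)
{
	union rec *dst = malloc(sizeof(*dst));
	if (!dst) return NULL;
	*dst = *src;            /* shallow copy: pointers are now aliased */
	switch (t) {            /* deep-copy the members this tag owns */
	case TAG_A:
		if (!(dst->a.name = strdup(src->a.name))) goto error;
		break;
	case TAG_B:
		if (!(dst->b.name = strdup(src->b.name))) goto error;
		if (!(dst->b.short_name = strdup(src->b.short_name))) goto error;
		break;
	default:
		goto error;     /* unknown tag: refuse to alias */
	}
	return dst;
error:
	free(dst);              /* note: can leak an earlier strdup; talloc avoids this */
	return NULL;
}
#endif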
3578 /* callback function for search first/next */
3579 static bool find_callback(void *private, const union smb_search_data *file)
3581 struct search_state *state = (struct search_state *)private;
3582 struct search_handle *search_handle = state->search_handle;
3583 bool status;
3585 /* if we have a cache, copy this data */
3586 if (search_handle->cache) {
3587 struct search_cache_item *item = talloc_zero(search_handle->cache, struct search_cache_item);
3588 DEBUG(5,("%s: Copy %p to cache %p\n", __LOCATION__, item, search_handle->cache));
3589 if (item) {
3590 item->data_level=search_handle->data_level;
3591 item->file = smb_search_data_dup(item, file, item->data_level);
3592 if (! item->file) {
3593 talloc_free(item);
3594 item=NULL;
3597 if (item) {
3598 /* optimization to save enumerating the entire list each time to find the end.
3599 The cached last_item is very short-lived; it doesn't matter if something has
3600 been added since, as long as it hasn't been removed */
3601 if (state->last_item) {
3602 DLIST_ADD_END(state->last_item, item, struct search_cache_item*);
3603 } else {
3604 DLIST_ADD_END(search_handle->cache->items, item, struct search_cache_item*);
3606 state->last_item=item;
3607 } else {
3608 DEBUG(5,("%s: Could not add name to search cache %p, invalidating cache\n", __LOCATION__, search_handle->cache));
3609 /* dear me, the whole cache will be invalid if we miss data */
3610 search_handle->cache->status=SEARCH_CACHE_DEAD;
3611 /* remove from the list of caches to use */
3612 DLIST_REMOVE(search_handle->cache->proxy->search_caches, search_handle->cache);
3613 /* Make it feel unwanted */
3614 //if (talloc_unlink(search_handle, search_handle->cache)==0) {
3615 //talloc_free(search_handle->cache);
3617 /* stop us using it for this search too */
3618 search_handle->cache=NULL;
3622 status=state->callback(state->private, file);
3623 if (status) {
3624 state->count++;
3626 return status;
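/* Aside (illustration only): the last_item optimisation in find_callback()
   in isolation - cache the tail so repeated appends cost O(1) instead of a
   walk, falling back to one walk when the cache is cold. Safe here only
   because the cache list is append-only while a search is filling it: */
#if 0
struct item { struct item *next; };

struct append_state {
	struct item **head;
	struct item *last; /* cached tail; valid while nothing is removed */
};

static void append(struct append_state *s, struct item *it)
{
	it->next = NULL;
	if (s->last) {
		s->last->next = it;         /* hot path: O(1) */
	} else {
		struct item **p = s->head;  /* cold path: find the end once */
		while (*p) p = &(*p)->next;
		*p = it;
	}
	s->last = it;
}
#endif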
3630 list files in a directory matching a wildcard pattern
3632 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
3633 struct ntvfs_request *req, union smb_search_first *io,
3634 void *search_private,
3635 bool (*callback)(void *, const union smb_search_data *))
3637 struct proxy_private *private = ntvfs->private_data;
3638 struct search_state *state;
3639 struct search_cache *search_cache=NULL;
3640 struct search_cache_key search_cache_key={0};
3641 struct ntvfs_handle *h=NULL;
3642 struct search_handle *s;
3643 uint16_t max_count;
3644 NTSTATUS status;
3646 SETUP_PID;
3648 if (! private->enabled_proxy_search) {
3649 return smb_raw_search_first(private->tree, req, io, search_private, callback);
3651 switch (io->generic.level) {
3652 /* case RAW_SEARCH_DATA_SEARCH:
3653 search_cache_key.search_attrib=io->search_first.in.search_attrib;
3654 search_cache_key.pattern=io->search_first.in.pattern;
3655 max_count = io->search_first.in.max_count;
3656 search_cache = find_search_cache(private->search_cache, &search_cache_key);
3657 break;*/
3658 case RAW_SEARCH_TRANS2:
3659 io->t2ffirst.in.max_count=MIN(io->t2ffirst.in.max_count,100);
3660 max_count = io->t2ffirst.in.max_count;
3662 search_cache_key.level=io->generic.level;
3663 search_cache_key.data_level=io->generic.data_level;
3664 search_cache_key.search_attrib=io->t2ffirst.in.search_attrib;
3665 search_cache_key.pattern=io->t2ffirst.in.pattern;
3666 search_cache_key.flags=io->t2ffirst.in.flags;
3667 search_cache_key.storage_type=io->t2ffirst.in.storage_type;
3668 /* try and find a search cache that is complete */
3669 search_cache = find_search_cache(private->search_caches, &search_cache_key);
3671 /* do handle mapping for TRANS2 */
3672 status = ntvfs_handle_new(ntvfs, req, &h);
3673 NT_STATUS_NOT_OK_RETURN(status);
3675 DEBUG(5,("%s: RAW_SEARCH_TRANS2 %s max count %d, cache=%p level=%x\n",__LOCATION__, search_cache_key.pattern, max_count, search_cache, search_cache_key.data_level));
3676 break;
3677 default: /* won't cache or proxy this */
3678 return smb_raw_search_first(private->tree, req, io, search_private, callback);
3681 /* finish setting up mapped handle */
3682 if (h) {
3683 s = talloc_zero(h, struct search_handle);
3684 NT_STATUS_HAVE_NO_MEMORY(s);
3685 s->proxy=private;
3686 talloc_set_destructor(s, search_handle_destructor);
3687 s->h=h;
3688 s->level=io->generic.level;
3689 s->data_level=io->generic.data_level;
3690 status = ntvfs_handle_set_backend_data(s->h, private->ntvfs, s);
3691 NT_STATUS_NOT_OK_RETURN(status);
3692 DLIST_ADD(private->search_handles, s);
3693 DEBUG(5,("%s: map handle create %d\n",__LOCATION__, smbsrv_fnum(h)));
3696 /* satisfy from cache */
3697 if (search_cache) {
3698 struct search_cache_item* item=search_cache->items;
3699 uint16_t count=0;
3701 /* stop cache going away while we are using it */
3702 s->cache = talloc_reference(s, search_cache);
3703 DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
3704 /* Don't offer over the limit, but only count those that were accepted */
3705 DLIST_FIND(search_cache->items, item, !(count < max_count && callback(search_private, item->file) && ++count) );
3706 io->t2ffirst.out.count=count;
3707 s->resume_item=item;
3708 /* just because callback didn't accept any doesn't mean we are finished */
3709 if (item == NULL) {
3710 /* currently only caching for t2ffirst */
3711 io->t2ffirst.out.end_of_search = true;
3712 DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
3713 } else {
3714 /* count the rest */
3715 io->t2ffirst.out.end_of_search = false;
3716 DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
3717 DLIST_FOR_EACH(item, item, count++);
3718 DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
3721 if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3722 io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE)
3724 /* destroy handle */
3725 ntvfs_handle_remove_backend_data(h, ntvfs);
3726 io->t2ffirst.out.handle=0;
3727 } else {
3728 /* now map handle */
3729 io->t2ffirst.out.handle=smbsrv_fnum(h);
3731 return NT_STATUS_OK;
3734 state = talloc_zero(req, struct search_state);
3735 NT_STATUS_HAVE_NO_MEMORY(state);
3737 /* if there isn't a matching cache already being generated by another search,
3738 start one, unless FLAG_TRANS2_FIND_BACKUP_INTENT which is always live */
3739 if (!(io->t2ffirst.in.flags & FLAG_TRANS2_FIND_BACKUP_INTENT) &&
3740 find_partial_search_cache(private->search_caches, &search_cache_key) == NULL) {
3741 /* need to opendir the folder being searched so we can get a notification */
3742 struct search_cache *search_cache=NULL;
3744 search_cache=new_search_cache(private, &search_cache_key);
3745 /* Stop cache going away while we are using it */
3746 if (search_cache) {
3747 s->cache=talloc_reference(s, search_cache);
3751 /* stop the handle going away while we are using it */
3752 state->search_handle=talloc_reference(state, s);
3753 state->private=search_private;
3754 state->callback=callback;
3756 status=smb_raw_search_first(private->tree, req, io, state, find_callback);
3757 DEBUG(5,("%s: count from %d to %d\n",__LOCATION__,io->t2ffirst.out.count,state->count));
3759 DEBUG(5,("%s: Done %d %s\n",__LOCATION__, io->t2ffirst.out.count, get_friendly_nt_error_msg (status)));
3761 #warning check NT_STATUS_IS_OK ?
3762 if (io->t2ffirst.out.end_of_search) {
3763 /* cache might have gone away if problem filling */
3764 if (s->cache) {
3766 s->cache->status = SEARCH_CACHE_COMPLETE;
3767 DEBUG(5,("%s: Cache %p filled in first go!\n",__LOCATION__, s->cache));
3770 if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3771 io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) {
3772 DEBUG(5,("%s: Closing search\n",__LOCATION__));
3773 /* destroy partial cache */
3774 if (s->cache && (io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
3775 ! io->t2ffirst.out.end_of_search) {
3776 DEBUG(5,("%s: Destroying cache %p\n",__LOCATION__, s->cache));
3777 /* cache is no good now! */
3778 DLIST_REMOVE(private->search_caches, s->cache);
3779 //if (talloc_unlink(s, s->cache)==0) {
3780 //talloc_free(s->cache);
3782 s->cache=NULL;
3784 if (s->cache) {
3785 s->cache->status=SEARCH_CACHE_COMPLETE;
3787 /* Need to deal with the case when the client would not take them all but we still cache them
3788 if (state->count < io->t2ffirst.out.count && io->t2ffirst.out.end_of_search) {
3789 io->t2ffirst.out.end_of_search = false;
3790 //s->resume_item = state->last_item;
3791 } */
3792 /* destroy handle */
3793 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
3794 ntvfs_handle_remove_backend_data(h, ntvfs);
3795 io->t2ffirst.out.handle=0;
3796 } else {
3797 s->handle = io->t2ffirst.out.handle;
3798 io->t2ffirst.out.handle=smbsrv_fnum(h);
3800 io->t2ffirst.out.count=state->count;
3801 return status;
3804 #define DLIST_FIND_NEXT(start, item, test) do {\
3805 DLIST_FIND(start, item, test); \
3806 if (item) (item)=(item)->next; \
3807 } while(0)
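/* Aside (illustration only): what DLIST_FIND_NEXT gives the resume logic
   below - locate the entry the client last saw, then step one past it, and
   yield NULL (meaning "start from the head") when no entry matches: */
#if 0
struct entry { struct entry *next; int resume_key; };

static struct entry *find_next(struct entry *head, int key)
{
	struct entry *e;
	for (e = head; e; e = e->next) {
		if (e->resume_key == key) {
			return e->next; /* resume on the following entry */
		}
	}
	return NULL; /* key unknown (or it was the last entry) */
}
#endif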
3809 /* continue a search */
3810 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
3811 struct ntvfs_request *req, union smb_search_next *io,
3812 void *search_private,
3813 bool (*callback)(void *, const union smb_search_data *))
3815 struct proxy_private *private = ntvfs->private_data;
3816 struct search_state *state;
3817 struct ntvfs_handle *h=NULL;
3818 struct search_handle *s;
3819 const struct search_cache *search_cache=NULL;
3820 struct search_cache_item *start_at=NULL;
3821 uint16_t max_count;
3822 NTSTATUS status;
3824 SETUP_PID;
3826 if (! private->enabled_proxy_search) {
3827 return smb_raw_search_next(private->tree, req, io, search_private, callback);
3829 switch (io->generic.level) {
3830 case RAW_SEARCH_TRANS2:
3831 io->t2fnext.in.max_count=MIN(io->t2fnext.in.max_count,20);
3832 max_count = io->t2fnext.in.max_count;
3834 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->t2fnext.in.handle), struct ntvfs_handle);
3835 if (! h) return NT_STATUS_INVALID_HANDLE;
3836 /* convert handle into search_cache */
3837 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
3838 if (! s) return NT_STATUS_INVALID_HANDLE;
3839 search_cache=s->cache;
3840 DEBUG(5,("%s: RAW_SEARCH_TRANS2 find_next h=%d [real %d] count %d, cache=%p\n",__LOCATION__, io->t2fnext.in.handle, s->handle, max_count, search_cache));
3841 io->t2fnext.in.handle=s->handle;
3842 if (! search_cache) {
3843 break;
3846 /* warning: if the uint16_t flags or const char *last_name have changed, we should abort the cache */
3847 /* skip up to resume key */
3848 if (search_cache && search_cache->status == SEARCH_CACHE_COMPLETE) {
3849 DEBUG(5,("%s: seek resume position\n",__LOCATION__));
3850 /* work out where in the cache to continue from */
3851 switch (io->generic.data_level) {
3852 case RAW_SEARCH_DATA_STANDARD:
3853 case RAW_SEARCH_DATA_EA_SIZE:
3854 case RAW_SEARCH_DATA_EA_LIST:
3855 /* have a resume key? */
3856 DEBUG(5,("%s: type %x seek on %x\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.resume_key));
3857 DLIST_FIND_NEXT(search_cache->items, start_at, io->t2fnext.in.resume_key == start_at->file->standard.resume_key);
3858 break;
3859 case RAW_SEARCH_DATA_DIRECTORY_INFO: /* TODO: fstrcmp is already strcasecmp; some filesystems may need a case-sensitive compare */
3860 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3861 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->directory_info.name.s)==0);
3862 break;
3863 case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
3864 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3865 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->full_directory_info.name.s)==0);
3866 break;
3867 case RAW_SEARCH_DATA_NAME_INFO:
3868 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3869 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->name_info.name.s)==0);
3870 break;
3871 case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
3872 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3873 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->both_directory_info.name.s)==0);
3874 break;
3875 case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
3876 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3877 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_full_directory_info.name.s)==0);
3878 break;
3879 case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
3880 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3881 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_both_directory_info.name.s)==0);
3882 break;
3883 case RAW_SEARCH_DATA_UNIX_INFO:
3884 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3885 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info.name)==0);
3886 break;
3887 case RAW_SEARCH_DATA_UNIX_INFO2:
3888 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
3889 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info2.name.s)==0);
3890 break;
3891 default:
3892 if (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CONTINUE) {
3893 start_at = s->resume_item;
3894 } else {
3895 DEBUG(5,("%s: HELP! How can we resume?\n",__LOCATION__));
3896 start_at = s->resume_item;
3899 DEBUG(5,("%s: Start at %p\n",__LOCATION__,start_at));
3901 break;
3904 if (! search_cache) {
3905 DEBUG(5,("%s: No cache, pass-through\n",__LOCATION__));
3906 return smb_raw_search_next(private->tree, req, io, search_private, callback);
3908 //#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference((ctx),(ptr))
3909 //surely should be
3910 //#define talloc_reference(ctx, ptr) _talloc_reference((ctx),(ptr))?(ptr):(NULL) to preserve the type of ptr
3912 /* satisfy from cache */
3913 if (search_cache->status == SEARCH_CACHE_COMPLETE) {
3914 struct search_cache_item* item;
3915 uint16_t count=0;
3916 DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
3918 if (! start_at) {
3919 start_at = search_cache->items;
3922 DLIST_FIND(start_at, item, !(count < max_count && callback(search_private, item->file) && ++count) );
3923 io->t2fnext.out.count=count;
3924 s->resume_item=item;
3925 if (item == NULL) {
3926 DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
3927 io->t2fnext.out.end_of_search = true;
3928 } else {
3929 DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
3930 io->t2fnext.out.end_of_search = false;
3931 /* count the rest */
3932 DLIST_FOR_EACH(item, item, count++);
3933 DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
3935 /* is it the end? */
3936 if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3937 io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
3939 /* destroy handle */
3940 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
3941 ntvfs_handle_remove_backend_data(h, ntvfs);
3944 return NT_STATUS_OK;
3947 /* pass-through and fill-cache */
3948 state = talloc_zero(req, struct search_state);
3949 NT_STATUS_HAVE_NO_MEMORY(state);
3951 state->search_handle=talloc_reference(state, s);
3952 state->private=search_private;
3953 state->callback=callback;
3955 status = smb_raw_search_next(private->tree, req, io, state, find_callback);
3956 DEBUG(5,("%s: count from %d to %d\n",__LOCATION__,io->t2fnext.out.count,state->count));
3958 /* if closing, then close */
3959 if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3960 io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
3962 if (s->cache && (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
3963 ! io->t2fnext.out.end_of_search) {
3964 /* partial cache is useless */
3965 DLIST_REMOVE(private->search_caches, s->cache);
3966 //if (talloc_unlink(s, s->cache)==0) {
3967 //talloc_free(s->cache);
3969 s->cache=NULL;
3971 if (s->cache) {
3972 s->cache->status=SEARCH_CACHE_COMPLETE;
3973 /* Need to deal with the case when the client would not take them all but we still cache them
3974 if (state->count < io->t2fnext.out.count && io->t2fnext.out.end_of_search) {
3975 io->t2fnext.out.end_of_search = false;
3976 } */
3978 /* destroy handle */
3979 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
3980 ntvfs_handle_remove_backend_data(h, ntvfs);
3982 io->t2fnext.out.count=state->count;
3984 return status;
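/* Aside (illustration only): the DLIST_FIND in the serve-from-cache paths is
   really a bounded for-each whose 'test' both delivers entries and counts
   acceptances; the find stops at the first entry NOT delivered, which becomes
   the resume point. The same control flow written out plainly: */
#if 0
struct entry { struct entry *next; };

/* deliver at most max entries via cb; returns the first undelivered entry
   (the resume point), or NULL when the list was fully consumed */
static struct entry *serve(struct entry *start, unsigned max,
                           int (*cb)(void *priv, struct entry *e), void *priv,
                           unsigned *served)
{
	struct entry *e = start;

	*served = 0;
	while (e && *served < max && cb(priv, e)) {
		(*served)++;
		e = e->next;
	}
	return e; /* NULL => end_of_search */
}
#endif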
3987 /* close a search */
3988 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
3989 struct ntvfs_request *req, union smb_search_close *io)
3991 struct proxy_private *private = ntvfs->private_data;
3992 struct ntvfs_handle *h=NULL;
3993 struct search_handle *s;
3994 NTSTATUS status;
3996 SETUP_PID;
3998 if (! private->enabled_proxy_search) {
3999 return smb_raw_search_close(private->tree, io);
4001 switch (io->generic.level) {
4002 case RAW_SEARCH_TRANS2:
4003 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->findclose.in.handle), struct ntvfs_handle);
4004 if (! h) return NT_STATUS_INVALID_HANDLE;
4005 /* convert handle into search_cache */
4006 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
4007 if (! s) return NT_STATUS_INVALID_HANDLE;
4008 io->findclose.in.handle=s->handle; break; /* don't fall into the default pass-through */
4009 default:
4010 return smb_raw_search_close(private->tree, io);
4013 if (! s->cache) {
4014 status = smb_raw_search_close(private->tree, io);
4015 } else {
4016 if (s->cache->status != SEARCH_CACHE_COMPLETE) {
4017 /* cache is useless */
4018 DLIST_REMOVE(private->search_caches, s->cache);
4019 //if (talloc_unlink(s, s->cache)==0) {
4020 //talloc_free(s->cache);
4023 status = NT_STATUS_OK;
4026 s->h=NULL;
4027 ntvfs_handle_remove_backend_data(h, ntvfs);
4028 /* s MAY also be gone at this point, if h was freed, unless there were
4029 pending responses, in which case they see s->h is NULL as a sign to stop */
4030 return status;
4034 a handler for async trans2 replies
4036 static void async_trans2(struct smbcli_request *c_req)
4038 struct async_info *async = c_req->async.private;
4039 struct ntvfs_request *req = async->req;
4040 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
4041 talloc_free(async);
4042 req->async_states->send_fn(req);
4045 /* raw trans2 */
4046 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
4047 struct ntvfs_request *req,
4048 struct smb_trans2 *trans2)
4050 struct proxy_private *private = ntvfs->private_data;
4051 struct smbcli_request *c_req;
4053 if (private->map_trans2) {
4054 return NT_STATUS_NOT_IMPLEMENTED;
4057 SETUP_PID;
4058 #warning we should be mapping file handles here
4060 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4061 return smb_raw_trans2(private->tree, req, trans2);
4064 c_req = smb_raw_trans2_send(private->tree, trans2);
4066 ASYNC_RECV_TAIL(trans2, async_trans2);
4070 /* SMBtrans - not used on file shares */
4071 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
4072 struct ntvfs_request *req,
4073 struct smb_trans2 *trans2)
4075 return NT_STATUS_ACCESS_DENIED;
4079 a handler for async change notify replies
4081 static void async_changenotify(struct smbcli_request *c_req)
4083 struct async_info *async = c_req->async.private;
4084 struct ntvfs_request *req = async->req;
4085 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
4086 talloc_free(async);
4087 req->async_states->send_fn(req);
4090 /* change notify request - always async */
4091 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
4092 struct ntvfs_request *req,
4093 union smb_notify *io)
4095 struct proxy_private *private = ntvfs->private_data;
4096 struct smbcli_request *c_req;
4097 int saved_timeout = private->transport->options.request_timeout;
4098 struct proxy_file *f;
4100 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
4101 return NT_STATUS_NOT_IMPLEMENTED;
4104 SETUP_PID;
4106 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
4107 if (!f) return NT_STATUS_INVALID_HANDLE;
4108 io->nttrans.in.file.fnum = f->fnum;
4110 /* this request doesn't make sense unless it's async */
4111 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4112 return NT_STATUS_INVALID_PARAMETER;
4115 /* we must not timeout on notify requests - they wait
4116 forever */
4117 private->transport->options.request_timeout = 0;
4119 c_req = smb_raw_changenotify_send(private->tree, io);
4121 private->transport->options.request_timeout = saved_timeout;
4123 ASYNC_RECV_TAIL(io, async_changenotify);
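/* Aside (illustration only): the timeout handling above in miniature - the
   option only needs to be cleared across the _send call, since it is
   evidently latched when the request is queued. Hypothetical types: */
#if 0
struct transport_opts { int request_timeout; /* 0 = wait forever */ };

static void *send_without_timeout(struct transport_opts *t,
                                  void *(*raw_send)(struct transport_opts *t))
{
	int saved = t->request_timeout;
	void *req;

	t->request_timeout = 0; /* notify replies may take arbitrarily long */
	req = raw_send(t);
	t->request_timeout = saved;
	return req;
}
#endif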
4127 * A handler for converting rpc struct replies back to ntioctl
4129 static NTSTATUS proxy_rpclite_map_async_send(
4130 struct ntvfs_module_context *ntvfs,
4131 struct ntvfs_request *req,
4132 void *io1, void *io2, NTSTATUS status)
4134 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
4135 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
4136 void* r=rpclite_send->struct_ptr;
4137 struct ndr_push* push;
4138 const struct ndr_interface_call* call=rpclite_send->call;
4139 enum ndr_err_code ndr_err;
4140 DATA_BLOB ndr;
4142 talloc_free(rpclite_send);
4144 DEBUG(5,("%s: converting r=%p back to ntioctl\n",__FUNCTION__, r));
4145 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4146 NT_STATUS_HAVE_NO_MEMORY(push);
4148 if (0) {
4149 push->flags |= LIBNDR_FLAG_BIGENDIAN;
4152 ndr_err = call->ndr_push(push, NDR_OUT, r);
4153 status=ndr_map_error2ntstatus(ndr_err);
4155 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4156 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
4157 nt_errstr(status)));
4158 return status;
4161 ndr=ndr_push_blob(push);
4162 //if (ndr.length > io->ntioctl.in.max_data) {
4163 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
4164 io->ntioctl.in.max_data, ndr.data));
4165 io->ntioctl.out.blob=ndr;
4166 return status;
4170 * A handler for sending async rpclite Read replies that were mapped to union smb_read
4172 static NTSTATUS rpclite_proxy_Read_map_async_send(
4173 struct ntvfs_module_context *ntvfs,
4174 struct ntvfs_request *req,
4175 void *io1, void *io2, NTSTATUS status)
4177 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
4178 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
4180 /* status here is the result of proxy_read; it doesn't reflect the status
4181 of the rpc transport or related calls, just the read operation */
4182 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4183 r->out.result=status;
4185 if (! NT_STATUS_IS_OK(status)) {
4186 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
4187 r->out.nread=0;
4188 r->out.flags=0;
4189 } else {
4190 ssize_t size=io->readx.out.nread;
4191 r->out.flags=0;
4192 r->out.nread=io->readx.out.nread;
4194 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
4195 declare_checksum(digest);
4196 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
4198 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
4199 dump_data (5, digest, sizeof(digest));
4200 DEBUG(5,("Cached digest\n"));
4201 dump_data (5, r->in.digest.digest, sizeof(digest));
4203 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
4204 r->out.flags=PROXY_USE_CACHE;
4205 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
4206 (long long)r->out.nread));
4207 if (r->in.flags & PROXY_VALIDATE) {
4208 r->out.flags |= PROXY_VALIDATE;
4209 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
4210 (long long)r->out.nread, (long long) io->readx.out.nread));
4212 goto done;
4214 DEBUG(5,("Cache does not match\n"));
4217 if (r->in.flags & PROXY_VALIDATE) {
4218 /* validate failed, shrink read to mincnt so we don't flood the link */
4219 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
4220 r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
4221 r->out.nread=MIN(r->out.nread, r->in.mincnt);
4222 size=r->out.nread;
4225 if (r->in.flags & PROXY_USE_ZLIB) {
4226 if (compress_block(io->readx.out.data, &size) ) {
4227 r->out.flags|=PROXY_USE_ZLIB;
4228 r->out.response.compress.count=size;
4229 r->out.response.compress.data=io->readx.out.data;
4230 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
4231 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
4232 goto done;
4236 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
4237 r->out.response.generic.count=io->readx.out.nread;
4238 r->out.response.generic.data=io->readx.out.data;
4241 done:
4243 /* Or should we return NT_STATUS_OK ?*/
4244 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
4246 /* the rpc transport succeeded even if the operation did not */
4247 return NT_STATUS_OK;
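/* Aside (illustration only): the reply path above makes two independent
   space savings - elide the payload when the client's cached digest still
   matches, else compress if PROXY_USE_ZLIB was requested and it helps. The
   decision condensed; the digest compare and 'compress' (which must only
   report success when the result is smaller) stand in for the real helpers: */
#if 0
#include <string.h>

enum { EX_USE_CACHE = 1, EX_USE_ZLIB = 2 };

static unsigned pick_encoding(const unsigned char *client_digest,
                              const unsigned char *fresh_digest, size_t dlen,
                              unsigned requested, unsigned char *data,
                              size_t *len,
                              int (*compress)(unsigned char *d, size_t *n))
{
	if ((requested & EX_USE_CACHE) &&
	    memcmp(client_digest, fresh_digest, dlen) == 0) {
		*len = 0;           /* client already holds these bytes */
		return EX_USE_CACHE;
	}
	if ((requested & EX_USE_ZLIB) && compress(data, len)) {
		return EX_USE_ZLIB; /* ship the smaller compressed payload */
	}
	return 0;                   /* plain data */
}
#endif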
4251 * RPC implementation of Read
4253 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
4254 struct ntvfs_request *req, struct proxy_Read *r)
4256 struct proxy_private *private = ntvfs->private_data;
4257 union smb_read* io=talloc(req, union smb_read);
4258 NTSTATUS status;
4259 struct proxy_file *f;
4260 struct ntvfs_handle *h;
4262 NT_STATUS_HAVE_NO_MEMORY(io);
4264 /* if the next hop is a proxy, just repeat this call, and also handle the
4265 VALIDATE check; that means having our own callback handlers too... */
4266 SETUP_PID;
4268 RPCLITE_SETUP_FILE_HERE(f, h);
4270 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
4271 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
4272 DEBUG(5,("Anticipated digest\n"));
4273 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
4275 /* If the remote end is a proxy, just fixup the file handle and pass through,
4276 but update the cache on the way back
4277 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
4278 } */
4280 /* prepare for response */
4281 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
4282 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
4284 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
4285 return proxy_validate(ntvfs, req, r, f);
4288 /* pack up an smb_read request and dispatch here */
4289 io->readx.level=RAW_READ_READX;
4290 io->readx.in.file.ntvfs=h;
4291 io->readx.in.mincnt=r->in.mincnt;
4292 io->readx.in.maxcnt=r->in.maxcnt;
4293 io->readx.in.offset=r->in.offset;
4294 io->readx.in.remaining=r->in.remaining;
4295 /* and something to hold the answer */
4296 io->readx.out.data=r->out.response.generic.data;
4298 /* so we get to pack the io->*.out response */
4299 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
4300 NT_STATUS_NOT_OK_RETURN(status);
4302 /* so the read will get processed normally */
4303 return proxy_read(ntvfs, req, io);
4307 * A handler for sending async rpclite Write replies
4309 static NTSTATUS rpclite_proxy_Write_map_async_send(
4310 struct ntvfs_module_context *ntvfs,
4311 struct ntvfs_request *req,
4312 void *io1, void *io2, NTSTATUS status)
4314 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
4315 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
4317 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4318 r->out.result=status;
4320 r->out.nwritten=io->writex.out.nwritten;
4321 r->out.remaining=io->writex.out.remaining;
4323 /* the rpc transport succeeded even if the operation did not */
4324 return NT_STATUS_OK;
4328 * RPC implementation of write
4330 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
4331 struct ntvfs_request *req, struct proxy_Write *r)
4333 struct proxy_private *private = ntvfs->private_data;
4334 union smb_write* io=talloc(req, union smb_write);
4335 NTSTATUS status;
4336 struct proxy_file* f;
4337 struct ntvfs_handle *h;
4339 SETUP_PID;
4341 RPCLITE_SETUP_FILE_HERE(f,h);
4343 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
4344 r->in.count, r->in.offset, r->in.fnum));
4346 /* pack up an smb_write request and dispatch here */
4347 io->writex.level=RAW_WRITE_WRITEX;
4348 io->writex.in.file.ntvfs=h;
4349 io->writex.in.offset=r->in.offset;
4350 io->writex.in.wmode=r->in.mode;
4351 io->writex.in.count=r->in.count;
4353 /* and the data */
4354 if (PROXY_USE_ZLIB & r->in.flags) {
4355 ssize_t count=r->in.data.generic.count;
4356 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
4357 &count, r->in.count);
4358 if (count != r->in.count || !io->writex.in.data) {
4359 /* Didn't uncompress properly, but the RPC layer worked */
4360 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
4361 return NT_STATUS_OK;
4363 } else {
4364 io->writex.in.data=r->in.data.generic.data;
4367 /* so we get to pack the io->*.out response */
4368 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
4369 NT_STATUS_NOT_OK_RETURN(status);
4371 /* so the write will get processed normally */
4372 return proxy_write(ntvfs, req, io);
4376 * RPC amalgamation of getinfo requests
4378 struct proxy_getinfo_fragments;
4379 struct proxy_getinfo_fragmentses;
4381 /* holds one smbcli_request to satisfy part of one proxy_GetInfo request */
4382 struct proxy_getinfo_fragment {
4383 struct proxy_getinfo_fragment *prev, *next;
4384 struct proxy_getinfo_fragments *fragments;
4385 union smb_fileinfo *smb_fileinfo;
4386 struct smbcli_request *c_req;
4387 NTSTATUS status;
4390 /* holds reference to many fragment smbcli_request that together make up one proxy_GetInfo request */
4391 struct proxy_getinfo_fragments {
4392 struct proxy_getinfo_fragments *prev, *next;
4393 struct proxy_getinfo_fragmentses *fragmentses;
4394 struct proxy_getinfo_fragment *fragments;
4395 uint32_t index;
4398 struct proxy_getinfo_fragmentses {
4399 struct proxy_getinfo_fragments *fragments;
4400 struct proxy_GetInfo *r;
4401 struct ntvfs_request *req;
4402 bool async;
4406 a handler for async getinfo replies
4408 NTSTATUS async_proxy_getinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
4410 struct smbcli_request *c_req = async->c_req;
4411 struct ntvfs_request *req = async->req;
4412 struct proxy_file *f=async->f;
4413 struct proxy_getinfo_fragment *fragment=talloc_get_type_abort(io2, struct proxy_getinfo_fragment);
4414 struct proxy_getinfo_fragments* fragments=fragment->fragments;
4415 struct proxy_getinfo_fragmentses* fragmentses=fragments->fragmentses;
4416 struct proxy_GetInfo *r=talloc_get_type_abort(fragmentses->r, struct proxy_GetInfo);
4417 int c=fragments->index;
4418 struct info_data* d=&(r->out.info_data[c]);
4419 union smb_fileinfo *io=talloc_get_type_abort(io1, union smb_fileinfo);
4421 SMB_ASSERT(c_req == NULL || c_req == fragment->c_req);
4423 if (c_req) {
4424 switch (r->in.info_tags[0].tag_type) {
4425 case TAG_TYPE_FILE_INFO:
4426 status=smb_raw_fileinfo_recv(c_req, r, io);
4427 break;
4428 case TAG_TYPE_PATH_INFO:
4429 status=smb_raw_pathinfo_recv(c_req, r, io);
4430 break;
4431 default:
4432 status=NT_STATUS_INVALID_PARAMETER;
4434 c_req=NULL;
4437 /* stop the callback occurring more than once synchronously */
4438 fragment->c_req=NULL;
4440 DEBUG(5,("%s: async callback level %x %s\n",__FUNCTION__,io->generic.level, get_friendly_nt_error_msg (status)));
4441 switch (io->generic.level) {
4442 case RAW_FILEINFO_ALL_INFO:
4443 case RAW_FILEINFO_ALL_INFORMATION:
4444 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALL_INFO\n",__FUNCTION__));
4445 d->status_RAW_FILEINFO_ALL_INFO=status;
4447 /* don't blindly overwrite BASIC_INFORMATION as we may already have it */
4448 if (NT_STATUS_IS_OK(status)) {
4449 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4450 d->create_time=io->all_info.out.create_time;
4451 d->access_time=io->all_info.out.access_time;
4452 d->write_time=io->all_info.out.write_time;
4453 d->change_time=io->all_info.out.change_time;
4454 d->attrib=io->all_info.out.attrib;
4456 d->alloc_size=io->all_info.out.alloc_size;
4457 d->size=io->all_info.out.size;
4458 dump_data(5, io, sizeof(*io));
4459 d->nlink=io->all_info.out.nlink;
4460 d->delete_pending=io->all_info.out.delete_pending;
4461 d->directory=io->all_info.out.directory;
4462 d->ea_size=io->all_info.out.ea_size;
4463 /* io is sticking around for as long as d is */
4464 d->fname.s=io->all_info.out.fname.s;
4465 d->fname.count=io->all_info.out.fname.private_length;
4466 break;
4467 case RAW_FILEINFO_BASIC_INFO:
4468 case RAW_FILEINFO_BASIC_INFORMATION:
4469 DEBUG(5,("%s: async callback level RAW_FILEINFO_BASIC_INFORMATION\n",__FUNCTION__));
4470 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4471 d->create_time=io->basic_info.out.create_time;
4472 d->access_time=io->basic_info.out.access_time;
4473 d->write_time=io->basic_info.out.write_time;
4474 d->change_time=io->basic_info.out.change_time;
4475 d->attrib=io->basic_info.out.attrib;
4476 break;
4477 case RAW_FILEINFO_COMPRESSION_INFO:
4478 DEBUG(5,("%s: async callback level RAW_FILEINFO_COMPRESSION_INFO\n",__FUNCTION__));
4479 d->status_RAW_FILEINFO_COMPRESSION_INFO = status;
4480 d->compressed_size=io->compression_info.out.compressed_size;
4481 d->format=io->compression_info.out.format;
4482 d->unit_shift=io->compression_info.out.unit_shift;
4483 d->chunk_shift=io->compression_info.out.chunk_shift;
4484 d->cluster_shift=io->compression_info.out.cluster_shift;
4485 break;
4486 case RAW_FILEINFO_INTERNAL_INFORMATION:
4487 DEBUG(5,("%s: async callback level RAW_FILEINFO_INTERNAL_INFORMATION\n",__FUNCTION__));
4488 d->status_RAW_FILEINFO_INTERNAL_INFORMATION=status;
4489 d->file_id=io->internal_information.out.file_id;
4490 break;
4491 case RAW_FILEINFO_ACCESS_INFORMATION:
4492 DEBUG(5,("%s: async callback level RAW_FILEINFO_ACCESS_INFORMATION\n",__FUNCTION__));
4493 d->status_RAW_FILEINFO_ACCESS_INFORMATION=status;
4494 d->access_flags=io->access_information.out.access_flags;
4495 break;
4496 case RAW_FILEINFO_POSITION_INFORMATION:
4497 DEBUG(5,("%s: async callback level RAW_FILEINFO_POSITION_INFORMATION\n",__FUNCTION__));
4498 d->status_RAW_FILEINFO_POSITION_INFORMATION = status;
4499 d->position=io->position_information.out.position;
4500 break;
4501 case RAW_FILEINFO_MODE_INFORMATION:
4502 DEBUG(5,("%s: async callback level RAW_FILEINFO_MODE_INFORMATION\n",__FUNCTION__));
4503 d->status_RAW_FILEINFO_MODE_INFORMATION =status;
4504 d->mode=io->mode_information.out.mode;
4505 break;
4506 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
4507 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALIGNMENT_INFORMATION\n",__FUNCTION__));
4508 d->status_RAW_FILEINFO_ALIGNMENT_INFORMATION=status;
4509 d->alignment_requirement=io->alignment_information.out.alignment_requirement;
4510 break;
4511 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
4512 DEBUG(5,("%s: async callback level RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION\n",__FUNCTION__));
4513 d->status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=status;
4514 d->reparse_tag=io->attribute_tag_information.out.reparse_tag;
4515 d->reparse_attrib=io->attribute_tag_information.out.attrib;
4516 break;
4517 case RAW_FILEINFO_STREAM_INFO: {
4518 uint_t c;
4519 DEBUG(5,("%s: async callback level RAW_FILEINFO_STREAM_INFO\n",__FUNCTION__));
4520 d->status_RAW_FILEINFO_STREAM_INFO=status;
4521 DEBUG(5,("Num Streams %d %s\n",io->stream_info.out.num_streams, get_friendly_nt_error_msg (status)));
4522 if (NT_STATUS_IS_OK(status)) {
4523 d->streams=talloc_zero_array(d, struct info_stream, io->stream_info.out.num_streams);
4524 if (! d->streams) {
4525 d->status_RAW_FILEINFO_STREAM_INFO=NT_STATUS_NO_MEMORY;
4526 } else {
4527 d->num_streams=io->stream_info.out.num_streams;
4528 for(c=0; c < io->stream_info.out.num_streams; c++) {
4529 d->streams[c].size = io->stream_info.out.streams[c].size;
4530 d->streams[c].alloc_size = io->stream_info.out.streams[c].alloc_size;
4531 d->streams[c].stream_name.s=io->stream_info.out.streams[c].stream_name.s;
4532 d->streams[c].stream_name.count=io->stream_info.out.streams[c].stream_name.private_length;
4533 }
4534 }
4535 }
4536 break; }
4537 default:
4538 /* a level we never requested, so... where's it from? */
4539 DEBUG(5,("Unexpected read level\n"));
4540 }
4542 fragment->smb_fileinfo = NULL;
4543 fragment->c_req=NULL;
4545 /* are the fragments complete? */
4546 DLIST_REMOVE(fragments->fragments, fragment);
4547 /* if this index is complete, remove from fragmentses */
4548 if (! fragments->fragments) {
4549 DLIST_REMOVE(fragmentses->fragments, fragments);
4550 }
4551 /* is that the end? */
4552 if (! fragmentses->fragments && fragmentses->async) {
4553 DEBUG(5,("That's the end of the fragments, doing send\n"));
4554 /* call the send_fn */
4555 req=fragmentses->req;
4556 req->async_states->status=NT_STATUS_OK;
4557 DEBUG(5,("Fragments async response sending\n"));
4558 req->async_states->send_fn(req);
4559 }
4560 DEBUG(5,("%s: That's the end of the callback\n",__FUNCTION__));
4561 return status;
4562 }
4564 #define FINISH_GETINFO_FRAGMENT(r, io) do { \
4565 struct smbcli_request *c_req; \
4566 switch (r->in.info_tags[0].tag_type) { \
4567 case TAG_TYPE_FILE_INFO: \
4568 io->all_info.in.file.fnum=r->in.info_tags[0].info_tag.fnum; \
4569 c_req=smb_raw_fileinfo_send(private->tree, io); \
4570 break; \
4571 case TAG_TYPE_PATH_INFO: \
4572 io->all_info.in.file.path=r->in.info_tags[0].info_tag.path.s; \
4573 c_req=smb_raw_pathinfo_send(private->tree, io); \
4574 break; \
4575 default: \
4576 return NT_STATUS_INVALID_PARAMETER; \
4577 } \
4578 /* Add fragment collator */ \
4579 fragment->c_req=c_req; \
4580 /* use the same stateful async handler for them all... */ \
4581 { void* req=NULL; \
4582 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_proxy_getinfo, NT_STATUS_INTERNAL_ERROR); \
4583 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_read_handler); \
4584 } \
4585 io=NULL; \
4586 } while (0)
4588 #define SETUP_GETINFO_FRAGMENT(io, LEVEL) do { \
4589 fragment=talloc_zero(fragments, struct proxy_getinfo_fragment); \
4590 NT_STATUS_HAVE_NO_MEMORY(fragment); \
4591 DLIST_ADD(fragments->fragments, fragment); \
4592 fragment->fragments=fragments; \
4593 io=talloc_zero(fragment, union smb_fileinfo); \
4594 NT_STATUS_HAVE_NO_MEMORY(io); \
4595 io->generic.level=LEVEL; \
4596 } while (0)
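/* A minimal sketch of how the two macros above pair up (the real usage is in
   rpclite_proxy_Getinfo below): SETUP_GETINFO_FRAGMENT allocates a fragment
   and a union smb_fileinfo at the requested level; FINISH_GETINFO_FRAGMENT
   sends it and hooks async_proxy_getinfo as the collating receive handler. */
#if 0
	SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_BASIC_INFORMATION);
	FINISH_GETINFO_FRAGMENT(r, io);
#endif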
4598 static NTSTATUS rpclite_proxy_Getinfo(struct ntvfs_module_context *ntvfs,
4599 struct ntvfs_request *req, struct proxy_GetInfo *r)
4600 {
4601 struct proxy_private *private = ntvfs->private_data;
4602 struct smbcli_request *c_req;
4603 union smb_fileinfo *io=NULL;
4604 NTSTATUS status;
4605 struct proxy_file* f;
4606 struct ntvfs_handle *h;
4607 struct proxy_getinfo_fragmentses *fragmentses;
4608 int c;
4610 SETUP_PID;
4612 DEBUG(5,("Opnum: proxy_Getinfo r=%p\n",r));
4614 DEBUG(5,("Converting %d handles for r=%p\n",r->in.count, r));
4615 for(c=0; c < r->in.count; c++) {
4616 if (r->in.info_tags[c].tag_type==TAG_TYPE_FILE_INFO) {
4617 RPCLITE_SETUP_THIS_FILE_HERE(r->in.info_tags[c].info_tag.fnum, f, h);
4618 }
4619 }
4621 if (PROXY_REMOTE_SERVER(private)) {
4622 DEBUG(5,("Remote proxy, doing transparent\n"));
4623 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
4624 /* No need to add a receive handler; the ntioctl transport adds
4625 the async chain handler which deals with the send_fn */
4626 // ADD_ASYNC_RECV_TAIL(c_req, r, NULL, f, rpclite_proxy_Getinfo_map_async_send, NT_STATUS_INTERNAL_ERROR);
4628 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4629 DEBUG(5,("%s:Sync waiting for nttrans response\n",__LOCATION__));
4630 return sync_chain_handler(c_req);
4631 } else {
4632 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
4633 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4634 return NT_STATUS_OK;
4635 }
4636 }
4638 /* I thought this was done for me for [in,out] */
4639 r->out.info_data=talloc_zero_array(r, struct info_data, r->in.count);
4640 NT_STATUS_HAVE_NO_MEMORY(r->out.info_data);
4641 r->out.count = r->in.count;
4642 r->out.result = NT_STATUS_OK;
4644 fragmentses=talloc_zero(req, struct proxy_getinfo_fragmentses);
4645 NT_STATUS_HAVE_NO_MEMORY(fragmentses);
4646 fragmentses->r=r;
4647 fragmentses->req=req;
4649 #warning if the count is large, we need to issue a few requests at a time according to resource limits
4650 for (c=0; c < r->in.count; c++) {
4651 struct proxy_getinfo_fragments *fragments;
4652 struct proxy_getinfo_fragment *fragment;
4654 fragments=talloc_zero(fragmentses, struct proxy_getinfo_fragments);
4655 NT_STATUS_HAVE_NO_MEMORY(fragments);
4656 DLIST_ADD(fragmentses->fragments, fragments);
4657 fragments->fragmentses=fragmentses;
4658 fragments->index=c;
4660 /* Issue a set of getinfo requests */
4661 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALL_INFO);
4662 FINISH_GETINFO_FRAGMENT(r, io);
4664 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_COMPRESSION_INFO);
4665 FINISH_GETINFO_FRAGMENT(r, io);
4667 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_INTERNAL_INFORMATION);
4668 FINISH_GETINFO_FRAGMENT(r, io);
4670 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ACCESS_INFORMATION);
4671 FINISH_GETINFO_FRAGMENT(r, io);
4673 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_POSITION_INFORMATION);
4674 FINISH_GETINFO_FRAGMENT(r, io);
4676 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_MODE_INFORMATION);
4677 FINISH_GETINFO_FRAGMENT(r, io);
4679 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALIGNMENT_INFORMATION);
4680 FINISH_GETINFO_FRAGMENT(r, io);
4682 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
4683 FINISH_GETINFO_FRAGMENT(r, io);
4685 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_STREAM_INFO);
4686 FINISH_GETINFO_FRAGMENT(r, io);
4687 }
4689 /* If ! async, wait for all requests to finish */
4691 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4692 struct proxy_getinfo_fragments *fragments;
4693 struct proxy_getinfo_fragment *fragment;
4694 while ((fragments = fragmentses->fragments) &&
4695 (fragment = fragments->fragments) &&
4696 fragment->c_req) {
4697 sync_chain_handler(fragment->c_req);
4698 /* and because the whole fragment / fragments may be gone now... */
4699 continue;
4700 }
4701 return NT_STATUS_OK; /* see individual failures */
4702 }
4704 DEBUG(5,("%s: Setting async response\n",__FUNCTION__));
4705 fragmentses->async=true;
4706 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4707 return NT_STATUS_OK;
4708 }
4710 /* rpclite dispatch table */
4711 #define RPC_PROXY_OPS 3
4712 struct {
4713 uint32_t opnum;
4714 NTSTATUS (*handler)(struct ntvfs_module_context *ntvfs,
4715 struct ntvfs_request *req, void* r);
4716 } rpcproxy_ops[RPC_PROXY_OPS]={
4717 {NDR_PROXY_READ, rpclite_proxy_Read},
4718 {NDR_PROXY_WRITE, rpclite_proxy_Write},
4719 {NDR_PROXY_GETINFO, rpclite_proxy_Getinfo}
4720 };
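/* Note: proxy_rpclite below dispatches with rpcproxy_ops[opnum].handler, so
   each entry must sit at the index equal to its opnum. An illustrative check
   (not original code) that would catch a mis-ordered table: */
#if 0
	SMB_ASSERT(rpcproxy_ops[NDR_PROXY_READ].opnum == NDR_PROXY_READ &&
	           rpcproxy_ops[NDR_PROXY_WRITE].opnum == NDR_PROXY_WRITE &&
	           rpcproxy_ops[NDR_PROXY_GETINFO].opnum == NDR_PROXY_GETINFO);
#endif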
4722 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
4723 back from rpc struct to ntioctl */
4724 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
4725 struct ntvfs_request *req, union smb_ioctl *io)
4726 {
4727 struct proxy_private *private = ntvfs->private_data;
4728 DATA_BLOB *request;
4729 struct ndr_syntax_id* syntax_id;
4730 uint32_t opnum;
4731 const struct ndr_interface_table *table;
4732 struct ndr_pull* pull;
4733 void* r;
4734 NTSTATUS status;
4735 struct async_rpclite_send *rpclite_send;
4736 enum ndr_err_code ndr_err;
4738 SETUP_PID;
4740 /* We don't care about io->generic.in.file, ntvfs layer already proved it was valid,
4741 our operations will have the fnum embedded in them anyway */
4742 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,(int)io->ntioctl.in.blob.length));
4743 /* unpack the NDR */
4744 request=&io->ntioctl.in.blob;
4746 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4747 NT_STATUS_HAVE_NO_MEMORY(pull);
4748 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
4749 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
4751 /* the blob is 4-aligned because it was memcpy'd */
4752 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
4753 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
4755 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
4756 status=ndr_map_error2ntstatus(ndr_err);
4757 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4758 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
4759 return status;
4760 }
4762 /* now find the struct ndr_interface_table * for this syntax_id */
4763 table=ndr_table_by_uuid(&syntax_id->uuid);
4764 if (! table) {
4765 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
4766 return NT_STATUS_NO_GUID_TRANSLATION;
4767 }
4769 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
4770 status=ndr_map_error2ntstatus(ndr_err);
4771 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4772 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
4773 return status;
4774 }
4775 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
4777 DEBUG(10,("rpc request data:\n"));
4778 dump_data(10, pull->data, pull->data_size);
4779 if (opnum >= table->num_calls) return NT_STATUS_PROCEDURE_NOT_FOUND; /* don't index calls[] out of range */
4780 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
4781 table->calls[opnum].name);
4782 NT_STATUS_HAVE_NO_MEMORY(r);
4784 memset(r, 0, table->calls[opnum].struct_size);
4786 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
4787 status=ndr_map_error2ntstatus(ndr_err);
4788 DEBUG(5,("%s opnum %d pulled r=%p status %s\n",__FUNCTION__,opnum,r,get_friendly_nt_error_msg (status)));
4789 NT_STATUS_NOT_OK_RETURN(status);
4791 rpclite_send=talloc(req, struct async_rpclite_send);
4792 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
4793 rpclite_send->call=&table->calls[opnum];
4794 rpclite_send->struct_ptr=r;
4795 /* need to push conversion function to convert from r to io */
4796 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
4797 NT_STATUS_NOT_OK_RETURN(status);
4799 /* Ideally we would magically despatch the call based on syntax_id, table and opnum,
4800 but there is no generic table of handlers yet - so until then we special-case rpcproxy: */
4801 if (0==strcasecmp(table->name,"rpcproxy")) {
4802 if (opnum >= RPC_PROXY_OPS) {
4803 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
4804 return NT_STATUS_PROCEDURE_NOT_FOUND;
4805 }
4806 status = rpcproxy_ops[opnum].handler(ntvfs, req, r);
4807 } else {
4808 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
4809 GUID_string(debug_ctx(),&syntax_id->uuid)));
4810 return NT_STATUS_NO_GUID_TRANSLATION;
4811 }
4813 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
4814 the handler status is in r->out.result */
4815 DEBUG(5,("%s now map_async_finish: status=%s async=%d\n", __FUNCTION__,
4816 get_friendly_nt_error_msg (status), req->async_states->state & NTVFS_ASYNC_STATE_ASYNC));
4818 return ntvfs_map_async_finish(req, status);
4819 }
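/* The rpclite framing pulled apart above is, in order: an ndr_syntax_id
   selecting the interface table, a uint32 opnum selecting the call, then the
   NDR_IN-marshalled arguments. The push side in
   smbcli_ndr_request_ntioctl_send below mirrors it exactly; a sketch: */
#if 0
	ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
	ndr_push_uint32(push, NDR_SCALARS, opnum);
	table->calls[opnum].ndr_push(push, NDR_IN, r);
	/* the resulting blob travels as io->ntioctl.in.blob */
#endif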
4821 /* unpack the ntioctl to make some rpc_struct */
4822 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
4823 {
4824 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
4825 struct proxy_private *proxy=async->proxy;
4826 struct smbcli_request *c_req = async->c_req;
4827 void* r=io1;
4828 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
4829 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
4830 const struct ndr_interface_call *calls=info->calls;
4831 enum ndr_err_code ndr_err;
4832 DATA_BLOB *response;
4833 struct ndr_pull* pull;
4835 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
4836 DEBUG(5,("%s op %s ntioctl: %s\n",
4837 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
4838 NT_STATUS_NOT_OK_RETURN(status);
4840 if (c_req) {
4841 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
4842 status = smb_raw_ioctl_recv(c_req, io, io);
4843 #define SESSION_INFO proxy->remote_server, proxy->remote_share
4844 /* This status is the ntioctl wrapper status */
4845 if (! NT_STATUS_IS_OK(status)) {
4846 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
4847 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
4848 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
4849 return NT_STATUS_UNSUCCESSFUL;
4850 }
4851 }
4853 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
4855 response=&io->ntioctl.out.blob;
4856 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4857 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
4859 NT_STATUS_HAVE_NO_MEMORY(pull);
4861 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
4862 #warning can we free pull here?
4863 status=ndr_map_error2ntstatus(ndr_err);
4865 DEBUG(5,("END %s op status %s\n",
4866 __FUNCTION__, get_friendly_nt_error_msg(status)));
4867 return status;
4868 }
4870 /*
4871 send an ntioctl request based on an NDR encoding.
4872 */
4873 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
4874 struct smbcli_tree *tree,
4875 struct ntvfs_module_context *ntvfs,
4876 const struct ndr_interface_table *table,
4877 uint32_t opnum,
4878 void *r)
4879 {
4880 struct proxy_private *private = ntvfs->private_data;
4881 struct smbcli_request * c_req;
4882 struct ndr_push *push;
4883 NTSTATUS status;
4884 DATA_BLOB request;
4885 enum ndr_err_code ndr_err;
4886 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
4887 if (! io) return NULL;
4889 /* setup for a ndr_push_* call, we can't free push until the message
4890 actually hits the wire */
4891 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4892 if (!push) return NULL;
4894 /* first push interface table identifiers */
4895 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
4896 status=ndr_map_error2ntstatus(ndr_err);
4898 if (! NT_STATUS_IS_OK(status)) return NULL;
4900 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
4901 status=ndr_map_error2ntstatus(ndr_err);
4902 if (! NT_STATUS_IS_OK(status)) return NULL;
4904 if (0) {
4905 push->flags |= LIBNDR_FLAG_BIGENDIAN;
4906 }
4908 /* push the structure into a blob */
4909 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
4910 status=ndr_map_error2ntstatus(ndr_err);
4911 if (!NT_STATUS_IS_OK(status)) {
4912 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
4913 nt_errstr(status)));
4914 return NULL;
4915 }
4917 /* retrieve the blob */
4918 request = ndr_push_blob(push);
4920 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
4921 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
4922 io->ntioctl.in.file.fnum=private->nttrans_fnum;
4923 io->ntioctl.in.fsctl=false;
4924 io->ntioctl.in.filter=0;
4925 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
4926 io->ntioctl.in.blob=request;
4928 DEBUG(10,("smbcli_request packet:\n"));
4929 dump_data(10, request.data, request.length);
4931 c_req = smb_raw_ioctl_send(tree, io);
4933 if (! c_req) {
4934 return NULL;
4935 }
4937 dump_data(10, c_req->out.data, c_req->out.data_size);
4939 { void* req=NULL;
4940 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
4941 info->io=io;
4942 info->table=table;
4943 info->opnum=opnum;
4944 info->calls=&table->calls[opnum];
4945 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
4946 }
4948 return c_req;
4949 }
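/* Illustrative use of the helper above, modelled on proxy_smb_raw_read_send
   and proxy_smb_raw_write_send later in this file (not original code): */
#if 0
	struct proxy_Read *r=talloc_zero(io, struct proxy_Read);
	if (! r) return NULL;
	/* ... fill in r->in ... */
	c_req=smbcli_ndr_request_ntioctl_send(private->tree, ntvfs,
	                                      &ndr_table_rpcproxy,
	                                      NDR_PROXY_READ, r);
	/* ntioctl_rpc_unmap is already chained to unpack the reply; chain a
	   further handler with ADD_ASYNC_RECV_TAIL to map r back onto the io */
#endif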
4951 /*
4952 client helpers, mapping between proxy RPC calls and smbcli_* calls.
4953 */
4955 /*
4956 * If the sync_chain_handler is called directly it unplugs the async handler
4957 which (as well as preventing loops) will also avoid req->send_fn being
4958 called - which is also nice! */
4959 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
4960 {
4961 struct async_info *async=NULL;
4962 /* the first callback which will actually receive the c_req response */
4963 struct async_info_map *async_map;
4964 NTSTATUS status=NT_STATUS_OK;
4965 struct async_info_map** chain;
4967 DEBUG(5,("%s\n",__FUNCTION__));
4968 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
4970 /* If there is a handler installed, it is using async_info to chain */
4971 if (c_req->async.fn) {
4972 /* it is not safe to talloc_free async if send_fn has been called for the request
4973 against which async was allocated, so steal it (and free it below), or do neither */
4974 async = talloc_get_type_abort(c_req->async.private, struct async_info);
4975 talloc_steal(NULL, async);
4976 chain=&async->chain;
4977 async_map = talloc_get_type_abort(*chain, struct async_info_map);
4978 } else {
4979 chain=(struct async_info_map**)&c_req->async.private;
4980 async_map = talloc_get_type_abort(*chain, struct async_info_map);
4981 }
4983 /* unplug c_req->async.fn because, if a callback handler calls smb_*_recv
4984 in order to receive the response, smbcli_transport_finish_recv will
4985 call us again and then call c_req->async.fn.
4986 Perhaps we should merely call smbcli_request_receive() IF
4987 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
4988 help multi-part replies... except that all parts are received before
4989 the callback if a handler WAS set */
4990 c_req->async.fn=NULL;
4992 /* Should we raise an error? Should we simple_recv? */
4993 while(async_map) {
4994 /* remove this one from the list before we call. We do this in case
4995 some callbacks free their async_map but also so that callbacks
4996 can navigate the async_map chain to add additional callbacks to
4997 the end - e.g. so that tag-along reads can call send_fn after
4998 the send_fn of the request they tagged along to, thus preserving
4999 the async response order - which may be a waste of time? */
5000 DLIST_REMOVE(*chain, async_map);
5002 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
5003 if (async_map->fn) {
5004 status=async_map->fn(async_map->async,
5005 async_map->parms1, async_map->parms2, status);
5006 }
5007 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
5008 /* Note: the callback may have added to the chain */
5009 #warning Async_maps have a null talloc_context, it is unclear who should own them
5010 /* it can't be c_req as it stops us chaining more than one, maybe it
5011 should be req but there isn't always a req. However sync_chain_handler
5012 will always free it if called */
5013 DEBUG(6,("Will free async map %p\n",async_map));
5014 #warning put me back
5015 talloc_free(async_map);
5016 DEBUG(6,("Free'd async_map\n"));
5017 if (*chain)
5018 async_map=talloc_get_type_abort(*chain, struct async_info_map);
5019 else
5020 async_map=NULL;
5021 DEBUG(6,("Switch to async_map %p\n",async_map));
5023 /* The first callback will have read c_req, thus talloc_free'ing it,
5024 so we don't let the other callbacks get hurt playing with it */
5025 if (async_map && async_map->async)
5026 async_map->async->c_req=NULL;
5027 }
5029 talloc_free(async);
5031 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
5032 return status;
5033 }
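/* A sketch of the two completion paths that meet in this chain (compare
   rpclite_proxy_Getinfo above): synchronous callers run the chain directly;
   asynchronous callers let async_chain_handler below run it from
   c_req->async.fn and then fire send_fn. */
#if 0
	if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return sync_chain_handler(c_req);  /* run chain now; send_fn not called */
	} else {
		ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
		req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
		return NT_STATUS_OK;               /* send_fn fires when the reply lands */
	}
#endif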
5035 /* If the async handler is called, then the send_fn is called */
5036 static void async_chain_handler(struct smbcli_request *c_req)
5037 {
5038 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
5039 struct ntvfs_request *req = async->req;
5040 NTSTATUS status;
5042 if (c_req->state <= SMBCLI_REQUEST_RECV) {
5043 /* Looks like the async handler has been called sync'ly */
5044 smb_panic("async_chain_handler called sync'ly");
5045 }
5047 status=sync_chain_handler(c_req);
5049 /* Should we insist that a chain'd handler does this itself? That would make
5050 it hard to intercept the data by adding handlers before the send_fn
5051 handler sends it... */
5052 if (req) {
5053 DEBUG(5,("%s send_fn on req=%p\n",__FUNCTION__,req));
5054 req->async_states->status=status;
5055 req->async_states->send_fn(req);
5056 }
5057 }
5059 /* unpack the rpc struct to make some smb_write */
5060 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
5061 void* io1, void* io2, NTSTATUS status)
5062 {
5063 union smb_write* io =talloc_get_type(io1, union smb_write);
5064 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
5066 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
5067 get_friendly_nt_error_msg (status)));
5068 DEBUG(3,("Write response for offset=%lld\n",(long long)io->generic.in.offset));
5069 NT_STATUS_NOT_OK_RETURN(status);
5071 status=r->out.result;
5072 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
5073 NT_STATUS_NOT_OK_RETURN(status);
5075 io->generic.out.remaining = r->out.remaining;
5076 io->generic.out.nwritten = r->out.nwritten;
5078 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
5079 get_friendly_nt_error_msg (status)));
5080 return status;
5081 }
5083 /* upgrade from smb to NDR and then send.
5084 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
5085 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
5086 union smb_write *io,
5087 struct proxy_file *f)
5088 {
5089 struct proxy_private *private = ntvfs->private_data;
5090 struct smbcli_tree *tree=private->tree;
5092 if (PROXY_REMOTE_SERVER(private)) {
5093 struct smbcli_request *c_req;
5094 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
5095 ssize_t size;
5097 if (! r) return NULL;
5099 size=io->generic.in.count;
5100 /* upgrade the write */
5101 r->in.fnum = io->generic.in.file.fnum;
5102 r->in.offset = io->generic.in.offset;
5103 r->in.count = io->generic.in.count;
5104 r->in.mode = io->generic.in.wmode;
5105 // r->in.remaining = io->generic.in.remaining;
5106 #warning remove this
5107 /* prepare to lie */
5108 r->out.nwritten=r->in.count;
5109 r->out.remaining=0;
5111 /* try to compress */
5112 #warning compress!
5113 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
5114 if (r->in.data.compress.data) {
5115 r->in.data.compress.count=size;
5116 r->in.flags = PROXY_USE_ZLIB;
5117 } else {
5118 r->in.flags = 0;
5119 /* we'll honour const, honest gov */
5120 r->in.data.generic.data=discard_const(io->generic.in.data);
5121 r->in.data.generic.count=io->generic.in.count;
5122 }
5124 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5125 ntvfs,
5126 &ndr_table_rpcproxy,
5127 NDR_PROXY_WRITE, r);
5128 if (! c_req) return NULL;
5130 /* yeah, filthy abuse of f */
5131 { void* req=NULL;
5132 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
5133 }
5135 return c_req;
5136 } else {
5137 return smb_raw_write_send(tree, io);
5138 }
5139 }
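/* The contract assumed of the compression helpers used above (they are
   defined elsewhere in this module): compress_block_talloc returns a
   talloc'd deflated copy and updates *size, or NULL when compression fails
   or does not pay, in which case the raw buffer is sent; the receiver
   inflates with uncompress_block_to. A sketch of the round trip, where dest
   and dest_size stand for a hypothetical receive buffer: */
#if 0
	ssize_t size=io->generic.in.count;
	uint8_t *z=compress_block_talloc(r, io->generic.in.data, &size);
	if (z) {
		/* sender: ship z/size with r->in.flags |= PROXY_USE_ZLIB */
		if (! uncompress_block_to(dest, z, &size, dest_size)) {
			/* receiver: inflate failed - corrupt or truncated data */
		}
	}
#endif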
5141 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
5142 union smb_write *io,
5143 struct proxy_file *f)
5144 {
5145 struct proxy_private *proxy = ntvfs->private_data;
5146 struct smbcli_tree *tree=proxy->tree;
5148 if (PROXY_REMOTE_SERVER(proxy)) {
5149 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
5150 return sync_chain_handler(c_req);
5151 } else {
5152 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
5153 return smb_raw_write_recv(c_req, io);
5154 }
5155 }
5157 /* unpack the rpc struct to make some smb_read response */
5158 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
5159 void* io1, void* io2, NTSTATUS status)
5160 {
5161 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
5162 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
5163 struct proxy_file *f = async->f;
5165 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
5166 get_friendly_nt_error_msg(status)));
5167 NT_STATUS_NOT_OK_RETURN(status);
5169 status=r->out.result;
5170 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
5171 get_friendly_nt_error_msg(status)));
5172 NT_STATUS_NOT_OK_RETURN(status);
5174 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
5175 io->generic.out.compaction_mode = 0;
5177 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5178 /* Use the io we already setup!
5179 if out.flags & PROXY_VALIDATE, we may need to validate more of the
5180 cache than r->out.nread would suggest, see io->generic.out.nread */
5181 if (r->out.flags & PROXY_VALIDATE)
5182 io->generic.out.nread=io->generic.in.maxcnt;
5183 DEBUG(5,("Using cached data: size=%lld\n",
5184 (long long) io->generic.out.nread));
5185 return status;
5186 }
5188 if (r->in.flags & PROXY_VALIDATE) {
5189 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
5190 /* turn off validate on this file */
5191 //cache_handle_novalidate(f);
5192 #warning turn off validate on this file - do an nread<maxcnt later
5193 }
5195 if (r->in.flags & PROXY_USE_CACHE) {
5196 DEBUG(5,("Cached data did not match\n"));
5197 }
5199 io->generic.out.nread = r->out.nread;
5201 /* we may need to uncompress */
5202 if (r->out.flags & PROXY_USE_ZLIB) {
5203 ssize_t size=r->out.response.compress.count;
5204 DEBUG(5,("%s: uncompress, %lld wanted %lld or %lld\n",__LOCATION__,
5205 (long long int)size,
5206 (long long int)io->generic.in.maxcnt,
5207 (long long int)io->generic.in.mincnt));
5208 if (size > io->generic.in.mincnt) {
5209 /* we did a bulk read for the cache */
5210 uint8_t *data=talloc_size(io, io->generic.in.maxcnt);
5211 DEBUG(5,("%s: bulk uncompress to %p\n",__LOCATION__,data));
5212 if (! uncompress_block_to(data,
5213 r->out.response.compress.data, &size,
5214 io->generic.in.maxcnt) ||
5215 size != r->out.nread) {
5216 status=NT_STATUS_INVALID_USER_BUFFER;
5217 } else {
5218 DEBUG(5,("%s: uncompressed\n",__LOCATION__));
5219 /* copy as much as they can take */
5220 io->generic.out.nread=MIN(io->generic.in.mincnt, size);
5221 memcpy(io->generic.out.data, data, io->generic.out.nread);
5222 /* copy the rest to the cache */
5223 cache_handle_save(f, data,
5224 size,
5225 io->generic.in.offset);
5226 }
5227 } else if (! uncompress_block_to(io->generic.out.data,
5228 r->out.response.compress.data, &size,
5229 io->generic.in.maxcnt) ||
5230 size != r->out.nread) {
5231 io->generic.out.nread=size;
5232 status=NT_STATUS_INVALID_USER_BUFFER;
5233 }
5234 } else if (io->generic.out.data != r->out.response.generic.data) {
5235 //Assert(r->out.nread == r->out.generic.out.count);
5236 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
5237 }
5239 return status;
5240 }
5242 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
5243 data has been pre-read into io->generic.out.data and can be used for
5244 proxy<->proxy optimized reads */
5245 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
5246 union smb_read *io,
5247 struct proxy_file *f,
5248 struct proxy_Read *r)
5249 {
5250 struct proxy_private *private = ntvfs->private_data;
5251 #warning we are using out.nread as a out-of-band parameter
5252 if (PROXY_REMOTE_SERVER(private)) {
5254 struct smbcli_request *c_req;
5255 if (! r) {
5256 r=talloc_zero(io, struct proxy_Read);
5257 if (! r) return NULL;
5258 r->in.mincnt = io->generic.in.mincnt;
5259 }
5262 r->in.fnum = io->generic.in.file.fnum;
5263 r->in.read_for_execute=io->generic.in.read_for_execute;
5264 r->in.offset = io->generic.in.offset;
5265 r->in.maxcnt = io->generic.in.maxcnt;
5266 r->in.remaining = io->generic.in.remaining;
5267 r->in.flags |= PROXY_USE_ZLIB;
5268 if (! (r->in.flags & PROXY_VALIDATE) &&
5269 io->generic.out.data && io->generic.out.nread > 0) {
5270 /* maybe we should limit digest size to MIN(nread, maxcnt) to
5271 permit the caller to provide a larger nread as part of
5272 a split read */
5273 checksum_block(r->in.digest.digest, io->generic.out.data,
5274 io->generic.out.nread);
5276 if (io->generic.out.nread > r->in.maxcnt) {
5277 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
5278 } else {
5279 r->in.mincnt = io->generic.out.nread;
5280 r->in.maxcnt = io->generic.out.nread;
5281 r->in.flags |= PROXY_USE_CACHE;
5282 /* PROXY_VALIDATE will have been set by the caller */
5283 }
5284 }
5286 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5287 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
5288 dump_data(3, r->in.digest.digest, sizeof(r->in.digest.digest));
5289 }
5291 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5292 ntvfs,
5293 &ndr_table_rpcproxy,
5294 NDR_PROXY_READ, r);
5295 if (! c_req) return NULL;
5297 { void* req=NULL;
5298 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
5299 }
5301 return c_req;
5302 } else {
5303 return smb_raw_read_send(private->tree, io);
5304 }
5305 }
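/* A sketch of the proxy<->proxy cached-read handshake driven above: when the
   caller has pre-read data into io->generic.out.data, a digest of it rides
   in the request; the reply then carries PROXY_USE_CACHE (or a
   PROXY_VALIDATE result) in r->out.flags instead of payload, and the local
   copy is used - see async_proxy_smb_raw_read_rpc above. The replying side
   is assumed to recompute the digest over its own data and only set the flag
   on a match. */
#if 0
	checksum_block(r->in.digest.digest, io->generic.out.data,
	               io->generic.out.nread);
	r->in.mincnt = r->in.maxcnt = io->generic.out.nread;
	r->in.flags |= PROXY_USE_CACHE;
#endif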
5307 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
5308 union smb_read *io,
5309 struct proxy_file *f)
5310 {
5311 struct proxy_private *proxy = ntvfs->private_data;
5312 struct smbcli_tree *tree=proxy->tree;
5314 if (PROXY_REMOTE_SERVER(proxy)) {
5315 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
5316 return sync_chain_handler(c_req);
5317 } else {
5318 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
5319 return smb_raw_read_recv(c_req, io);
5320 }
5321 }
5324 /*
5325 initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
5326 */
5327 NTSTATUS ntvfs_proxy_init(void)
5328 {
5329 NTSTATUS ret;
5330 struct ntvfs_ops ops;
5331 NTVFS_CURRENT_CRITICAL_SIZES(vers);
5333 ZERO_STRUCT(ops);
5335 /* fill in the name and type */
5336 ops.name = "proxy";
5337 ops.type = NTVFS_DISK;
5339 /* fill in all the operations */
5340 ops.connect = proxy_connect;
5341 ops.disconnect = proxy_disconnect;
5342 ops.unlink = proxy_unlink;
5343 ops.chkpath = proxy_chkpath;
5344 ops.qpathinfo = proxy_qpathinfo;
5345 ops.setpathinfo = proxy_setpathinfo;
5346 ops.open = proxy_open;
5347 ops.mkdir = proxy_mkdir;
5348 ops.rmdir = proxy_rmdir;
5349 ops.rename = proxy_rename;
5350 ops.copy = proxy_copy;
5351 ops.ioctl = proxy_ioctl;
5352 ops.read = proxy_read;
5353 ops.write = proxy_write;
5354 ops.seek = proxy_seek;
5355 ops.flush = proxy_flush;
5356 ops.close = proxy_close;
5357 ops.exit = proxy_exit;
5358 ops.lock = proxy_lock;
5359 ops.setfileinfo = proxy_setfileinfo;
5360 ops.qfileinfo = proxy_qfileinfo;
5361 ops.fsinfo = proxy_fsinfo;
5362 ops.lpq = proxy_lpq;
5363 ops.search_first = proxy_search_first;
5364 ops.search_next = proxy_search_next;
5365 ops.search_close = proxy_search_close;
5366 ops.trans = proxy_trans;
5367 ops.logoff = proxy_logoff;
5368 ops.async_setup = proxy_async_setup;
5369 ops.cancel = proxy_cancel;
5370 ops.notify = proxy_notify;
5371 ops.trans2 = proxy_trans2;
5373 /* register ourselves with the NTVFS subsystem. We register
5374 under the name 'proxy'. */
5375 ret = ntvfs_register(&ops, &vers);
5377 if (!NT_STATUS_IS_OK(ret)) {
5378 DEBUG(0,("Failed to register PROXY backend!\n"));
5379 }
5381 return ret;
5382 }