Deal with c_req send errors asynchronously
source4/ntvfs/proxy/vfs_proxy.c
/*
   Unix SMB/PROXY implementation.

   CIFS PROXY NTVFS filesystem backend

   Copyright (C) Andrew Tridgell 2003
   Copyright (C) James J Myers 2003 <myersjj@samba.org>
   Copyright (C) Sam Liddicott <sam@liddicott.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
  this implements a CIFS->CIFS NTVFS filesystem caching proxy.

  TODO:
	New read-ahead
	Delete cache
	Share cache states between processes
	Update to latest samba
	limit dirmons etc
	mapi delegated creds
*/
#define TALLOC_ABORT(why) smb_panic(why)
#warning handle SMB_FLAGS2_COMPRESSED flag from client: http://msdn2.microsoft.com/en-us/library/cc246254.aspx
#define __LOCATION__ (talloc_asprintf(debug_ctx(),"%s:%d %s",__FILE__,__LINE__,__FUNCTION__))

#define PROXY_NTIOCTL_MAXDATA 0x2000000
#include "includes.h"
#include "libcli/raw/libcliraw.h"
#include "libcli/smb_composite/smb_composite.h"
#include "auth/auth.h"
#include "auth/credentials/credentials.h"
#include "ntvfs/ntvfs.h"
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "libcli/resolve/resolve.h"
#include "libcli/libcli.h"
#include "libcli/raw/ioctl.h"
#include "librpc/gen_ndr/ndr_misc.h"
#include "librpc/gen_ndr/ndr_proxy.h"
#include "librpc/ndr/ndr_table.h"
#include "lib/cache/cache.h"
#include "lib/compression/zlib.h"
#include "libcli/raw/raw_proto.h"
#include "librpc/gen_ndr/proxy.h"
#include "smb_server/smb_server.h"
#define fstrcmp(a,b) strcasecmp((a),(b))
#define fstrncmp(a,b,len) strncasecmp((a),(b),(len))
#define LOAD_CACHE_FILE_DATA(dest, src) do { \
	dest.create_time=src.create_time; \
	dest.access_time=src.access_time; \
	dest.write_time=src.write_time; \
	dest.change_time=src.change_time; \
	dest.attrib=src.attrib; \
	dest.alloc_size=src.alloc_size; \
	dest.size=src.size; \
	dest.file_type=src.file_type; \
	dest.ipc_state=src.ipc_state; \
	dest.is_directory=src.is_directory; \
	dest.delete_pending=0; \
} while(0)
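
/* Illustrative use (a sketch, caller names are hypothetical): copy the
   cached attributes into a fresh info structure; note that delete_pending
   is always reset to 0 on the destination:

	struct proxy_file_info_data info;
	LOAD_CACHE_FILE_DATA(info, r->out.info_data[0]);
*/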
/* taken from #include "librpc/gen_ndr/proxy.h" */
struct proxy_file_info_data {
	/* first three are from ntcreatex */
	uint16_t file_type;
	uint16_t ipc_state;
	uint8_t is_directory;

	NTSTATUS status_RAW_FILEINFO_BASIC_INFORMATION;
	uint32_t attrib;	/* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME create_time;	/* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME access_time;	/* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME write_time;	/* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */
	NTTIME change_time;	/* RAW_FILEINFO_ALL_INFO | RAW_FILEINFO_BASIC_INFORMATION */

	NTSTATUS status_RAW_FILEINFO_ALL_INFO;
	uint32_t ea_size;	/* RAW_FILEINFO_ALL_INFO */
	uint64_t alloc_size;	/* RAW_FILEINFO_ALL_INFO */
	uint64_t size;		/* RAW_FILEINFO_ALL_INFO */
	uint32_t nlink;		/* RAW_FILEINFO_ALL_INFO */
	struct sws fname;	/* RAW_FILEINFO_ALL_INFO */
	uint8_t delete_pending;	/* RAW_FILEINFO_ALL_INFO */
	uint8_t directory;	/* RAW_FILEINFO_ALL_INFO */

	NTSTATUS status_RAW_FILEINFO_COMPRESSION_INFO;
	uint64_t compressed_size;	/* RAW_FILEINFO_COMPRESSION_INFO */
	uint16_t format;		/* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t unit_shift;		/* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t chunk_shift;		/* RAW_FILEINFO_COMPRESSION_INFO */
	uint8_t cluster_shift;		/* RAW_FILEINFO_COMPRESSION_INFO */

	NTSTATUS status_RAW_FILEINFO_INTERNAL_INFORMATION;
	uint64_t file_id;	/* RAW_FILEINFO_INTERNAL_INFORMATION */

	NTSTATUS status_RAW_FILEINFO_ACCESS_INFORMATION;
	uint32_t access_flags;	/* RAW_FILEINFO_ACCESS_INFORMATION */

	NTSTATUS status_RAW_FILEINFO_POSITION_INFORMATION;
	uint64_t position;	/* RAW_FILEINFO_POSITION_INFORMATION */

	NTSTATUS status_RAW_FILEINFO_MODE_INFORMATION;
	uint32_t mode;		/* RAW_FILEINFO_MODE_INFORMATION */

	NTSTATUS status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	uint32_t alignment_requirement;	/* RAW_FILEINFO_ALIGNMENT_INFORMATION */

	NTSTATUS status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	uint32_t reparse_tag;		/* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */
	uint32_t reparse_attrib;	/* RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION */

	NTSTATUS status_RAW_FILEINFO_STREAM_INFO;
	uint32_t num_streams;		/* RAW_FILEINFO_STREAM_INFO */
	struct info_stream *streams;	/* RAW_FILEINFO_STREAM_INFO */
};
/* bitmask flags for file_metadata.valid: these are or'd together and tested
   with &, so they must be distinct powers of two */
#define valid_RAW_FILEINFO_BASIC_INFORMATION		1
#define valid_RAW_FILEINFO_ALL_INFO			2
#define valid_RAW_FILEINFO_COMPRESSION_INFO		4
#define valid_RAW_FILEINFO_INTERNAL_INFORMATION		8
#define valid_RAW_FILEINFO_STANDARD_INFO		16
#define valid_RAW_FILEINFO_ACCESS_INFORMATION		32
#define valid_RAW_FILEINFO_POSITION_INFORMATION		64
#define valid_RAW_FILEINFO_MODE_INFORMATION		128
#define valid_RAW_FILEINFO_ALIGNMENT_INFORMATION	256
#define valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION	512
#define valid_RAW_FILEINFO_STREAM_INFO			1024
struct file_metadata {
	int count;
	int valid;
	struct proxy_file_info_data info_data;
};

struct proxy_file {
	struct proxy_file *prev, *next;
	struct proxy_private* proxy;
	uint16_t fnum;
	struct ntvfs_handle *h;
	struct cache_file_entry *cache;
	/* filename might not be a char*, but if so, _size includes null */
	void* filename;
	int filename_size;
	int readahead_pending;
	/* *_OPLOCK_RETURN values */
	int oplock;
	/* read-only, shareable normal file open, can be cloned by similar opens */
	bool can_clone;
	/* If we have an oplock, then the file is NOT bigger than size, which lets
	   us optimize reads */
	struct file_metadata *metadata;
};

struct proxy_private;
struct search_handle {
	struct search_handle *prev, *next;
	struct proxy_private *proxy;
	struct ntvfs_handle *h;
	uint16_t handle;
	union {
		struct smb_search_id id;
		uint32_t resume_key;
	} resume_index;
	struct search_cache_item *resume_item;
	enum smb_search_level level;
	enum smb_search_data_level data_level;
	/* search cache (if any) being used */
	struct search_cache *cache;
};

struct search_cache_item {
	struct search_cache_item *prev, *next;
	enum smb_search_data_level data_level;
	struct cache_file_entry *cache;
	union smb_search_data *file;
	struct file_metadata *metadata;
};

enum search_cache_status {
	SEARCH_CACHE_INCOMPLETE,
	SEARCH_CACHE_COMPLETE,
	SEARCH_CACHE_DEAD
};
struct fdirmon;
typedef void*(fdirmon_callback_fn)(void* data, struct fdirmon* fdirmon);
//NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS)

struct fdirmon {
	struct fdirmon *prev, *next;
	struct search_cache_item *items;

	struct proxy_private *proxy;

	union smb_notify *notify_io;
	struct smbcli_request *notify_req;
	uint16_t dir_fnum;
	char* dir;
	struct fdirmon_callback {
		struct fdirmon_callback *prev, *next;
		fdirmon_callback_fn *fn;
		void* data;
	} *callbacks;
};
struct search_cache {
	struct search_cache *prev, *next;
	struct search_cache_item *items;

	struct proxy_private *proxy;
	enum search_cache_status status;

	union smb_notify *notify_io;
	struct smbcli_request *notify_req;
	uint16_t dir_fnum;
	char* dir;

	struct search_cache_key {
		enum smb_search_level level;
		enum smb_search_data_level data_level;
		uint16_t search_attrib;
		const char *pattern;
		/* these only for trans2 */
		uint16_t flags;
		uint32_t storage_type;
	} key;
};

struct search_state {
	struct search_handle *search_handle;
	void* private;
	smbcli_search_callback callback;
	struct search_cache_item *last_item;
	uint16_t count;		/* count of how many the client has received */
	uint16_t all_count;	/* count of how many we have received */
};
struct fs_attribute_info {
	uint32_t fs_attr;
	uint32_t max_file_component_length;
	struct smb_wire_string fs_type;
};

/* this is stored in ntvfs_private */
struct proxy_private {
	struct smbcli_tree *tree;
	struct smbcli_transport *transport;
	struct ntvfs_module_context *ntvfs;
	struct async_info *pending;
	struct proxy_file *files;
	struct proxy_file *closed_files;
	struct fdirmon *dirmons;
	struct search_cache *search_caches;	/* caches of find-first data */
	struct search_handle *search_handles;	/* handles onto in-progress searches */
	bool map_generic;
	bool map_trans2;
	bool cache_enabled;
	int cache_readahead;		/* default read-ahead window size */
	int cache_readaheadblock;	/* size of each read-ahead request */
	ssize_t cache_validatesize;	/* chunk size to validate, results in a read this size on remote server */
	char *remote_server;
	char *remote_share;
	struct cache_context *cache;
	struct fs_attribute_info *fs_attribute_info;
	int readahead_spare;	/* amount of pending non-user generated requests */
	bool fake_oplock;	/* useful for testing, smbclient never asks for oplock */
	bool fake_valid;	/* useful for testing, smbclient never asks for oplock */
	uint16_t nttrans_fnum;	/* we need a handle for non-proxy operations */
	bool enabled_cache_info;
	bool enabled_proxy_search;
	bool enabled_open_clone;
	bool enabled_extra_protocol;
	bool enabled_qpathinfo;
};
struct async_info_map;

/* a structure used to pass information to an async handler */
struct async_info {
	struct async_info *next, *prev;
	struct proxy_private *proxy;
	struct ntvfs_request *req;
	struct smbcli_request *c_req;
	struct proxy_file *f;
	struct async_info_map *chain;
	void *parms;
};

/* used to chain async callbacks */
struct async_info_map {
	struct async_info_map *next, *prev;
	NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
	void *parms1;
	void *parms2;
	struct async_info *async;
};

struct ntioctl_rpc_unmap_info {
	void* io;
	const struct ndr_interface_call *calls;
	const struct ndr_interface_table *table;
	uint32_t opnum;
};

/* a structure used to pass information to an async handler */
struct async_rpclite_send {
	const struct ndr_interface_call* call;
	void* struct_ptr;
};
#define SETUP_PID private->tree->session->pid = req->smbpid

#define RPCLITE_SETUP_FILE_HERE(f, h) do { \
	RPCLITE_SETUP_THIS_FILE_HERE(r->in.fnum, f, h); \
} while (0)

#define RPCLITE_SETUP_THIS_FILE_HERE(FNUM, f, h) do { \
	if ((h = ntvfs_find_handle(private->ntvfs, req, FNUM)) && \
	    (f = ntvfs_handle_get_backend_data(h, ntvfs))) { \
		FNUM = f->fnum; \
	} else { \
		r->out.result = NT_STATUS_INVALID_HANDLE; \
		return NT_STATUS_OK; \
	} \
} while (0)

#define SETUP_FILE_HERE(f) do { \
	f = ntvfs_handle_get_backend_data(io->generic.in.file.ntvfs, ntvfs); \
	if (!f) return NT_STATUS_INVALID_HANDLE; \
	io->generic.in.file.fnum = f->fnum; \
} while (0)

#define SETUP_FILE do { \
	struct proxy_file *f; \
	SETUP_FILE_HERE(f); \
} while (0)

#define SETUP_PID_AND_FILE do { \
	SETUP_PID; \
	SETUP_FILE; \
} while (0)

/* remove the MAY_ASYNC from a request, useful for testing */
#define MAKE_SYNC_REQ(req) do { req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC; } while(0)
#define PROXY_SERVER			"proxy:server"
#define PROXY_USER			"proxy:user"
#define PROXY_PASSWORD			"proxy:password"
#define PROXY_DOMAIN			"proxy:domain"
#define PROXY_SHARE			"proxy:share"
#define PROXY_USE_MACHINE_ACCT		"proxy:use-machine-account"
#define PROXY_MAP_GENERIC		"proxy:map-generic"
#define PROXY_MAP_TRANS2		"proxy:map-trans2"

#define PROXY_CACHE_ENABLED		"proxy:cache-enabled"
#define PROXY_CACHE_ENABLED_DEFAULT	false

#define PROXY_CACHE_READAHEAD		"proxy:cache-readahead"
#define PROXY_CACHE_READAHEAD_DEFAULT	32768
/* size of each read-ahead request. */
#define PROXY_CACHE_READAHEAD_BLOCK	"proxy:cache-readaheadblock"
/* the read-ahead block should always be less than max negotiated data */
#define PROXY_CACHE_READAHEAD_BLOCK_DEFAULT	4096

#define PROXY_CACHE_VALIDATE_SIZE	"proxy:validate-size"
#define PROXY_CACHE_VALIDATE_SIZE_DEFAULT 256 /* in KB; multiplied by 1024 when loaded */
#define PROXY_FAKE_OPLOCK		"proxy:fake-oplock"
#define PROXY_FAKE_OPLOCK_DEFAULT	false

#define PROXY_FAKE_VALID		"proxy:fake-valid"
#define PROXY_FAKE_VALID_DEFAULT	false

/* how many read-ahead requests can be pending per mid */
#define PROXY_REQUEST_LIMIT		"proxy:request-limit"
#define PROXY_REQUEST_LIMIT_DEFAULT	100

#define PROXY_USE_MACHINE_ACCT_DEFAULT	false
/* These two really should be: true, and possibly not even configurable */
#define PROXY_MAP_GENERIC_DEFAULT	true
#define PROXY_MAP_TRANS2_DEFAULT	true
/* is the remote server a proxy? */
#define PROXY_REMOTE_SERVER(private) \
	((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION \
	 && (strcmp("A:",(private)->tree->device)==0) \
	 && ((private)->nttrans_fnum!=0) \
	 && ((private)->enabled_extra_protocol))
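
/* Illustrative guard (a sketch, not a call-site from the original): test this
   before attempting proxy-only traffic such as the RPCLITE ioctl:

	if (PROXY_REMOTE_SERVER(private)) {
		return proxy_rpclite(ntvfs, req, io);
	}
*/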
/* A few forward declarations */
static NTSTATUS sync_chain_handler(struct smbcli_request *c_req);
static void async_chain_handler(struct smbcli_request *c_req);
static void async_read_handler(struct smbcli_request *c_req);
static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_ioctl *io);

struct smbcli_request *smbcli_ndr_request_ntioctl_send(
	struct smbcli_tree *tree,
	struct ntvfs_module_context *ntvfs,
	const struct ndr_interface_table *table,
	uint32_t opnum, void *r);
struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
					       union smb_read *io, struct proxy_file *f, struct proxy_Read *r);
NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
			    union smb_read *io, struct proxy_file *f);
struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
						union smb_write *io, struct proxy_file *f);
NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
			     union smb_write *io, struct proxy_file *f);
static NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status);
struct smb_wire_string talloc_smb_wire_string_dup(void* mem_ctx, const struct smb_wire_string* string)
{
	struct smb_wire_string result;
	result.private_length=string->private_length;
	result.s=talloc_strndup(mem_ctx, string->s, string->private_length);
	DEBUG(5,("%s: %s\n",__FUNCTION__, string->s));
	return result;
}
/* dup src into dest on mem_ctx; evaluates to true on FAILURE */
#define sws_dup(mem_ctx, dest, src) (\
	dest=talloc_smb_wire_string_dup(mem_ctx, &(src)), \
	(dest.s==NULL && src.s!=NULL))
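
/* Since sws_dup is true on failure, an illustrative caller (names here are
   hypothetical) bails out on a true result:

	if (sws_dup(mem_ctx, dst, src)) {
		return NT_STATUS_NO_MEMORY;
	}
*/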
/* These need replacing with something more canonical perhaps */
static char* talloc_dirname(void* mem_ctx, const char* path) {
	const char* dir;

	if ((dir=strrchr(path,'\\'))) {
		return talloc_strndup(mem_ctx, path, (dir - path));
	} else {
		return talloc_strdup(mem_ctx,"");
	}
}
/*
  a handler for oplock break events from the server - these need to be passed
  along to the client
*/
static bool oplock_handler(struct smbcli_transport *transport, uint16_t tid, uint16_t fnum, uint8_t level, void *p_private)
{
	struct proxy_private *private = p_private;
	NTSTATUS status;
	struct ntvfs_handle *h = NULL;
	struct proxy_file *f;
	bool result=true;

	/* because we clone handles, there may be more than one match */
	for (f=private->files; f; f=f->next) {
		if (f->fnum != fnum) continue;
		h = f->h;

		if (level==OPLOCK_BREAK_TO_LEVEL_II) {
			f->oplock=LEVEL_II_OPLOCK_RETURN;
		} else {
			/* If we don't have an oplock, then we can't rely on the cache */
			cache_handle_stale(f);
			f->oplock=NO_OPLOCK_RETURN;
		}

		DEBUG(5,("vfs_proxy: sending oplock break level %d for fnum %d\n", level, fnum));
		status = ntvfs_send_oplock_break(private->ntvfs, h, level);
		if (!NT_STATUS_IS_OK(status)) result=false;
	}

	if (!h) {
		DEBUG(5,("vfs_proxy: ignoring oplock break level %d for fnum %d\n", level, fnum));
	}

	return result;
}
/* need to pass error upstream and then close? */
static void transport_dead(struct smbcli_transport *transport, NTSTATUS status, void* p_private) {
	struct proxy_private *private = p_private;
	struct async_info *a;

	/* first cleanup pending requests */
	if (transport->pending_recv) {
		struct smbcli_request *req = transport->pending_recv;
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = status;
		DLIST_REMOVE(transport->pending_recv, req);
		if (req->async.fn) {
			req->async.fn(req);
		}
	}
//	smbsrv_terminate_connection(private->ntvfs,"Upstream hates us");
}
/*
  get file handle from clients fnum, (from ntvfs/ipc/vfs_ipc.c at metze suggestion)
*/
static struct ntvfs_handle *ntvfs_find_handle(struct ntvfs_module_context *ntvfs,
					      struct ntvfs_request *req,
					      uint16_t fnum)
{
	DATA_BLOB key;
	uint16_t _fnum;

	/*
	 * the fnum is already in host byteorder
	 * but ntvfs_handle_search_by_wire_key() expects
	 * network byteorder
	 */
	SSVAL(&_fnum, 0, fnum);
	key = data_blob_const(&_fnum, 2);

	return ntvfs_handle_search_by_wire_key(ntvfs, req, &key);
}
/*
  connect to a share - used when a tree_connect operation comes in.
*/
static NTSTATUS proxy_connect(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, const char *sharename)
{
	NTSTATUS status;
	struct proxy_private *private;
	const char *host, *user, *pass, *domain, *remote_share;
	struct smb_composite_connect io;
	struct composite_context *creq;
	struct share_config *scfg = ntvfs->ctx->config;
	int nttrans_fnum;

	struct cli_credentials *credentials;
	bool machine_account;

	/* Here we need to determine which server to connect to.
	 * For now we use parametric options, type proxy.
	 * Later we will use security=server and auth_server.c.
	 */
	host = share_string_option(scfg, PROXY_SERVER, NULL);
	user = share_string_option(scfg, PROXY_USER, NULL);
	pass = share_string_option(scfg, PROXY_PASSWORD, NULL);
	domain = share_string_option(scfg, PROXY_DOMAIN, NULL);
	remote_share = share_string_option(scfg, PROXY_SHARE, NULL);
	if (!remote_share) {
		remote_share = sharename;
	}

	machine_account = share_bool_option(scfg, PROXY_USE_MACHINE_ACCT, PROXY_USE_MACHINE_ACCT_DEFAULT);

	private = talloc_zero(ntvfs, struct proxy_private);
	if (!private) {
		return NT_STATUS_NO_MEMORY;
	}

	ntvfs->private_data = private;

	if (!host) {
		DEBUG(1,("PROXY backend: You must supply server\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	if (user && pass) {
		DEBUG(5, ("PROXY backend: Using specified password\n"));
		credentials = cli_credentials_init(private);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		DEBUG(5, ("PROXY backend: Using machine account\n"));
		credentials = cli_credentials_init(private);
		cli_credentials_set_conf(credentials, ntvfs->ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, ntvfs->ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (req->session_info->credentials) {
		DEBUG(5, ("PROXY backend: Using delegated credentials\n"));
		credentials = req->session_info->credentials;
	} else {
		DEBUG(1,("PROXY backend: NO delegated credentials found: You must supply server, user and password or the client must supply delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* connect to the server, using the smbd event context */
	io.in.dest_host = host;
	io.in.dest_ports = lp_smb_ports(ntvfs->ctx->lp_ctx);
	io.in.socket_options = lp_socket_options(ntvfs->ctx->lp_ctx);
	io.in.called_name = host;
	io.in.credentials = credentials;
	io.in.fallback_to_anonymous = false;
	io.in.workgroup = lp_workgroup(ntvfs->ctx->lp_ctx);
	io.in.service = remote_share;
	io.in.service_type = "?????";
	io.in.iconv_convenience = lp_iconv_convenience(ntvfs->ctx->lp_ctx);
	io.in.gensec_settings = lp_gensec_settings(private, ntvfs->ctx->lp_ctx);
	lp_smbcli_options(ntvfs->ctx->lp_ctx, &io.in.options);
	lp_smbcli_session_options(ntvfs->ctx->lp_ctx, &io.in.session_options);

	creq = smb_composite_connect_send(&io, private,
					  lp_resolve_context(ntvfs->ctx->lp_ctx),
					  ntvfs->ctx->event_ctx);
	status = smb_composite_connect_recv(creq, private);
	NT_STATUS_NOT_OK_RETURN(status);

	private->tree = io.out.tree;

	private->transport = private->tree->session->transport;
	SETUP_PID;
	private->ntvfs = ntvfs;

	ntvfs->ctx->fs_type = talloc_strdup(ntvfs->ctx, io.out.tree->fs_type);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->fs_type);
	ntvfs->ctx->dev_type = talloc_strdup(ntvfs->ctx, io.out.tree->device);
	NT_STATUS_HAVE_NO_MEMORY(ntvfs->ctx->dev_type);

	/* we need to receive oplock break requests from the server */
	smbcli_oplock_handler(private->transport, oplock_handler, private);

	/* we also want to know when the transport goes bad */
	private->transport->transport_dead.handler = transport_dead;
	private->transport->transport_dead.private = private;

	private->map_generic = share_bool_option(scfg, PROXY_MAP_GENERIC, PROXY_MAP_GENERIC_DEFAULT);

	private->map_trans2 = share_bool_option(scfg, PROXY_MAP_TRANS2, PROXY_MAP_TRANS2_DEFAULT);

	private->cache_validatesize = 1024 * (long long) share_int_option(scfg, PROXY_CACHE_VALIDATE_SIZE, PROXY_CACHE_VALIDATE_SIZE_DEFAULT);

	if (strcmp("A:",private->tree->device)==0) {
		private->cache_enabled = share_bool_option(scfg, PROXY_CACHE_ENABLED, PROXY_CACHE_ENABLED_DEFAULT);
		private->cache_readahead = share_int_option(scfg, PROXY_CACHE_READAHEAD, PROXY_CACHE_READAHEAD_DEFAULT);
		private->cache_readaheadblock = share_int_option(scfg, PROXY_CACHE_READAHEAD_BLOCK,
								 MIN(private->cache_readahead,PROXY_CACHE_READAHEAD_BLOCK_DEFAULT));
		private->fake_oplock = share_bool_option(scfg, PROXY_FAKE_OPLOCK, PROXY_FAKE_OPLOCK_DEFAULT);
		private->fake_valid = share_bool_option(scfg, PROXY_FAKE_VALID, PROXY_FAKE_VALID_DEFAULT);
		private->readahead_spare = share_int_option(scfg, PROXY_REQUEST_LIMIT, PROXY_REQUEST_LIMIT_DEFAULT);
		private->cache = new_cache_context(private, lp_proxy_cache_root(ntvfs->ctx->lp_ctx), host, remote_share);
		private->enabled_cache_info=true;
		private->enabled_proxy_search=true;
		private->enabled_open_clone=true;
		private->enabled_extra_protocol=true;
		private->enabled_qpathinfo=true;

		DEBUG(0,("proxy tree connect caching for: %s (%s : %s) %s read-ahead: %d\n",
			 remote_share, private->tree->device,private->tree->fs_type,
			 (private->cache_enabled)?"enabled":"disabled",
			 private->cache_readahead));
	} else {
		private->cache_enabled = false;
		DEBUG(0,("No caching or read-ahead for: %s (%s : %s)\n",
			 remote_share, private->tree->device,private->tree->fs_type));
	}

	private->remote_server = strlower_talloc(private, host);
	private->remote_share = strlower_talloc(private, remote_share);

	/* some proxy operations will not be performed on files, so open a handle
	   now that we can use for such things. We won't bother to close it on
	   shutdown, as the remote server ought to be able to close it for us
	   and we might be shutting down because the remote server went away and
	   so we don't want to delay further */
	nttrans_fnum=smbcli_nt_create_full(private->tree, "\\",
					   0 /* CreatFlags (assumed) */,
					   SEC_FILE_READ_DATA,
					   FILE_ATTRIBUTE_NORMAL,
					   NTCREATEX_SHARE_ACCESS_MASK,
					   NTCREATEX_DISP_OPEN,
					   NTCREATEX_OPTIONS_DIRECTORY,
					   NTCREATEX_IMPERSONATION_IMPERSONATION);
	if (nttrans_fnum < 0) {
		DEBUG(5,("Could not open handle for ntioctl %d\n",private->nttrans_fnum));
		//return NT_STATUS_UNSUCCESSFUL;
	}
	private->nttrans_fnum=nttrans_fnum;
	DEBUG(5,("Got nttrans handle %d\n",private->nttrans_fnum));

	return NT_STATUS_OK;
}
/*
  disconnect from a share
*/
static NTSTATUS proxy_disconnect(struct ntvfs_module_context *ntvfs)
{
	struct proxy_private *private = ntvfs->private_data;
	struct async_info *a, *an;
	struct search_cache *s;

	/* first clean up caches because they have a pending request that
	   they will try and clean up later and fail during talloc_free */
	for (s=private->search_caches; s; s=s->next) {
		if (s->notify_req) {
			talloc_unlink(s, s->notify_req);
			s->notify_req=NULL;
		}
		s->dir_fnum=65535;
	}

	/* then cleanup pending requests */
	for (a=private->pending; a; a = an) {
		an = a->next;
		smbcli_request_destroy(a->c_req);
		talloc_free(a);
	}

	talloc_free(private);
	ntvfs->private_data = NULL;

	return NT_STATUS_OK;
}
/*
  destroy an async info structure
*/
static int async_info_destructor(struct async_info *async)
{
	DLIST_REMOVE(async->proxy->pending, async);
	return 0;
}

/*
  a handler for simple async replies
  this handler can only be used for functions that don't return any
  parameters (those that just return a status code)
*/
static void async_simple(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smbcli_request_simple_recv(c_req);
	talloc_free(async);
	req->async_states->send_fn(req);
}
/* hopefully this will optimize away */
#define TYPE_CHECK(type,check) do { \
	type=check; \
	t=t; \
} while (0)
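
/* e.g. TYPE_CHECK(void (*t)(struct smbcli_request *), async_fn) expands to

	do { void (*t)(struct smbcli_request *)=async_fn; t=t; } while (0)

   so passing a handler with the wrong signature is caught at compile time;
   the dead store t=t only silences unused-variable warnings and should
   optimize away. */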
/* save some typing for the simple functions */
#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, achain, error) do { \
	if (!c_req) return (error); \
	ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain); \
	if (! c_req->async.private) return (error); \
	MAKE_SYNC_ERROR_ASYNC(c_req, error); \
} while(0)

#define ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_fn, file, achain) do { \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (async) { \
			async->parms = io; \
			async->req = req; \
			async->f = file; \
			async->proxy = private; \
			async->c_req = c_req; \
			async->chain = achain; \
			DLIST_ADD(private->pending, async); \
			c_req->async.private = async; \
			talloc_set_destructor(async, async_info_destructor); \
		} \
	} \
	c_req->async.fn = async_fn; \
} while (0)

#define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \
	if (!c_req) return NT_STATUS_UNSUCCESSFUL; \
	TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \
	{ \
		struct async_info *async; \
		async = talloc(req, struct async_info); \
		if (!async) return NT_STATUS_NO_MEMORY; \
		async->parms = io; \
		async->req = req; \
		async->f = file; \
		async->proxy = private; \
		async->c_req = c_req; \
		DLIST_ADD(private->pending, async); \
		c_req->async.private = async; \
		talloc_set_destructor(async, async_info_destructor); \
	} \
	c_req->async.fn = async_fn; \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	MAKE_SYNC_ERROR_ASYNC(c_req, NT_STATUS_UNSUCCESSFUL); \
	return NT_STATUS_OK; \
} while (0)
static void vasync_timer(struct event_context * ec, struct timed_event *te,
			 struct timeval tv, void *data) {
	struct smbcli_request *c_req = talloc_get_type_abort(data, struct smbcli_request);

	DEBUG(5,("Calling async fn %p on c_req %p\n",c_req->async.fn, c_req));
	c_req->async.fn(c_req);
}
#define MAKE_SYNC_ERROR_ASYNC(c_req, error) do { \
	if (c_req && c_req->state >= SMBCLI_REQUEST_DONE) { \
		/* NOTE: the timer struct is allocated against c_req, so if the c_req */ \
		/* handler is called manually, the timer will be destroyed with c_req */ \
		if (! event_add_timed(private->ntvfs->ctx->event_ctx, c_req, \
				      timeval_current_ofs(0, 0), \
				      vasync_timer, \
				      c_req)) return (error); \
		DEBUG(5,("Queueing async fn %p on c_req %p\n",c_req->async.fn, c_req)); \
	} \
} while(0)
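
/* The zero-interval timer above is what lets us deal with c_req send errors
   asynchronously: if the request is already DONE (or failed) by the time the
   tail macro runs, the c_req->async.fn callback is deferred to the next
   event-loop iteration instead of being invoked from the caller's stack. */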
#define ASYNC_RECV_TAIL(io, async_fn) ASYNC_RECV_TAIL_F(io, async_fn, NULL)

#define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple)
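
/* Typical call-site shape (sketch; see proxy_unlink below for a real one).
   ASYNC_RECV_TAIL_F expects c_req, req and private as locals in scope; it
   queues an async_info, marks the ntvfs request async, and returns
   NT_STATUS_OK from the enclosing function:

	c_req = smb_raw_unlink_send(private->tree, unl);
	SIMPLE_ASYNC_TAIL;
*/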
/* managers for chained async-callback.
   The model of async handlers has changed.
   backend async functions should be of the form:
     NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS);
   And if async->c_req is NULL then an earlier chain has already rec'd the
   request.
   ADD_ASYNC_RECV_TAIL is used to add chained handlers.
   The chained handler manager async_chain_handler is installed the usual way
   and uses the io pointer to point to the first async_map record:
     static void async_chain_handler(struct smbcli_request *c_req).
   It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed
   and often desirable.
*/

/* async_chain_handler has an async_info struct so that it can be safely inserted
   into pending, but the io struct will point to (struct async_info_map *)
   chained async_info_map will be in c_req->async.private */
#define ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
} while(0)

#define ASYNC_RECV_TAIL_HANDLER(io, async_fn) do { \
	if (c_req->async.fn) return (NT_STATUS_UNSUCCESSFUL); \
	ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, f, c_req->async.private, NT_STATUS_UNSUCCESSFUL); \
	req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; \
	return NT_STATUS_OK; \
} while(0)
/* verbose trace of ADD_ASYNC_RECV_TAIL arguments; the macro name below is an
   assumption, the original definition line did not survive */
#define DUMP_ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn) \
	DEBUG(0,("ADD_ASYNC_RECV_TAIL %s %s:%d\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%p=%s %p\n\t%s\n", __FUNCTION__,__FILE__,__LINE__, \
		 creq, creq?talloc_get_name(creq):NULL, creq?talloc_get_name(creq):NULL, \
		 io1, io1?talloc_get_name(io1):NULL, io1?talloc_get_name(io1):NULL, \
		 io2, io2?talloc_get_name(io2):NULL, io2?talloc_get_name(io2):NULL, \
		 file, file?"file":"null", file?"file":"null", #async_fn))
#define ADD_ASYNC_RECV_TAIL(creq, io1, io2, file, async_fn, error) do { \
	if (! creq) { \
		DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no creq\n",__FUNCTION__)); \
		return (error); \
	} else { \
		struct async_info_map *async_map=talloc(NULL, struct async_info_map); \
		if (! async_map) { \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map\n",__FUNCTION__)); \
			return (error); \
		} \
		async_map->async=talloc(async_map, struct async_info); \
		if (! async_map->async) { \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL no async_map->async\n",__FUNCTION__)); \
			return (error); \
		} \
		async_map->parms1=io1; \
		async_map->parms2=io2; \
		async_map->fn=async_fn; \
		async_map->async->parms = io1; \
		async_map->async->req = req; \
		async_map->async->f = file; \
		async_map->async->proxy = private; \
		async_map->async->c_req = creq; \
		/* If async_chain_handler is installed, get the list from param */ \
		if (creq->async.fn == async_chain_handler || creq->async.fn == async_read_handler) { \
			struct async_info *i=creq->async.private; \
			DLIST_ADD_END(i->chain, async_map, struct async_info_map *); \
		} else if (creq->async.fn) { \
			/* incompatible handler installed */ \
			DEBUG(5,("%s: ADD_ASYNC_RECV_TAIL incompatible handler already installed\n",__FUNCTION__)); \
			return (error); \
		} else { \
			DLIST_ADD_END(creq->async.private, async_map, struct async_info_map *); \
		} \
	} \
} while(0)
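
/* Illustrative chaining (a sketch): queue a chained handler of the
   NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS) form, then
   install the chain manager; adding tails first is also safe:

	ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qpathinfo, NT_STATUS_NO_MEMORY);
	ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);

   When a chained fn runs with async->c_req==NULL, an earlier link in the
   chain has already received the reply. */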
static void async_dirmon_notify(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	struct fdirmon *dirmon;
	struct fdirmon_callback *callback;
	struct proxy_private *proxy = async->proxy;
	int f;

	NTSTATUS status;

	dirmon = talloc_get_type_abort((void*)async->f, struct fdirmon);
	DEBUG(5,("%s: dirmon %s invalidated\n",__LOCATION__, dirmon->dir));

	status = smb_raw_changenotify_recv(c_req, req, async->parms);
	DEBUG(5,("%s: update status %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));

	if (dirmon->notify_req) {
		talloc_unlink(dirmon, dirmon->notify_req);
		dirmon->notify_req=NULL;
	}
	/* Mark closed cached files as invalid if they changed, as they will be
	   assuming cache is valid if a dirmon exists and hasn't invalidated it */
	for(f=0; f<dirmon->notify_io->nttrans.out.num_changes; f++) {
		DEBUG(1,("DIRMON: %s changed\n",dirmon->notify_io->nttrans.out.changes[f].name.s));
	}
	DLIST_FOR_EACH(dirmon->callbacks, callback, callback->fn(callback->data, dirmon));
	/* So nothing can find it even if there are still in-use references */
	DLIST_REMOVE(proxy->dirmons, dirmon);
	if (dirmon->dir_fnum!=65535) {
		struct smbcli_request *req;
		union smb_close close_parms;
		close_parms.close.level = RAW_CLOSE_CLOSE;
		close_parms.close.in.file.fnum = dirmon->dir_fnum;
		close_parms.close.in.write_time = 0;

		/* destructor may be called from a notify response and won't be able
		   to wait on this close response, not that we care anyway */
		req=smb_raw_close_send(proxy->tree, &close_parms);

		DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, dirmon->dir_fnum, req));
		dirmon->dir_fnum=65535;
	}
	talloc_free(async);
	talloc_free(dirmon);
}
struct fdirmon* get_fdirmon(struct proxy_private *proxy, const char* path, bool dir_only) {
	const char *file;
	int pathlen;

	if ((file=strrchr(path,'\\'))) {
		if (dir_only) {
			pathlen = file - path;
			file++;
		} else {
			pathlen=strlen(path);
		}
	} else {
		file = path;
		pathlen = 0;
	}

	struct fdirmon *dirmon;
	/* see if we have a matching dirmon */
	DLIST_FIND(proxy->dirmons, dirmon, (strlen(dirmon->dir) == pathlen && fstrncmp(path, dirmon->dir, pathlen)==0));
	if (! dirmon) {
		int saved_timeout;

		DEBUG(5,("%s: allocating new dirmon for %s\n",__FUNCTION__,path));
		dirmon=talloc_zero(proxy, struct fdirmon);
		if (! dirmon) {
			goto error;
		}
		if (! (dirmon->dir=talloc_strndup(dirmon, path, pathlen))) {
			goto error;
		}
		if (! (dirmon->notify_io=talloc_zero(dirmon, union smb_notify))) {
			goto error;
		}

		dirmon->dir_fnum=smbcli_nt_create_full(proxy->tree, dirmon->dir,
						       0 /* CreatFlags (assumed) */,
						       SEC_FILE_READ_DATA,
						       FILE_ATTRIBUTE_NORMAL,
						       NTCREATEX_SHARE_ACCESS_MASK,
						       NTCREATEX_DISP_OPEN,
						       NTCREATEX_OPTIONS_DIRECTORY,
						       NTCREATEX_IMPERSONATION_IMPERSONATION);

		if (dirmon->dir_fnum==65535) {
			DEBUG(5,("%s: smbcli_nt_create_full %s failed\n",__FUNCTION__, dirmon->dir));
			goto error;
		}

		saved_timeout = proxy->transport->options.request_timeout;
		/* request notify changes on cache before we start to fill it */
		dirmon->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
		dirmon->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
		dirmon->notify_io->nttrans.in.file.fnum=dirmon->dir_fnum;
		dirmon->notify_io->nttrans.in.recursive=false;
		dirmon->notify_io->nttrans.in.buffer_size=10240;
		proxy->transport->options.request_timeout = 0;
		dirmon->notify_req=smb_raw_changenotify_send(proxy->tree, dirmon->notify_io);
		/* Make the request hang around so we can tell if it needs cancelling */
		proxy->transport->options.request_timeout = saved_timeout;

		if (! dirmon->notify_req) {
			goto error;
		} else {
			struct ntvfs_request *req=NULL;
			struct smbcli_request *c_req=dirmon->notify_req;
			union smb_notify *io=dirmon->notify_io;
			struct proxy_private *private=proxy;

			talloc_reference(dirmon, dirmon->notify_req);
			ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_dirmon_notify,
						    (void*) dirmon, c_req->async.private);
			DLIST_ADD(private->dirmons, dirmon);
		}
	}

	return dirmon;
error:
	DEBUG(3,("%s: failed to allocate dirmon\n",__FUNCTION__));
	talloc_free(dirmon);
	return NULL;
}
bool dirmon_add_callback(struct fdirmon *dirmon, fdirmon_callback_fn *fn, void* data) {
	struct fdirmon_callback *callback=talloc_zero(dirmon, struct fdirmon_callback);
	if (! callback) {
		return false;
	}
	callback->data=data;
	callback->fn=fn;
	DLIST_ADD(dirmon->callbacks, callback);
	return true;
}
/* try and unify cache open function interface with this macro */
#define cache_open(cache_context, f, io, oplock, readahead_window) \
	(io->generic.level == RAW_OPEN_NTCREATEX && \
	 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) \
	?(cache_fileid_open(cache_context, f, (const uint64_t*)(io->generic.in.fname), oplock, readahead_window)) \
	:(cache_filename_open(cache_context, f, SMB_OPEN_IN_FILE(io), oplock, readahead_window))
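
/* Illustrative call (a sketch; field names as used elsewhere in this file):

	f->cache = cache_open(private->cache, f, io, f->oplock,
			      private->cache_readahead);

   This picks cache_fileid_open() for NTCREATEX opens by file-id and
   cache_filename_open() otherwise. */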
struct search_cache* find_partial_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
	struct search_cache* result;
	DLIST_FIND(search_cache, result,
		   (result->key.level == search_cache_key->level) &&
		   (result->key.data_level == search_cache_key->data_level) &&
		   (result->key.search_attrib == search_cache_key->search_attrib) &&
		   (result->key.flags == search_cache_key->flags) &&
		   (result->key.storage_type == search_cache_key->storage_type) &&
		   (fstrcmp(result->key.pattern, search_cache_key->pattern) == 0));
	DEBUG(5,("%s: found %p\n",__LOCATION__,result));
	return result;
}

struct search_cache* find_search_cache(struct search_cache* search_cache, const struct search_cache_key* search_cache_key) {
	struct search_cache* result = find_partial_search_cache(search_cache, search_cache_key);
	if (result && result->status == SEARCH_CACHE_COMPLETE) {
		DEBUG(5,("%s: found complete %p\n",__LOCATION__,result));
		return result;
	}
	DEBUG(5,("%s: found INCOMPLETE %p\n",__LOCATION__,result));
	return NULL;
}
uint16_t smbsrv_fnum(struct ntvfs_handle *h) {
	uint16_t fnum;
	smbsrv_push_fnum((uint8_t *)&fnum, 0, h);
	return SVAL(&fnum, 0);
}
static void async_search_cache_notify(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	struct search_cache *search_cache;
	NTSTATUS status;

	DEBUG(5,("%s: search cache %p invalidated\n",__LOCATION__, (void*)async->f));

	search_cache = talloc_get_type_abort((void*)async->f, struct search_cache);

	status = smb_raw_changenotify_recv(c_req, req, async->parms);

	DEBUG(5,("%s: update status is %s\n",__LOCATION__, get_friendly_nt_error_msg (status)));

	search_cache->notify_req=NULL;
	/* dispose of the search_cache */
	search_cache->status=SEARCH_CACHE_DEAD;
	/* So nothing can find it even if there are still in-use references */
	DLIST_REMOVE(search_cache->proxy->search_caches, search_cache);
	/* free it */
	//talloc_steal(async, search_cache);
	//talloc_unlink(async->proxy, search_cache);
	if (search_cache->dir_fnum!=65535) {
		struct smbcli_request *req;
		union smb_close close_parms;
		close_parms.close.level = RAW_CLOSE_CLOSE;
		close_parms.close.in.file.fnum = search_cache->dir_fnum;
		close_parms.close.in.write_time = 0;

		/* destructor may be called from a notify response and won't be able
		   to wait on this close response, not that we care anyway */
		req=smb_raw_close_send(search_cache->proxy->tree, &close_parms);

		DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, search_cache->dir_fnum, req));
		search_cache->dir_fnum=65535;
	}
	talloc_free(async);
}
/*
  destroy a search handle
*/
static int search_handle_destructor(struct search_handle *s)
{
	DLIST_REMOVE(s->proxy->search_handles, s);
	DEBUG(5,("%s: handle destructor %p\n",__LOCATION__,s));
	return 0;
}

static int search_cache_destructor(struct search_cache *s)
{
	NTSTATUS status;

	DLIST_REMOVE(s->proxy->search_caches, s);
	DEBUG(5,("%s: cache destructor %p\n",__LOCATION__,s));
	if (s->notify_req) {
		status=smb_raw_ntcancel(s->notify_req);
		s->notify_req=NULL;
		DEBUG(5,("%s: Cancel notification %s\n",__LOCATION__,get_friendly_nt_error_msg (status)));
	}
	if (s->dir_fnum!=65535) {
		struct smbcli_request *req;
		union smb_close close_parms;
		close_parms.close.level = RAW_CLOSE_CLOSE;
		close_parms.close.in.file.fnum = s->dir_fnum;
		close_parms.close.in.write_time = 0;

		/* destructor may be called from a notify response and won't be able
		   to wait on this close response, not that we care anyway */
		req=smb_raw_close_send(s->proxy->tree, &close_parms);

		DEBUG(5,("%s: Close dir_fnum: %d %p\n",__LOCATION__, s->dir_fnum, req));
		s->dir_fnum=65535;
	}
	return 0;
}
struct search_cache* new_search_cache(struct proxy_private *private, struct search_cache_key* key) {
	/* need to opendir the folder being searched so we can get a notification */
	uint16_t dir_fnum=65535;
	struct search_cache *search_cache=NULL;

	search_cache=talloc_zero(private, struct search_cache);
	DEBUG(5,("%s: Start new cache %p for %s\n",__LOCATION__, search_cache, key->pattern));
	if (! search_cache) {
		return NULL;
	}
	search_cache->proxy=private;
	if (! (search_cache->dir=talloc_dirname(search_cache, key->pattern))) {
		goto error;
	}
	if (! (search_cache->notify_io=talloc_zero(search_cache, union smb_notify))) {
		goto error;
	}
	search_cache->key=*key;
	/* make private copy of pattern now that we need it AND have something to own it */
	if (! (search_cache->key.pattern=talloc_strdup(search_cache, search_cache->key.pattern))) {
		goto error;
	}
	dir_fnum=smbcli_nt_create_full(private->tree, search_cache->dir,
				       0 /* CreatFlags (assumed) */,
				       SEC_FILE_READ_DATA,
				       FILE_ATTRIBUTE_NORMAL,
				       NTCREATEX_SHARE_ACCESS_MASK,
				       NTCREATEX_DISP_OPEN,
				       NTCREATEX_OPTIONS_DIRECTORY,
				       NTCREATEX_IMPERSONATION_IMPERSONATION);
	DEBUG(5,("%s: %d=opendir on %s\n",__LOCATION__,dir_fnum, search_cache->dir));
	if (dir_fnum==65535) {
		goto error;
	}
	/* The destructor will close the handle */
	talloc_set_destructor(search_cache, search_cache_destructor);
	search_cache->dir_fnum=dir_fnum;
	DEBUG(5,("%s: Start new cache %p, dir_fnum %d\n",__LOCATION__, search_cache, dir_fnum));

	{
		int saved_timeout = private->transport->options.request_timeout;

		/* request notify changes on cache before we start to fill it */
		search_cache->notify_io->nttrans.level=RAW_NOTIFY_NTTRANS;
		search_cache->notify_io->nttrans.in.completion_filter=FILE_NOTIFY_CHANGE_ANY;
		search_cache->notify_io->nttrans.in.file.fnum=dir_fnum;
		search_cache->notify_io->nttrans.in.recursive=false;
		search_cache->notify_io->nttrans.in.buffer_size=1024;
		private->transport->options.request_timeout = 0;
		search_cache->notify_req=smb_raw_changenotify_send(private->tree, search_cache->notify_io);
		/* Make the request hang around so we can tell if it needs cancelling */
		private->transport->options.request_timeout = saved_timeout;

		if (! search_cache->notify_req) {
			goto error;
		} else {
			struct ntvfs_request *req=NULL;
			struct smbcli_request *c_req=search_cache->notify_req;
			union smb_notify *io=search_cache->notify_io;

			talloc_reference(search_cache, search_cache->notify_req);
			ASYNC_RECV_TAIL_F_ORPHAN_NE(io, async_search_cache_notify,
						    (void*) search_cache, c_req->async.private);
			DLIST_ADD_END(private->search_caches, search_cache, struct search_cache*);
		}
	}

	return search_cache;
error:
	talloc_free(search_cache);
	return NULL;
}
/*
  delete a file - the dirtype specifies the file types to include in the search.
  The name can contain PROXY wildcards, but rarely does (except with OS/2 clients)
*/
static NTSTATUS proxy_unlink(struct ntvfs_module_context *ntvfs,
			     struct ntvfs_request *req, union smb_unlink *unl)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_unlink(private->tree, unl);
	}

	c_req = smb_raw_unlink_send(private->tree, unl);

	SIMPLE_ASYNC_TAIL;
}
/*
  a handler for async ioctl replies
*/
static void async_ioctl(struct smbcli_request *c_req)
{
	struct async_info *async = c_req->async.private;
	struct ntvfs_request *req = async->req;
	req->async_states->status = smb_raw_ioctl_recv(c_req, req, async->parms);
	talloc_free(async);
	req->async_states->send_fn(req);
}

/*
  ioctl interface
*/
static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs,
			    struct ntvfs_request *req, union smb_ioctl *io)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	if (io->ntioctl.level == RAW_IOCTL_NTIOCTL
	    && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) {
		return proxy_rpclite(ntvfs, req, io);
	}

	SETUP_PID_AND_FILE;

	/* see if the front end will allow us to perform this
	   function asynchronously. */
	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_ioctl(private->tree, req, io);
	}

	c_req = smb_raw_ioctl_send(private->tree, io);

	ASYNC_RECV_TAIL(io, async_ioctl);
}
/*
  check if a directory exists
*/
static NTSTATUS proxy_chkpath(struct ntvfs_module_context *ntvfs,
			      struct ntvfs_request *req, union smb_chkpath *cp)
{
	struct proxy_private *private = ntvfs->private_data;
	struct smbcli_request *c_req;

	SETUP_PID;

	if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		return smb_raw_chkpath(private->tree, cp);
	}

	c_req = smb_raw_chkpath_send(private->tree, cp);

	SIMPLE_ASYNC_TAIL;
}
static bool find_search_cache_item(const char* path,
				   struct search_cache **search_cache,
				   struct search_cache_item **item) {
	struct search_cache *s=*search_cache;
	struct search_cache_item *i=*item;
	const char* file;
	int dir_len;

	/* see if we can satisfy from a directory cache */
	DEBUG(5,("%s: Looking for pathinfo: '%s'\n",__LOCATION__,path));
	if ((file=strrchr(path,'\\'))) {
		dir_len = file - path;
		/* point past the \ */
		file++;
	} else {
		file = path;
		dir_len = 0;
	}
	/* convert empty path to . so we can find it in the cache */
	if (! *file) {
		file=".";
	}
	DEBUG(5,("%s: Path='%s' File='%s'\n",__LOCATION__,path, file));

	/* Note we don't care if the cache is partial, as long as it has a hit */
	while(s) {
		/* One day we may support all directory levels */
		DLIST_FIND(s, s, (s->key.data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
				  strlen(s->dir)==dir_len &&
				  fstrncmp(s->dir, path, dir_len)==0));
		if (! s) {
			break;
		}
		DEBUG(5,("%s: found cache %p\n",__LOCATION__,s));
		/* search s for io->generic.in.file.path */
		DLIST_FIND(s->items, i, (i->data_level == RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO &&
					 ((i->file->both_directory_info.name.s &&
					   fstrcmp(i->file->both_directory_info.name.s, file) ==0) ||
					  (i->file->both_directory_info.short_name.s &&
					   fstrcmp(i->file->both_directory_info.short_name.s, file)==0)
					 )));
		DEBUG(5,("%s: found cache %p item %p\n",__LOCATION__,s, i));
		if (i) {
			*item=i;
			*search_cache=s;
			return true;
		}
		s=s->next;
		DEBUG(5,("%s: continue search at %p\n",__LOCATION__,s));
	}
	*item=i;
	*search_cache=s;
	return false;
}
static void proxy_set_cache_info(struct file_metadata *metadata, struct proxy_GetInfo *r) {
	/* only set this if it was responded... I think they all are responded... */
	metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_BASIC_INFORMATION)
	    /* || NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO) */) {
		metadata->info_data.create_time=r->out.info_data[0].create_time;
		metadata->info_data.access_time=r->out.info_data[0].access_time;
		metadata->info_data.write_time=r->out.info_data[0].write_time;
		metadata->info_data.change_time=r->out.info_data[0].change_time;
		metadata->info_data.attrib=r->out.info_data[0].attrib;
		metadata->valid|=valid_RAW_FILEINFO_BASIC_INFORMATION;
	}
	metadata->info_data.status_RAW_FILEINFO_ALL_INFO=r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALL_INFO)) {
		metadata->info_data.ea_size=r->out.info_data[0].ea_size;
		metadata->info_data.alloc_size=r->out.info_data[0].alloc_size;
		metadata->info_data.size=r->out.info_data[0].size;
		metadata->info_data.nlink=r->out.info_data[0].nlink;
		/* Are we duping this right? Would talloc_reference be ok? */
		//f->metadata->info_data.fname=
		metadata->info_data.fname.s=talloc_memdup(metadata, r->out.info_data[0].fname.s, r->out.info_data[0].fname.count);
		metadata->info_data.fname.count=r->out.info_data[0].fname.count;
		metadata->info_data.delete_pending=r->out.info_data[0].delete_pending;
		metadata->info_data.directory=r->out.info_data[0].directory;
		metadata->valid|=valid_RAW_FILEINFO_ALL_INFO | valid_RAW_FILEINFO_STANDARD_INFO;
	}
	metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO=r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_COMPRESSION_INFO)) {
		metadata->info_data.compressed_size=r->out.info_data[0].compressed_size;
		metadata->info_data.format=r->out.info_data[0].format;
		metadata->info_data.unit_shift=r->out.info_data[0].unit_shift;
		metadata->info_data.chunk_shift=r->out.info_data[0].chunk_shift;
		metadata->info_data.cluster_shift=r->out.info_data[0].cluster_shift;
		metadata->valid|=valid_RAW_FILEINFO_COMPRESSION_INFO;
	}
	metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_INTERNAL_INFORMATION)) {
		metadata->info_data.file_id=r->out.info_data[0].file_id;
		metadata->valid|=valid_RAW_FILEINFO_INTERNAL_INFORMATION;
	}
	metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ACCESS_INFORMATION)) {
		metadata->info_data.access_flags=r->out.info_data[0].access_flags;
		metadata->valid|=valid_RAW_FILEINFO_ACCESS_INFORMATION;
	}
	metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_POSITION_INFORMATION)) {
		metadata->info_data.position=r->out.info_data[0].position;
		metadata->valid|=valid_RAW_FILEINFO_POSITION_INFORMATION;
	}
	metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_MODE_INFORMATION)) {
		metadata->info_data.mode=r->out.info_data[0].mode;
		metadata->valid|=valid_RAW_FILEINFO_MODE_INFORMATION;
	}
	metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ALIGNMENT_INFORMATION)) {
		metadata->info_data.alignment_requirement=r->out.info_data[0].alignment_requirement;
		metadata->valid|=valid_RAW_FILEINFO_ALIGNMENT_INFORMATION;
	}
	metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION)) {
		metadata->info_data.reparse_tag=r->out.info_data[0].reparse_tag;
		metadata->info_data.reparse_attrib=r->out.info_data[0].reparse_attrib;
		metadata->valid|=valid_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
	}
	metadata->info_data.status_RAW_FILEINFO_STREAM_INFO=r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO;
	if (NT_STATUS_IS_OK(r->out.info_data[0].status_RAW_FILEINFO_STREAM_INFO)) {
		metadata->info_data.num_streams=r->out.info_data[0].num_streams;
		talloc_free(metadata->info_data.streams);
		metadata->info_data.streams=talloc_steal(metadata, r->out.info_data[0].streams);
		metadata->valid|=valid_RAW_FILEINFO_STREAM_INFO;
	}
}
1425 /* satisfy a file-info request from cache */
1426 NTSTATUS proxy_cache_info(union smb_fileinfo *io, struct file_metadata *metadata, bool *valid)
1428 #define SET_VALID(FLAG) do { \
1429 if (valid) *valid=!!(metadata->valid & valid_ ## FLAG); \
1430 DEBUG(5,("%s check %s=%d (%x)\n",__FUNCTION__, #FLAG, !!(metadata->valid & valid_ ## FLAG), metadata->valid)); \
1431 } while(0)
1432 /* and now serve the request from the cache */
1433 switch(io->generic.level) {
1434 case RAW_FILEINFO_BASIC_INFORMATION:
1435 SET_VALID(RAW_FILEINFO_BASIC_INFORMATION);
1436 io->basic_info.out.create_time=metadata->info_data.create_time;
1437 io->basic_info.out.access_time=metadata->info_data.access_time;
1438 io->basic_info.out.write_time=metadata->info_data.write_time;
1439 io->basic_info.out.change_time=metadata->info_data.change_time;
1440 io->basic_info.out.attrib=metadata->info_data.attrib;
1441 return metadata->info_data.status_RAW_FILEINFO_BASIC_INFORMATION;
1442 case RAW_FILEINFO_ALL_INFO:
1443 SET_VALID(RAW_FILEINFO_ALL_INFO);
1444 io->all_info.out.create_time=metadata->info_data.create_time;
1445 io->all_info.out.access_time=metadata->info_data.access_time;
1446 io->all_info.out.write_time=metadata->info_data.write_time;
1447 io->all_info.out.change_time=metadata->info_data.change_time;
1448 io->all_info.out.attrib=metadata->info_data.attrib;
1449 io->all_info.out.alloc_size=metadata->info_data.alloc_size;
1450 io->all_info.out.size=metadata->info_data.size;
1451 io->all_info.out.directory=metadata->info_data.directory;
1452 io->all_info.out.nlink=metadata->info_data.nlink;
1453 io->all_info.out.delete_pending=metadata->info_data.delete_pending;
1454 io->all_info.out.fname.s=metadata->info_data.fname.s;
1455 io->all_info.out.fname.private_length=metadata->info_data.fname.count;
1456 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1457 case RAW_FILEINFO_STANDARD_INFO:
1458 case RAW_FILEINFO_STANDARD_INFORMATION:
1459 SET_VALID(RAW_FILEINFO_ALL_INFO);
1460 io->standard_info.out.alloc_size=metadata->info_data.alloc_size;
1461 io->standard_info.out.size=metadata->info_data.size;
1462 io->standard_info.out.directory=metadata->info_data.directory;
1463 io->standard_info.out.nlink=metadata->info_data.nlink; /* may be wrong */
1464 io->standard_info.out.delete_pending=metadata->info_data.delete_pending;
1465 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1466 case RAW_FILEINFO_EA_INFO:
1467 case RAW_FILEINFO_EA_INFORMATION:
1468 SET_VALID(RAW_FILEINFO_ALL_INFO);
1469 io->ea_info.out.ea_size=metadata->info_data.ea_size;
1470 return metadata->info_data.status_RAW_FILEINFO_ALL_INFO;
1471 case RAW_FILEINFO_COMPRESSION_INFO:
1472 SET_VALID(RAW_FILEINFO_COMPRESSION_INFO);
1473 io->compression_info.out.compressed_size=metadata->info_data.compressed_size;
1474 io->compression_info.out.format=metadata->info_data.format;
1475 io->compression_info.out.unit_shift=metadata->info_data.unit_shift;
1476 io->compression_info.out.chunk_shift=metadata->info_data.chunk_shift;
1477 io->compression_info.out.cluster_shift=metadata->info_data.cluster_shift;
1478 return metadata->info_data.status_RAW_FILEINFO_COMPRESSION_INFO;
1479 case RAW_FILEINFO_INTERNAL_INFORMATION:
1480 SET_VALID(RAW_FILEINFO_INTERNAL_INFORMATION);
1481 io->internal_information.out.file_id=metadata->info_data.file_id;
1482 return metadata->info_data.status_RAW_FILEINFO_INTERNAL_INFORMATION;
1483 case RAW_FILEINFO_ACCESS_INFORMATION:
1484 SET_VALID(RAW_FILEINFO_ACCESS_INFORMATION);
1485 io->access_information.out.access_flags=metadata->info_data.access_flags;
1486 return metadata->info_data.status_RAW_FILEINFO_ACCESS_INFORMATION;
1487 case RAW_FILEINFO_POSITION_INFORMATION:
1488 SET_VALID(RAW_FILEINFO_POSITION_INFORMATION);
1489 io->position_information.out.position=metadata->info_data.position;
1490 return metadata->info_data.status_RAW_FILEINFO_POSITION_INFORMATION;
1491 case RAW_FILEINFO_MODE_INFORMATION:
1492 SET_VALID(RAW_FILEINFO_MODE_INFORMATION);
1493 io->mode_information.out.mode=metadata->info_data.mode;
1494 return metadata->info_data.status_RAW_FILEINFO_MODE_INFORMATION;
1495 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1496 SET_VALID(RAW_FILEINFO_ALIGNMENT_INFORMATION);
1497 io->alignment_information.out.alignment_requirement=metadata->info_data.alignment_requirement;
1498 return metadata->info_data.status_RAW_FILEINFO_ALIGNMENT_INFORMATION;
1499 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1500 SET_VALID(RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
1501 io->attribute_tag_information.out.reparse_tag=metadata->info_data.reparse_tag;
1502 io->attribute_tag_information.out.attrib=metadata->info_data.reparse_attrib;
1503 return metadata->info_data.status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION;
1504 case RAW_FILEINFO_STREAM_INFO:
1505 case RAW_FILEINFO_STREAM_INFORMATION:
1506 SET_VALID(RAW_FILEINFO_STREAM_INFO);
1507 io->stream_info.out.num_streams=metadata->info_data.num_streams;
1508 if (metadata->info_data.num_streams > 0) {
1509 io->stream_info.out.streams = talloc_zero_array(io, struct stream_struct, metadata->info_data.num_streams);
1510 int c;
1511 if (! io->stream_info.out.streams) {
1512 if (valid) *valid=false;
1513 io->stream_info.out.num_streams=0;
1514 return NT_STATUS_NO_MEMORY;
1516 for (c=0; c<io->stream_info.out.num_streams; c++) {
1517 io->stream_info.out.streams[c].size = metadata->info_data.streams[c].size;
1518 io->stream_info.out.streams[c].alloc_size = metadata->info_data.streams[c].alloc_size;
1519 io->stream_info.out.streams[c].stream_name.s = talloc_reference(io, metadata->info_data.streams[c].stream_name.s);
1520 io->stream_info.out.streams[c].stream_name.private_length = metadata->info_data.streams[c].stream_name.count;
1522 } else {
1523 io->stream_info.out.streams=NULL;
1525 return metadata->info_data.status_RAW_FILEINFO_STREAM_INFO;
1526 default:
1527 DEBUG(5,("%s: Unknown request\n",__FUNCTION__));
1528 if (valid) *valid=false;
1529 return NT_STATUS_INTERNAL_ERROR;
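/* Illustrative sketch only: metadata->valid accumulates one valid_* flag
   per info level as replies are cached, so answering from the cache is just
   a mask test. The flag names are the ones used above; this helper itself
   is hypothetical and not part of the module. */
static bool metadata_level_cached(const struct file_metadata *metadata,
				  uint32_t valid_flag)
{
	/* e.g. metadata_level_cached(f->metadata, valid_RAW_FILEINFO_ALL_INFO) */
	return (metadata->valid & valid_flag) != 0;
}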
1534 a handler for async qpathinfo replies
1536 static void async_qpathinfo(struct smbcli_request *c_req)
1538 struct async_info *async = c_req->async.private;
1539 struct ntvfs_request *req = async->req;
1540 req->async_states->status = smb_raw_pathinfo_recv(c_req, req, async->parms);
1541 talloc_free(async);
1542 req->async_states->send_fn(req);
1545 static NTSTATUS async_proxy_qpathinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1547 struct proxy_private *private = async->proxy;
1548 struct smbcli_request *c_req = async->c_req;
1549 struct ntvfs_request *req = async->req;
1550 struct proxy_file *f = talloc_get_type_abort(async->f, struct proxy_file);
1551 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1552 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1554 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1555 req->async_states->status=status;
1557 /* Checking the overall status is good, but we must also check the status of each sub-message */
1558 NT_STATUS_NOT_OK_RETURN(status);
1560 /* populate the cache, and then fill the request from the cache */
1561 /* Assuming that r->in.count == 1 */
1562 SMB_ASSERT(r->out.count==1);
1563 DEBUG(5,("%s: Combined status of meta request: %s\n",__LOCATION__, get_friendly_nt_error_msg (r->out.info_data[0].status)));
1564 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1566 DEBUG(5,("%s: will set cache item=%p metadata=%p r=%p\n",__LOCATION__, f, f?f->metadata:NULL, r));
1567 proxy_set_cache_info(f->metadata, r);
1569 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1570 DEBUG(5,("%s: set final response of original request to: %s\n",__LOCATION__, get_friendly_nt_error_msg (req->async_states->status)));
1572 return req->async_states->status;
1575 static void async_qpathinfo_notify(void* data, struct fdirmon* dirmon) {
1576 struct proxy_file* file=data;
1578 DEBUG(5,("%s: qpathinfo cache %s destroyed\n",__LOCATION__,file->filename));
1579 DLIST_REMOVE(file->proxy->closed_files, file);
1580 talloc_free(file);
1584 return info on a pathname
1586 static NTSTATUS proxy_qpathinfo(struct ntvfs_module_context *ntvfs,
1587 struct ntvfs_request *req, union smb_fileinfo *io)
1589 struct proxy_private *private = ntvfs->private_data;
1590 struct smbcli_request *c_req;
1591 struct proxy_file *f=NULL;
1592 const char* path;
1594 SETUP_PID;
1596 /* Look for closed files */
1597 if (private->enabled_qpathinfo) {
1598 int len=strlen(io->generic.in.file.path)+1;
1599 DEBUG(5,("%s: Looking for cached metadata for: %s\n",__LOCATION__,io->generic.in.file.path));
1600 DLIST_FIND(private->closed_files, f,
1601 (len==f->filename_size && fstrncmp(io->generic.in.file.path, f->filename, f->filename_size)==0));
1602 if (f) {
1603 /* stop cache going away while we are using it */
1604 talloc_reference(req, f);
1607 /* upgrade the request */
1608 switch(io->generic.level) {
1609 case RAW_FILEINFO_STANDARD_INFO:
1610 case RAW_FILEINFO_STANDARD_INFORMATION:
1611 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1612 case RAW_FILEINFO_ALL_INFO:
1613 case RAW_FILEINFO_COMPRESSION_INFO:
1614 case RAW_FILEINFO_INTERNAL_INFORMATION:
1615 case RAW_FILEINFO_ACCESS_INFORMATION:
1616 case RAW_FILEINFO_POSITION_INFORMATION:
1617 case RAW_FILEINFO_MODE_INFORMATION:
1618 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1619 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1620 case RAW_FILEINFO_STREAM_INFO:
1621 case RAW_FILEINFO_STREAM_INFORMATION:
1622 case RAW_FILEINFO_EA_INFO:
1623 case RAW_FILEINFO_EA_INFORMATION:
1624 DEBUG(5,("%s: item is %p\n",__FUNCTION__, f));
1625 if (f && f->metadata) {
1626 NTSTATUS status;
1627 bool valid;
1628 DEBUG(5,("%s: Using cached metadata %x (item=%p)\n",__FUNCTION__, f->metadata->valid, f));
1629 status=proxy_cache_info(io, f->metadata, &valid);
1630 if (valid) return status;
1631 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1633 /* construct an item to hold the cache if we need to */
1634 if (! f && private->enabled_cache_info && PROXY_REMOTE_SERVER(private) && (f=talloc_zero(private, struct proxy_file))) {
1635 struct fdirmon* dirmon;
1636 dirmon=get_fdirmon(private, io->generic.in.file.path, true);
1637 if (f && dirmon) {
1638 f->proxy=private;
1639 dirmon_add_callback(dirmon, async_qpathinfo_notify, f);
1641 f->filename=talloc_strdup(f, io->generic.in.file.path);
1642 f->filename_size=strlen(f->filename)+1;
1643 f->metadata=talloc_zero(f, struct file_metadata);
1644 /* should not really add unless we succeeded */
1645 DLIST_ADD(private->closed_files, f);
1646 } else {
1647 talloc_free(f);
1648 f=NULL;
1651 if (f && f->metadata && private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1652 struct proxy_GetInfo *r;
1653 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1655 r=talloc_zero(req, struct proxy_GetInfo);
1656 NT_STATUS_HAVE_NO_MEMORY(r);
1658 r->in.count=1;
1659 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1660 r->in.info_tags[0].tag_type=TAG_TYPE_PATH_INFO;
1661 /* 1+ to get the null */
1662 r->in.info_tags[0].info_tag.path.count=1+strlen(io->generic.in.file.path);
1663 r->in.info_tags[0].info_tag.path.s=io->generic.in.file.path;
1664 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1665 /* the callback handler will populate the cache and respond from the cache */
1666 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qpathinfo, NT_STATUS_INTERNAL_ERROR);
1668 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1669 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1670 return sync_chain_handler(c_req);
1671 } else {
1672 void* f=NULL;
1673 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1674 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1675 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1676 return NT_STATUS_OK;
1681 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1682 return smb_raw_pathinfo(private->tree, req, io);
1685 c_req = smb_raw_pathinfo_send(private->tree, io);
1687 ASYNC_RECV_TAIL(io, async_qpathinfo);
1691 a handler for async qfileinfo replies
1693 static void async_qfileinfo(struct smbcli_request *c_req)
1695 struct async_info *async = c_req->async.private;
1696 struct ntvfs_request *req = async->req;
1697 req->async_states->status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1698 talloc_free(async);
1699 req->async_states->send_fn(req);
1702 static NTSTATUS async_proxy_qfileinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
1704 struct proxy_private *private = async->proxy;
1705 struct smbcli_request *c_req = async->c_req;
1706 struct ntvfs_request *req = async->req;
1707 struct proxy_file *f = async->f;
1708 union smb_fileinfo *io = talloc_get_type_abort(io1, union smb_fileinfo);
1709 struct proxy_GetInfo *r=talloc_get_type_abort(io2, struct proxy_GetInfo);
1711 if (c_req) status = smb_raw_fileinfo_recv(c_req, req, async->parms);
1712 req->async_states->status=status;
1714 NT_STATUS_NOT_OK_RETURN(status);
1716 /* populate the cache, and then fill the request from the cache */
1717 /* Assuming that r->in.count == 1 */
1718 SMB_ASSERT(r->out.count==1);
1719 NT_STATUS_NOT_OK_RETURN(r->out.info_data[0].status);
1721 proxy_set_cache_info(f->metadata, r);
1723 req->async_states->status=proxy_cache_info(io, f->metadata, NULL);
1725 return req->async_states->status;
1729 query info on an open file
1731 static NTSTATUS proxy_qfileinfo(struct ntvfs_module_context *ntvfs,
1732 struct ntvfs_request *req, union smb_fileinfo *io)
1734 struct proxy_private *private = ntvfs->private_data;
1735 struct smbcli_request *c_req;
1736 struct proxy_file *f;
1737 bool valid=false;
1738 NTSTATUS status;
1740 SETUP_PID;
1742 SETUP_FILE_HERE(f);
1744 /* upgrade the request */
1745 switch(io->generic.level) {
1746 case RAW_FILEINFO_STANDARD_INFO:
1747 case RAW_FILEINFO_STANDARD_INFORMATION:
1748 case RAW_FILEINFO_BASIC_INFORMATION: /* we get this on file open */
1749 case RAW_FILEINFO_ALL_INFO:
1750 case RAW_FILEINFO_COMPRESSION_INFO:
1751 case RAW_FILEINFO_INTERNAL_INFORMATION:
1752 case RAW_FILEINFO_ACCESS_INFORMATION:
1753 case RAW_FILEINFO_POSITION_INFORMATION:
1754 case RAW_FILEINFO_MODE_INFORMATION:
1755 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
1756 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
1757 case RAW_FILEINFO_STREAM_INFO:
1758 case RAW_FILEINFO_STREAM_INFORMATION:
1759 case RAW_FILEINFO_EA_INFO:
1760 case RAW_FILEINFO_EA_INFORMATION:
1761 DEBUG(5,("%s: oplock is %d\n",__FUNCTION__, f->oplock));
1762 if (f->oplock) {
1763 DEBUG(5,("%s: %p Using cached metadata %x (fnum=%d)\n",__FUNCTION__, f, f->metadata->valid, f->fnum));
1764 status=proxy_cache_info(io, f->metadata, &valid);
1765 if (valid) return status;
1766 DEBUG(5,("%s: But cached metadata not valid :-(\n",__FUNCTION__));
1768 if (private->enabled_cache_info && PROXY_REMOTE_SERVER(private)) {
1769 DEBUG(5,("%s: promoting request to proxy\n",__FUNCTION__));
1770 struct proxy_GetInfo *r=talloc_zero(req, struct proxy_GetInfo);
1771 NT_STATUS_HAVE_NO_MEMORY(r);
1772 r->in.count=1;
1773 r->in.info_tags=talloc_zero_array(req, struct info_tags, r->in.count);
1774 r->in.info_tags[0].tag_type=TAG_TYPE_FILE_INFO;
1775 r->in.info_tags[0].info_tag.fnum=io->generic.in.file.fnum;
1776 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
1777 /* the callback handler will populate the cache and respond from the cache */
1778 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_qfileinfo, NT_STATUS_INTERNAL_ERROR);
1780 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1781 DEBUG(5,("%s Sync waiting promotion\n",__FUNCTION__));
1782 return sync_chain_handler(c_req);
1783 } else {
1784 DEBUG(5,("%s Async waiting promotion\n",__FUNCTION__));
1785 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
1786 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
1787 return NT_STATUS_OK;
1792 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1793 return smb_raw_fileinfo(private->tree, req, io);
1796 c_req = smb_raw_fileinfo_send(private->tree, io);
1798 ASYNC_RECV_TAIL(io, async_qfileinfo);
1802 set info on a pathname
1804 static NTSTATUS proxy_setpathinfo(struct ntvfs_module_context *ntvfs,
1805 struct ntvfs_request *req, union smb_setfileinfo *st)
1807 struct proxy_private *private = ntvfs->private_data;
1808 struct smbcli_request *c_req;
1810 SETUP_PID;
1812 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
1813 return smb_raw_setpathinfo(private->tree, st);
1816 c_req = smb_raw_setpathinfo_send(private->tree, st);
1818 SIMPLE_ASYNC_TAIL;
1823 a handler for async open replies
1825 static void async_open(struct smbcli_request *c_req)
1827 struct async_info *async = c_req->async.private;
1828 struct proxy_private *proxy = async->proxy;
1829 struct ntvfs_request *req = async->req;
1830 struct proxy_file *f = async->f;
1831 union smb_open *io = async->parms;
1832 union smb_handle *file;
1834 talloc_free(async);
1835 req->async_states->status = smb_raw_open_recv(c_req, req, io);
1836 SMB_OPEN_OUT_FILE(io, file);
1837 f->fnum = file->fnum;
1838 file->ntvfs = NULL;
1839 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1840 req->async_states->status = ntvfs_handle_set_backend_data(f->h, proxy->ntvfs, f);
1841 if (!NT_STATUS_IS_OK(req->async_states->status)) goto failed;
1842 file->ntvfs = f->h;
1843 DLIST_ADD(proxy->files, f);
1845 f->oplock=io->generic.out.oplock_level;
1847 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
1848 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
1849 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
1851 if (proxy->cache_enabled) {
1852 struct search_cache_item *item=NULL;
1853 struct search_cache *s=proxy->search_caches;
1854 /* If we are still monitoring the file for changes we can
1855 retain the previous cache state, [if it is more recent than the monitor]! */
1856 /* yeah yeah what if there is more than one.... :-( */
1857 if (! (io->generic.level == RAW_OPEN_NTCREATEX &&
1858 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) &&
1859 find_search_cache_item(SMB_OPEN_IN_FILE(io), &s, &item) && item->cache) {
1860 DEBUG(5,("%s: Using cached file cache\n",__LOCATION__));
1861 f->cache=talloc_reference(f, item->cache);
1862 cache_beopen(f->cache);
1863 if (item->metadata) {
1864 *(f->metadata)=*(item->metadata);
1865 f->metadata->info_data.fname.s=talloc_strdup(f, item->metadata->info_data.fname.s);
1866 f->metadata->info_data.fname.count=item->metadata->info_data.fname.count;
1868 f->metadata->info_data.streams=talloc_zero_array(f, struct info_stream, f->metadata->info_data.num_streams);
1869 if (f->metadata->info_data.streams) {
1870 int c;
1871 for(c=0; c < f->metadata->info_data.num_streams; c++) {
1872 f->metadata->info_data.streams[c].size = item->metadata->info_data.streams[c].size;
1873 f->metadata->info_data.streams[c].alloc_size = item->metadata->info_data.streams[c].alloc_size;
1874 f->metadata->info_data.streams[c].stream_name.s= talloc_strdup(f, item->metadata->info_data.streams[c].stream_name.s);
1875 f->metadata->info_data.streams[c].stream_name.count=item->metadata->info_data.streams[c].stream_name.count;
1878 f->metadata->count=1;
1880 } else {
1881 f->cache=cache_open(proxy->cache, f, io, f->oplock, proxy->cache_readahead);
1882 if (proxy->fake_valid) {
1883 cache_handle_validated(f, cache_handle_len(f));
1885 if (! PROXY_REMOTE_SERVER(proxy)) cache_handle_novalidate(f);
1886 if (item) {
1887 item->cache = talloc_reference(item, f->cache);
1888 item->metadata=talloc_reference(item, f->metadata);
1889 DEBUG(5,("%s: Caching file cache for later\n",__LOCATION__));
1890 } else {
1891 DEBUG(5,("%s: NOT Caching file cache for later\n",__LOCATION__));
1896 failed:
1897 req->async_states->send_fn(req);
1901 open a file
1903 static NTSTATUS proxy_open(struct ntvfs_module_context *ntvfs,
1904 struct ntvfs_request *req, union smb_open *io)
1906 struct proxy_private *private = ntvfs->private_data;
1907 struct smbcli_request *c_req;
1908 struct ntvfs_handle *h;
1909 struct proxy_file *f, *clone;
1910 NTSTATUS status;
1911 void *filename;
1912 int filename_size;
1913 uint16_t fnum;
1915 SETUP_PID;
1917 if (io->generic.level != RAW_OPEN_GENERIC &&
1918 private->map_generic) {
1919 return ntvfs_map_open(ntvfs, req, io);
1922 status = ntvfs_handle_new(ntvfs, req, &h);
1923 #warning should we free this handle if the open fails?
1924 NT_STATUS_NOT_OK_RETURN(status);
1926 f = talloc_zero(h, struct proxy_file);
1927 NT_STATUS_HAVE_NO_MEMORY(f);
1928 f->proxy=private;
1930 /* If the file is being opened read only and we already have a read-only
1931 handle for this file, then just clone and ref-count the handle */
1932 /* First calculate the filename key */
1933 if (io->generic.level == RAW_OPEN_NTCREATEX &&
1934 io->generic.in.create_options & NTCREATEX_OPTIONS_OPEN_BY_FILE_ID) {
1935 filename_size=sizeof(uint64_t);
1936 filename=io->generic.in.fname;
1937 } else {
1938 filename=SMB_OPEN_IN_FILE(io);
1939 filename_size=strlen(filename)+1;
1941 f->filename=talloc_memdup(f, filename, filename_size);
1942 f->filename_size=filename_size;
1943 f->h = h;
1944 f->can_clone= (io->generic.in.access_mask & NTCREATEX_SHARE_ACCESS_MASK) == NTCREATEX_SHARE_ACCESS_READ &&
1945 (io->generic.in.impersonation == NTCREATEX_IMPERSONATION_IMPERSONATION) &&
1946 (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY) == 0 &&
1947 (io->generic.in.open_disposition != NTCREATEX_DISP_CREATE) &&
1948 (io->generic.in.open_disposition != NTCREATEX_DISP_SUPERSEDE);
1949 /* see if we have a matching open file */
1950 clone=NULL;
1951 if (f->can_clone) for (clone=private->files; clone; clone=clone->next) {
1952 if (clone->can_clone && filename_size == clone->filename_size &&
1953 memcmp(filename, clone->filename, filename_size)==0) {
1954 break;
1958 /* if clone is not null, then we found a match */
1959 if (private->enabled_open_clone && clone) {
1960 union smb_handle *file;
1962 DEBUG(5,("%s: clone handle %d\n",__FUNCTION__,clone->fnum));
1963 SMB_OPEN_OUT_FILE(io, file);
1964 f->fnum = clone->fnum;
1965 file->ntvfs = NULL;
1966 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
1967 NT_STATUS_NOT_OK_RETURN(status);
1968 file->ntvfs = f->h;
1969 DLIST_ADD(private->files, f);
1970 /* but be sure to share the same metadata cache */
1971 f->metadata=talloc_reference(f, clone->metadata);
1972 f->metadata->count++;
1973 f->oplock=clone->oplock;
1974 f->cache=talloc_reference(f, clone->cache);
1975 /* We don't need to reduce the oplocks for both files if we are read-only */
1976 /* if (clone->oplock==EXCLUSIVE_OPLOCK_RETURN ||
1977 clone->oplock==BATCH_OPLOCK_RETURN) {
1978 DEBUG(5,("%s: Breaking clone oplock from %d\n",__LOCATION__, clone->oplock));
1979 clone->oplock=LEVEL_II_OPLOCK_RETURN;
1980 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_LEVEL_II);
1981 //if (!NT_STATUS_IS_OK(status)) result=false;
1982 } else if (clone->oplock==LEVEL_II_OPLOCK_RETURN) {
1983 DEBUG(5,("%s: Breaking clone oplock from %d, cache no longer valid\n",__LOCATION__, clone->oplock));
1984 cache_handle_stale(f);
1985 clone->oplock=NO_OPLOCK_RETURN;
1986 status = ntvfs_send_oplock_break(private->ntvfs, clone->h, OPLOCK_BREAK_TO_NONE);
1987 //if (!NT_STATUS_IS_OK(status)) result=false;
1990 f->oplock=clone->oplock;
1991 /* and fake the rest of the response struct */
1992 io->generic.out.oplock_level=f->oplock;
1993 io->generic.out.create_action=NTCREATEX_ACTION_EXISTED;
1994 io->generic.out.create_time=f->metadata->info_data.create_time;
1995 io->generic.out.access_time=f->metadata->info_data.access_time;
1996 io->generic.out.write_time=f->metadata->info_data.write_time;
1997 io->generic.out.change_time=f->metadata->info_data.change_time;
1998 io->generic.out.attrib=f->metadata->info_data.attrib;
1999 io->generic.out.alloc_size=f->metadata->info_data.alloc_size;
2000 io->generic.out.size=f->metadata->info_data.size;
2001 io->generic.out.file_type=f->metadata->info_data.file_type;
2002 io->generic.out.ipc_state=f->metadata->info_data.ipc_state;
2003 io->generic.out.is_directory=f->metadata->info_data.is_directory;
2004 /* optional return values matching SMB2 tagged
2005 values in the call */
2006 //io->generic.out.maximal_access;
2007 return NT_STATUS_OK;
2009 f->metadata=talloc_zero(f, struct file_metadata);
2010 NT_STATUS_HAVE_NO_MEMORY(f->metadata);
2011 f->metadata->count=1;
2013 /* if oplocks aren't requested, optionally override and request them */
2014 if (! (io->generic.in.flags & (OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK))
2015 && private->fake_oplock) {
2016 io->generic.in.flags |= OPENX_FLAGS_REQUEST_OPLOCK | OPENX_FLAGS_REQUEST_BATCH_OPLOCK;
2019 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2020 union smb_handle *file;
2022 status = smb_raw_open(private->tree, req, io);
2023 NT_STATUS_NOT_OK_RETURN(status);
2025 SMB_OPEN_OUT_FILE(io, file);
2026 f->fnum = file->fnum;
2027 file->ntvfs = NULL;
2028 status = ntvfs_handle_set_backend_data(f->h, private->ntvfs, f);
2029 NT_STATUS_NOT_OK_RETURN(status);
2030 file->ntvfs = f->h;
2031 DLIST_ADD(private->files, f);
2033 f->oplock=io->generic.out.oplock_level;
2035 LOAD_CACHE_FILE_DATA (f->metadata->info_data, io->generic.out);
2036 DEBUG(5,("**** METADATA VALID %p %x LEN=%lld\n",f,f->metadata->valid,(long long)f->metadata->info_data.size));
2037 f->metadata->valid |= valid_RAW_FILEINFO_BASIC_INFORMATION;
2039 if (private->cache_enabled) {
2040 f->cache=cache_open(private->cache, f, io, f->oplock, private->cache_readahead);
2041 if (private->fake_valid) {
2042 cache_handle_validated(f, cache_handle_len(f));
2044 if (! PROXY_REMOTE_SERVER(private)) cache_handle_novalidate(f);
2047 return NT_STATUS_OK;
2050 c_req = smb_raw_open_send(private->tree, io);
2052 ASYNC_RECV_TAIL_F(io, async_open, f);
2056 create a directory
2058 static NTSTATUS proxy_mkdir(struct ntvfs_module_context *ntvfs,
2059 struct ntvfs_request *req, union smb_mkdir *md)
2061 struct proxy_private *private = ntvfs->private_data;
2062 struct smbcli_request *c_req;
2064 SETUP_PID;
2066 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2067 return smb_raw_mkdir(private->tree, md);
2070 c_req = smb_raw_mkdir_send(private->tree, md);
2072 SIMPLE_ASYNC_TAIL;
2076 remove a directory
2078 static NTSTATUS proxy_rmdir(struct ntvfs_module_context *ntvfs,
2079 struct ntvfs_request *req, struct smb_rmdir *rd)
2081 struct proxy_private *private = ntvfs->private_data;
2082 struct smbcli_request *c_req;
2084 SETUP_PID;
2086 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2087 return smb_raw_rmdir(private->tree, rd);
2089 c_req = smb_raw_rmdir_send(private->tree, rd);
2091 SIMPLE_ASYNC_TAIL;
2095 rename a set of files
2097 static NTSTATUS proxy_rename(struct ntvfs_module_context *ntvfs,
2098 struct ntvfs_request *req, union smb_rename *ren)
2100 struct proxy_private *private = ntvfs->private_data;
2101 struct smbcli_request *c_req;
2103 SETUP_PID;
2105 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
2106 return smb_raw_rename(private->tree, ren);
2109 c_req = smb_raw_rename_send(private->tree, ren);
2111 SIMPLE_ASYNC_TAIL;
2115 copy a set of files
2117 static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs,
2118 struct ntvfs_request *req, struct smb_copy *cp)
2120 return NT_STATUS_NOT_SUPPORTED;
2123 /* we only define this separately so we can easily spot read calls in
2124 pending based on ( c_req->async.fn == async_read_handler ) */
2125 static void async_read_handler(struct smbcli_request *c_req)
2127 async_chain_handler(c_req);
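/* Sketch of the "spot read calls in pending" test the comment above
   describes; proxy_read() below walks private->pending doing exactly this.
   The helper is hypothetical, shown only to make the idiom explicit. */
static bool is_pending_read(const struct async_info *pending)
{
	return pending->c_req->async.fn == async_read_handler;
}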
2130 NTSTATUS async_readahead_dec(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2132 struct proxy_private *private = async->proxy;
2133 struct smbcli_request *c_req = async->c_req;
2134 struct proxy_file *f = async->f;
2135 union smb_read *io = async->parms;
2137 /* if request is not already received by a chained handler, read it */
2138 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2140 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2141 f->readahead_pending, private->readahead_spare));
2143 f->readahead_pending--;
2144 private->readahead_spare++;
2146 DEBUG(3,("%s : file count %d, tree count %d\n",__FUNCTION__,
2147 f->readahead_pending, private->readahead_spare));
2149 return status;
2153 a handler for async read replies - speculative read-aheads.
2154 It merely saves in the cache. The async chain handler will call send_fn if
2155 there is one, or if sync_chain_handler is used the send_fn is called by
2156 the ntvfs back end.
2158 NTSTATUS async_read_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2160 struct smbcli_request *c_req = async->c_req;
2161 struct proxy_file *f = async->f;
2162 union smb_read *io = async->parms;
2164 /* if request is not already received by a chained handler, read it */
2165 if (c_req) status=smb_raw_read_recv(c_req, async->parms);
2167 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2168 get_friendly_nt_error_msg(status)));
2170 NT_STATUS_NOT_OK_RETURN(status);
2172 /* if it was a validate read we don't need to save anything unless it failed.
2173 Until we use Proxy_read structs we can't tell, so guess */
2174 if (io->generic.out.nread == io->generic.in.maxcnt &&
2175 io->generic.in.mincnt < io->generic.in.maxcnt) {
2176 /* looks like a validate read, just move the validate pointer, the
2177 original read-request has already been satisfied from cache */
2178 DEBUG(3,("%s megavalidate succeeded, validate to %lld\n",__FUNCTION__,
2179 io->generic.in.offset + io->generic.out.nread));
2180 cache_handle_validated(f, io->generic.in.offset + io->generic.out.nread);
2181 } else {
2182 DEBUG(5,("Not a mega-validate, save %d in cache\n",io->generic.out.nread));
2183 cache_handle_save(f, io->generic.out.data,
2184 io->generic.out.nread,
2185 io->generic.in.offset);
2188 DEBUG(3,("%s finished %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
2189 return status;
2192 /* handler for fragmented reads */
2193 NTSTATUS async_read_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2195 struct proxy_private *private = async->proxy;
2196 struct smbcli_request *c_req = async->c_req;
2197 struct ntvfs_request *req = async->req;
2198 struct proxy_file *f = async->f;
2199 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2200 /* this is the io against which the fragment is to be applied */
2201 union smb_read *io = talloc_get_type_abort(io1, union smb_read);
2202 /* this is the io for the read that issued the callback */
2203 union smb_read *io_frag = fragment->io_frag; /* async->parms; */
2204 struct async_read_fragments* fragments=fragment->fragments;
2206 /* if request is not already received by a chained handler, read it */
2207 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
2208 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2210 DEBUG(3,("%s async_read status: %s\n",__FUNCTION__,
2211 get_friendly_nt_error_msg(status)));
2213 fragment->status = status;
2215 /* remove fragment from fragments */
2216 DLIST_REMOVE(fragments->fragments, fragment);
2218 #warning maybe read requests beyond the short read won't return NT_STATUS_OK with nread=0
2219 /* in which case we will want to collate all responses and return a valid read
2220 for the leading NT_STATUS_OK fragments */
2222 /* did this one fail, inducing a general fragments failure? */
2223 if (!NT_STATUS_IS_OK(fragment->status)) {
2224 /* preserve the status of the fragment with the smallest offset
2225 when we can work out how */
2226 if (NT_STATUS_IS_OK(fragments->status)) {
2227 fragments->status=fragment->status;
2230 cache_handle_novalidate(f);
2231 DEBUG(5,("** Devalidated proxy due to read failure\n"));
2232 } else {
2233 /* No fragments have yet failed, keep collecting responses */
2234 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2235 /* Find memcpy window, copy data from the io_frag to the io */
2236 off_t start_offset=MAX(io_frag->generic.in.offset, io->generic.in.offset);
2237 /* used to use mincnt */
2238 off_t io_extent=io->generic.in.offset + io->generic.in.maxcnt;
2239 off_t end_offset=MIN(io_extent, extent);
2240 /* ASSERT(start_offset <= end_offset) */
2241 /* ASSERT(start_offset <= io_extent) */
2242 if (start_offset >= io_extent) {
2243 DEBUG(3,("useless read-ahead tagged on to: %s\n",__LOCATION__));
2244 } else {
2245 uint8_t* dst=io->generic.out.data+(start_offset - io->generic.in.offset);
2246 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2247 /* src == dst in cases where we did not latch onto someone else's
2248 read, but are handling our own */
2249 if (src != dst)
2250 memcpy(dst, src, end_offset - start_offset);
2253 /* There should be a better way to detect, but it needs the proxy rpc struct
2254 not the smb_read struct */
2255 if (io_frag->generic.out.nread < io_frag->generic.in.maxcnt) {
2256 DEBUG(5,("\n** Devalidated proxy due to small read: %lld min=%lld, max=%lld\n",
2257 (long long) io_frag->generic.out.nread,
2258 (long long) io_frag->generic.in.mincnt,
2259 (long long) io_frag->generic.in.maxcnt));
2260 cache_handle_novalidate(f);
2263 /* We broke up the original read. If not enough of this sub-read has
2264 been read, and then some of the next block, it could leave holes!
2265 We will only acknowledge up to the first partial read, and treat
2266 it as a small read. If server can return NT_STATUS_OK for a partial
2267 read so can we, so we preserve the response.
2268 "enough" is all of it (maxcnt), except on the last block, when it has to
2269 be enough to fill io->generic.in.mincnt. We know it is the last block
2270 if nread is small but we could fill io->generic.in.mincnt */
2271 if (io_frag->generic.out.nread < io_frag->generic.in.mincnt &&
2272 end_offset < io->generic.in.offset + io->generic.in.mincnt) {
2273 DEBUG(4,("Fragmented read only partially successful\n"));
2275 /* Shrink the master nread (or grow to this size if we are the first partial) */
2276 if (! fragments->partial ||
2277 (io->generic.in.offset + io->generic.out.nread) > extent) {
2278 io->generic.out.nread = extent - io->generic.in.offset;
2281 /* stop any further successes from extending the partial read */
2282 fragments->partial=true;
2283 } else {
2284 /* only grow the master nread if we haven't logged a partial read */
2285 if (! fragments->partial &&
2286 (io->generic.in.offset + io->generic.out.nread) < extent ) {
2287 io->generic.out.nread = MIN(io->generic.in.maxcnt, extent - io->generic.in.offset);
2292 /* Was it the last fragment, or do we know enough to send a response? */
2293 if (! fragments->fragments) {
2294 DEBUG(5,("Async read re-fragmented with %d of %d %s\n",
2295 io->generic.out.nread, io->generic.in.mincnt,
2296 get_friendly_nt_error_msg(fragments->status)));
2297 if (fragments->async) {
2298 req->async_states->status=fragments->status;
2299 DEBUG(5,("Fragments async response sending\n"));
2300 #warning its not good freeing early if other pending requests have io allocated against this request which will now be freed
2301 /* esp. as they may be attached to by other reads. Maybe attachees should be taking a reference, but how will they
2302 know the top level they need to take a reference to... */
2303 #warning should really queue a sender here, not call it
2304 req->async_states->send_fn(req);
2305 DEBUG(5,("Async response sent\n"));
2306 } else {
2307 DEBUG(5,("Fragments SYNC return\n"));
2311 /* because a c_req may be shared by many req, chained handlers must return
2312 a status pertaining to the general validity of this specific c_req, not
2313 one describing their own private processing of the c_req on behalf of their req;
2314 that per-req status is returned in fragments->status
2316 return status;
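/* The chained-handler contract just described, in miniature (assumed shape,
   illustration only): the per-req outcome travels via fragments->status,
   while the return value reports the general usability of the shared c_req. */
static NTSTATUS example_chained_handler(struct async_info *async,
					void *io1, void *io2, NTSTATUS status)
{
	/* ... private per-req work goes here, recording its own outcome in
	   something like fragments->status, never in the return value ... */
	return status;	/* validity of the shared c_req itself */
}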
2319 /* Issue read-ahead X bytes where X is the window size calculation based on
2320 server_latency * server_session_bandwidth
2321 where latency is the idle (link) latency and bandwidth is less than or equal
2322 to actual bandwidth available to the server.
2323 Read-ahead should honour locked areas in whatever way is necessary (who knows?)
2324 read_ahead is defined here and not in the cache engine because it requires too
2325 much knowledge of private structures
2327 /* The concept is buggy unless we can tell the next proxy that these are
2328 read-aheads, otherwise chained proxy setups will each read-ahead of the
2329 read-ahead which can put a larger load on the final server.
2330 Also we probably need to distinguish between
2331 * cache-less read-ahead
2332 * cache-revalidating read-ahead
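/* A minimal sketch of the window calculation described above, assuming
   latency in milliseconds and bandwidth in bytes per millisecond; neither
   figure is currently tracked in proxy_private, so both parameters here
   are hypothetical. */
static ssize_t read_ahead_window_hint(unsigned int latency_ms,
				      ssize_t bytes_per_ms)
{
	/* bytes the link can hold before the first reply comes back */
	return (ssize_t)latency_ms * bytes_per_ms;
}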
2334 NTSTATUS read_ahead(struct proxy_file *f, struct ntvfs_module_context *ntvfs,
2335 union smb_read *io, ssize_t as_read)
2337 struct proxy_private *private = ntvfs->private_data;
2338 struct smbcli_tree *tree = private->tree;
2339 struct cache_file_entry *cache;
2340 off_t next_position; /* this read offset+length+window */
2341 off_t end_position; /* position we read-ahead to */
2342 off_t cache_populated;
2343 off_t read_position, new_extent;
2345 if (! PROXY_REMOTE_SERVER(private)) return NT_STATUS_UNSUCCESSFUL;
2346 DEBUG(5,("A\n"));
2347 if (private->cache_readahead==0 || ! private->cache_enabled || ! f->cache) return NT_STATUS_UNSUCCESSFUL;
2348 DEBUG(5,("B\n"));
2349 cache=talloc_get_type_abort(f->cache, struct cache_file_entry);
2350 DEBUG(5,("C\n"));
2351 /* don't read-ahead if we are in bulk validate mode */
2352 if (cache->status & CACHE_VALIDATE) return NT_STATUS_UNSUCCESSFUL;
2353 DEBUG(5,("D\n"));
2354 /* if we can't trust what we read-ahead anyway then don't bother although
2355 * if delta-reads are enabled we can do so in order to get something to
2356 * delta against */
2357 DEBUG(CACHE_DEBUG_LEVEL,("DOING Asking read-aheads: len %lld ra-extend %lld as-read %lld RA %d (%d)\n",
2358 (long long int)(cache_len(cache)),
2359 (long long int)(cache->readahead_extent),
2360 (long long int)(as_read),
2361 cache->readahead_window,private->cache_readahead));
2362 if (private->cache_readahead ==0 || ! (cache->status & CACHE_READ_AHEAD) ) {
2363 DEBUG(CACHE_DEBUG_LEVEL,("FAILED Asking read-aheads: Can't read-ahead as no read-ahead on this file: %x\n",
2364 cache->status));
2365 return NT_STATUS_UNSUCCESSFUL;
2368 /* as_read is the mincnt bytes of a request being made or the
2369 out.nread of completed sync requests
2370 Here we presume that as_read bytes WILL be read. If there is a cache-ahead like ours,
2371 then this may often NOT be the case if readahead_window < requestsize; so we will
2372 get a small read, leaving a hole in the cache, and as we don't yet handle sparse caches,
2373 all future read-ahead will be wasted, so we need to adjust the read-ahead handler to handle
2374 this and have failed sparse writes adjust the cache->readahead_extent back to actual size */
2376 /* predict the file pointers next position */
2377 next_position=io->generic.in.offset + as_read;
2379 /* if we know how big the file is, don't read beyond */
2380 if (f->oplock && next_position > f->metadata->info_data.size) {
2381 next_position = f->metadata->info_data.size;
2383 DEBUG(5,("Next position: %lld (%lld + %lld)\n",
2384 (long long int)next_position,
2385 (long long int)io->generic.in.offset,
2386 (long long int)as_read));
2387 /* calculate the limit of the validated or requested cache */
2388 cache_populated=MAX(cache->validated_extent, cache->readahead_extent);
2390 /* will the new read take us beyond the current extent without gaps? */
2391 if (cache_populated < io->generic.in.offset) {
2392 /* this read-ahead is a read-behind-pointer */
2393 new_extent=cache_populated;
2394 } else {
2395 new_extent=MAX(next_position, cache_populated);
2398 /* as far as we can tell new_extent is the smallest offset that doesn't
2399 have a pending read request on it. Of course if we got a short read then
2400 we will have a cache-gap which we can't handle and need to read from
2401 a shrunk readahead_extent, which we don't currently handle */
2402 read_position=new_extent;
2404 /* of course if we know how big the remote file is we should limit at that */
2405 /* we should also mark-out which read-ahead requests are pending so that we
2406 * don't repeat them while they are in-transit. */
2407 /* we can't really use next_position until we can have caches with holes
2408 UNLESS next_position < new_extent, because a next_position well before
2409 new_extent is no reason to extend it further; we only want to extend it
2410 with read-aheads if we have cause to suppose the read-ahead data will
2411 be wanted, i.e. the next_position is near new_extent.
2412 So we can't justify reading beyond window+next_position, but if
2413 next_position is leaving gaps, we use new_extent instead */
2414 end_position=MIN(new_extent, next_position) + cache->readahead_window;
2415 if (f->oplock) {
2416 end_position=MIN(end_position, f->metadata->info_data.size);
2418 DEBUG(5,("** Read-ahead loop %lld < %lld window=%d, end=%lld, quota: %d\n",
2419 (long long int)read_position,
2420 (long long int)(next_position + cache->readahead_window),
2421 cache->readahead_window,
2422 (long long int)end_position,
2423 private->readahead_spare));
2424 /* do we even need to read? */
2425 if (! (read_position < end_position)) return NT_STATUS_OK;
2427 /* readahead_spare is for the whole session (mid/tid?) and may need sharing
2428 out over files and other tree-connects or something */
2429 while (read_position < end_position &&
2430 private->readahead_spare > 0) {
2431 struct smbcli_request *c_req = NULL;
2432 ssize_t read_remaining = end_position - read_position;
2433 ssize_t read_block = MIN(private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32),
2434 MIN(read_remaining, private->cache_readaheadblock));
2435 void *req = NULL; /* for the ASYNC_REC_TAIL_F_ORPHAN macro */
2436 uint8_t* data;
2437 union smb_read *io_copy=talloc_memdup_type(NULL, io, union smb_read);
2439 if (! io_copy)
2440 return NT_STATUS_NO_MEMORY;
2442 #warning we are ignoring read_for_execute as far as the cache goes
2443 io_copy->generic.in.read_for_execute=io->readx.in.read_for_execute;
2444 io_copy->generic.in.offset=read_position;
2445 io_copy->generic.in.mincnt=read_block;
2446 io_copy->generic.in.maxcnt=read_block;
2447 /* what is generic.in.remaining for? */
2448 io_copy->generic.in.remaining = MIN(65535,read_remaining);
2449 io_copy->generic.out.nread=0;
2451 #warning someone must own io_copy, tree, maybe?
2452 data=talloc_zero_size(io_copy, io_copy->generic.in.maxcnt);
2453 DEBUG(5,("Talloc read-ahead buffer %p size %d\n",data, io_copy->generic.in.maxcnt));
2454 if (! data) {
2455 talloc_free(io_copy);
2456 return NT_STATUS_NO_MEMORY;
2458 io_copy->generic.out.data=data;
2460 /* are we able to pull anything from the cache to validate this read-ahead?
2461 NOTE: there is no point in reading ahead merely to re-validate the
2462 cache if we don't have oplocks and can't save it....
2463 ... or maybe there is if we think a read will come that can be matched
2464 up to this response while it is still on the wire */
2465 #warning so we need to distinguish between pipeline read-ahead and revalidation
2466 if (/*(cache->status & CACHE_READ)!=0 && */
2467 cache_len(cache) >
2468 (io_copy->generic.in.offset + io_copy->generic.in.mincnt) &&
2469 cache->validated_extent <
2470 (io_copy->generic.in.offset + io_copy->generic.in.maxcnt)) {
2471 ssize_t pre_fill;
2473 pre_fill = cache_raw_read(cache, data,
2474 io_copy->generic.in.offset,
2475 io_copy->generic.in.maxcnt);
2476 DEBUG(5,("Data read into %p %d\n",data, pre_fill));
2477 if (pre_fill > 0 && pre_fill >= io_copy->generic.in.mincnt) {
2478 io_copy->generic.out.nread=pre_fill;
2479 read_block=pre_fill;
2483 c_req = proxy_smb_raw_read_send(ntvfs, io_copy, f, NULL);
2485 if (c_req) {
2486 private->readahead_spare--;
2487 f->readahead_pending++;
2488 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead level %d request %p offset=%d size=%d\n",io_copy->generic.level,c_req,(int)read_position,(int)read_block));
2489 if (cache->readahead_extent < read_position+read_block)
2490 cache->readahead_extent=read_position+read_block;
2491 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
2492 /* so we can decrease read-ahead counter for this session */
2493 ADD_ASYNC_RECV_TAIL(c_req, io_copy, NULL, f, async_readahead_dec, NT_STATUS_INTERNAL_ERROR);
2494 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io_copy, async_read_handler);
2496 /* Make these owned by the async struct so they are freed when the callback ends or is cancelled */
2497 talloc_steal(c_req->async.private, c_req);
2498 talloc_steal(c_req->async.private, io_copy);
2499 read_position+=read_block;
2500 } else {
2501 DEBUG(CACHE_DEBUG_LEVEL,("Read-ahead request FAILED offset=%d size=%d\n",(int)read_position,(int)read_block));
2502 talloc_free(io_copy);
2503 break;
2507 DEBUG(CACHE_DEBUG_LEVEL,("DONE: Asking read-aheads\n"));
2508 return NT_STATUS_OK;
2511 struct proxy_validate_parts_parts {
2512 struct proxy_Read* r;
2513 struct ntvfs_request *req;
2514 struct proxy_file *f;
2515 struct async_read_fragments *fragments;
2516 off_t offset;
2517 ssize_t remaining;
2518 bool complete;
2519 declare_checksum(digest);
2520 struct MD5Context context;
2523 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts);
2524 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status);
2525 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2526 struct proxy_validate_parts_parts *parts);
2528 /* this will be the new struct proxy_Read based read function, for now
2529 it just handles non-cache-based validation against a regular server */
2530 static NTSTATUS proxy_validate(struct ntvfs_module_context *ntvfs,
2531 struct ntvfs_request *req,
2532 struct proxy_Read *r,
2533 struct proxy_file *f)
2535 struct proxy_private *private = ntvfs->private_data;
2536 struct proxy_validate_parts_parts *parts;
2537 struct async_read_fragments *fragments;
2538 NTSTATUS status;
2540 if (!f) return NT_STATUS_INVALID_HANDLE;
2542 DEBUG(5,("%s: fnum=%d **** %lld bytes \n\n\n\n",__LOCATION__,f->fnum,(long long int)r->in.maxcnt));
2544 parts = talloc_zero(req, struct proxy_validate_parts_parts);
2545 DEBUG(5,("%s: parts=%p\n",__FUNCTION__,parts));
2546 NT_STATUS_HAVE_NO_MEMORY(parts);
2548 fragments = talloc_zero(parts, struct async_read_fragments);
2549 NT_STATUS_HAVE_NO_MEMORY(fragments);
2551 parts->fragments=fragments;
2553 parts->r=r;
2554 parts->f=f;
2555 parts->req=req;
2556 /* processed offset */
2557 parts->offset=r->in.offset;
2558 parts->remaining=r->in.maxcnt;
2559 fragments->async=true;
2561 MD5Init (&parts->context);
2563 /* start a read-loop which will continue in the callback until it is
2564 all done */
2565 status=proxy_validate_parts(ntvfs, parts);
2566 if (parts->complete) {
2567 /* Make sure we are not async */
2568 DEBUG(5,("%s: completed EARLY\n",__FUNCTION__));
2569 return proxy_validate_complete(parts);
2572 /* Assert if status!=NT_STATUS_OK then parts->complete==true */
2573 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
2574 DEBUG(5,("%s: returning ASYNC\n",__FUNCTION__));
2575 return status;
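/* The digest check performed across the functions below, condensed for
   illustration (uses the same MD5 calls as this file; the helper itself and
   its 16-byte digest length are my assumptions, not the module's). */
static bool validate_digest_matches(struct MD5Context *context,
				    const uint8_t *last_frag, size_t len,
				    const uint8_t expected[16])
{
	uint8_t digest[16];
	MD5Update(context, last_frag, len);	/* once per fragment, as replies arrive */
	MD5Final(digest, context);		/* after the final fragment */
	return memcmp(digest, expected, 16) == 0;
}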
2578 NTSTATUS proxy_validate_complete(struct proxy_validate_parts_parts *parts)
2580 NTSTATUS status;
2581 struct proxy_Read* r=parts->r;
2582 struct proxy_file *f=parts->f;
2584 DEBUG(5,("%s: %d/%d bytes \n\n\n\n",__LOCATION__,r->out.nread,r->in.maxcnt));
2586 MD5Final(parts->digest, &parts->context);
2588 status = parts->fragments->status;
2589 r->out.result = status;
2590 r->out.response.generic.count=r->out.nread;
2591 r->out.cache_name.count=0;
2593 DEBUG(5,("%s: %s nread=%d\n",__FUNCTION__, get_friendly_nt_error_msg (status),
2594 r->out.response.generic.count));
2596 DEBUG(5,("Anticipated validated digest for size: %lld\n", (long long) r->in.maxcnt));
2597 dump_data (5, r->in.digest.digest, sizeof(parts->digest));
2598 DEBUG(5,("read digest for size %lld\n",(long long) parts->offset));
2599 dump_data (5, parts->digest, sizeof(parts->digest));
2601 if (NT_STATUS_IS_OK(status) &&
2602 (memcmp(parts->digest, r->in.digest.digest, sizeof(parts->digest))==0)) {
2603 r->out.flags = PROXY_USE_CACHE | PROXY_VALIDATE;
2604 DEBUG(5,("======= VALIDATED FINE \n\n\n"));
2605 } else {
2606 if (r->in.flags & PROXY_USE_ZLIB) {
2607 ssize_t size = r->out.response.generic.count;
2608 DEBUG(5,("======= VALIDATED WRONG; compress size %d \n\n\n",size));
2609 if (compress_block(r->out.response.generic.data, &size) ) {
2610 r->out.flags|=PROXY_USE_ZLIB;
2611 r->out.response.compress.count=size;
2612 r->out.response.compress.data=r->out.response.generic.data;
2613 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
2614 __LOCATION__,r->out.nread,size,size*100/r->out.nread));
2617 /* return cache filename as a ghastly hack for now */
2618 r->out.cache_name.s=f->cache->cache_name;
2619 r->out.cache_name.count=strlen(r->out.cache_name.s)+1;
2620 DEBUG(5,("%s: writing cache name: %s\n",__LOCATION__, f->cache->cache_name));
2621 /* todo: what about tiny files, buffer too small, don't validate tiny files <1K */
2624 /* assert: this must only be true if we are in a callback */
2625 if (parts->req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
2626 /* we are async complete, we need to call the sendfn */
2627 parts->req->async_states->status=status;
2628 DEBUG(5,("Fragments async response sending\n"));
2630 parts->req->async_states->send_fn(parts->req);
2631 return NT_STATUS_OK;
2633 return status;
2636 NTSTATUS async_proxy_validate_parts(struct async_info *async, void* io1, void* io2, NTSTATUS status)
2638 struct smbcli_request *c_req = async->c_req;
2639 struct ntvfs_request *req = async->req;
2640 struct proxy_file *f = async->f;
2641 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
2642 struct async_read_fragment* fragment=talloc_get_type_abort(io2, struct async_read_fragment);
2643 /* this is the io against which the fragment is to be applied */
2644 struct proxy_validate_parts_parts *parts = talloc_get_type_abort(io1, struct proxy_validate_parts_parts);
2645 struct proxy_Read* r=parts->r;
2646 /* this is the io for the read that issued the callback */
2647 union smb_read *io_frag = fragment->io_frag;
2648 struct async_read_fragments* fragments=fragment->fragments;
2650 /* if request is not already received by a chained handler, read it */
2651 if (c_req) status=smb_raw_read_recv(c_req, io_frag);
2652 DEBUG(5,("%s: status %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
2653 DEBUG(5,("\n\n%s: parts=%p c_req=%p io_frag=%p read %lld\n",__LOCATION__,parts, c_req, io_frag,(long long int)io_frag->generic.out.nread));
2655 fragment->status=status;
2657 if (NT_STATUS_IS_OK(status)) {
2658 /* TODO: If we are not sequentially "next", queue until we can do it */
2659 /* log this data in r->out.generic.data */
2660 /* Find memcpy window, copy data from the io_frag to the io */
2662 /* Also write validate to cache */
2663 if (f && f->cache) {
2664 cache_save(f->cache, io_frag->generic.out.data, io_frag->generic.out.nread, io_frag->generic.in.offset);
2667 /* extent is the last byte we (don't) read for this frag */
2668 ssize_t extent = io_frag->generic.in.offset + io_frag->generic.out.nread;
2669 /* start_offset is the file offset we first care about */
2670 off_t start_offset=MAX(io_frag->generic.in.offset, r->in.offset);
2671 /* Don't want to go past mincnt cos we don't have the buffer */
2672 off_t io_extent=r->in.offset + r->in.mincnt;
2673 off_t end_offset=MIN(io_extent, extent);
2675 /* ASSERT(start_offset <= end_offset) */
2676 /* ASSERT(start_offset <= io_extent) */
2677 /* Don't copy beyond buffer */
2678 if (! (start_offset >= io_extent)) {
2679 uint8_t* dst=r->out.response.generic.data + (start_offset - r->in.offset);
2680 uint8_t* src=io_frag->generic.out.data+(start_offset - io_frag->generic.in.offset);
2681 /* src == dst in cases where we did not latch onto someone else's
2682 read, but are handling our own */
2683 if (src != dst)
2684 memcpy(dst, src, end_offset - start_offset);
2685 r->out.nread=end_offset - r->in.offset;
2686 DEBUG(5,("%s: nread %lld ++++++++++++++++++\n", __LOCATION__,(long long int)r->out.nread));
2689 MD5Update(&parts->context, io_frag->generic.out.data,
2690 io_frag->generic.out.nread);
2692 parts->fragments->status=status;
2693 status=proxy_validate_parts(ntvfs, parts);
2694 } else {
2695 parts->fragments->status=status;
2698 DLIST_REMOVE(fragments->fragments, fragment);
2699 /* this will free the io_frag too */
2700 talloc_free(fragment);
2702 if (parts->complete || NT_STATUS_IS_ERR(status)) {
2703 /* this will call sendfn, the chain handler won't know... but
2704 should have no more handlers queued */
2705 return proxy_validate_complete(parts);
2708 return NT_STATUS_OK;
2711 /* continue a read loop, possibly from a callback */
2712 static NTSTATUS proxy_validate_parts(struct ntvfs_module_context *ntvfs,
2713 struct proxy_validate_parts_parts *parts)
2715 struct proxy_private *private = ntvfs->private_data;
2716 union smb_read *io_frag;
2717 struct async_read_fragment *fragment;
2718 struct smbcli_request *c_req = NULL;
2719 ssize_t size=private->tree->session->transport->negotiate.max_xmit \
2720 - (MIN_SMB_SIZE+32);
2722 /* Have we already read enough? */
2723 if (parts->offset >= (parts->r->in.offset + parts->r->in.maxcnt)) {
2724 parts->complete=true;
2725 return NT_STATUS_OK;
2728 size=MIN(size, parts->remaining);
2730 fragment=talloc_zero(parts->fragments, struct async_read_fragment);
2731 NT_STATUS_HAVE_NO_MEMORY(fragment);
2733 io_frag = talloc_zero(fragment, union smb_read);
2734 NT_STATUS_HAVE_NO_MEMORY(io_frag);
2736 io_frag->generic.out.data = talloc_size(io_frag, size);
2737 NT_STATUS_HAVE_NO_MEMORY(io_frag->generic.out.data);
2739 io_frag->generic.level = RAW_READ_GENERIC;
2740 io_frag->generic.in.file.fnum = parts->r->in.fnum;
2741 io_frag->generic.in.offset = parts->offset;
2742 io_frag->generic.in.mincnt = size;
2743 io_frag->generic.in.maxcnt = size;
2744 io_frag->generic.in.remaining = 0;
2745 #warning maybe true is more permissive?
2746 io_frag->generic.in.read_for_execute = false;
2748 DEBUG(5,("%s: issue part read offset=%lld, size=%lld,%lld\n",__LOCATION__,
2749 (long long int)io_frag->generic.in.offset,
2750 (long long int)io_frag->generic.in.mincnt,
2751 (long long int)io_frag->generic.in.maxcnt));
2753 //c_req = smb_raw_read_send(ntvfs, io_frag, parts->f, parts->r);
2754 c_req = smb_raw_read_send(private->tree, io_frag);
2755 NT_STATUS_HAVE_NO_MEMORY(c_req);
2757 parts->offset+=size;
2758 parts->remaining-=size;
2759 fragment->c_req = c_req;
2760 fragment->io_frag = io_frag;
2761 fragment->fragments=parts->fragments;
2762 DLIST_ADD(parts->fragments->fragments, fragment);
2764 { void* req=NULL;
2765 ADD_ASYNC_RECV_TAIL(c_req, parts, fragment, parts->f, async_proxy_validate_parts, NT_STATUS_INTERNAL_ERROR);
2766 ASYNC_RECV_TAIL_F_ORPHAN(io_frag, async_read_handler, parts->f, c_req->async.private, NT_STATUS_UNSUCCESSFUL);
2769 DEBUG(5,("%s: issued read parts=%p c_req=%p io_frag=%p\n",__LOCATION__,parts, c_req, io_frag));
2771 return NT_STATUS_OK;
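/* Fragment sizing used above, pulled out for clarity (illustrative helper,
   not module code): each wire read is capped by the negotiated max_xmit
   less an assumed SMB header overhead of MIN_SMB_SIZE+32 bytes. */
static ssize_t validate_fragment_size(struct smbcli_tree *tree,
				      ssize_t remaining)
{
	ssize_t size = tree->session->transport->negotiate.max_xmit
		       - (MIN_SMB_SIZE + 32);
	return MIN(size, remaining);
}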
2775 read from a file
2777 static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs,
2778 struct ntvfs_request *req, union smb_read *io)
2780 struct proxy_private *private = ntvfs->private_data;
2781 struct smbcli_request *c_req;
2782 struct proxy_file *f;
2783 struct async_read_fragments *fragments=NULL;
2784 /* how much of read-from-cache is certainly valid */
2785 ssize_t valid=0;
2786 off_t offset=io->generic.in.offset+valid;
2787 off_t limit=io->generic.in.offset+io->generic.in.mincnt;
2789 SETUP_PID;
2791 if (io->generic.level != RAW_READ_GENERIC &&
2792 private->map_generic) {
2793 return ntvfs_map_read(ntvfs, req, io);
2796 SETUP_FILE_HERE(f);
2798 DEBUG(3,("\n%s() fnum=%d offset=%lld, mincnt=%d, maxcnt=%d\n",__FUNCTION__,
2799 io->generic.in.file.fnum,
2800 io->generic.in.offset,
2801 io->generic.in.mincnt,
2802 io->generic.in.maxcnt));
2804 io->generic.out.nread=0;
2806 /* if we have oplocks and know the files size, don't even ask the server
2807 for more */
2808 if (f->oplock) {
2809 if (io->generic.in.offset >= f->metadata->info_data.size) {
2810 io->generic.in.mincnt=0;
2811 io->generic.in.maxcnt=0;
2812 io->generic.out.nread=0;
2813 DEBUG(5,("Reading beyond known length %lld; return 0\n",(long long)f->metadata->info_data.size));
2814 return NT_STATUS_OK;
2815 } else {
2816 io->generic.in.mincnt=MIN(io->generic.in.mincnt,
2817 f->metadata->info_data.size - io->generic.in.offset);
2818 io->generic.in.maxcnt=MIN(io->generic.in.maxcnt,
2819 f->metadata->info_data.size - io->generic.in.offset);
2821 DEBUG(5,("Oplock and known size, limiting read to %lld (s=%d)\n",
2822 f->metadata->info_data.size, io->generic.in.mincnt));
2826 /* attempt to read from cache. if nread becomes non-zero then we
2827 have cache to validate. Instead of returning "valid" value, cache_read
2828 should probably return an async_read_fragment structure */
2830 if (private->cache_enabled) {
2831 NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid);
2833 if (NT_STATUS_IS_OK(status)) {
2834 /* if we read enough valid data, return it */
2835 if (valid > 0 && valid>=io->generic.in.mincnt) {
2836 /* valid will not be bigger than maxcnt */
2837 io->generic.out.nread=valid;
2838 DEBUG(1,("Read from cache offset=%d size=%d\n",
2839 (int)(io->generic.in.offset),
2840 (int)(io->generic.out.nread)) );
2841 return status;
2844 DEBUG(5,("Cache read status: %s\n",get_friendly_nt_error_msg (status)));
2847 fragments=talloc_zero(req, struct async_read_fragments);
2848 fragments->async=!!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC);
2849 /* See if there are pending reads that would satisfy this request
2850 We have a validated read up to io->generic.out.nread. Anything between
2851 this and mincnt MUST be read, but we could first try and attach to
2852 any pending read-ahead on the same file.
2853 If those read-aheads fail we will re-issue a regular read from the
2854 callback handler and hope it hasn't taken too long. */
2856 /* offset is the extent of the file from which we still need to find
2857 matching read-requests. */
2858 offset=io->generic.in.offset+valid;
2859 /* limit is the byte beyond the last byte for which we need a request.
2860 This used to be mincnt, but is now maxcnt to cope with validate reads.
2861 Maybe we can switch back to mincnt when proxy_read struct is used
2862 instead of smb_read.
2864 limit=io->generic.in.offset+io->generic.in.maxcnt;
2866 while (offset < limit) {
2867 /* Should look for the read-ahead with offset <= in.offset+out.nread
2868 with the longest span, but there is only likely to be one anyway so
2869 just take the first */
2870 struct async_info* pending=private->pending;
2871 union smb_read *readahead_io=NULL;
2872 DEBUG(5,("Looping reads from offset=%lld, end=%lld\n",offset,limit));
2873 while(pending) {
2874 if (pending->c_req->async.fn == async_read_handler) {
2875 struct async_info *async=talloc_get_type_abort(pending->c_req->async.private, struct async_info);
2876 readahead_io=talloc_get_type_abort(async->parms, union smb_read);
2878 if (readahead_io->generic.in.file.fnum == io->generic.in.file.fnum &&
2879 readahead_io->generic.in.offset <= offset &&
2880 readahead_io->generic.in.offset +
2881 readahead_io->generic.in.mincnt > offset) break;
2883 readahead_io=NULL;
2884 pending=pending->next;
2886 /* ASSERT(readahead_io == pending->c_req->async.params) */
2887 if (pending && readahead_io) {
2888 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2889 fragment->fragments=fragments;
2890 fragment->io_frag=readahead_io;
2891 fragment->c_req = pending->c_req;
2892 /* we found one, so attach to it. We DO need a talloc_reference
2893 because the original send_fn might be called before ALL chained
2894 handlers, and our handler will call its own send_fn first. ugh.
2895 Maybe we need to separate reverse-mapping callbacks from data users? */
2896 /* Note: the read-ahead io is passed as io, and our req io is
2897 in io_frag->io */
2898 //talloc_reference(req, pending->req);
2899 DEBUG(5,("Attach to read for offset=%lld length=%d\n",
2900 readahead_io->generic.in.offset,
2901 readahead_io->generic.in.mincnt));
2902 ADD_ASYNC_RECV_TAIL(pending->c_req, io, fragment, f,
2903 async_read_fragment, NT_STATUS_INTERNAL_ERROR);
2904 DEBUG(5,("Attached OK\n"));
2905 #warning we don't want to return if we fail to attach, just break
2906 DLIST_ADD(fragments->fragments, fragment);
2907 /* updated offset for which we have reads */
2908 offset=readahead_io->generic.in.offset + readahead_io->generic.in.mincnt;
2909 } else {
2910 /* there are no pending reads to fill this so issue one up to
2911 the maximum supported read size. We could see when the next
2912 pending read is (if any) and only read up till there... later...
2913 Issue a fragment request for what is left, clone io.
2914 In the case that there were no fragments this will be the original read
2915 but with a cloned io struct */
2916 off_t next_offset;
2917 struct proxy_Read *r=NULL; /* used only for VALIDATE promotion */
2918 struct async_read_fragment *fragment=talloc_zero(req, struct async_read_fragment);
2919 union smb_read *io_frag=talloc_memdup_type(req, io, union smb_read);
2920 ssize_t offset_inc=offset-io_frag->generic.in.offset;
2921 /* 250 is a guess at ndr rpc overheads */
2922 ssize_t readsize=MIN(PROXY_NTIOCTL_MAXDATA,
2923 private->tree->session->transport->negotiate.max_xmit) \
2924 - (MIN_SMB_SIZE+32);
2925 if (readsize > 0xFFFF) readsize = 0xFFFF; /* - (MIN_SMB_SIZE+250) ?? */
2926 readsize=MIN(limit-offset, readsize);
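/* Worked example of the readsize clamp above, assuming a negotiated
   max_xmit of 16644 and a MIN_SMB_SIZE of 35 (both assumed typical values,
   not taken from this function): MIN(PROXY_NTIOCTL_MAXDATA, 16644) - (35+32)
   = 16577, which is under the 0xFFFF cap, and is then clipped to
   limit-offset when the remaining span is smaller. */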
2928 DEBUG(5,("Issuing direct read\n"));
2929 /* reduce the cached read (if any). nread is unsigned */
2930 if (io_frag->generic.out.nread > offset_inc) {
2931 io_frag->generic.out.nread-=offset_inc;
2932 /* don't make nread buffer look too big */
2933 if (io_frag->generic.out.nread > readsize)
2934 io_frag->generic.out.nread = readsize;
2935 } else {
2936 io_frag->generic.out.nread=0;
2938 /* adjust the data pointer so we read to the right place */
2939 io_frag->generic.out.data+=offset_inc;
2940 io_frag->generic.in.offset=offset;
2941 io_frag->generic.in.maxcnt=readsize;
2942 /* we don't mind mincnt being smaller if this is the last frag,
2943 but then we can already handle it being bigger but not reached...
2944 The spell would be:
2945 MIN(io_frag->generic.in.mincnt, io_frag->generic.in.maxcnt); */
2947 io_frag->generic.in.mincnt=readsize;
2948 fragment->fragments=fragments;
2949 fragment->io_frag=io_frag;
2950 #warning attach to send_fn handler
2951 /* what if someone attaches to us? Our send_fn is called from our
2952 chained handler which will be before their handler and io will
2953 already be freed. We need to keep a reference to the io and the data
2954 but we don't know where it came from in order to take a reference.
2955 We therefore need to tackle calling send_fn AFTER all other handlers */
2957 /* Calculate next offset (in advance) */
2958 next_offset=io_frag->generic.in.offset + io_frag->generic.in.mincnt;
2960 /* if we are (going to be) the last fragment and we are in VALIDATE
2961 mode, see if we can do a bulk validate now.
2962 io->generic.in.mincnt == io->generic.in.maxcnt is to make sure we
2963 don't do a validate on a receive validate read */
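/* Sketch of the bulk-validate exchange (hedged summary; the matching flag
   handling is visible in rpclite_proxy_Read_map_async_send further down):
   checksum up to cache_validatesize bytes of local cache, send the digest
   with PROXY_VALIDATE|PROXY_USE_CACHE, and if the remote data hashes to
   the same digest the reply carries PROXY_USE_CACHE instead of the data,
   so the cached bytes can be served as validated. */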
2965 if (private->cache_validatesize && PROXY_REMOTE_SERVER(private) &&
2966 next_offset >= limit && (f->cache && f->cache->status & CACHE_VALIDATE)) {
2967 ssize_t length=private->cache_validatesize;
2968 declare_checksum(digest);
2970 DEBUG(5,("last read, maybe mega validate: frag length %zu, offset %llu\n",
2971 length, (unsigned long long) offset));
2972 NTSTATUS status=cache_smb_raw_checksum(f->cache, offset, &length, digest);
2973 /* no point in doing it if md5'd length < current out.nread
2974 remember: out.data contains this request's cached response
2975 if validate succeeds */
2976 if (NT_STATUS_IS_OK(status) && (length > io_frag->generic.out.nread)) {
2977 /* upgrade the read, allocate the proxy_read struct here
2978 and fill in the extras, no more out-of-band stuff */
2979 DEBUG(5,("%s: Promoting to validate read: %lld\n",__FUNCTION__,(long long) length));
2980 dump_data (5, digest, sizeof(digest));
2982 r=talloc_zero(io_frag, struct proxy_Read);
2983 memcpy(r->in.digest.digest, digest, sizeof(digest));
2984 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;
2985 io_frag->generic.in.maxcnt = length;
2986 r->in.mincnt=io_frag->generic.in.mincnt;
2987 /* the proxy send function will calculate the checksum based on *data */
2988 } else {
2989 /* try bulk read */
2990 if (f->oplock) {
2991 DEBUG(5,("%s: *** faking bulk read\n\n",__LOCATION__));
2992 r=talloc_zero(io_frag, struct proxy_Read);
2993 r->in.flags |= PROXY_VALIDATE | PROXY_USE_CACHE;//| PROXY_USE_ZLIB;
2994 io_frag->generic.in.maxcnt = MIN(f->metadata->info_data.size, private->cache_validatesize);
2996 r->in.mincnt=io_frag->generic.in.mincnt;
2998 /* not enough in cache to make it worthwhile anymore */
3002 //cache_handle_novalidate(f);
3003 DEBUG(5,("VALIDATE DOWNGRADE 1, no more on this file: frag length %zu, offset %llu, cache=%x\n",
3004 length, (unsigned long long) offset, (f->cache)?(f->cache->status):0));
3006 } else {
3007 if (f->cache && f->cache->status & CACHE_VALIDATE) {
3008 DEBUG(5,(">>>Not last frag, no validate read: %lld %lld\n",
3009 (long long) next_offset,
3010 (long long) limit));
3014 DEBUG(5,("Frag read sending offset=%lld min=%d, size=%d\n",
3015 io_frag->generic.in.offset,io_frag->generic.in.mincnt,
3016 io_frag->generic.in.maxcnt));
3017 c_req = proxy_smb_raw_read_send(ntvfs, io_frag, f, r);
3018 DEBUG(5,("Frag read sent offset=%lld size=%d MID=%d\n",
3019 io_frag->generic.in.offset,io_frag->generic.in.maxcnt,c_req->mid));
3020 fragment->c_req=c_req;
3021 DLIST_ADD(fragments->fragments, fragment);
3022 ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_read_cache_save, NT_STATUS_INTERNAL_ERROR);
3023 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_read_fragment, NT_STATUS_INTERNAL_ERROR);
3024 DEBUG(5,("Frag response chained\n"));
3025 /* normally we would only install the chain_handler if we wanted async
3026 response, but as it is the async_read_fragment handler that calls send_fn
3027 based on fragments->async, instead of async_chain_handler, we don't
3028 need to worry about this call completing async'ly while we are
3029 waiting on the other attached calls. Otherwise we would not attach
3030 the async_chain_handler (via async_read_handler) because of the wait
3031 below */
3032 { /* We don't want the chain handler calling send_fn as it is done by the fragment handler */
3033 void* req=NULL;
3034 /* call async_chain_handler, not the read handler, so that folk can't
3035 attach to it, till we solve the problem above */
3036 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
3038 offset = next_offset;
3040 DEBUG(5,("Next fragment\n"));
3043 /* do we still need a final fragment? Issue a read */
3045 DEBUG(5,("No frags left to read\n"));
3048 /* issue new round of read-aheads */
3049 DEBUG(5,("== Read aheads asread-%d\n",io->generic.in.mincnt));
3050 if (f->cache && ! (f->cache->status & CACHE_VALIDATE)) read_ahead(f, ntvfs, io, io->generic.in.mincnt);
3051 DEBUG(5,("== Done Read aheads\n"));
3053 /* If we have fragments but we are not called async, we must sync-wait on them */
3054 /* did we map the entire request to pending reads? */
3055 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3056 struct async_read_fragment *fragment;
3057 DEBUG(5,("Sync waiting\n"));
3058 /* fragment gets freed during the chain_handler so we start at
3059 the top each time */
3060 for (fragment = fragments->fragments; fragment; fragment = fragments->fragments) {
3061 /* Any fragments async handled while we sync-wait on one
3062 will remove themselves from the list and not get sync waited */
3063 sync_chain_handler(fragment->c_req);
3064 /* if we have a non-ok result AND we know we have all the responses
3065 up to extent, then we could quit the loop early and change the
3066 fragments->async to true so the final irrelevant responses would
3067 come async and we could send our response now - but we don't
3068 track that detail until we have cache-maps that we can use to
3069 track the responded fragments and combine the responded linear extents
3070 if (! NT_STATUS_IS_OK(fragments->status) && xxx ) */
3072 DEBUG(5,("Sync return of proxy_read: %s\n",get_friendly_nt_error_msg (fragments->status)));
3073 return fragments->status;
3076 DEBUG(5,("Async returning\n"));
3077 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
3078 return NT_STATUS_OK;
3082 a handler to de-fragment async write replies back to one request.
3083 Can cope with out-of-order async responses by waiting for all responses
3084 in the NT_STATUS_OK case so that nwritten is properly adjusted
3086 NTSTATUS async_write_fragment(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3088 struct smbcli_request *c_req = async->c_req;
3089 struct ntvfs_request *req = async->req;
3090 struct proxy_file *f=async->f;
3091 struct async_write_fragment* fragment=talloc_get_type_abort(io2, struct async_write_fragment);
3092 /* this is the io against which the fragment is to be applied */
3093 union smb_write *io = talloc_get_type_abort(io1, union smb_write);
3094 /* this is the io for the write that issued the callback */
3095 union smb_write *io_frag = fragment->io_frag; /* async->parms; */
3096 struct async_write_fragments* fragments=fragment->fragments;
3097 ssize_t extent=0;
3099 /* if request is not already received by a chained handler, read it */
3100 #warning the queuer of the request should first push a suitable decoder, they should not scatter handlers generically
3101 if (c_req) status=smb_raw_write_recv(c_req, io_frag);
3103 DEBUG(3,("%s async_write status: %s\n",__FUNCTION__,
3104 get_friendly_nt_error_msg(status)));
3106 fragment->status = status;
3108 DLIST_REMOVE(fragments->fragments, fragment);
3110 /* did this one fail? */
3111 if (! NT_STATUS_IS_OK(fragment->status)) {
3112 if (NT_STATUS_IS_OK(fragments->status)) {
3113 fragments->status=fragment->status;
3115 } else {
3116 /* No fragments have yet failed, keep collecting responses */
3117 extent = io_frag->generic.in.offset + io_frag->generic.out.nwritten;
3119 /* we broke up the write so it could all be written. If only some of
3120 this block has been written, and then some of the next block,
3121 it could leave unwritten holes! We will only acknowledge up to the
3122 first partial write, and let the client deal with it.
3123 If the server can return NT_STATUS_OK for a partial write, so can we */
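/* Illustrative case (made-up numbers, not from a trace): a 128k write is
   split at 64k. If the first fragment reports nwritten=32k and the second
   a full 64k, bytes [32k,64k) are a hole; the extent bookkeeping below
   clamps the master nwritten to 32k, whatever order the replies arrive in,
   so the holey range is never acknowledged to the client. */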
3124 if (io_frag->generic.out.nwritten != io_frag->generic.in.count) {
3125 DEBUG(4,("Fragmented write only partially successful\n"));
3127 /* Shrink the master nwritten */
3128 if ( ! fragments->partial ||
3129 (io->generic.in.offset + io->generic.out.nwritten) > extent) {
3130 io->generic.out.nwritten = extent - io->generic.in.offset;
3132 /* stop any further successes from extending the partial write */
3133 fragments->partial=true;
3134 } else {
3135 /* only grow the master nwritten if we haven't logged a partial write */
3136 if (! fragments->partial &&
3137 (io->generic.in.offset + io->generic.out.nwritten) < extent ) {
3138 io->generic.out.nwritten = extent - io->generic.in.offset;
3143 /* if this was the last fragment, clean up */
3144 if (! fragments->fragments) {
3145 DEBUG(5,("Async write re-fragmented with %d of %d\n",
3146 io->generic.out.nwritten,
3147 io->generic.in.count));
3148 if (NT_STATUS_IS_OK(fragments->status)) {
3149 cache_handle_save(f, io->generic.in.data, io->generic.out.nwritten,
3150 io->generic.in.offset);
3151 if (f->metadata->info_data.size < io->generic.in.offset+io->generic.in.count) {
3152 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3155 if (fragments->async) {
3156 req->async_states->status=fragments->status;
3157 #warning it is not good to free early if other pending requests have io allocated against this request, which will now be freed
3158 req->async_states->send_fn(req);
3159 DEBUG(5,("Async response sent\n"));
3160 } else {
3161 DEBUG(5,("Fragments SYNC return\n"));
3165 return status;
3169 a handler for async write replies
3171 NTSTATUS async_write_cache_save(struct async_info *async, void* io1, void* io2, NTSTATUS status)
3173 struct smbcli_request *c_req = async->c_req;
3174 struct ntvfs_request *req = async->req;
3175 struct proxy_file *f=async->f;
3176 union smb_write *io=async->parms;
3178 if (c_req)
3179 status = smb_raw_write_recv(c_req, async->parms);
3181 cache_handle_save(f, io->generic.in.data,
3182 io->generic.out.nwritten,
3183 io->generic.in.offset);
3185 return status;
3189 write to a file
3191 static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs,
3192 struct ntvfs_request *req, union smb_write *io)
3194 struct proxy_private *private = ntvfs->private_data;
3195 struct smbcli_request *c_req;
3196 struct proxy_file *f;
3198 SETUP_PID;
3200 if (io->generic.level != RAW_WRITE_GENERIC &&
3201 private->map_generic) {
3202 return ntvfs_map_write(ntvfs, req, io);
3204 SETUP_FILE_HERE(f);
3206 DEBUG(5,("proxy_write offset=%lld size=%d\n",io->generic.in.offset, io->generic.in.count));
3207 #warning ERROR get rid of this
3208 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3209 NTSTATUS status;
3210 if (PROXY_REMOTE_SERVER(private)) {
3211 /* Do a proxy write */
3212 status=proxy_smb_raw_write(ntvfs, io, f);
3213 } else if (io->generic.in.count >
3214 private->tree->session->transport->negotiate.max_xmit) {
3216 /* smbcli_write can deal with large writes, which are bigger than
3217 tree->session->transport->negotiate.max_xmit */
3218 ssize_t size=smbcli_write(private->tree,
3219 io->generic.in.file.fnum,
3220 io->generic.in.wmode,
3221 io->generic.in.data,
3222 io->generic.in.offset,
3223 io->generic.in.count);
3225 if (size==io->generic.in.count || size > 0) {
3226 io->generic.out.nwritten=size;
3227 status=NT_STATUS_OK;
3228 } else {
3229 status=NT_STATUS_UNSUCCESSFUL;
3231 } else {
3232 status=smb_raw_write(private->tree, io);
3235 /* Save write in cache */
3236 if (NT_STATUS_IS_OK(status)) {
3237 cache_handle_save(f, io->generic.in.data,
3238 io->generic.out.nwritten,
3239 io->generic.in.offset);
3240 if (f->metadata->info_data.size <
3241 io->generic.in.offset+io->generic.in.count) {
3242 f->metadata->info_data.size=io->generic.in.offset+io->generic.in.count;
3246 return status;
3249 /* smb_raw_write_send can't deal with large writes, which are bigger than
3250 tree->session->transport->negotiate.max_xmit so we have to break it up
3251 trying to preserve the async nature of the call as much as possible */
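/* Shape of the split performed below (the max_xmit of 16644 is an assumed
   typical value): each fragment carries block = max_xmit - (MIN_SMB_SIZE+32)
   bytes, and offset/data/count are advanced by "done" as the loop goes, so
   a 1MiB writex becomes a series of ordinary ~16k writes as far as the
   remote server is concerned. */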
3252 if (PROXY_REMOTE_SERVER(private)) {
3253 DEBUG(5,("== %s call proxy_smb_raw_write_send\n",__FUNCTION__));
3254 c_req = proxy_smb_raw_write_send(ntvfs, io, f);
3255 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3256 } else if (io->generic.in.count <=
3257 private->tree->session->transport->negotiate.max_xmit) {
3258 DEBUG(5,("== %s call smb_raw_write_send\n",__FUNCTION__));
3259 c_req = smb_raw_write_send(private->tree, io);
3260 ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3261 } else {
3262 ssize_t remaining = io->generic.in.count;
3263 #warning Need an audit of these magic numbers MIN_SMB_SIZE+32
3264 int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32));
3265 int done = 0;
3266 struct async_write_fragments *fragments = talloc_zero(req, struct async_write_fragments);
3268 DEBUG(3,("== %s Client sending too-big write sized %d, negotiated limit %d\n",
3269 __FUNCTION__, io->generic.in.count,
3270 private->tree->session->transport->negotiate.max_xmit));
3272 fragments->io = io;
3273 io->generic.out.nwritten=0;
3274 io->generic.out.remaining=0;
3276 do {
3277 union smb_write *io_frag = talloc_zero(fragments, union smb_write);
3278 struct async_write_fragment *fragment = talloc_zero(fragments, struct async_write_fragment);
3279 ssize_t size = MIN(block, remaining);
3281 fragment->fragments = fragments;
3282 fragment->io_frag = io_frag;
3284 io_frag->generic.level = io->generic.level;
3285 io_frag->generic.in.file.fnum = io->generic.in.file.fnum;
3286 io_frag->generic.in.wmode = io->generic.in.wmode;
3287 io_frag->generic.in.count = size;
3288 io_frag->generic.in.offset = io->generic.in.offset + done;
3289 io_frag->generic.in.data = io->generic.in.data + done;
3291 c_req = proxy_smb_raw_write_send(ntvfs, io_frag, f);
3292 if (! c_req) {
3293 /* let pending requests clean-up when ready */
3294 fragments->status=NT_STATUS_UNSUCCESSFUL;
3295 talloc_steal(NULL, fragments);
3296 DEBUG(3,("Can't send request fragment\n"));
3297 return NT_STATUS_UNSUCCESSFUL;
3300 DEBUG(5,("Frag write sent offset=%lld size=%d MID=%d\n",
3301 io_frag->generic.in.offset,io_frag->generic.in.count,c_req->mid));
3302 fragment->c_req=c_req;
3303 DLIST_ADD(fragments->fragments, fragment);
3305 // ADD_ASYNC_RECV_TAIL(c_req, io_frag, NULL, f, async_write_cache_save, NT_STATUS_INTERNAL_ERROR);
3306 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_write_fragment, NT_STATUS_INTERNAL_ERROR);
3307 DEBUG(5,("Frag response chained\n"));
3309 remaining -= size;
3310 done += size;
3311 } while(remaining > 0);
3313 /* this strategy has the callback chain attached to each c_req, so we
3314 don't use the ASYNC_RECV_TAIL* to install a general one */
3317 ASYNC_RECV_TAIL_HANDLER(io, async_chain_handler);
3321 a handler for async seek replies
3323 static void async_seek(struct smbcli_request *c_req)
3325 struct async_info *async = c_req->async.private;
3326 struct ntvfs_request *req = async->req;
3327 req->async_states->status = smb_raw_seek_recv(c_req, async->parms);
3328 talloc_free(async);
3329 req->async_states->send_fn(req);
3333 seek in a file
3335 static NTSTATUS proxy_seek(struct ntvfs_module_context *ntvfs,
3336 struct ntvfs_request *req,
3337 union smb_seek *io)
3339 struct proxy_private *private = ntvfs->private_data;
3340 struct smbcli_request *c_req;
3342 SETUP_PID_AND_FILE;
3344 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3345 return smb_raw_seek(private->tree, io);
3348 c_req = smb_raw_seek_send(private->tree, io);
3350 ASYNC_RECV_TAIL(io, async_seek);
3354 flush a file
3356 static NTSTATUS proxy_flush(struct ntvfs_module_context *ntvfs,
3357 struct ntvfs_request *req,
3358 union smb_flush *io)
3360 struct proxy_private *private = ntvfs->private_data;
3361 struct smbcli_request *c_req;
3363 SETUP_PID;
3364 switch (io->generic.level) {
3365 case RAW_FLUSH_FLUSH:
3366 SETUP_FILE;
3367 break;
3368 case RAW_FLUSH_ALL:
3369 io->generic.in.file.fnum = 0xFFFF;
3370 break;
3371 case RAW_FLUSH_SMB2:
3372 return NT_STATUS_INVALID_LEVEL;
3375 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3376 return smb_raw_flush(private->tree, io);
3379 c_req = smb_raw_flush_send(private->tree, io);
3381 SIMPLE_ASYNC_TAIL;
3385 close a file
3387 static NTSTATUS proxy_close(struct ntvfs_module_context *ntvfs,
3388 struct ntvfs_request *req, union smb_close *io)
3390 struct proxy_private *private = ntvfs->private_data;
3391 struct smbcli_request *c_req;
3392 struct proxy_file *f;
3393 union smb_close io2;
3394 bool can_clone;
3396 SETUP_PID;
3398 if (io->generic.level != RAW_CLOSE_GENERIC &&
3399 private->map_generic) {
3400 return ntvfs_map_close(ntvfs, req, io);
3402 SETUP_FILE_HERE(f);
3403 /* we free the backend data before we use this value, so save it */
3404 can_clone=f->can_clone;
3405 /* Note, we aren't freeing f, or its h, here. Should we?
3406 even if file-close fails, we'll remove it from the list,
3407 what else would we do? Maybe we should not remove until
3408 after the proxied call completes? */
3409 DLIST_REMOVE(private->files, f);
3411 /* Don't send the close on cloned handles unless we are the last one */
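/* Restating the check below: clones of a file share one metadata block,
   and metadata->count tracks how many local handles reference the same
   remote open. Only the close that drops the count to zero sends a real
   close to the server; the others are acknowledged locally. */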
3412 if (f->metadata && --(f->metadata->count)) {
3413 DEBUG(5,("%s: Fake close of %d, %d left\n",__FUNCTION__,f->fnum, f->metadata->count));
3414 return NT_STATUS_OK;
3416 DEBUG(5,("%s: Real close of %d\n",__FUNCTION__, f->fnum));
3417 /* only close the cache if we aren't keeping references */
3418 //cache_close(f->cache);
3420 /* possibly samba can't do RAW_CLOSE_SEND yet */
3421 if (! (c_req = smb_raw_close_send(private->tree, io))) {
3422 if (io->generic.level == RAW_CLOSE_GENERIC) {
3423 ZERO_STRUCT(io2);
3424 io2.close.level = RAW_CLOSE_CLOSE;
3425 io2.close.in.file = io->generic.in.file;
3426 io2.close.in.write_time = io->generic.in.write_time;
3427 io = &io2;
3429 c_req = smb_raw_close_send(private->tree, io);
3430 /* destroy handle */
3431 ntvfs_handle_remove_backend_data(f->h, ntvfs);
3434 /* If it is read-only, don't bother waiting for the result */
3435 if (can_clone) {
3436 DEBUG(5,("%s: not waiting for close response fnum=%d\n",__FUNCTION__,f->fnum));
3437 return NT_STATUS_OK;
3440 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3441 return smbcli_request_simple_recv(c_req);
3443 DEBUG(0,("%s\n",__LOCATION__));
3444 SIMPLE_ASYNC_TAIL;
3448 exit - closing files open by the pid
3450 static NTSTATUS proxy_exit(struct ntvfs_module_context *ntvfs,
3451 struct ntvfs_request *req)
3453 struct proxy_private *private = ntvfs->private_data;
3454 struct smbcli_request *c_req;
3456 SETUP_PID;
3458 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3459 return smb_raw_exit(private->tree->session);
3462 c_req = smb_raw_exit_send(private->tree->session);
3464 SIMPLE_ASYNC_TAIL;
3468 logoff - closing files open by the user
3470 static NTSTATUS proxy_logoff(struct ntvfs_module_context *ntvfs,
3471 struct ntvfs_request *req)
3473 /* we can't do this right in the proxy backend .... */
3474 return NT_STATUS_OK;
3478 setup for an async call - nothing to do yet
3480 static NTSTATUS proxy_async_setup(struct ntvfs_module_context *ntvfs,
3481 struct ntvfs_request *req,
3482 void *private)
3484 return NT_STATUS_OK;
3488 cancel an async call
3490 static NTSTATUS proxy_cancel(struct ntvfs_module_context *ntvfs,
3491 struct ntvfs_request *req)
3493 struct proxy_private *private = ntvfs->private_data;
3494 struct async_info *a;
3496 /* find the matching request */
3497 for (a=private->pending;a;a=a->next) {
3498 if (a->req == req) {
3499 break;
3503 if (a == NULL) {
3504 return NT_STATUS_INVALID_PARAMETER;
3507 return smb_raw_ntcancel(a->c_req);
3511 lock a byte range
3513 static NTSTATUS proxy_lock(struct ntvfs_module_context *ntvfs,
3514 struct ntvfs_request *req, union smb_lock *io)
3516 struct proxy_private *private = ntvfs->private_data;
3517 struct smbcli_request *c_req;
3519 SETUP_PID;
3521 if (io->generic.level != RAW_LOCK_GENERIC &&
3522 private->map_generic) {
3523 return ntvfs_map_lock(ntvfs, req, io);
3525 SETUP_FILE;
3527 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3528 return smb_raw_lock(private->tree, io);
3531 c_req = smb_raw_lock_send(private->tree, io);
3532 SIMPLE_ASYNC_TAIL;
3536 set info on a open file
3538 static NTSTATUS proxy_setfileinfo(struct ntvfs_module_context *ntvfs,
3539 struct ntvfs_request *req,
3540 union smb_setfileinfo *io)
3542 struct proxy_private *private = ntvfs->private_data;
3543 struct smbcli_request *c_req;
3545 SETUP_PID_AND_FILE;
3547 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3548 return smb_raw_setfileinfo(private->tree, io);
3550 c_req = smb_raw_setfileinfo_send(private->tree, io);
3552 SIMPLE_ASYNC_TAIL;
3557 a handler for async fsinfo replies
3559 static void async_fsinfo(struct smbcli_request *c_req)
3561 struct async_info *async = c_req->async.private;
3562 struct ntvfs_request *req = async->req;
3563 union smb_fsinfo *fs = async->parms;
3564 struct proxy_private *private = async->proxy;
3566 req->async_states->status = smb_raw_fsinfo_recv(c_req, req, fs);
3568 if (NT_STATUS_IS_OK(req->async_states->status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3569 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3570 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3571 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3572 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3573 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3574 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3578 talloc_free(async);
3579 req->async_states->send_fn(req);
3583 return filesystem space info
3585 static NTSTATUS proxy_fsinfo(struct ntvfs_module_context *ntvfs,
3586 struct ntvfs_request *req, union smb_fsinfo *fs)
3588 struct proxy_private *private = ntvfs->private_data;
3589 struct smbcli_request *c_req;
3591 SETUP_PID;
3593 DEBUG(5,("%s: level %x\n",__LOCATION__,fs->generic.level));
3594 /* this value is easy to cache */
3595 if ((fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3596 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO) &&
3597 private->fs_attribute_info) {
3598 DEBUG(5,("%s: using cached fsinfo\n",__LOCATION__));
3599 fs->attribute_info.out.fs_attr=private->fs_attribute_info->fs_attr;
3600 fs->attribute_info.out.max_file_component_length=private->fs_attribute_info->max_file_component_length;
3601 fs->attribute_info.out.fs_type=talloc_smb_wire_string_dup(req, &(private->fs_attribute_info->fs_type));
3602 return NT_STATUS_OK;
3605 /* QFS Proxy */
3606 if (fs->generic.level == RAW_QFS_PROXY_INFO) {
3607 fs->proxy_info.out.major_version=1;
3608 fs->proxy_info.out.minor_version=0;
3609 fs->proxy_info.out.capability=0;
3610 return NT_STATUS_OK;
3613 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
3614 NTSTATUS status = smb_raw_fsinfo(private->tree, req, fs);
3615 if (NT_STATUS_IS_OK(status) && (fs->generic.level == RAW_QFS_ATTRIBUTE_INFORMATION ||
3616 fs->generic.level == RAW_QFS_ATTRIBUTE_INFO)) {
3617 if (! private->fs_attribute_info && (private->fs_attribute_info=talloc_zero(private, struct fs_attribute_info))) {
3618 DEBUG(5,("%s: caching fs_attribute_info\n",__LOCATION__));
3619 private->fs_attribute_info->fs_attr=fs->attribute_info.out.fs_attr;
3620 private->fs_attribute_info->max_file_component_length=fs->attribute_info.out.max_file_component_length;
3621 private->fs_attribute_info->fs_type=talloc_smb_wire_string_dup(private, &(fs->attribute_info.out.fs_type));
3624 return status;
3626 c_req = smb_raw_fsinfo_send(private->tree, req, fs);
3628 ASYNC_RECV_TAIL(fs, async_fsinfo);
3632 return print queue info
3634 static NTSTATUS proxy_lpq(struct ntvfs_module_context *ntvfs,
3635 struct ntvfs_request *req, union smb_lpq *lpq)
3637 return NT_STATUS_NOT_SUPPORTED;
3640 /*
3641 find_first / find_next caching.
3642 For now, cache based on directory, search_attributes, search_pattern, ea stuff
3643 Consider in response:
3644 * search id
3645 * search count
3646 * end of search
3647 * ea stuff */
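/* Example of the intended keying (see the struct search_cache_key fields
   filled in below): two TRANS2 find_firsts issued with the same level,
   data_level, search_attrib, pattern, flags and storage_type map to the
   same cache entry, so the second can be answered without another round
   trip to the remote server. */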
3650 static union smb_search_data *smb_search_data_dup(void* mem_ctx, const union smb_search_data *file, enum smb_search_data_level data_level) {
3651 union smb_search_data *result;
3652 struct smb_wire_string *name;
3654 result=talloc_zero(mem_ctx, union smb_search_data);
3655 if (! result) {
3656 return result;
3659 *result = *file;
3661 switch(data_level) {
3662 case RAW_SEARCH_DATA_SEARCH:
3663 if (! (result->search.name=talloc_strdup(mem_ctx, file->search.name))) goto error;
3664 break;
3665 case RAW_SEARCH_DATA_STANDARD:
3666 if (sws_dup(result, result->standard.name, file->standard.name)) goto error;
3667 break;
3668 case RAW_SEARCH_DATA_EA_SIZE:
3669 if (sws_dup(result, result->ea_size.name, file->ea_size.name)) goto error;
3670 break;
3671 case RAW_SEARCH_DATA_EA_LIST:
3672 if (sws_dup(result, result->ea_list.name, file->ea_list.name)) goto error;
3673 break;
3674 case RAW_SEARCH_DATA_DIRECTORY_INFO:
3675 if (sws_dup(result, result->directory_info.name, file->directory_info.name)) goto error;
3676 break;
3677 case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
3678 if (sws_dup(result, result->full_directory_info.name, file->full_directory_info.name)) goto error;
3679 break;
3680 case RAW_SEARCH_DATA_NAME_INFO:
3681 if (sws_dup(result, result->name_info.name, file->name_info.name)) goto error;
3682 break;
3683 case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
3684 if (sws_dup(result, result->both_directory_info.name, file->both_directory_info.name)) goto error;
3685 if (sws_dup(result, result->both_directory_info.short_name, file->both_directory_info.short_name)) goto error;
3686 break;
3687 case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
3688 if (sws_dup(result, result->id_full_directory_info.name, file->id_full_directory_info.name)) goto error;
3689 break;
3690 case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
3691 if (sws_dup(result, result->id_both_directory_info.name, file->id_both_directory_info.name)) goto error;
3692 if (sws_dup(result, result->id_both_directory_info.short_name, file->id_both_directory_info.short_name)) goto error;
3693 break;
3694 case RAW_SEARCH_DATA_UNIX_INFO:
3695 if (! (result->unix_info.name=talloc_strdup(mem_ctx, file->unix_info.name))) goto error;
3696 break;
3697 case RAW_SEARCH_DATA_UNIX_INFO2:
3698 if (sws_dup(result, result->unix_info2.name, file->unix_info2.name)) goto error;
3699 break;
3700 default:
3701 DEBUG(5,("%s: Error can't dup an unknown file data type: %x\n", __LOCATION__, data_level));
3702 goto error;
3704 return result;
3705 error:
3706 talloc_free(result);
3707 return NULL;
3710 /* callback function for search first/next */
3711 static bool find_callback(void *private, const union smb_search_data *file)
3713 struct search_state *state = (struct search_state *)private;
3714 struct search_handle *search_handle = state->search_handle;
3715 bool status;
3717 /* if we have a cache, copy this data */
3718 if (search_handle->cache) {
3719 struct search_cache_item *item = talloc_zero(search_handle->cache, struct search_cache_item);
3720 DEBUG(5,("%s: Copy %p to cache %p\n", __LOCATION__, item, search_handle->cache));
3721 if (item) {
3722 item->data_level=search_handle->data_level;
3723 item->file = smb_search_data_dup(item, file, item->data_level);
3724 if (! item->file) {
3725 talloc_free(item);
3726 item=NULL;
3729 if (item) {
3730 /* optimization to save enumerating the entire list each time to find the end.
3731 The cached last_item is very short-lived; it doesn't matter if something has
3732 been added since, as long as it hasn't been removed */
3733 if (state->last_item) {
3734 DLIST_ADD_END(state->last_item, item, struct search_cache_item*);
3735 } else {
3736 DLIST_ADD_END(search_handle->cache->items, item, struct search_cache_item*);
3738 state->last_item=item;
3739 state->all_count++;
3740 } else {
3741 DEBUG(5,("%s: Could not add name to search cache %p, invalidating cache\n", __LOCATION__, search_handle->cache));
3742 /* dear me, the whole cache will be invalid if we miss data */
3743 search_handle->cache->status=SEARCH_CACHE_DEAD;
3744 /* remove from the list of caches to use */
3745 DLIST_REMOVE(search_handle->cache->proxy->search_caches, search_handle->cache);
3746 /* Make it feel unwanted */
3747 talloc_unlink(private, search_handle->cache);
3748 talloc_unlink(search_handle, search_handle->cache);
3749 //if (talloc_unlink(search_handle, search_handle->cache)==0) {
3750 //talloc_free(search_handle->cache);
3752 /* stop us using it for this search too */
3753 search_handle->cache=NULL;
3757 status=state->callback(state->private, file);
3758 if (status) {
3759 state->count++;
3761 return status;
3765 list files in a directory matching a wildcard pattern
3767 static NTSTATUS proxy_search_first(struct ntvfs_module_context *ntvfs,
3768 struct ntvfs_request *req, union smb_search_first *io,
3769 void *search_private,
3770 bool (*callback)(void *, const union smb_search_data *))
3772 struct proxy_private *private = ntvfs->private_data;
3773 struct search_state *state;
3774 struct search_cache *search_cache=NULL;
3775 struct search_cache_key search_cache_key={0};
3776 struct ntvfs_handle *h=NULL;
3777 struct search_handle *s;
3778 uint16_t max_count;
3779 NTSTATUS status;
3781 SETUP_PID;
3783 if (! private->enabled_proxy_search) {
3784 return smb_raw_search_first(private->tree, req, io, search_private, callback);
3786 switch (io->generic.level) {
3787 /* case RAW_SEARCH_DATA_SEARCH:
3788 search_cache_key.search_attrib=io->search_first.in.search_attrib;
3789 search_cache_key.pattern=io->search_first.in.pattern;
3790 max_count = io->search_first.in.max_count;
3791 search_cache = find_search_cache(private->search_cache, &search_cache_key);
3792 break;*/
3793 case RAW_SEARCH_TRANS2:
3794 io->t2ffirst.in.max_count=MIN(io->t2ffirst.in.max_count,80);
3795 max_count = io->t2ffirst.in.max_count;
3797 search_cache_key.level=io->generic.level;
3798 search_cache_key.data_level=io->generic.data_level;
3799 search_cache_key.search_attrib=io->t2ffirst.in.search_attrib;
3800 search_cache_key.pattern=io->t2ffirst.in.pattern;
3801 search_cache_key.flags=io->t2ffirst.in.flags;
3802 search_cache_key.storage_type=io->t2ffirst.in.storage_type;
3803 /* try and find a search cache that is complete */
3804 search_cache = find_search_cache(private->search_caches, &search_cache_key);
3806 /* do handle mapping for TRANS2 */
3807 status = ntvfs_handle_new(ntvfs, req, &h);
3808 NT_STATUS_NOT_OK_RETURN(status);
3810 DEBUG(5,("%s: RAW_SEARCH_TRANS2 %s limit %d, cache=%p level=%x\n",__LOCATION__, search_cache_key.pattern, max_count, search_cache, search_cache_key.data_level));
3811 break;
3812 default: /* won't cache or proxy this */
3813 return smb_raw_search_first(private->tree, req, io, search_private, callback);
3816 /* finish setting up mapped handle */
3817 if (h) {
3818 s = talloc_zero(h, struct search_handle);
3819 NT_STATUS_HAVE_NO_MEMORY(s);
3820 s->proxy=private;
3821 talloc_set_destructor(s, search_handle_destructor);
3822 s->h=h;
3823 s->level=io->generic.level;
3824 s->data_level=io->generic.data_level;
3825 status = ntvfs_handle_set_backend_data(s->h, private->ntvfs, s);
3826 NT_STATUS_NOT_OK_RETURN(status);
3827 DLIST_ADD(private->search_handles, s);
3828 DEBUG(5,("%s: map handle create %d\n",__LOCATION__, smbsrv_fnum(h)));
3831 /* satisfy from cache */
3832 if (search_cache) {
3833 struct search_cache_item* item=search_cache->items;
3834 uint16_t count=0;
3836 /* stop cache going away while we are using it */
3837 s->cache = talloc_reference(s, search_cache);
3838 DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
3839 /* Don't offer over the limit, but only count those that were accepted */
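/* Note on the idiom below: the DLIST_FIND predicate doubles as the
   delivery loop. It stays false (so the scan continues) while the callback
   accepts items and count < max_count, and turns true, which stops the
   scan, at the first refused item or once the limit is reached; "item" is
   then left pointing at the resume position. */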
3840 DLIST_FIND(search_cache->items, item, !(count < max_count && callback(search_private, item->file) && ++count) );
3841 io->t2ffirst.out.count=count;
3842 s->resume_item=item;
3843 /* just because callback didn't accept any doesn't mean we are finished */
3844 if (item == NULL) {
3845 /* currently only caching for t2ffirst */
3846 io->t2ffirst.out.end_of_search = true;
3847 DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
3848 } else {
3849 /* count the rest */
3850 io->t2ffirst.out.end_of_search = false;
3851 DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
3852 DLIST_FOR_EACH(item, item, count++);
3853 DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
3856 if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3857 io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE)
3859 /* destroy handle */
3860 ntvfs_handle_remove_backend_data(h, ntvfs);
3861 io->t2ffirst.out.handle=0;
3862 } else {
3863 /* now map handle */
3864 io->t2ffirst.out.handle=smbsrv_fnum(h);
3866 return NT_STATUS_OK;
3869 state = talloc_zero(req, struct search_state);
3870 NT_STATUS_HAVE_NO_MEMORY(state);
3872 /* if there isn't a matching cache already being generated by another search,
3873 start one, unless FLAG_TRANS2_FIND_BACKUP_INTENT, which is always live */
3874 if (!(io->t2ffirst.in.flags & FLAG_TRANS2_FIND_BACKUP_INTENT) &&
3875 find_partial_search_cache(private->search_caches, &search_cache_key) == NULL) {
3876 /* need to opendir the folder being searched so we can get a notification */
3877 struct search_cache *search_cache=NULL;
3879 search_cache=new_search_cache(private, &search_cache_key);
3880 /* Stop cache going away while we are using it */
3881 if (search_cache) {
3882 s->cache=talloc_reference(s, search_cache);
3886 /* stop the handle going away while we are using it */
3887 state->search_handle=talloc_reference(state, s);
3888 state->private=search_private;
3889 state->callback=callback;
3891 status=smb_raw_search_first(private->tree, req, io, state, find_callback);
3892 // if (! NT_STATUS_IS_OK(status)) {
3893 // return (status);
3894 // }
3895 if (! NT_STATUS_IS_OK(status)) {
3896 if (s->cache) {
3897 DLIST_REMOVE(private->search_caches, s->cache);
3898 talloc_unlink(private, s->cache);
3899 talloc_unlink(s, s->cache);
3900 //if (talloc_unlink(s, s->cache)==0) {
3901 //talloc_free(s->cache);
3903 s->cache=NULL;
3905 s->h=NULL;
3906 ntvfs_handle_remove_backend_data(h, ntvfs);
3907 return (status);
3909 // DEBUG(1,("%s: %p; %s\n",__LOCATION__,io,get_friendly_nt_error_msg (status)));
3910 DEBUG(5,("%s: max %d, got %d, copied %d; %s\n",__LOCATION__,io->t2ffirst.out.count,state->all_count, state->count,get_friendly_nt_error_msg (status)));
3912 #warning check NT_STATUS_IS_OK ?
3913 if (io->t2ffirst.out.end_of_search) {
3914 /* cache might have gone away if problem filling */
3915 if (s->cache) {
3916 DEBUG(5,("B\n"));
3917 s->cache->status = SEARCH_CACHE_COMPLETE;
3918 DEBUG(5,("%s: Cache %p filled in first go!\n",__LOCATION__, s->cache));
3921 if ((io->t2ffirst.out.end_of_search && io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
3922 io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) {
3923 DEBUG(5,("%s: Closing search\n",__LOCATION__));
3924 /* destroy partial cache */
3925 if (s->cache && (io->t2ffirst.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
3926 ! io->t2ffirst.out.end_of_search) {
3927 DEBUG(5,("%s: Destroying cache %p\n",__LOCATION__, s->cache));
3928 /* cache is no good now! */
3929 DLIST_REMOVE(private->search_caches, s->cache);
3930 talloc_unlink(private, s->cache);
3931 talloc_unlink(s, s->cache);
3932 //if (talloc_unlink(s, s->cache)==0) {
3933 //talloc_free(s->cache);
3935 s->cache=NULL;
3937 if (s->cache) {
3938 s->cache->status=SEARCH_CACHE_COMPLETE;
3940 /* Need to deal with the case when the client would not take them all but we still cache them
3941 if (state->count < io->t2ffirst.out.count && io->t2ffirst.out.end_of_search) {
3942 io->t2ffirst.out.end_of_search = false;
3943 //s->resume_item = state->last_item;
3944 } */
3945 /* destroy handle */
3946 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
3947 ntvfs_handle_remove_backend_data(h, ntvfs);
3948 io->t2ffirst.out.handle=0;
3949 } else {
3950 s->handle = io->t2ffirst.out.handle;
3951 io->t2ffirst.out.handle=smbsrv_fnum(h);
3953 io->t2ffirst.out.count=state->count;
3954 return status;
3957 #define DLIST_FIND_NEXT(start, item, test) do {\
3958 DLIST_FIND(start, item, test); \
3959 if (item) (item)=(item)->next; \
3960 } while(0)
3961 #define DLIST_TALLOC_FREE(list) do {\
3962 while(list) { \
3963 void *tmp=(list); \
3964 (list)=(list)->next; \
3965 talloc_free(tmp); \
3967 } while(0)
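/* Usage sketch for the two helpers above (hypothetical fragment, mirroring
   the resume-key case in proxy_search_next below; "cache" and "key" are
   made-up names):

   struct search_cache_item *start_at=NULL;
   DLIST_FIND_NEXT(cache->items, start_at,
   key == start_at->file->standard.resume_key);
   ... offer items from start_at onwards to the client ...
   DLIST_TALLOC_FREE(start_at); frees start_at and everything after it
*/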
3969 /* continue a search */
3970 static NTSTATUS proxy_search_next(struct ntvfs_module_context *ntvfs,
3971 struct ntvfs_request *req, union smb_search_next *io,
3972 void *search_private,
3973 bool (*callback)(void *, const union smb_search_data *))
3975 struct proxy_private *private = ntvfs->private_data;
3976 struct search_state *state;
3977 struct ntvfs_handle *h=NULL;
3978 struct search_handle *s;
3979 const struct search_cache *search_cache=NULL;
3980 struct search_cache_item *start_at=NULL;
3981 uint16_t max_count;
3982 NTSTATUS status;
3984 SETUP_PID;
3986 if (! private->enabled_proxy_search) {
3987 return smb_raw_search_next(private->tree, req, io, search_private, callback);
3989 switch (io->generic.level) {
3990 case RAW_SEARCH_TRANS2:
3991 io->t2fnext.in.max_count=MIN(io->t2fnext.in.max_count,80);
3992 max_count = io->t2fnext.in.max_count;
3994 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->t2fnext.in.handle), struct ntvfs_handle);
3995 if (! h) return NT_STATUS_INVALID_HANDLE;
3996 /* convert handle into search_cache */
3997 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
3998 if (! s) return NT_STATUS_INVALID_HANDLE;
3999 search_cache=s->cache;
4000 DEBUG(5,("%s: RAW_SEARCH_TRANS2 find_next h=%d [real %d] count %d, cache=%p\n",__LOCATION__, io->t2fnext.in.handle, s->handle, max_count, search_cache));
4001 io->t2fnext.in.handle=s->handle;
4002 if (! search_cache) {
4003 break;
4006 /* warning if: uint16_t flags or const char *last_name have changed, abort cache */
4007 /* skip up to resume key */
4008 /* TODO: resume key may be PRIOR to where we left off... in which case
4009 we need to avoid duplicating values */
4010 if (search_cache /*&& search_cache->status == SEARCH_CACHE_COMPLETE*/) {
4011 DEBUG(5,("%s: seek resume position\n",__LOCATION__));
4012 /* work out where in the cache to continue from */
4013 switch (io->generic.data_level) {
4014 case RAW_SEARCH_DATA_STANDARD:
4015 case RAW_SEARCH_DATA_EA_SIZE:
4016 case RAW_SEARCH_DATA_EA_LIST:
4017 /* have a resume key? */
4018 DEBUG(5,("%s: type %x seek on %x\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.resume_key));
4019 DLIST_FIND_NEXT(search_cache->items, start_at, io->t2fnext.in.resume_key == start_at->file->standard.resume_key);
4020 break;
4021 case RAW_SEARCH_DATA_DIRECTORY_INFO: /* TODO: maybe these should be strcasecmp for some filesystems */
4022 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4023 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->directory_info.name.s)==0);
4024 break;
4025 case RAW_SEARCH_DATA_FULL_DIRECTORY_INFO:
4026 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4027 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->full_directory_info.name.s)==0);
4028 break;
4029 case RAW_SEARCH_DATA_NAME_INFO:
4030 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4031 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->name_info.name.s)==0);
4032 break;
4033 case RAW_SEARCH_DATA_BOTH_DIRECTORY_INFO:
4034 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4035 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->both_directory_info.name.s)==0);
4036 break;
4037 case RAW_SEARCH_DATA_ID_FULL_DIRECTORY_INFO:
4038 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4039 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_full_directory_info.name.s)==0);
4040 break;
4041 case RAW_SEARCH_DATA_ID_BOTH_DIRECTORY_INFO:
4042 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4043 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->id_both_directory_info.name.s)==0);
4044 break;
4045 case RAW_SEARCH_DATA_UNIX_INFO:
4046 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4047 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info.name)==0);
4048 break;
4049 case RAW_SEARCH_DATA_UNIX_INFO2:
4050 DEBUG(5,("%s: type %x seek on %s\n",__LOCATION__, io->generic.data_level, io->t2fnext.in.last_name));
4051 DLIST_FIND_NEXT(search_cache->items, start_at, fstrcmp(io->t2fnext.in.last_name, start_at->file->unix_info2.name.s)==0);
4052 break;
4053 default:
4054 if (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CONTINUE) {
4055 start_at = s->resume_item;
4056 } else {
4057 DEBUG(5,("%s: HELP! How can we resume?\n",__LOCATION__));
4058 start_at = s->resume_item;
4061 DEBUG(5,("%s: Start at %p\n",__LOCATION__,start_at));
4063 break;
4066 if (! search_cache) {
4067 DEBUG(5,("%s: No cache, pass-through\n",__LOCATION__));
4068 return smb_raw_search_next(private->tree, req, io, search_private, callback);
4070 //#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference((ctx),(ptr))
4071 //surely should be
4072 //#define talloc_reference(ctx, ptr) _talloc_reference((ctx),(ptr))?(ptr):(NULL) to preserve the type of ptr
4074 /* satisfy from cache */
4075 if (search_cache->status == SEARCH_CACHE_COMPLETE) {
4076 struct search_cache_item* item;
4077 uint16_t count=0;
4078 DEBUG(5,("%s: Serving from cache: %p\n",__LOCATION__, search_cache));
4080 if (! start_at) {
4081 start_at = search_cache->items;
4084 DLIST_FIND(start_at, item, !(count < max_count && callback(search_private, item->file) && ++count) );
4085 io->t2fnext.out.count=count;
4086 s->resume_item=item;
4087 if (item == NULL) {
4088 DEBUG(5,("%s: Serving from cache complete at %d\n", __LOCATION__, count));
4089 io->t2fnext.out.end_of_search = true;
4090 } else {
4091 DEBUG(5,("%s: Serving from cache incomplete at %d\n", __LOCATION__, count));
4092 io->t2fnext.out.end_of_search = false;
4093 /* count the rest */
4094 DLIST_FOR_EACH(item, item, count++);
4095 DEBUG(5,("%s: Serving from cache max_count %d\n", __LOCATION__, count));
4097 /* is it the end? */
4098 if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
4099 io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
4101 /* destroy handle */
4102 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
4103 ntvfs_handle_remove_backend_data(h, ntvfs);
4106 return NT_STATUS_OK;
4109 /* pass-through and fill-cache */
4110 if (start_at) {
4111 /* risk of duplicate data */
4112 DEBUG(5,("\n\n\nCache-populating search has resumed but NOT where we left off!\n\n\n-d"));
4113 /* free everything from start_at onwards through start_at->next */
4114 /* cut from the list */
4115 start_at->prev->next=NULL;
4116 start_at->prev=NULL;
4117 /* now how to free a list? */
4118 DLIST_TALLOC_FREE(start_at);
4120 state = talloc_zero(req, struct search_state);
4121 NT_STATUS_HAVE_NO_MEMORY(state);
4123 state->search_handle=talloc_reference(state, s);
4124 state->private=search_private;
4125 state->callback=callback;
4127 status = smb_raw_search_next(private->tree, req, io, state, find_callback);
4128 if (! NT_STATUS_IS_OK(status)) {
4129 if (s->cache) {
4130 DLIST_REMOVE(private->search_caches, s->cache);
4131 talloc_unlink(private, s->cache);
4132 talloc_unlink(s, s->cache);
4133 //if (talloc_unlink(s, s->cache)==0) {
4134 //talloc_free(s->cache);
4136 s->cache=NULL;
4138 s->h=NULL;
4139 ntvfs_handle_remove_backend_data(h, ntvfs);
4140 return (status);
4143 DEBUG(5,("%s: max %d, got %d, copied %d; %s\n",__LOCATION__,io->t2fnext.out.count,state->all_count, state->count,get_friendly_nt_error_msg (status)));
4145 /* if closing, then close */
4146 if ((io->t2fnext.out.end_of_search && io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) ||
4147 io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE)
4149 if (s->cache && (io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE) &&
4150 ! io->t2fnext.out.end_of_search) {
4151 /* partial cache is useless */
4152 DLIST_REMOVE(private->search_caches, s->cache);
4153 talloc_unlink(private, s->cache);
4154 talloc_unlink(s, s->cache);
4155 //if (talloc_unlink(s, s->cache)==0) {
4156 //talloc_free(s->cache);
4158 s->cache=NULL;
4160 if (s->cache) {
4161 s->cache->status=SEARCH_CACHE_COMPLETE;
4162 /* Need to deal with the case when the client would not take them all but we still cache them
4163 if (state->count < io->t2fnext.out.count && io->t2fnext.out.end_of_search) {
4164 io->t2fnext.out.end_of_search = false;
4165 } */
4167 /* destroy handle */
4168 DEBUG(5,("%s: Removing handle %p\n",__LOCATION__,h));
4169 ntvfs_handle_remove_backend_data(h, ntvfs);
4171 io->t2fnext.out.count=state->count;
4173 return status;
4176 /* close a search */
4177 static NTSTATUS proxy_search_close(struct ntvfs_module_context *ntvfs,
4178 struct ntvfs_request *req, union smb_search_close *io)
4180 struct proxy_private *private = ntvfs->private_data;
4181 struct ntvfs_handle *h=NULL;
4182 struct search_handle *s;
4183 NTSTATUS status;
4185 SETUP_PID;
4187 if (! private->enabled_proxy_search) {
4188 return smb_raw_search_close(private->tree, io);
4190 switch (io->generic.level) {
4191 case RAW_SEARCH_TRANS2:
4192 h = talloc_get_type(ntvfs_find_handle(ntvfs, req, io->findclose.in.handle), struct ntvfs_handle);
4193 if (! h) return NT_STATUS_INVALID_HANDLE;
4194 /* convert handle into search_cache */
4195 s=talloc_get_type(ntvfs_handle_get_backend_data(h, ntvfs), struct search_handle);
4196 if (! s) return NT_STATUS_INVALID_HANDLE;
4197 io->findclose.in.handle=s->handle; break;
4198 default:
4199 return smb_raw_search_close(private->tree, io);
4202 if (! s->cache) {
4203 status = smb_raw_search_close(private->tree, io);
4204 } else {
4205 if (s->cache->status != SEARCH_CACHE_COMPLETE) {
4206 /* cache is useless */
4207 DLIST_REMOVE(private->search_caches, s->cache);
4208 talloc_unlink(private, s->cache);
4209 talloc_unlink(s, s->cache);
4210 //if (talloc_unlink(s, s->cache)==0) {
4211 //talloc_free(s->cache);
4214 status = NT_STATUS_OK;
4217 s->h=NULL;
4218 ntvfs_handle_remove_backend_data(h, ntvfs);
4219 /* s MAY also be gone at this point, if h was freed, unless there were
4220 pending responses, in which case they see s->h is NULL as a sign to stop */
4221 return status;
4225 a handler for async trans2 replies
4227 static void async_trans2(struct smbcli_request *c_req)
4229 struct async_info *async = c_req->async.private;
4230 struct ntvfs_request *req = async->req;
4231 req->async_states->status = smb_raw_trans2_recv(c_req, req, async->parms);
4232 talloc_free(async);
4233 req->async_states->send_fn(req);
4236 /* raw trans2 */
4237 static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs,
4238 struct ntvfs_request *req,
4239 struct smb_trans2 *trans2)
4241 struct proxy_private *private = ntvfs->private_data;
4242 struct smbcli_request *c_req;
4244 if (private->map_trans2) {
4245 return NT_STATUS_NOT_IMPLEMENTED;
4248 SETUP_PID;
4249 #warning we should be mapping file handles here
4251 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4252 return smb_raw_trans2(private->tree, req, trans2);
4255 c_req = smb_raw_trans2_send(private->tree, trans2);
4257 ASYNC_RECV_TAIL(trans2, async_trans2);
4261 /* SMBtrans - not used on file shares */
4262 static NTSTATUS proxy_trans(struct ntvfs_module_context *ntvfs,
4263 struct ntvfs_request *req,
4264 struct smb_trans2 *trans2)
4266 return NT_STATUS_ACCESS_DENIED;
4270 a handler for async change notify replies
4272 static void async_changenotify(struct smbcli_request *c_req)
4274 struct async_info *async = c_req->async.private;
4275 struct ntvfs_request *req = async->req;
4276 req->async_states->status = smb_raw_changenotify_recv(c_req, req, async->parms);
4277 talloc_free(async);
4278 req->async_states->send_fn(req);
4281 /* change notify request - always async */
4282 static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs,
4283 struct ntvfs_request *req,
4284 union smb_notify *io)
4286 struct proxy_private *private = ntvfs->private_data;
4287 struct smbcli_request *c_req;
4288 int saved_timeout = private->transport->options.request_timeout;
4289 struct proxy_file *f;
4291 if (io->nttrans.level != RAW_NOTIFY_NTTRANS) {
4292 return NT_STATUS_NOT_IMPLEMENTED;
4295 SETUP_PID;
4297 f = ntvfs_handle_get_backend_data(io->nttrans.in.file.ntvfs, ntvfs);
4298 if (!f) return NT_STATUS_INVALID_HANDLE;
4299 io->nttrans.in.file.fnum = f->fnum;
4301 /* this request doesn't make sense unless it's async */
4302 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4303 return NT_STATUS_INVALID_PARAMETER;
4306 /* we must not timeout on notify requests - they wait
4307 forever */
4308 private->transport->options.request_timeout = 0;
4310 c_req = smb_raw_changenotify_send(private->tree, io);
4312 private->transport->options.request_timeout = saved_timeout;
4314 ASYNC_RECV_TAIL(io, async_changenotify);
4318 * A handler for converting rpc struct replies to ntioctl
4320 static NTSTATUS proxy_rpclite_map_async_send(
4321 struct ntvfs_module_context *ntvfs,
4322 struct ntvfs_request *req,
4323 void *io1, void *io2, NTSTATUS status)
4325 union smb_ioctl* io=talloc_get_type_abort(io1, union smb_ioctl);
4326 struct async_rpclite_send *rpclite_send=talloc_get_type_abort(io2, struct async_rpclite_send);
4327 void* r=rpclite_send->struct_ptr;
4328 struct ndr_push* push;
4329 const struct ndr_interface_call* call=rpclite_send->call;
4330 enum ndr_err_code ndr_err;
4331 DATA_BLOB ndr;
4333 talloc_free(rpclite_send);
4335 DEBUG(5,("%s: converting r=%p back to ntioctl\n",__FUNCTION__, r));
4336 push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4337 NT_STATUS_HAVE_NO_MEMORY(push);
4339 if (0) {
4340 push->flags |= LIBNDR_FLAG_BIGENDIAN;
4343 ndr_err = call->ndr_push(push, NDR_OUT, r);
4344 status=ndr_map_error2ntstatus(ndr_err);
4346 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4347 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
4348 nt_errstr(status)));
4349 return status;
4352 ndr=ndr_push_blob(push);
4353 //if (ndr.length > io->ntioctl.in.max_data) {
4354 DEBUG(3,("%s NDR size %d, max_size %d %p\n",__FUNCTION__, ndr.length,
4355 io->ntioctl.in.max_data, ndr.data));
4356 io->ntioctl.out.blob=ndr;
4357 return status;
4361 * A handler for sending async rpclite Read replies that were mapped to union smb_read
4363 static NTSTATUS rpclite_proxy_Read_map_async_send(
4364 struct ntvfs_module_context *ntvfs,
4365 struct ntvfs_request *req,
4366 void *io1, void *io2, NTSTATUS status)
4368 struct proxy_Read* r=talloc_get_type_abort(io1, struct proxy_Read);
4369 union smb_read* io=talloc_get_type_abort(io2, union smb_read);
4371 /* status here is a result of proxy_read, it doesn't reflect the status
4372 of the rpc transport or related calls, just the read operation */
4373 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4374 r->out.result=status;
4376 if (! NT_STATUS_IS_OK(status)) {
4377 /* We can't use result as a discriminator in IDL, so nread and flags always exist */
4378 r->out.nread=0;
4379 r->out.flags=0;
4380 } else {
4381 ssize_t size=io->readx.out.nread;
4382 r->out.flags=0;
4383 r->out.nread=io->readx.out.nread;
4385 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE) && io->readx.out.nread>0) {
4386 declare_checksum(digest);
4387 checksum_block(digest, io->readx.out.data, io->readx.out.nread);
4389 DEBUG(5,("New digest for size: %lld\n", (long long) io->readx.out.nread));
4390 dump_data (5, digest, sizeof(digest));
4391 DEBUG(5,("Cached digest\n"));
4392 dump_data (5, r->in.digest.digest, sizeof(digest));
4394 if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) {
4395 r->out.flags=PROXY_USE_CACHE;
4396 DEBUG(5,("%s: Use cached data len=%lld\n",__FUNCTION__,
4397 (long long)r->out.nread));
4398 if (r->in.flags & PROXY_VALIDATE) {
4399 r->out.flags |= PROXY_VALIDATE;
4400 DEBUG(5,("%s: Use VALIDATED len=%lld, %lld\n",__FUNCTION__,
4401 (long long)r->out.nread, (long long) io->readx.out.nread));
4403 goto done;
4405 DEBUG(5,("Cache does not match\n"));
4408 if (r->in.flags & PROXY_VALIDATE) {
4409 /* validate failed, shrink read to mincnt - so we don't fill the link */
4410 r->out.nread=MIN(r->out.nread, r->in.mincnt);
4411 size=r->out.nread;
4412 DEBUG(5,("VALIDATE failed, shrink read of %d from %d to %d\n",
4413 r->in.maxcnt,r->out.nread,MIN(r->out.nread, r->in.mincnt)));
4416 if (r->in.flags & PROXY_USE_ZLIB) {
4417 if (compress_block(io->readx.out.data, &size) ) {
4418 r->out.flags|=PROXY_USE_ZLIB;
4419 r->out.response.compress.count=size;
4420 r->out.response.compress.data=io->readx.out.data;
4421 DEBUG(3,("%s: Compressed from %d to %d = %d%%\n",
4422 __FUNCTION__,r->out.nread,size,size*100/r->out.nread));
4423 goto done;
4427 DEBUG(5,("%s: Compression not worthwhile\n", __FUNCTION__));
4428 r->out.response.generic.count=io->readx.out.nread;
4429 r->out.response.generic.data=io->readx.out.data;
4432 done:
4434 /* Or should we return NT_STATUS_OK? */
4435 DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status)));
4437 /* the rpc transport succeeded even if the operation did not */
4438 return NT_STATUS_OK;
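/* A minimal client-side sketch (illustrative only, not part of the build)
 * of the digest handshake handled above; "cached_data" and "cached_len"
 * are hypothetical locals. proxy_smb_raw_read_send below does this for
 * real. */
#if 0
checksum_block(r->in.digest.digest, cached_data, cached_len);
r->in.mincnt = cached_len;
r->in.maxcnt = cached_len;
r->in.flags |= PROXY_USE_CACHE;
/* ...send the proxy_Read; if the reply sets PROXY_USE_CACHE in
   r->out.flags the server data matched and no payload was returned */
#endif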
4442 * RPC implementation of Read
4444 static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs,
4445 struct ntvfs_request *req, struct proxy_Read *r)
4447 struct proxy_private *private = ntvfs->private_data;
4448 union smb_read* io=talloc(req, union smb_read);
4449 NTSTATUS status;
4450 struct proxy_file *f;
4451 struct ntvfs_handle *h;
4453 NT_STATUS_HAVE_NO_MEMORY(io);
4455 /* if the next hop is a proxy we could just repeat this call, but we would
4456 also have to handle the VALIDATE check, which means having our own callback handlers too... */
4457 SETUP_PID;
4459 RPCLITE_SETUP_FILE_HERE(f, h);
4461 DEBUG(5,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n",
4462 r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum));
4463 DEBUG(5,("Anticipated digest\n"));
4464 dump_data (5, r->in.digest.digest, sizeof(r->in.digest.digest));
4466 /* If the remote end is a proxy, just fixup the file handle and passthrough,
4467 but update cache on the way back
4468 if (PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
4471 /* prepare for response */
4472 r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt);
4473 NT_STATUS_HAVE_NO_MEMORY(r->out.response.generic.data);
4475 if (! PROXY_REMOTE_SERVER(private) && (r->in.flags & PROXY_VALIDATE)) {
4476 return proxy_validate(ntvfs, req, r, f);
4479 /* pack up an smb_read request and dispatch here */
4480 io->readx.level=RAW_READ_READX;
4481 io->readx.in.file.ntvfs=h;
4482 io->readx.in.mincnt=r->in.mincnt;
4483 io->readx.in.maxcnt=r->in.maxcnt;
4484 io->readx.in.offset=r->in.offset;
4485 io->readx.in.remaining=r->in.remaining;
4486 /* and something to hold the answer */
4487 io->readx.out.data=r->out.response.generic.data;
4489 /* so we get to pack the io->*.out response */
4490 status = ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send);
4491 NT_STATUS_NOT_OK_RETURN(status);
4493 /* so the read will get processed normally */
4494 return proxy_read(ntvfs, req, io);
4498 * A handler for sending async rpclite Write replies
4500 static NTSTATUS rpclite_proxy_Write_map_async_send(
4501 struct ntvfs_module_context *ntvfs,
4502 struct ntvfs_request *req,
4503 void *io1, void *io2, NTSTATUS status)
4505 struct proxy_Write* r=talloc_get_type_abort(io1, struct proxy_Write);
4506 union smb_write* io=talloc_get_type_abort(io2, union smb_write);
4508 DEBUG(5,("%s with jolly status %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
4509 r->out.result=status;
4511 r->out.nwritten=io->writex.out.nwritten;
4512 r->out.remaining=io->writex.out.remaining;
4514 /* the rpc transport succeeded even if the operation did not */
4515 return NT_STATUS_OK;
4519 * RPC implementation of write
4521 static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs,
4522 struct ntvfs_request *req, struct proxy_Write *r)
4524 struct proxy_private *private = ntvfs->private_data;
4525 union smb_write* io=talloc(req, union smb_write);
4526 NTSTATUS status;
4527 struct proxy_file* f;
4528 struct ntvfs_handle *h;
4530 SETUP_PID;
4532 RPCLITE_SETUP_FILE_HERE(f,h);
4534 DEBUG(5,("Opnum: proxy_Write count=%d offset=%lld, fnum=%d\n",
4535 r->in.count, r->in.offset, r->in.fnum));
4537 /* pack up an smb_write request and dispatch here */
4538 io->writex.level=RAW_WRITE_WRITEX;
4539 io->writex.in.file.ntvfs=h;
4540 io->writex.in.offset=r->in.offset;
4541 io->writex.in.wmode=r->in.mode;
4542 io->writex.in.count=r->in.count;
4544 /* and the data */
4545 if (PROXY_USE_ZLIB & r->in.flags) {
4546 ssize_t count=r->in.data.generic.count;
4547 io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data,
4548 &count, r->in.count);
4549 if (count != r->in.count || !io->writex.in.data) {
4550 /* Didn't uncompress properly, but the RPC layer worked */
4551 r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER;
4552 return NT_STATUS_OK;
4554 } else {
4555 io->writex.in.data=r->in.data.generic.data;
4558 /* so we get to pack the io->*.out response */
4559 status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send);
4560 NT_STATUS_NOT_OK_RETURN(status);
4562 /* so the write will get processed normally */
4563 return proxy_write(ntvfs, req, io);
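/* Note the two status layers used by these rpclite handlers: the NTSTATUS
 * they return is the rpc transport status, while the operation status
 * travels in r->out.result. A caller-side sketch (illustrative only): */
#if 0
status = rpclite_proxy_Write(ntvfs, req, r);
if (! NT_STATUS_IS_OK(status)) {
        /* the rpc transport itself failed */
} else if (! NT_STATUS_IS_OK(r->out.result)) {
        /* the transport worked but the write operation did not */
}
#endif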
4567 * RPC amalgamation of getinfo requests
4569 struct proxy_getinfo_fragments;
4570 struct proxy_getinfo_fragmentses;
4572 /* holds one smbcli_request to satisfy part of one proxy_GetInfo request */
4573 struct proxy_getinfo_fragment {
4574 struct proxy_getinfo_fragment *prev, *next;
4575 struct proxy_getinfo_fragments *fragments;
4576 union smb_fileinfo *smb_fileinfo;
4577 struct smbcli_request *c_req;
4578 NTSTATUS status;
4581 /* holds reference to many fragment smbcli_request that together make up one proxy_GetInfo request */
4582 struct proxy_getinfo_fragments {
4583 struct proxy_getinfo_fragments *prev, *next;
4584 struct proxy_getinfo_fragmentses *fragmentses;
4585 struct proxy_getinfo_fragment *fragments;
4586 uint32_t index;
4589 struct proxy_getinfo_fragmentses {
4590 struct proxy_getinfo_fragments *fragments;
4591 struct proxy_GetInfo *r;
4592 struct ntvfs_request *req;
4593 bool async;
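/* Containment sketch for one proxy_GetInfo request:
 *
 *   proxy_getinfo_fragmentses              - the whole request; holds r and req
 *     +- proxy_getinfo_fragments (index 0)   - one per info_tag
 *     |    +- proxy_getinfo_fragment         - one per RAW_FILEINFO_* level,
 *     |    +- proxy_getinfo_fragment           each with its own smbcli_request
 *     +- proxy_getinfo_fragments (index 1)
 *          +- ...
 *
 * Fragments are unlinked as they complete; when the outer list drains and
 * we are async, the amalgamated reply is sent (see async_proxy_getinfo).
 */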
4597 a handler for async getinfo replies
4599 NTSTATUS async_proxy_getinfo(struct async_info *async, void* io1, void* io2, NTSTATUS status)
4601 struct smbcli_request *c_req = async->c_req;
4602 struct ntvfs_request *req = async->req;
4603 struct proxy_file *f=async->f;
4604 struct proxy_getinfo_fragment *fragment=talloc_get_type_abort(io2, struct proxy_getinfo_fragment);
4605 struct proxy_getinfo_fragments* fragments=fragment->fragments;
4606 struct proxy_getinfo_fragmentses* fragmentses=fragments->fragmentses;
4607 struct proxy_GetInfo *r=talloc_get_type_abort(fragmentses->r, struct proxy_GetInfo);
4608 int c=fragments->index;
4609 struct info_data* d=&(r->out.info_data[c]);
4610 union smb_fileinfo *io=talloc_get_type_abort(io1, union smb_fileinfo);
4612 SMB_ASSERT(c_req == NULL || c_req == fragment->c_req);
4614 if (c_req) {
4615 switch (r->in.info_tags[0].tag_type) {
4616 case TAG_TYPE_FILE_INFO:
4617 status=smb_raw_fileinfo_recv(c_req, r, io);
4618 break;
4619 case TAG_TYPE_PATH_INFO:
4620 status=smb_raw_pathinfo_recv(c_req, r, io);
4621 break;
4622 default:
4623 status=NT_STATUS_INVALID_PARAMETER;
4625 c_req=NULL;
4628 /* stop callback occurring more than once sync'ly */
4629 fragment->c_req=NULL;
4631 DEBUG(5,("%s: async callback level %x %s\n",__FUNCTION__,io->generic.level, get_friendly_nt_error_msg (status)));
4632 switch (io->generic.level) {
4633 case RAW_FILEINFO_ALL_INFO:
4634 case RAW_FILEINFO_ALL_INFORMATION:
4635 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALL_INFO\n",__FUNCTION__));
4636 d->status_RAW_FILEINFO_ALL_INFO=status;
4638 /* don't blindly overwrite BASIC_INFORMATION as we may already have it (the "1 ||" currently forces the overwrite regardless of status) */
4639 if (1 || NT_STATUS_IS_OK(status)) {
4640 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4641 d->create_time=io->all_info.out.create_time;
4642 d->access_time=io->all_info.out.access_time;
4643 d->write_time=io->all_info.out.write_time;
4644 d->change_time=io->all_info.out.change_time;
4645 d->attrib=io->all_info.out.attrib;
4647 d->alloc_size=io->all_info.out.alloc_size;
4648 d->size=io->all_info.out.size;
4649 dump_data(5, (uint8_t *)io, sizeof(*io));
4650 d->nlink=io->all_info.out.nlink;
4651 d->delete_pending=io->all_info.out.delete_pending;
4652 d->directory=io->all_info.out.directory;
4653 d->ea_size=io->all_info.out.ea_size;
4654 /* io is sticking around for as long as d is */
4655 d->fname.s=io->all_info.out.fname.s;
4656 d->fname.count=io->all_info.out.fname.private_length;
4657 break;
4658 case RAW_FILEINFO_BASIC_INFO:
4659 case RAW_FILEINFO_BASIC_INFORMATION:
4660 DEBUG(5,("%s: async callback level RAW_FILEINFO_BASIC_INFORMATION\n",__FUNCTION__));
4661 d->status_RAW_FILEINFO_BASIC_INFORMATION=status;
4662 d->create_time=io->basic_info.out.create_time;
4663 d->access_time=io->basic_info.out.access_time;
4664 d->write_time=io->basic_info.out.write_time;
4665 d->change_time=io->basic_info.out.change_time;
4666 d->attrib=io->basic_info.out.attrib;
4667 break;
4668 case RAW_FILEINFO_COMPRESSION_INFO:
4669 DEBUG(5,("%s: async callback level RAW_FILEINFO_COMPRESSION_INFO\n",__FUNCTION__));
4670 d->status_RAW_FILEINFO_COMPRESSION_INFO = status;
4671 d->compressed_size=io->compression_info.out.compressed_size;
4672 d->format=io->compression_info.out.format;
4673 d->unit_shift=io->compression_info.out.unit_shift;
4674 d->chunk_shift=io->compression_info.out.chunk_shift;
4675 d->cluster_shift=io->compression_info.out.cluster_shift;
4676 break;
4677 case RAW_FILEINFO_INTERNAL_INFORMATION:
4678 DEBUG(5,("%s: async callback level RAW_FILEINFO_INTERNAL_INFORMATION\n",__FUNCTION__));
4679 d->status_RAW_FILEINFO_INTERNAL_INFORMATION=status;
4680 d->file_id=io->internal_information.out.file_id;
4681 break;
4682 case RAW_FILEINFO_ACCESS_INFORMATION:
4683 DEBUG(5,("%s: async callback level RAW_FILEINFO_ACCESS_INFORMATION\n",__FUNCTION__));
4684 d->status_RAW_FILEINFO_ACCESS_INFORMATION=status;
4685 d->access_flags=io->access_information.out.access_flags;
4686 break;
4687 case RAW_FILEINFO_POSITION_INFORMATION:
4688 DEBUG(5,("%s: async callback level RAW_FILEINFO_POSITION_INFORMATION\n",__FUNCTION__));
4689 d->status_RAW_FILEINFO_POSITION_INFORMATION = status;
4690 d->position=io->position_information.out.position;
4691 break;
4692 case RAW_FILEINFO_MODE_INFORMATION:
4693 DEBUG(5,("%s: async callback level RAW_FILEINFO_MODE_INFORMATION\n",__FUNCTION__));
4694 d->status_RAW_FILEINFO_MODE_INFORMATION =status;
4695 d->mode=io->mode_information.out.mode;
4696 break;
4697 case RAW_FILEINFO_ALIGNMENT_INFORMATION:
4698 DEBUG(5,("%s: async callback level RAW_FILEINFO_ALIGNMENT_INFORMATION\n",__FUNCTION__));
4699 d->status_RAW_FILEINFO_ALIGNMENT_INFORMATION=status;
4700 d->alignment_requirement=io->alignment_information.out.alignment_requirement;
4701 break;
4702 case RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION:
4703 DEBUG(5,("%s: async callback level RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION\n",__FUNCTION__));
4704 d->status_RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION=status;
4705 d->reparse_tag=io->attribute_tag_information.out.reparse_tag;
4706 d->reparse_attrib=io->attribute_tag_information.out.attrib;
4707 break;
4708 case RAW_FILEINFO_STREAM_INFO: {
4709 uint_t c;
4710 DEBUG(5,("%s: async callback level RAW_FILEINFO_STREAM_INFO\n",__FUNCTION__));
4711 d->status_RAW_FILEINFO_STREAM_INFO=status;
4712 DEBUG(5,("Num Streams %d %s\n",io->stream_info.out.num_streams, get_friendly_nt_error_msg (status)));
4713 if (NT_STATUS_IS_OK(status)) {
4714 d->streams=talloc_zero_array(d, struct info_stream, io->stream_info.out.num_streams);
4715 if (! d->streams) {
4716 d->status_RAW_FILEINFO_STREAM_INFO=NT_STATUS_NO_MEMORY;
4717 } else {
4718 d->num_streams=io->stream_info.out.num_streams;
4719 for(c=0; c < io->stream_info.out.num_streams; c++) {
4720 d->streams[c].size = io->stream_info.out.streams[c].size;
4721 d->streams[c].alloc_size = io->stream_info.out.streams[c].alloc_size;
4722 d->streams[c].stream_name.s=io->stream_info.out.streams[c].stream_name.s;
4723 d->streams[c].stream_name.count=io->stream_info.out.streams[c].stream_name.private_length;
4727 break; }
4728 default:
4729 /* so... where's it from? */
4730 DEBUG(5,("Unexpected info level\n"));
4733 fragment->smb_fileinfo = NULL;
4734 fragment->c_req=NULL;
4736 /* are the fragments complete? */
4737 DLIST_REMOVE(fragments->fragments, fragment);
4738 /* if this index is complete, remove from fragmentses */
4739 if (! fragments->fragments) {
4740 DLIST_REMOVE(fragmentses->fragments, fragments);
4742 /* is that the end? */
4743 if (! fragmentses->fragments && fragmentses->async) {
4744 DEBUG(5,("Thats the end of the fragments, doing send\n"));
4745 /* call the send_fn */
4746 req=fragmentses->req;
4747 req->async_states->status=NT_STATUS_OK;
4748 DEBUG(5,("Fragments async response sending\n"));
4749 req->async_states->send_fn(req);
4751 DEBUG(5,("%s: Thats the end of the callback\n",__FUNCTION__));
4752 return status;
4755 #define FINISH_GETINFO_FRAGMENT(r, io) do { \
4756 struct smbcli_request *c_req; \
4757 switch (r->in.info_tags[0].tag_type) { \
4758 case TAG_TYPE_FILE_INFO: \
4759 io->all_info.in.file.fnum=r->in.info_tags[0].info_tag.fnum; \
4760 c_req=smb_raw_fileinfo_send(private->tree, io); \
4761 break; \
4762 case TAG_TYPE_PATH_INFO: \
4763 io->all_info.in.file.path=r->in.info_tags[0].info_tag.path.s; \
4764 c_req=smb_raw_pathinfo_send(private->tree, io); \
4765 break; \
4766 default: \
4767 return NT_STATUS_INVALID_PARAMETER; \
4769 /* Add fragment collator */ \
4770 fragment->c_req=c_req; \
4771 /* use the same stateful async handler for them all... */ \
4772 { void* req=NULL; \
4773 ADD_ASYNC_RECV_TAIL(c_req, io, fragment, f, async_proxy_getinfo, NT_STATUS_INTERNAL_ERROR); \
4774 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler); \
4776 io=NULL; \
4777 } while (0)
4779 #define SETUP_GETINFO_FRAGMENT(io, LEVEL) do { \
4780 fragment=talloc_zero(fragments, struct proxy_getinfo_fragment); \
4781 NT_STATUS_HAVE_NO_MEMORY(fragment); \
4782 DLIST_ADD(fragments->fragments, fragment); \
4783 fragment->fragments=fragments; \
4784 io=talloc_zero(fragment, union smb_fileinfo); \
4785 NT_STATUS_HAVE_NO_MEMORY(io); \
4786 io->generic.level=LEVEL; \
4787 } while (0)
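/* The two macros above are always used as a pair, once per info level:
 * SETUP_GETINFO_FRAGMENT allocates the fragment and its union smb_fileinfo,
 * FINISH_GETINFO_FRAGMENT sends it and chains async_proxy_getinfo as the
 * receive handler. FINISH_GETINFO_FRAGMENT NULLs io, so SETUP must run
 * again before the next level (see rpclite_proxy_Getinfo below). */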
4789 static NTSTATUS rpclite_proxy_Getinfo(struct ntvfs_module_context *ntvfs,
4790 struct ntvfs_request *req, struct proxy_GetInfo *r)
4792 struct proxy_private *private = ntvfs->private_data;
4793 struct smbcli_request *c_req;
4794 union smb_fileinfo *io=NULL;
4795 NTSTATUS status;
4796 struct proxy_file* f;
4797 struct ntvfs_handle *h;
4798 struct proxy_getinfo_fragmentses *fragmentses;
4799 int c;
4801 SETUP_PID;
4803 DEBUG(5,("Opnum: proxy_Getinfo r=%p\n",r));
4805 DEBUG(5,("Convering %d handles for r=%p\n",r->in.count, r));
4806 for(c=0; c < r->in.count; c++) {
4807 if (r->in.info_tags[c].tag_type==TAG_TYPE_FILE_INFO) {
4808 RPCLITE_SETUP_THIS_FILE_HERE(r->in.info_tags[c].info_tag.fnum, f, h);
4812 if (PROXY_REMOTE_SERVER(private)) {
4813 DEBUG(5,("Remote proxy, doing transparent\n"));
4814 c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs, &ndr_table_rpcproxy, NDR_PROXY_GETINFO, r);
4815 /* No need to add a receive handler, the ntioctl transport adds
4816 the async chain handler which deals with the send_fn */
4817 // ADD_ASYNC_RECV_TAIL(c_req, r, NULL, f, rpclite_proxy_Getinfo_map_async_send, NT_STATUS_INTERNAL_ERROR);
4819 if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4820 DEBUG(5,("%s:Sync waiting for nttrans response\n",__LOCATION__));
4821 return sync_chain_handler(c_req);
4822 } else {
4823 ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
4824 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4825 return NT_STATUS_OK;
4829 /* I thought this was done for me for [in,out] */
4830 r->out.info_data=talloc_zero_array(r, struct info_data, r->in.count);
4831 NT_STATUS_HAVE_NO_MEMORY(r->out.info_data);
4832 r->out.count = r->in.count;
4833 r->out.result = NT_STATUS_OK;
4835 fragmentses=talloc_zero(req, struct proxy_getinfo_fragmentses);
4836 NT_STATUS_HAVE_NO_MEMORY(fragmentses);
4837 fragmentses->r=r;
4838 fragmentses->req=req;
4840 #warning, if C is large, we need to do a few at a time according to resource limits
4841 for (c=0; c < r->in.count; c++) {
4842 struct proxy_getinfo_fragments *fragments;
4843 struct proxy_getinfo_fragment *fragment;
4845 fragments=talloc_zero(fragmentses, struct proxy_getinfo_fragments);
4846 NT_STATUS_HAVE_NO_MEMORY(fragments);
4847 DLIST_ADD(fragmentses->fragments, fragments);
4848 fragments->fragmentses=fragmentses;
4849 fragments->index=c;
4851 /* Issue a set of getinfo requests */
4852 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALL_INFO);
4853 FINISH_GETINFO_FRAGMENT(r, io);
4855 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_BASIC_INFORMATION);
4856 FINISH_GETINFO_FRAGMENT(r, io);
4858 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_COMPRESSION_INFO);
4859 FINISH_GETINFO_FRAGMENT(r, io);
4861 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_INTERNAL_INFORMATION);
4862 FINISH_GETINFO_FRAGMENT(r, io);
4864 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ACCESS_INFORMATION);
4865 FINISH_GETINFO_FRAGMENT(r, io);
4867 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_POSITION_INFORMATION);
4868 FINISH_GETINFO_FRAGMENT(r, io);
4870 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_MODE_INFORMATION);
4871 FINISH_GETINFO_FRAGMENT(r, io);
4873 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ALIGNMENT_INFORMATION);
4874 FINISH_GETINFO_FRAGMENT(r, io);
4876 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_ATTRIBUTE_TAG_INFORMATION);
4877 FINISH_GETINFO_FRAGMENT(r, io);
4879 SETUP_GETINFO_FRAGMENT(io, RAW_FILEINFO_STREAM_INFO);
4880 FINISH_GETINFO_FRAGMENT(r, io);
4883 /* If ! async, wait for all requests to finish */
4885 if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
4886 struct proxy_getinfo_fragments *fragments;
4887 struct proxy_getinfo_fragment *fragment;
4888 while ((fragments = fragmentses->fragments) &&
4889 (fragment = fragments->fragments) &&
4890 fragment->c_req) {
4891 sync_chain_handler(fragment->c_req);
4892 /* and because the whole fragment / fragments may be gone now... */
4893 continue;
4895 return NT_STATUS_OK; /* see individual failures */
4898 DEBUG(5,("%s: Setting async response\n",__FUNCTION__));
4899 fragmentses->async=true;
4900 req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
4901 return NT_STATUS_OK;
4904 /* rpclite dispatch table */
4905 #define RPC_PROXY_OPS 3
4906 struct {
4907 uint32_t opnum;
4908 NTSTATUS (*handler)(struct ntvfs_module_context *ntvfs,
4909 struct ntvfs_request *req, void* r);
4910 } rpcproxy_ops[RPC_PROXY_OPS]={
4911 {NDR_PROXY_READ, rpclite_proxy_Read},
4912 {NDR_PROXY_WRITE, rpclite_proxy_Write},
4913 {NDR_PROXY_GETINFO, rpclite_proxy_Getinfo}
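/* To add an rpclite operation: bump RPC_PROXY_OPS, append an
 * {NDR_PROXY_<op>, rpclite_proxy_<Op>} entry above (the opnum constants
 * come from the generated rpcproxy NDR table), and proxy_rpclite below
 * will dispatch to it by opnum. */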
4916 /* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert
4917 back from rpc struct to ntioctl */
4918 static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs,
4919 struct ntvfs_request *req, union smb_ioctl *io)
4921 struct proxy_private *private = ntvfs->private_data;
4922 DATA_BLOB *request;
4923 struct ndr_syntax_id* syntax_id;
4924 uint32_t opnum;
4925 const struct ndr_interface_table *table;
4926 struct ndr_pull* pull;
4927 void* r;
4928 NTSTATUS status;
4929 struct async_rpclite_send *rpclite_send;
4930 enum ndr_err_code ndr_err;
4932 SETUP_PID;
4934 /* We don't care about io->generic.in.file, ntvfs layer already proved it was valid,
4935 our operations will have the fnum embedded in them anyway */
4936 DEBUG(5,("START %s blob-size %d\n",__FUNCTION__,io->ntioctl.in.blob.length));
4937 /* unpack the NDR */
4938 request=&io->ntioctl.in.blob;
4940 pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
4941 NT_STATUS_HAVE_NO_MEMORY(pull);
4942 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
4943 DEBUG(5,("%s pull init'd\n",__FUNCTION__));
4945 /* the blob is 4-aligned because it was memcpy'd */
4946 syntax_id=talloc_zero(pull, struct ndr_syntax_id);
4947 NT_STATUS_HAVE_NO_MEMORY(syntax_id);
4949 ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id);
4950 status=ndr_map_error2ntstatus(ndr_err);
4951 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4952 DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status)));
4953 return status;
4956 /* now find the struct ndr_interface_table * for this syntax_id */
4957 table=ndr_table_by_uuid(&syntax_id->uuid);
4958 if (! table) ndr_table_init();
4959 table=ndr_table_by_uuid(&syntax_id->uuid);
4961 if (! table) {
4962 DEBUG(5,("Can't find table for uuid: %s\n",GUID_string(debug_ctx(),&syntax_id->uuid)));
4963 return NT_STATUS_NO_GUID_TRANSLATION;
4966 ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum);
4967 status=ndr_map_error2ntstatus(ndr_err);
4968 if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
4969 DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status)));
4970 return status;
4972 DEBUG(5,("%s opnum %d\n",__FUNCTION__,opnum));
4974 DEBUG(10,("rpc request data:\n"));
4975 dump_data(10, pull->data, pull->data_size);
4977 r = talloc_named(req, table->calls[opnum].struct_size, "struct %s",
4978 table->calls[opnum].name);
4979 NT_STATUS_HAVE_NO_MEMORY(r);
4981 memset(r, 0, table->calls[opnum].struct_size);
4983 ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r);
4984 status=ndr_map_error2ntstatus(ndr_err);
4985 DEBUG(5,("%s opnum %d pulled r=%p status %s\n",__FUNCTION__,opnum,r,get_friendly_nt_error_msg (status)));
4986 NT_STATUS_NOT_OK_RETURN(status);
4988 rpclite_send=talloc(req, struct async_rpclite_send);
4989 NT_STATUS_HAVE_NO_MEMORY(rpclite_send);
4990 rpclite_send->call=&table->calls[opnum];
4991 rpclite_send->struct_ptr=r;
4992 /* need to push conversion function to convert from r to io */
4993 status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send);
4994 NT_STATUS_NOT_OK_RETURN(status);
4996 /* Magically despatch the call based on syntax_id, table and opnum.
4997 But there is no generic table of handlers yet, so until then we match on the table name: */
4998 if (0==strcasecmp(table->name,"rpcproxy")) {
4999 if (opnum >= RPC_PROXY_OPS) {
5000 DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum));
5001 return NT_STATUS_PROCEDURE_NOT_FOUND;
5003 status = rpcproxy_ops[opnum].handler(ntvfs, req, r);
5004 } else {
5005 DEBUG(5,("Can't despatch %s:%d %s\n",table->name, opnum,
5006 GUID_string(debug_ctx(),&syntax_id->uuid)));
5007 return NT_STATUS_NO_GUID_TRANSLATION;
5010 /* status is the status of the rpc layer. If it is NT_STATUS_OK then
5011 the handler status is in r->out.result */
5012 DEBUG(5,("%s now map_async_finish: status=%s async=%d\n", __FUNCTION__,
5013 get_friendly_nt_error_msg (status), req->async_states->state & NTVFS_ASYNC_STATE_ASYNC));
5015 return ntvfs_map_async_finish(req, status);
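/* Wire layout of the rpclite ntioctl blob consumed above (and produced by
 * smbcli_ndr_request_ntioctl_send below):
 *
 *   [ ndr_syntax_id : interface uuid + version ]
 *   [ uint32        : opnum                    ]
 *   [ NDR_IN parameters of table->calls[opnum] ]
 *
 * The reply blob carries only the NDR_OUT parameters; the caller still
 * holds the syntax_id and opnum it sent (see ntioctl_rpc_unmap_info). */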
5018 /* unpack the ntioctl to make some rpc_struct */
5019 NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status)
5021 struct ntvfs_module_context *ntvfs = async->proxy->ntvfs;
5022 struct proxy_private *proxy=async->proxy;
5023 struct smbcli_request *c_req = async->c_req;
5024 void* r=io1;
5025 struct ntioctl_rpc_unmap_info *info=talloc_get_type_abort(io2, struct ntioctl_rpc_unmap_info);
5026 union smb_ioctl* io =talloc_get_type_abort(info->io, union smb_ioctl);
5027 const struct ndr_interface_call *calls=info->calls;
5028 enum ndr_err_code ndr_err;
5029 DATA_BLOB *response;
5030 struct ndr_pull* pull;
5032 DEBUG(5,("START %s io2=%p\n",__FUNCTION__,io2));
5033 DEBUG(5,("%s op %s ntioctl: %s\n",
5034 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
5035 NT_STATUS_NOT_OK_RETURN(status);
5037 if (c_req) {
5038 DEBUG(5,("%s io2 MID=%d\n",__FUNCTION__,c_req->mid));
5039 status = smb_raw_ioctl_recv(c_req, io, io);
5040 #define SESSION_INFO proxy->remote_server, proxy->remote_share
5041 /* This status is the ntioctl wrapper status */
5042 if (! NT_STATUS_IS_OK(status)) {
5043 DEBUG(3,("[\\\\%s\\%s] RPC %s failed for %s: %s\n",SESSION_INFO,
5044 __FUNCTION__, calls->name, get_friendly_nt_error_msg(status)));
5045 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) return status;
5046 return NT_STATUS_UNSUCCESSFUL;
5050 dump_data(10, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length);
5052 response=&io->ntioctl.out.blob;
5053 pull = ndr_pull_init_blob(response, r, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
5054 /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */
5056 NT_STATUS_HAVE_NO_MEMORY(pull);
5058 ndr_err=calls->ndr_pull(pull, NDR_OUT, r);
5059 #warning can we free pull here?
5060 status=ndr_map_error2ntstatus(ndr_err);
5062 DEBUG(5,("END %s op status %s\n",
5063 __FUNCTION__, get_friendly_nt_error_msg(status)));
5064 return status;
5068 send an ntioctl request based on a NDR encoding.
5070 struct smbcli_request *smbcli_ndr_request_ntioctl_send(
5071 struct smbcli_tree *tree,
5072 struct ntvfs_module_context *ntvfs,
5073 const struct ndr_interface_table *table,
5074 uint32_t opnum,
5075 void *r)
5077 struct proxy_private *private = ntvfs->private_data;
5078 struct smbcli_request * c_req;
5079 struct ndr_push *push;
5080 NTSTATUS status;
5081 DATA_BLOB request;
5082 enum ndr_err_code ndr_err;
5083 union smb_ioctl *io=talloc_zero(r, union smb_ioctl);
5084 if (! io) return NULL;
5086 /* setup for a ndr_push_* call, we can't free push until the message
5087 actually hits the wire */
5088 push = ndr_push_init_ctx(io, lp_iconv_convenience(ntvfs->ctx->lp_ctx));
5089 if (!push) return NULL;
5091 /* first push interface table identifiers */
5092 ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id);
5093 status=ndr_map_error2ntstatus(ndr_err);
5095 if (! NT_STATUS_IS_OK(status)) return NULL;
5097 ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum);
5098 status=ndr_map_error2ntstatus(ndr_err);
5099 if (! NT_STATUS_IS_OK(status)) return NULL;
5101 if (0) { /* flip to exercise big-endian NDR encoding */
5102 push->flags |= LIBNDR_FLAG_BIGENDIAN;
5105 /* push the structure into a blob */
5106 ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r);
5107 status=ndr_map_error2ntstatus(ndr_err);
5108 if (!NT_STATUS_IS_OK(status)) {
5109 DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n",
5110 nt_errstr(status)));
5111 return NULL;
5114 /* retrieve the blob */
5115 request = ndr_push_blob(push);
5117 io->ntioctl.level=RAW_IOCTL_NTIOCTL;
5118 io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE;
5119 io->ntioctl.in.file.fnum=private->nttrans_fnum;
5120 io->ntioctl.in.fsctl=false;
5121 io->ntioctl.in.filter=0;
5122 io->ntioctl.in.max_data=PROXY_NTIOCTL_MAXDATA;
5123 io->ntioctl.in.blob=request;
5125 DEBUG(10,("smbcli_request packet:\n"));
5126 dump_data(10, request.data, request.length);
5128 c_req = smb_raw_ioctl_send(tree, io);
5130 if (! c_req) {
5131 return NULL;
5134 dump_data(10, c_req->out.data, c_req->out.data_size);
5136 { void* req=NULL;
5137 struct ntioctl_rpc_unmap_info* info=talloc_zero(r, struct ntioctl_rpc_unmap_info);
5138 info->io=io;
5139 info->table=table;
5140 info->opnum=opnum;
5141 info->calls=&table->calls[opnum];
5142 ADD_ASYNC_RECV_TAIL(c_req, r, info, NULL, ntioctl_rpc_unmap, NULL);
5145 return c_req;
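/* Usage sketch (illustrative only; mirrors proxy_smb_raw_read_send below).
 * "r" is a talloc'd struct proxy_Read with its in.* members filled in. */
#if 0
c_req = smbcli_ndr_request_ntioctl_send(private->tree, ntvfs,
                                        &ndr_table_rpcproxy,
                                        NDR_PROXY_READ, r);
if (! c_req) return NULL;
/* ntioctl_rpc_unmap is already chained to pull the NDR_OUT parameters;
   ADD_ASYNC_RECV_TAIL our own handler to run after it */
#endif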
5149 client helpers, mapping between proxy RPC calls and smbcli_* calls.
5153 * If the sync_chain_handler is called directly it unplugs the async handler
5154 which (as well as preventing loops) will also avoid req->send_fn being
5155 called - which is also nice! */
5156 NTSTATUS sync_chain_handler(struct smbcli_request *c_req)
5158 struct async_info *async=NULL;
5159 /* the first callback which will actually receive the c_req response */
5160 struct async_info_map *async_map;
5161 NTSTATUS status=NT_STATUS_OK;
5162 struct async_info_map** chain;
5164 DEBUG(5,("%s\n",__FUNCTION__));
5165 if (! c_req) return NT_STATUS_UNSUCCESSFUL;
5167 /* If there is a handler installed, it is using async_info to chain */
5168 if (c_req->async.fn) {
5169 /* not safe to talloc_free async if send_fn has been called for the request
5170 against which async was allocated, so steal it (and free below) or neither */
5171 async = talloc_get_type_abort(c_req->async.private, struct async_info);
5172 talloc_steal(NULL, async);
5173 chain=&async->chain;
5174 async_map = talloc_get_type_abort(*chain, struct async_info_map);
5175 } else {
5176 chain=(struct async_info_map**)&c_req->async.private;
5177 async_map = talloc_get_type_abort(*chain, struct async_info_map);
5180 /* unplug c_req->async.fn as if a callback handler calls smb_*_recv
5181 in order to receive the response, smbcli_transport_finish_recv will
5182 call us again and then call the c-req->async.fn
5183 Perhaps we should merely call smbcli_request_receive() IF
5184 c_req->request_state <= SMBCLI_REQUEST_RECV, but that might not
5185 help multi-part replies... except all parts are received before
5186 callback if a handler WAS set */
5187 c_req->async.fn=NULL;
5189 /* Should we raise an error? Should we simple_recv? */
5190 while(async_map) {
5191 /* remove this one from the list before we call. We do this in case
5192 some callbacks free their async_map but also so that callbacks
5193 can navigate the async_map chain to add additional callbacks to
5194 the end - e.g. so that tag-along reads can call send_fn after
5195 the send_fn of the request they tagged along to, thus preserving
5196 the async response order - which may be a waste of time? */
5197 DLIST_REMOVE(*chain, async_map);
5199 DEBUG(5,("Callback for async_map=%p pre-status %s\n",async_map, get_friendly_nt_error_msg(status)));
5200 if (async_map->fn) {
5201 status=async_map->fn(async_map->async,
5202 async_map->parms1, async_map->parms2, status);
5204 DEBUG(5,("Callback complete for async_map=%p status %s\n",async_map, get_friendly_nt_error_msg(status)));
5205 /* Note: the callback may have added to the chain */
5206 #warning Async_maps have a null talloc_context, it is unclear who should own them
5207 /* it can't be c_req as it stops us chaining more than one, maybe it
5208 should be req but there isn't always a req. However sync_chain_handler
5209 will always free it if called */
5210 DEBUG(6,("Will free async map %p\n",async_map));
5211 #warning put me back
5212 talloc_free(async_map);
5213 DEBUG(6,("Free'd async_map\n"));
5214 if (*chain)
5215 async_map=talloc_get_type_abort(*chain, struct async_info_map);
5216 else
5217 async_map=NULL;
5218 DEBUG(6,("Switch to async_map %p\n",async_map));
5220 /* The first callback will have read c_req, thus talloc_free'ing it,
5221 so we don't let the other callbacks get hurt playing with it */
5222 if (async_map && async_map->async)
5223 async_map->async->c_req=NULL;
5226 talloc_free(async);
5228 DEBUG(5,("%s complete: %s\n",__FUNCTION__,get_friendly_nt_error_msg (status)));
5229 return status;
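/* Dispatch-time choice between the two chain handlers, as made in
 * rpclite_proxy_Getinfo above (illustrative only): */
#if 0
if (! (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
        return sync_chain_handler(c_req); /* receive and unwind right now */
} else {
        ASYNC_RECV_TAIL_HANDLER_ORPHAN(io, async_chain_handler);
        req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC;
        return NT_STATUS_OK; /* send_fn fires when the reply arrives */
}
#endif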
5232 /* If the async handler is called, then the send_fn is called */
5233 static void async_chain_handler(struct smbcli_request *c_req)
5235 struct async_info *async = talloc_get_type_abort(c_req->async.private, struct async_info);
5236 struct ntvfs_request *req = async->req;
5237 NTSTATUS status;
5239 if (c_req->state <= SMBCLI_REQUEST_RECV) {
5240 /* Looks like the async handler has been called sync'ly */
5241 smb_panic("async_chain_handler called synchronously");
5244 status=sync_chain_handler(c_req);
5246 /* Should we insist that a chain'd handler does this?
5247 Which makes it hard to intercept the data by adding handlers
5248 before the send_fn handler sends it... */
5249 if (req) {
5250 DEBUG(5,("%s send_fn on req=%p\n",__FUNCTION__,req));
5251 req->async_states->status=status;
5252 req->async_states->send_fn(req);
5256 /* unpack the rpc struct to make some smb_write response */
5257 NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async,
5258 void* io1, void* io2, NTSTATUS status)
5260 union smb_write* io =talloc_get_type(io1, union smb_write);
5261 struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write);
5263 DEBUG(5,("START: %s convert from rpc to smb with pre-status %s\n",__FUNCTION__,
5264 get_friendly_nt_error_msg (status)));
5265 DEBUG(3,("Write response for offset=%lld\n",io->generic.in.offset));
5266 NT_STATUS_NOT_OK_RETURN(status);
5268 status=r->out.result;
5269 DEBUG(5,("%s wrapped status: %s\n",__FUNCTION__, get_friendly_nt_error_msg(status)));
5270 NT_STATUS_NOT_OK_RETURN(status);
5272 io->generic.out.remaining = r->out.remaining;
5273 io->generic.out.nwritten = r->out.nwritten;
5275 DEBUG(5,("END: %s status %s\n",__FUNCTION__,
5276 get_friendly_nt_error_msg (status)));
5277 return status;
5280 /* upgrade from smb to NDR and then send.
5281 The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/
5282 struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs,
5283 union smb_write *io,
5284 struct proxy_file *f)
5286 struct proxy_private *private = ntvfs->private_data;
5287 struct smbcli_tree *tree=private->tree;
5289 if (PROXY_REMOTE_SERVER(private)) {
5290 struct smbcli_request *c_req;
5291 struct proxy_Write *r=talloc_zero(io, struct proxy_Write);
5292 ssize_t size;
5294 if (! r) return NULL;
5296 size=io->generic.in.count;
5297 /* upgrade the write */
5298 r->in.fnum = io->generic.in.file.fnum;
5299 r->in.offset = io->generic.in.offset;
5300 r->in.count = io->generic.in.count;
5301 r->in.mode = io->generic.in.wmode;
5302 // r->in.remaining = io->generic.in.remaining;
5303 #warning remove this
5304 /* prepare to lie */
5305 r->out.nwritten=r->in.count;
5306 r->out.remaining=0;
5308 /* try to compress */
5309 #warning compress!
5310 r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size);
5311 if (r->in.data.compress.data) {
5312 r->in.data.compress.count=size;
5313 r->in.flags = PROXY_USE_ZLIB;
5314 } else {
5315 r->in.flags = 0;
5316 /* we'll honour const, honest gov */
5317 r->in.data.generic.data=discard_const(io->generic.in.data);
5318 r->in.data.generic.count=io->generic.in.count;
5321 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5322 ntvfs,
5323 &ndr_table_rpcproxy,
5324 NDR_PROXY_WRITE, r);
5325 if (! c_req) return NULL;
5327 /* yeah, filthy abuse of f */
5328 { void* req=NULL;
5329 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL);
5332 return c_req;
5333 } else {
5334 return smb_raw_write_send(tree, io);
5338 NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs,
5339 union smb_write *io,
5340 struct proxy_file *f)
5342 struct proxy_private *proxy = ntvfs->private_data;
5343 struct smbcli_tree *tree=proxy->tree;
5345 if (PROXY_REMOTE_SERVER(proxy)) {
5346 struct smbcli_request *c_req = proxy_smb_raw_write_send(ntvfs, io, f);
5347 return sync_chain_handler(c_req);
5348 } else {
5349 struct smbcli_request *c_req = smb_raw_write_send(tree, io);
5350 return smb_raw_write_recv(c_req, io);
5354 /* unpack the rpc struct to make some smb_read response */
5355 NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async,
5356 void* io1, void* io2, NTSTATUS status)
5358 union smb_read* io =talloc_get_type_abort(io1, union smb_read);
5359 struct proxy_Read* r=talloc_get_type_abort(io2, struct proxy_Read);
5360 struct proxy_file *f = async->f;
5361 struct proxy_private *private=async->proxy;
5363 DEBUG(5,("\n>>\n%s() rpc status: %s\n",__FUNCTION__,
5364 get_friendly_nt_error_msg(status)));
5365 NT_STATUS_NOT_OK_RETURN(status);
5367 status=r->out.result;
5368 DEBUG(5,("%s() wrapped status: %s\n",__FUNCTION__,
5369 get_friendly_nt_error_msg(status)));
5370 NT_STATUS_NOT_OK_RETURN(status);
5372 io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/
5373 io->generic.out.compaction_mode = 0;
5375 if (r->out.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5376 /* Use the io we already setup!
5377 if out.flags & PROXY_VALIDATE, we may need to validate more in
5378 cache than r->out.nread would suggest, see io->generic.out.nread */
5379 if (r->out.flags & PROXY_VALIDATE)
5380 io->generic.out.nread=io->generic.in.maxcnt;
5381 DEBUG(5,("Using cached data: size=%lld\n",
5382 (long long) io->generic.out.nread));
5383 return status;
5386 if (r->in.flags & PROXY_VALIDATE) {
5387 DEBUG(5,("Cached data did not validate, flags: %x\n",r->out.flags));
5388 /* turn off validate on this file */
5389 //cache_handle_novalidate(f);
5390 #warning turn off validate on this file - do an nread<maxcnt later
5393 if (r->in.flags & PROXY_USE_CACHE) {
5394 DEBUG(5,("Cached data did not match\n"));
5397 io->generic.out.nread = r->out.nread;
5399 /* we may need to uncompress */
5400 if (r->out.flags & PROXY_USE_ZLIB) {
5401 ssize_t size=r->out.response.compress.count;
5402 DEBUG(5,("%s: uncompress, %lld wanted %lld or %lld\n",__LOCATION__,
5403 (long long int)size,
5404 (long long int)io->generic.in.maxcnt,
5405 (long long int)io->generic.in.mincnt));
5406 if (size > io->generic.in.mincnt) {
5407 /* we did a bulk read for the cache */
5408 uint8_t *data=talloc_size(io, io->generic.in.maxcnt);
5409 DEBUG(5,("%s: bulk uncompress to %p\n",__LOCATION__,data));
5410 if (! uncompress_block_to(data,
5411 r->out.response.compress.data, &size,
5412 io->generic.in.maxcnt) ||
5413 size != r->out.nread) {
5414 status=NT_STATUS_INVALID_USER_BUFFER;
5415 } else {
5416 DEBUG(5,("%s: uncompressed\n",__LOCATION__));
5417 /* copy as much as they can take */
5418 io->generic.out.nread=MIN(io->generic.in.mincnt, size);
5419 memcpy(io->generic.out.data, data, io->generic.out.nread);
5420 /* copy the rest to the cache */
5421 cache_handle_save(f, data,
5422 size,
5423 io->generic.in.offset);
5425 } else if (! uncompress_block_to(io->generic.out.data,
5426 r->out.response.compress.data, &size,
5427 io->generic.in.maxcnt) ||
5428 size != r->out.nread) {
5429 io->generic.out.nread=size;
5430 status=NT_STATUS_INVALID_USER_BUFFER;
5432 } else if (io->generic.out.data != r->out.response.generic.data) {
5433 //Assert(r->out.nread == r->out.generic.out.count);
5434 memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread);
5436 if (r->out.cache_name.s && r->out.cache_name.count && f && f->cache) {
5437 int result;
5438 setenv("WAFS_CACHE_REMOTE_NAME",r->out.cache_name.s,1);
5439 setenv("WAFS_CACHE_LOCAL_NAME",f->cache->cache_name,1);
5440 setenv("WAFS_REMOTE_SERVER",private->remote_server,1);
5441 DEBUG(5,("WAFS_CACHE_REMOTE_NAME=%s [cache_name]\nWAFS_CACHE_LOCAL_NAME=%s\nWAFS_REMOTE_SERVER=%s\n\n",getenv("WAFS_CACHE_REMOTE_NAME"),getenv("WAFS_CACHE_LOCAL_NAME"),getenv("WAFS_REMOTE_SERVER")));
5442 DEBUG(5,("%s running cache transfer command: %s\n",__LOCATION__,getenv("WAFS_CACHE_REMOTE_NAME")));
5443 result=system(getenv("WAFS_CACHE_TRANSFER"));
5444 DEBUG(5,("%s cache transfer command result %d\n",__LOCATION__,result));
5445 // now set cache to make whole local file valid
5446 cache_validated(f->cache, cache_len(f->cache));
5449 return status;
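/* Summary of the three proxy_Read reply shapes handled above:
 *   out.flags & PROXY_USE_CACHE - no payload; the local cache is valid
 *     (plus PROXY_VALIDATE - the whole validated range is usable)
 *   out.flags & PROXY_USE_ZLIB  - response.compress.{data,count}; inflate
 *   otherwise                   - response.generic.{data,count}; copy out
 */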
5452 /* Warning: Assumes that if io->generic.out.nread is not zero, then some
5453 data has been pre-read into io->generic.out.data and can be used for
5454 proxy<->proxy optimized reads */
5455 struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs,
5456 union smb_read *io,
5457 struct proxy_file *f,
5458 struct proxy_Read *r)
5460 struct proxy_private *private = ntvfs->private_data;
5461 #warning we are using out.nread as an out-of-band parameter
5462 if (PROXY_REMOTE_SERVER(private)) {
5464 struct smbcli_request *c_req;
5465 if (! r) {
5466 r=talloc_zero(io, struct proxy_Read);
5467 if (! r) return NULL;
5468 r->in.mincnt = io->generic.in.mincnt;
5472 r->in.fnum = io->generic.in.file.fnum;
5473 r->in.read_for_execute=io->generic.in.read_for_execute;
5474 r->in.offset = io->generic.in.offset;
5475 r->in.maxcnt = io->generic.in.maxcnt;
5476 r->in.remaining = io->generic.in.remaining;
5477 r->in.flags |= PROXY_USE_ZLIB;
5478 if (! (r->in.flags & PROXY_VALIDATE) &&
5479 io->generic.out.data && io->generic.out.nread > 0) {
5480 /* maybe we should limit digest size to MIN(nread, maxcnt) to
5481 permit the caller to provide a larger nread as part of
5482 a split read */
5483 checksum_block(r->in.digest.digest, io->generic.out.data,
5484 io->generic.out.nread);
5486 if (io->generic.out.nread > r->in.maxcnt) {
5487 DEBUG(0,("Cache from nread is too big for requested read struct, ignoring cache\n"));
5488 } else {
5489 r->in.mincnt = io->generic.out.nread;
5490 r->in.maxcnt = io->generic.out.nread;
5491 r->in.flags |= PROXY_USE_CACHE;
5492 /* PROXY_VALIDATE will have been set by caller */
5496 if (r->in.flags & (PROXY_USE_CACHE | PROXY_VALIDATE)) {
5497 DEBUG(3,("Cache digest length=%lld\n", (long long)r->in.maxcnt));
5498 dump_data (3, r->in.digest.digest, sizeof(r->in.digest.digest));
5501 c_req = smbcli_ndr_request_ntioctl_send(private->tree,
5502 ntvfs,
5503 &ndr_table_rpcproxy,
5504 NDR_PROXY_READ, r);
5505 if (! c_req) return NULL;
5507 { void* req=NULL;
5508 ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL);
5511 return c_req;
5512 } else {
5513 return smb_raw_read_send(private->tree, io);
5517 NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs,
5518 union smb_read *io,
5519 struct proxy_file *f)
5521 struct proxy_private *proxy = ntvfs->private_data;
5522 struct smbcli_tree *tree=proxy->tree;
5524 if (PROXY_REMOTE_SERVER(proxy)) {
5525 struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, io, f, NULL);
5526 return sync_chain_handler(c_req);
5527 } else {
5528 struct smbcli_request *c_req = smb_raw_read_send(tree, io);
5529 return smb_raw_read_recv(c_req, io);
5535 initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem
5537 NTSTATUS ntvfs_proxy_init(void)
5539 NTSTATUS ret;
5540 struct ntvfs_ops ops;
5541 NTVFS_CURRENT_CRITICAL_SIZES(vers);
5543 ZERO_STRUCT(ops);
5545 /* fill in the name and type */
5546 ops.name = "proxy";
5547 ops.type = NTVFS_DISK;
5549 /* fill in all the operations */
5550 ops.connect = proxy_connect;
5551 ops.disconnect = proxy_disconnect;
5552 ops.unlink = proxy_unlink;
5553 ops.chkpath = proxy_chkpath;
5554 ops.qpathinfo = proxy_qpathinfo;
5555 ops.setpathinfo = proxy_setpathinfo;
5556 ops.open = proxy_open;
5557 ops.mkdir = proxy_mkdir;
5558 ops.rmdir = proxy_rmdir;
5559 ops.rename = proxy_rename;
5560 ops.copy = proxy_copy;
5561 ops.ioctl = proxy_ioctl;
5562 ops.read = proxy_read;
5563 ops.write = proxy_write;
5564 ops.seek = proxy_seek;
5565 ops.flush = proxy_flush;
5566 ops.close = proxy_close;
5567 ops.exit = proxy_exit;
5568 ops.lock = proxy_lock;
5569 ops.setfileinfo = proxy_setfileinfo;
5570 ops.qfileinfo = proxy_qfileinfo;
5571 ops.fsinfo = proxy_fsinfo;
5572 ops.lpq = proxy_lpq;
5573 ops.search_first = proxy_search_first;
5574 ops.search_next = proxy_search_next;
5575 ops.search_close = proxy_search_close;
5576 ops.trans = proxy_trans;
5577 ops.logoff = proxy_logoff;
5578 ops.async_setup = proxy_async_setup;
5579 ops.cancel = proxy_cancel;
5580 ops.notify = proxy_notify;
5581 ops.trans2 = proxy_trans2;
5583 /* register ourselves with the NTVFS subsystem. We register
5584 under the name 'proxy'. */
5585 ret = ntvfs_register(&ops, &vers);
5587 if (!NT_STATUS_IS_OK(ret)) {
5588 DEBUG(0,("Failed to register PROXY backend!\n"));
5591 return ret;