From 73856f161e1c80d074b04308cf030735491232d6 Mon Sep 17 00:00:00 2001 From: Amin Azez Date: Thu, 31 Jan 2008 11:13:51 +0000 Subject: [PATCH] Main Proxy/Caching handlers in vfs_proxy These handlers are currently corrupting memory and I'm trying to find out how. It includes a new async request handler that can chain handlers, to reverse the packaing done when rpc structs are packed to NDR and then sent over ntioctl --- source/ntvfs/proxy/vfs_proxy.c | 918 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 879 insertions(+), 39 deletions(-) diff --git a/source/ntvfs/proxy/vfs_proxy.c b/source/ntvfs/proxy/vfs_proxy.c index b71a3f90f0c..c142bc16244 100644 --- a/source/ntvfs/proxy/vfs_proxy.c +++ b/source/ntvfs/proxy/vfs_proxy.c @@ -35,7 +35,14 @@ #include "param/param.h" #include "libcli/resolve/resolve.h" #include "libcli/libcli.h" +#include "libcli/raw/ioctl.h" +#include "librpc/gen_ndr/ndr_proxy.h" +#include "librpc/ndr/ndr_table.h" #include "lib/cache/cache.h" +#include "lib/compress/compress.h" + +#define dump_data(a,b,c) +#define talloc_check_type(ptr, type) (type *)( (ptr && (! talloc_check_name(ptr, #type)))?(printf("************************* Bad type: %s %s",__location__, #type),exit(1),NULL):talloc_check_name(ptr, #type)) struct proxy_file { struct proxy_file *prev, *next; @@ -66,6 +73,21 @@ struct async_info { void *parms; }; +/* used to chain async callbacks */ +struct async_info_map { + struct async_info_map *next, *prev; + NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS); + void *parms1; + void *parms2; + struct async_info *async; +}; + +/* a structure used to pass information to an async handler */ +struct async_rpclite_send { + const struct ndr_interface_call* call; + void* struct_ptr; +}; + #define SETUP_PID private->tree->session->pid = req->smbpid #define SETUP_FILE_HERE(f) do { \ @@ -84,22 +106,49 @@ struct async_info { SETUP_FILE; \ } while (0) -#define PROXY_SERVER "proxy:server" -#define PROXY_USER "proxy:user" -#define PROXY_PASSWORD "proxy:password" -#define PROXY_DOMAIN "proxy:domain" -#define PROXY_SHARE "proxy:share" +#define PROXY_SERVER "proxy:server" +#define PROXY_USER "proxy:user" +#define PROXY_PASSWORD "proxy:password" +#define PROXY_DOMAIN "proxy:domain" +#define PROXY_SHARE "proxy:share" #define PROXY_USE_MACHINE_ACCT "proxy:use-machine-account" -#define PROXY_MAP_GENERIC "proxy:map-generic" +#define PROXY_MAP_GENERIC "proxy:map-generic" #define PROXY_MAP_TRANS2 "proxy:map-trans2" -#define PROXY_CACHE_ENABLED "proxy:cache-enabled" -#define PROXY_CACHE_ENABLED_DEFAULT false +#define PROXY_CACHE_ENABLED "proxy:cache-enabled" +#define PROXY_CACHE_ENABLED_DEFAULT false #define PROXY_USE_MACHINE_ACCT_DEFAULT false -#define PROXY_MAP_GENERIC_DEFAULT false +/* These two really should be: true, and possibly not even configurable */ +#define PROXY_MAP_GENERIC_DEFAULT true #define PROXY_MAP_TRANS2_DEFAULT true +/* is the remote server a proxy? 
*/ +#define PROXY_REMOTE_SERVER(private) \ + ((private)->tree->session->transport->negotiate.capabilities & CAP_COMPRESSION) + +/* A few forward declarations */ +static void async_chain_handler(struct smbcli_request *c_req); +static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_ioctl *io); + +struct smbcli_request *smbcli_ndr_request_ntioctl_send( + struct smbcli_tree *tree, struct ntvfs_request *req, + uint16_t fnum, const struct ndr_interface_table *table, + uint32_t opnum, void *r, union smb_ioctl* io); +struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_read *io, + struct proxy_file *f); +NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_read *io, + struct proxy_file *f); +struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_write *io, + struct proxy_file *f); +NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_write *io, + struct proxy_file *f); + /* a handler for oplock break events from the server - these need to be passed along to the client @@ -291,13 +340,20 @@ static void async_simple(struct smbcli_request *c_req) req->async_states->send_fn(req); } +/* hopefully this will optimize away */ +#define TYPE_CHECK(type,check) do { \ + type=check; \ + t=t; \ +} while (0) + /* save some typing for the simple functions */ -#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file) do { \ - if (!c_req) return NT_STATUS_UNSUCCESSFUL; \ +#define ASYNC_RECV_TAIL_F_ORPHAN(io, async_fn, file, error) do { \ + if (!c_req) return (error); \ + TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \ { \ struct async_info *async; \ async = talloc(req, struct async_info); \ - if (!async) return NT_STATUS_NO_MEMORY; \ + if (!async) return (error); \ async->parms = io; \ async->req = req; \ async->f = file; \ @@ -312,6 +368,7 @@ static void async_simple(struct smbcli_request *c_req) #define ASYNC_RECV_TAIL_F(io, async_fn, file) do { \ if (!c_req) return NT_STATUS_UNSUCCESSFUL; \ + TYPE_CHECK(void (*t)(struct smbcli_request *),async_fn); \ { \ struct async_info *async; \ async = talloc(req, struct async_info); \ @@ -334,6 +391,41 @@ static void async_simple(struct smbcli_request *c_req) #define SIMPLE_ASYNC_TAIL ASYNC_RECV_TAIL(NULL, async_simple) +/* managers for chained async-callback. + The model of async handlers has changed. + backend async functions should be of the form: + NTSTATUS (*fn)(struct async_info*, void*, void*, NTSTATUS); + And if async->c_req is NULL then an earlier chain has already rec'd the + request. + ADD_ASYNC_RECV_TAIL is used to add chained handlers. + The chained handler manager async_chain_handler is installed the usual way + and uses the io pointer to point to the first async_map record + static void async_chain_handler(struct smbcli_request *c_req). + It is safe to call ADD_ASYNC_RECV_TAIL before the chain manager is installed + and often desirable. + */ +#define ASYNC_RECV_TAIL_HANDLER(f) ASYNC_RECV_TAIL_F(c_req->async.private, async_chain_handler, f); + +#define ADD_ASYNC_RECV_TAIL(c_req, io1, io2, file, async_fn, error) do { \ + if (! c_req) return (error); \ + TYPE_CHECK(NTSTATUS (*t)(struct async_info*, void*, void*, NTSTATUS status), async_fn); \ + { \ + struct async_info_map *async_map=talloc(c_req->async.private, struct async_info_map); \ + if (! 
async_map) return (error); \ + async_map->async=talloc(async_map, struct async_info); \ + if (! async_map->async) return (error); \ + async_map->parms1=io1; \ + async_map->parms2=io2; \ + async_map->fn=async_fn; \ + async_map->async->parms = io1; \ + async_map->async->req = req; \ + async_map->async->f = file; \ + async_map->async->proxy = private; \ + async_map->async->c_req = c_req; \ + DLIST_ADD_END(c_req->async.private, async_map, struct async_info_map *); \ + } \ +} while(0) + /* delete a file - the dirtype specifies the file types to include in the search. The name can contain PROXY wildcards, but rarely does (except with OS/2 clients) @@ -378,6 +470,11 @@ static NTSTATUS proxy_ioctl(struct ntvfs_module_context *ntvfs, struct proxy_private *private = ntvfs->private_data; struct smbcli_request *c_req; + if (io->ntioctl.level == RAW_IOCTL_NTIOCTL + && io->ntioctl.in.function == FSCTL_UFOPROXY_RPCLITE) { + return proxy_rpclite(ntvfs, req, io); + } + SETUP_PID_AND_FILE; /* see if the front end will allow us to perform this @@ -650,13 +747,29 @@ static NTSTATUS proxy_copy(struct ntvfs_module_context *ntvfs, /* a handler for async read replies */ -static void async_read(struct smbcli_request *c_req) +//static void async_read(struct smbcli_request *c_req) +NTSTATUS async_read(struct async_info *async, void* io1, void* io2, NTSTATUS status) { - struct async_info *async = c_req->async.private; - struct ntvfs_request *req = async->req; - req->async_states->status = smb_raw_read_recv(c_req, async->parms); + struct smbcli_request *c_req = async->c_req; + struct proxy_file *f = async->f; + union smb_read *io = async->parms; + + /* if request is not already received by a chained handler, read it */ + if (c_req) status=smb_raw_read_recv(c_req, async->parms); + + DEBUG(5,("%s Save read in cache\n",__FUNCTION__)); + NT_STATUS_NOT_OK_RETURN(status); + + cache_handle_save(f, io->generic.out.data, + io->generic.out.nread, + io->generic.in.offset); + + /* optional */ talloc_free(async); - req->async_states->send_fn(req); + /* forbidden, the chain manager will do it */ + //req->async_states->send_fn(req); + + return status; } /* @@ -667,6 +780,8 @@ static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs, { struct proxy_private *private = ntvfs->private_data; struct smbcli_request *c_req; + struct proxy_file *f; + declare_checksum(checksum); SETUP_PID; @@ -675,15 +790,63 @@ static NTSTATUS proxy_read(struct ntvfs_module_context *ntvfs, return ntvfs_map_read(ntvfs, req, io); } - SETUP_FILE; + SETUP_FILE_HERE(f); - if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) { - return smb_raw_read(private->tree, io); + /* attempt to read from cache */ + /* if nread becomes non-zero then we have cache to validate */ + io->generic.out.nread=0; + + if (private->cache_enabled) { + ssize_t valid=0; + NTSTATUS status=cache_smb_raw_read(f->cache, ntvfs, req, io, &valid); + + if (NT_STATUS_IS_OK(status)) { + /* shrink the read to what has been validated */ + if (io->generic.out.nread > valid && valid > 0) { + io->generic.out.nread=valid; +#warning if we reduce a read count, should we increase out.remaining? + /* Should we increase io->readx.out.remaining too? */ + /* does it matter if nread is less than mincnt? */ + } + /* Can (shrunk if needed) cache read be trusted? 
*/ + if (valid > 0) { + DEBUG(1,("Read from cache s=%d offset=%d\n", + (int)(io->generic.in.offset), + (int)(io->generic.out.nread)) ); + return status; + } + /* We read from invalid cache but we can prepare to issue a + * slightly optimized read. */ + if (PROXY_REMOTE_SERVER(private) && io->readx.out.nread > 0 && valid==0) { + checksum_block(checksum, io->generic.out.data, io->generic.out.nread); + } + } } - c_req = smb_raw_read_send(private->tree, io); + if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) { + NTSTATUS status; - ASYNC_RECV_TAIL(io, async_read); + if (PROXY_REMOTE_SERVER(private)) { + status=proxy_smb_raw_read(ntvfs, req, io, f); + } else { + status=smb_raw_read(private->tree, io); + } + /* save in cache */ + cache_handle_save(f, io->generic.out.data, + io->generic.out.nread, + io->generic.in.offset); + return status; + } + + if (PROXY_REMOTE_SERVER(private)) { + c_req = proxy_smb_raw_read_send(ntvfs, req, io, f); + } else { + c_req = smb_raw_read_send(private->tree, io); + DEBUG(0,(">>> NO CAP_COMPRESSION\n")); + } + ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_read, NT_STATUS_INTERNAL_ERROR); + /* install chain handler */ + ASYNC_RECV_TAIL_HANDLER(f); } /* @@ -764,19 +927,29 @@ failure: DEBUG(3,("Async write refragment failed with: %s\n", get_nt_error_c_code(req->async_states->status))); req->async_states->send_fn(req); - return; + return; } /* a handler for async write replies */ -static void async_write(struct smbcli_request *c_req) +//static void async_write(struct smbcli_request *c_req) +NTSTATUS async_write(struct async_info *async, void* io1, void* io2, NTSTATUS status) { - struct async_info *async = c_req->async.private; + struct smbcli_request *c_req = async->c_req; struct ntvfs_request *req = async->req; - req->async_states->status = smb_raw_write_recv(c_req, async->parms); + struct proxy_file *f=async->f; + union smb_write *io=async->parms; + + if (req) + req->async_states->status = smb_raw_write_recv(c_req, async->parms); + + cache_handle_save(f, io->generic.in.data, + io->generic.out.nwritten, + io->generic.in.offset); + talloc_free(async); - req->async_states->send_fn(req); + return NT_STATUS_OK; } /* @@ -787,6 +960,7 @@ static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs, { struct proxy_private *private = ntvfs->private_data; struct smbcli_request *c_req; + struct proxy_file *f; SETUP_PID; @@ -794,12 +968,18 @@ static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs, private->map_generic) { return ntvfs_map_write(ntvfs, req, io); } - SETUP_FILE; + SETUP_FILE_HERE(f); if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) { - if (io->generic.in.count > + NTSTATUS status; + if (0 && PROXY_REMOTE_SERVER(private)) { + /* Do a proxy write */ + status=proxy_smb_raw_write(ntvfs, req, io, f); + } else if (io->generic.in.count > private->tree->session->transport->negotiate.max_xmit) { + /* smbcli_write can deal with large writes, which are bigger than + tree->session->transport->negotiate.max_xmit */ ssize_t size=smbcli_write(private->tree, io->generic.in.file.fnum, io->generic.in.wmode, @@ -809,16 +989,33 @@ static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs, if (size==io->generic.in.count || size > 0) { io->generic.out.nwritten=size; - return NT_STATUS_OK; + status=NT_STATUS_OK; + } else { + status=NT_STATUS_UNSUCCESSFUL; } + } else { + status=smb_raw_write(private->tree, io); + } - return NT_STATUS_UNSUCCESSFUL; + /* Save write in cache */ + if (NT_STATUS_IS_OK(status)) { + cache_handle_save(f, 
io->generic.in.data, + io->generic.out.nwritten, + io->generic.in.offset); } - return smb_raw_write(private->tree, io); + + return status; } - - if (io->generic.in.count > + + /* smb_raw_write_send can't deal with large writes, which are bigger than + tree->session->transport->negotiate.max_xmit so we have to break it up + trying to preserve the async nature of the call as much as possible */ + if (0 && PROXY_REMOTE_SERVER(private)) { + c_req = proxy_smb_raw_write_send(ntvfs, req, io, f); + } else if (io->generic.in.count <= private->tree->session->transport->negotiate.max_xmit) { + c_req = smb_raw_write_send(private->tree, io); + } else { ssize_t remaining = io->generic.in.count; int block = (private->tree->session->transport->negotiate.max_xmit - (MIN_SMB_SIZE+32)); int done = 0; @@ -857,19 +1054,19 @@ static NTSTATUS proxy_write(struct ntvfs_module_context *ntvfs, } DLIST_ADD(fragments->fragments, fragment); - ASYNC_RECV_TAIL_F_ORPHAN(fragment, async_write_fragment, NULL); + ASYNC_RECV_TAIL_F_ORPHAN(fragment, async_write_fragment, NULL, NT_STATUS_UNSUCCESSFUL); remaining -= size; done += size; } while(remaining > 0); - + + /* this strategy has no callback chain, return directly */ req->async_states->state |= NTVFS_ASYNC_STATE_ASYNC; return NT_STATUS_OK; } - c_req = smb_raw_write_send(private->tree, io); - - ASYNC_RECV_TAIL(io, async_write); + ADD_ASYNC_RECV_TAIL(c_req, io, NULL, f, async_write, NT_STATUS_INTERNAL_ERROR); + ASYNC_RECV_TAIL_HANDLER(f); } /* @@ -1182,6 +1379,7 @@ static NTSTATUS proxy_trans2(struct ntvfs_module_context *ntvfs, } SETUP_PID; +#warning we should be mapping file handles here if (!(req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) { return smb_raw_trans2(private->tree, req, trans2); @@ -1250,6 +1448,648 @@ static NTSTATUS proxy_notify(struct ntvfs_module_context *ntvfs, } /* + * A hander for converting from rpc struct replies to ntioctl + */ +static NTSTATUS proxy_rpclite_map_async_send( + struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, + void *io1, void *io2, NTSTATUS status) +{ + union smb_ioctl* io=talloc_check_type(io1, union smb_ioctl); + struct async_rpclite_send *rpclite_send=talloc_check_type(io2, struct async_rpclite_send); + void* r=rpclite_send->struct_ptr; + struct ndr_push* push; + const struct ndr_interface_call* call=rpclite_send->call; + enum ndr_err_code ndr_err; + + talloc_free(rpclite_send); + + push = ndr_push_init_ctx(req, lp_iconv_convenience(ntvfs->ctx->lp_ctx)); + NT_STATUS_HAVE_NO_MEMORY(push); + + if (0) { + push->flags |= LIBNDR_FLAG_BIGENDIAN; + } + + ndr_err = call->ndr_push(push, NDR_OUT, r); + status=ndr_map_error2ntstatus(ndr_err); + + if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { + DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n", + nt_errstr(status))); + return status; + } + + io->ntioctl.out.blob=ndr_push_blob(push); + return status; +} + +/* + * A handler for sending async rpclite Read replies + */ +static NTSTATUS rpclite_proxy_Read_map_async_send( + struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, + void *io1, void *io2, NTSTATUS status) +{ + struct proxy_Read* r=talloc_check_type(io1, struct proxy_Read); + union smb_read* io=talloc_check_type(io2, union smb_read); + + /* status here is a result of proxy_read, it doesn't mean the rpc + call failed, just the read operation */ + r->out.result=status; + + if (! NT_STATUS_IS_OK(status)) { + /* can we use result as a discriminator in IDL ? 
*/ + r->out.nread=0; + } else { + ssize_t size=io->readx.out.nread; + r->out.flags=0; + r->out.nread=io->readx.out.nread; + + if (r->in.flags & PROXY_USE_CACHE && io->readx.out.nread>0) { + declare_checksum(digest); + checksum_block(digest, io->readx.out.data, io->readx.out.nread); + if (memcmp(digest, r->in.digest.digest, sizeof(digest))==0) { + r->out.flags=PROXY_USE_CACHE; + DEBUG(3,("%s: Use cached data\n",__FUNCTION__)); + } + } else if (r->in.flags & PROXY_USE_ZLIB && + compress_block(io->readx.out.data, &size) ) { + r->out.flags|=PROXY_USE_ZLIB; + r->out.response.compress.count=size; + r->out.response.compress.data=io->readx.out.data; + DEBUG(3,("%s: Compressed from %d to %d = %d%%\n", + __FUNCTION__,r->out.nread,size,size*100/r->out.nread)); + } else { + DEBUG(3,("%s: Compressed not worthwhile\n", __FUNCTION__)); + r->out.response.generic.count=io->readx.out.nread; + r->out.response.generic.data=io->readx.out.data; + } + } + + /* Or should we return NT_STATUS_OK ?*/ + DEBUG(5,("Finish %s status %s\n",__FUNCTION__,get_friendly_nt_error_msg(status))); + + return NT_STATUS_OK; +} + +/* + * RPC implementation of Read + */ +static NTSTATUS rpclite_proxy_Read(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, struct proxy_Read *r, + union smb_handle file) +{ + struct proxy_private *private = ntvfs->private_data; + union smb_read* io=talloc(req, union smb_read); + NTSTATUS status; + + SETUP_PID; + + DEBUG(0,("Opnum: proxy_Read min=%d max=%d offset=%lld, fnum=%d\n", + r->in.mincnt, r->in.maxcnt, r->in.offset, r->in.fnum)); + + /* prepare for response */ + r->out.response.generic.data=talloc_array(io, uint8_t, r->in.maxcnt); + if (! r->out.response.generic.data) { + /* I don't feel like returning NO_MEMORY as the rpc result if we + can't setup a valid rpc struct */ + return NT_STATUS_NO_MEMORY; + } + + /* pack up an smb_read request and dispatch here */ + io->readx.level=RAW_READ_READX; + io->readx.in.file=file; + io->readx.in.mincnt=r->in.mincnt; + io->readx.in.maxcnt=r->in.maxcnt; + io->readx.in.offset=r->in.offset; + /* and something to hold the answer */ + io->readx.out.data=r->out.response.generic.data; + + /* so we get to pack the io->*.out response */ + status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Read_map_async_send); + NT_STATUS_NOT_OK_RETURN(status); + + /* so the read will get processed normally */ + r->out.result=proxy_read(ntvfs, req, io); + /* but the rpclite layer was successful */ + return ntvfs_map_async_finish(req, NT_STATUS_OK); +} + +/* + * A handler for sending async rpclite Write replies + */ +static NTSTATUS rpclite_proxy_Write_map_async_send( + struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, + void *io1, void *io2, NTSTATUS status) +{ + struct proxy_Write* r=talloc_check_type(io1, struct proxy_Write); + union smb_write* io=talloc_check_type(io2, union smb_write); + + r->out.result=status; + + if (NT_STATUS_IS_OK(status)) { + r->out.nwritten=io->writex.out.nwritten; + r->out.remaining=io->writex.out.remaining; + r->out.result=status; + } + + /* the rpc transport succeeded even if the operation did not */ + return NT_STATUS_OK; +} + +/* + * RPC implementation of write + */ +static NTSTATUS rpclite_proxy_Write(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, struct proxy_Write *r, + union smb_handle file) +{ + struct proxy_private *private = ntvfs->private_data; + union smb_write* io=talloc(req, union smb_write); + NTSTATUS status; + + SETUP_PID; + + DEBUG(0,("Opnum: proxy_Write count=%d offset=%lld, 
fnum=%d\n", + r->in.count, r->in.offset, r->in.fnum)); + + /* pack up an smb_write request and dispatch here */ + io->writex.level=RAW_WRITE_WRITEX; + io->writex.in.file=file; + io->writex.in.offset=r->in.offset; + io->writex.in.wmode=r->in.mode; + io->writex.in.count=r->in.count; + + /* and the data */ + if (PROXY_USE_ZLIB & r->in.flags) { + ssize_t count=r->in.data.generic.count; + io->writex.in.data=uncompress_block_talloc(io, r->in.data.compress.data, + &count, r->in.count); + if (count != r->in.count || !io->writex.in.data) { + /* Didn't uncompress properly, but the RPC layer worked */ + r->out.result=NT_STATUS_BAD_COMPRESSION_BUFFER; + return NT_STATUS_OK; + } + } else { + io->writex.in.data=r->in.data.generic.data; + } + + /* so we get to pack the io->*.out response */ + status=ntvfs_map_async_setup(ntvfs, req, r, io, rpclite_proxy_Write_map_async_send); + NT_STATUS_NOT_OK_RETURN(status); + + /* so the read will get processed normally */ + r->out.result=proxy_write(ntvfs, req, io); + /* But the rpc transport worked */ + return ntvfs_map_async_finish(req, NT_STATUS_OK); +} + +/* unmarshall ntioctl and rpc-dispatch, but push async map handler to convert + back from rpc struct to ntioctl */ +static NTSTATUS proxy_rpclite(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_ioctl *io) +{ + struct proxy_private *private = ntvfs->private_data; + DATA_BLOB *request; + struct ndr_syntax_id* syntax_id; + uint32_t opnum; + const struct ndr_interface_table *table; + struct ndr_pull* pull; + void* r; + NTSTATUS status; + struct async_rpclite_send *rpclite_send; + enum ndr_err_code ndr_err; + + SETUP_PID; + + /* unpack the NDR */ + request=&io->ntioctl.in.blob; + + pull = ndr_pull_init_blob(request, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx)); + NT_STATUS_HAVE_NO_MEMORY(pull); + /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */ + + /* the blob is 4-aligned because it was memcpy'd */ + syntax_id=talloc_zero(pull, struct ndr_syntax_id); + NT_STATUS_HAVE_NO_MEMORY(syntax_id); + + ndr_err=ndr_pull_ndr_syntax_id(pull, NDR_SCALARS, syntax_id); + status=ndr_map_error2ntstatus(ndr_err); + if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { + DEBUG(2,("Can't read syntax-id: %s\n",nt_errstr(status))); + return status; + } + + /* now find the struct ndr_interface_table * for this syntax_id */ + table=ndr_table_by_uuid(&syntax_id->uuid); + if (! table) { + DEBUG(2,("Can't find table for uuid\n")); + return NT_STATUS_NO_GUID_TRANSLATION; + } + + ndr_err=ndr_pull_uint32(pull, NDR_SCALARS, &opnum); + status=ndr_map_error2ntstatus(ndr_err); + if (! NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { + DEBUG(2,("Can't read op-num: %s\n",nt_errstr(status))); + return status; + } + + DEBUG(10,("rpc reply data:\n")); + dump_data(10, pull->data, pull->data_size); + + r = talloc_named(req, table->calls[opnum].struct_size, "struct %s", + table->calls[opnum].name); + NT_STATUS_HAVE_NO_MEMORY(r); + + ndr_err=table->calls[opnum].ndr_pull(pull, NDR_IN, r); + status=ndr_map_error2ntstatus(ndr_err); + NT_STATUS_NOT_OK_RETURN(status); + + rpclite_send=talloc(req, struct async_rpclite_send); + NT_STATUS_HAVE_NO_MEMORY(rpclite_send); + rpclite_send->call=&table->calls[opnum]; + rpclite_send->struct_ptr=r; + /* need to push conversion function to convert from r to io */ + status=ntvfs_map_async_setup(ntvfs, req, io, rpclite_send, proxy_rpclite_map_async_send); + + /* Magically despatch the call based on syntax_id, table and opnum. + But there is no table of handlers.... 
so until then*/ + if (0==strcasecmp(table->name,"rpcproxy")) { + switch(opnum) { + case(NDR_PROXY_READ): + status=rpclite_proxy_Read(ntvfs, req, r, io->generic.in.file); + break; + case(NDR_PROXY_WRITE): + status=rpclite_proxy_Write(ntvfs, req, r, io->generic.in.file); + break; + default: + DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum)); + return NT_STATUS_PROCEDURE_NOT_FOUND; + } + } else { + DEBUG(3,("Can't despatch %s:%d\n",table->name, opnum)); + return NT_STATUS_NO_GUID_TRANSLATION; + } + + /* status is the status of the rpc layer. If it is NT_STATUS_OK then + the handler status is in r->out.result */ + return ntvfs_map_async_finish(req, status); +} + + +/* + send an ntioctl request based on a NDR encoding. + */ +struct smbcli_request *smbcli_ndr_request_ntioctl_send( + struct smbcli_tree *tree, + struct ntvfs_request *req, + uint16_t fnum, + const struct ndr_interface_table *table, + uint32_t opnum, + void *r, + union smb_ioctl* io) +{ + struct smbcli_request * c_req; + struct ndr_push *push; + NTSTATUS status; + DATA_BLOB request; + enum ndr_err_code ndr_err; + + /* setup for a ndr_push_* call */ + push = ndr_push_init_ctx(req, lp_iconv_convenience(req->ctx->lp_ctx)); + if (!push) return NULL; + + /* first push interface table identifiers */ + ndr_err=ndr_push_ndr_syntax_id(push, NDR_SCALARS, &table->syntax_id); + status=ndr_map_error2ntstatus(ndr_err); + + if (! NT_STATUS_IS_OK(status)) return NULL; + + ndr_err=ndr_push_uint32(push, NDR_SCALARS, opnum); + status=ndr_map_error2ntstatus(ndr_err); + if (! NT_STATUS_IS_OK(status)) return NULL; + + if (0) { + push->flags |= LIBNDR_FLAG_BIGENDIAN; + } + + /* push the structure into a blob */ + ndr_err = table->calls[opnum].ndr_push(push, NDR_IN, r); + status=ndr_map_error2ntstatus(ndr_err); + if (!NT_STATUS_IS_OK(status)) { + DEBUG(2,("Unable to ndr_push structure in dcerpc_ndr_request_send - %s\n", + nt_errstr(status))); + return NULL; + } + + /* retrieve the blob */ + request = ndr_push_blob(push); + + io->ntioctl.level=RAW_IOCTL_NTIOCTL; + io->ntioctl.in.function=FSCTL_UFOPROXY_RPCLITE; + io->ntioctl.in.file.fnum=fnum; + io->ntioctl.in.fsctl=false; + io->ntioctl.in.filter=0; + io->ntioctl.in.max_data=65536; + io->ntioctl.in.blob=request; + + DEBUG(0,("smbcli_request packet:\n")); + dump_data(0, request.data, request.length); + + /* we can free push without freeing smbcli_request */ + talloc_free(push); + + c_req = smb_raw_ioctl_send(tree, io); + + if (! c_req) { + return NULL; + } + + dump_data(0, c_req->out.data, c_req->out.data_size); + return c_req; +} + +/* unpack the ntioctl to make some rpc_struct */ +NTSTATUS ntioctl_rpc_unmap(struct async_info *async, void* io1, void* io2, NTSTATUS status) +{ + struct ntvfs_module_context *ntvfs = async->proxy->ntvfs; + struct smbcli_request *c_req = async->c_req; + struct ntvfs_request *req = async->req; + struct proxy_Read* r=talloc_check_type(io1, struct proxy_Read); + union smb_ioctl* io =talloc_check_type(io2, union smb_ioctl); + const struct ndr_interface_call *calls=(void*)async->f; + enum ndr_err_code ndr_err; + DATA_BLOB *response; + struct ndr_pull* pull; + + DEBUG(5,("%s convert from ntioctl to rpc for %s\n",__FUNCTION__, calls->name)); + NT_STATUS_NOT_OK_RETURN(status); + + if (c_req) + status = smb_raw_ioctl_recv(c_req, req, io); + + /* This status is the ntioctl status, maybe we should */ + if (! 
NT_STATUS_IS_OK(status)) { + DEBUG(3,("RPC over ntioctl failed: %s for %s\n",__FUNCTION__, calls->name)); + return NT_STATUS_UNSUCCESSFUL; + } + + dump_data(0, io->ntioctl.out.blob.data, io->ntioctl.out.blob.length); + + response=&io->ntioctl.out.blob; + pull = ndr_pull_init_blob(response, req, lp_iconv_convenience(ntvfs->ctx->lp_ctx)); + /* set pull->flags; LIBNDR_FLAG_PAD_CHECK, LIBNDR_FLAG_REF_ALLOC */ + + NT_STATUS_HAVE_NO_MEMORY(pull); + + ndr_err=calls->ndr_pull(pull, NDR_OUT, r); + status=ndr_map_error2ntstatus(ndr_err); + + return status; +} + +/* + client helpers, mapping between proxy RPC calls and smbcli_* calls. + */ +NTSTATUS sync_chain_handler(struct smbcli_request *c_req) +{ + struct async_info *async; + /* the first callback which will actually receive the c_req response */ + struct async_info_map *async_map; + NTSTATUS status=NT_STATUS_OK; + + if (! c_req) return NT_STATUS_UNSUCCESSFUL; + + async = talloc_check_type(c_req->async.private, struct async_info); + async_map = async->parms; + + /* Should we raise an error? Should we simple_recv? */ + while(async_map) { + if (async_map->fn) { + status=async_map->fn(async_map->async, + async_map->parms1, async_map->parms2, status); + } + + /* no need to free each async_map, they were talloc'd against async */ + async_map=async_map->next; + /* The first callback will have read c_req */ + if (async_map && async_map->async) async_map->async->c_req=NULL; + } + + talloc_free(async); + + return status; +} + +static void async_chain_handler(struct smbcli_request *c_req) +{ + struct async_info *async = c_req->async.private; + struct ntvfs_request *req = async->req; + NTSTATUS status=sync_chain_handler(c_req); + + if (req) { + req->async_states->status=status; + req->async_states->send_fn(req); + } +} + +/* unpack the rpc struct to make some smb_write */ +NTSTATUS async_proxy_smb_raw_write_rpc(struct async_info *async, + void* io1, void* io2, NTSTATUS status) +{ + union smb_write* io =talloc_get_type(io1, union smb_write); + struct proxy_Write* r=talloc_get_type(io2, struct proxy_Write); + + DEBUG(5,("%s convert from rpc to smb",__FUNCTION__)); + NT_STATUS_NOT_OK_RETURN(status); + +// NT_STATUS_NOT_OK_RETURN(r->out.result); + + io->generic.out.remaining = r->out.remaining; + io->generic.out.nwritten = r->out.nwritten; + + return status; +} + +/* upgrade from smb to NDR and then send. + The caller should ADD_ASYNC_RECV_TAIL the handler that tries to receive the response*/ +struct smbcli_request *proxy_smb_raw_write_send(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, + union smb_write *io, + struct proxy_file *f) +{ + struct proxy_private *private = ntvfs->private_data; + struct smbcli_tree *tree=private->tree; + + if (PROXY_REMOTE_SERVER(private)) { + const struct ndr_interface_call *calls=&ndr_table_rpcproxy.calls[NDR_PROXY_WRITE]; + struct smbcli_request *c_req; + struct proxy_Write *r=talloc(io, struct proxy_Write); + union smb_ioctl *ntio=talloc(io, union smb_ioctl); + ssize_t size; + + if (! r) return NULL; + if (! 
ntio) return NULL; + + size=io->generic.in.count; + /* upgrade the write */ + r->in.fnum = io->generic.in.file.fnum; + r->in.offset = io->generic.in.offset; + r->in.count = io->generic.in.count; + r->in.mode = io->generic.in.wmode; +// r->in.remaining = io->generic.in.remaining; + + /* try to compress */ + r->in.data.compress.data=compress_block_talloc(r, io->generic.in.data, &size); + if (r->in.data.compress.data) { + r->in.data.compress.count=size; + r->in.flags = PROXY_USE_ZLIB; + } else { + r->in.flags = 0; + /* we'll honour const, honest gov */ + r->in.data.generic.data=discard_const(io->generic.in.data); + r->in.data.generic.count=io->generic.in.count; + } + + c_req = smbcli_ndr_request_ntioctl_send(private->tree, req, + io->generic.in.file.fnum, + &ndr_table_rpcproxy, + NDR_PROXY_WRITE, r, ntio); + if (! c_req) return NULL; + + /* yeah, filthy abuse of f */ + ADD_ASYNC_RECV_TAIL(c_req, r, ntio, (void*)calls, ntioctl_rpc_unmap, NULL); + ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_write_rpc, NULL); + + return c_req; + } else { + return smb_raw_write_send(tree, io); + } +} + +NTSTATUS proxy_smb_raw_write(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_write *io, + struct proxy_file *f) +{ + struct proxy_private *proxy = ntvfs->private_data; + struct smbcli_tree *tree=proxy->tree; + + if (PROXY_REMOTE_SERVER(proxy)) { + NTSTATUS status; + struct smbcli_request *c_req = + proxy_smb_raw_write_send(ntvfs, req, io, f); + return sync_chain_handler(c_req); + return status; + } else { + struct smbcli_request *c_req = smb_raw_write_send(tree, io); + return smb_raw_write_recv(c_req, io); + } +} + +/* unpack the rpc struct to make some smb_write */ +NTSTATUS async_proxy_smb_raw_read_rpc(struct async_info *async, + void* io1, void* io2, NTSTATUS status) +{ + union smb_read* io =talloc_check_type(io1, union smb_read); + struct proxy_Read* r=talloc_check_type(io2, struct proxy_Read); + + DEBUG(5,("%s convert from rpc to smb",__FUNCTION__)); + NT_STATUS_NOT_OK_RETURN(status); + + if (r->out.flags & PROXY_USE_CACHE) { + /* Use the io we already setup! */ + return status; + } + + io->generic.out.remaining = 0; /*r->out.response.generic.remaining;*/ + io->generic.out.nread = r->out.nread; + io->generic.out.compaction_mode = 0; + + /* we may need to uncompress */ + if (r->out.flags & PROXY_USE_ZLIB) { + ssize_t size=r->out.response.compress.count; + if (! uncompress_block_to(io->generic.out.data, + r->out.response.compress.data, &size, + io->generic.in.maxcnt)) { + io->generic.out.nread=size; + status=NT_STATUS_INVALID_USER_BUFFER; + } + } else if (io->generic.out.data != r->out.response.generic.data) { + //Assert(r->out.nread == r->out.generic.out.count); + memcpy(io->generic.out.data, r->out.response.generic.data, io->generic.out.nread); + } + + return status; +} + +struct smbcli_request *proxy_smb_raw_read_send(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, + union smb_read *io, + struct proxy_file *f) +{ + struct proxy_private *private = ntvfs->private_data; + + if (PROXY_REMOTE_SERVER(private)) { + const struct ndr_interface_call *calls=&ndr_table_rpcproxy.calls[NDR_PROXY_READ]; + struct smbcli_request *c_req; + struct proxy_Read *r=talloc(io, struct proxy_Read); + union smb_ioctl *ntio=talloc(io, union smb_ioctl); + + + if (! r) return NULL; + if (! 
ntio) return NULL; + + r->in.fnum = io->generic.in.file.fnum; + r->in.read_for_execute=io->generic.in.read_for_execute; + r->in.offset = io->generic.in.offset; + r->in.mincnt = io->generic.in.mincnt; + r->in.maxcnt = io->generic.in.maxcnt; + r->in.flags = PROXY_USE_ZLIB; + if (io->generic.out.nread > 0) { + checksum_block(r->in.digest.digest, io->generic.out.data, + io->generic.out.nread); + r->in.mincnt = io->generic.out.nread; + r->in.maxcnt = io->generic.out.nread; + r->in.flags |= PROXY_USE_CACHE; + } + + c_req = smbcli_ndr_request_ntioctl_send(private->tree, req, + io->generic.in.file.fnum, + &ndr_table_rpcproxy, + NDR_PROXY_READ, r, ntio); + if (! c_req) return NULL; + + + ADD_ASYNC_RECV_TAIL(c_req, r, ntio, (void*)calls, ntioctl_rpc_unmap, NULL); + ADD_ASYNC_RECV_TAIL(c_req, io, r, f, async_proxy_smb_raw_read_rpc, NULL); + + return c_req; + } else { + return smb_raw_read_send(private->tree, io); + } +} + +NTSTATUS proxy_smb_raw_read(struct ntvfs_module_context *ntvfs, + struct ntvfs_request *req, union smb_read *io, + struct proxy_file *f) +{ + struct proxy_private *proxy = ntvfs->private_data; + struct smbcli_tree *tree=proxy->tree; + + if (PROXY_REMOTE_SERVER(proxy)) { + struct smbcli_request *c_req = proxy_smb_raw_read_send(ntvfs, req, io, f); + return sync_chain_handler(c_req); + } else { + struct smbcli_request *c_req = smb_raw_read_send(tree, io); + return smb_raw_read_recv(c_req, io); + } +} + + +/* initialise the PROXY->PROXY backend, registering ourselves with the ntvfs subsystem */ NTSTATUS ntvfs_proxy_init(void) -- 2.11.4.GIT
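The chained async-callback model that the new macros introduce is easier to see in isolation. Below is a stand-alone sketch of the pattern that ADD_ASYNC_RECV_TAIL, sync_chain_handler and async_chain_handler implement; it uses simplified stand-in types (int in place of NTSTATUS, a hand-rolled list in place of DLIST_*, made-up handler names) and is an illustration of the idea rather than the vfs_proxy code itself: the first handler in the chain receives the wire reply, and the chain walker clears c_req so that later handlers only transform the already-received result.

/*
 * Stand-alone sketch of the chained async-callback model.  Handlers of
 * the form  status (*fn)(struct async_info*, void*, void*, status)  are
 * run in order; the chain walker clears c_req after each stage so that
 * only the first handler tries to receive the wire reply.
 */
#include <stdio.h>

typedef int status_t;                     /* stand-in for NTSTATUS */

struct async_info {
        void *c_req;                      /* pending wire request, if any */
};

struct async_info_map {
        struct async_info_map *next;
        status_t (*fn)(struct async_info *, void *, void *, status_t);
        void *parms1;
        void *parms2;
        struct async_info *async;
};

/* rough equivalent of sync_chain_handler(): run every handler, passing
   the status along, and mark the reply as consumed for later stages */
static status_t run_chain(struct async_info_map *map, status_t status)
{
        while (map) {
                if (map->fn)
                        status = map->fn(map->async, map->parms1,
                                         map->parms2, status);
                map = map->next;
                if (map && map->async)
                        map->async->c_req = NULL;
        }
        return status;
}

/* first handler: actually receives/unpacks the reply */
static status_t unpack_reply(struct async_info *a, void *io1, void *io2,
                             status_t s)
{
        if (a->c_req)
                printf("unpack_reply: receiving \"%s\"\n",
                       (const char *)a->c_req);
        return s;
}

/* second handler: maps the unpacked result back to the caller's io */
static status_t map_result(struct async_info *a, void *io1, void *io2,
                           status_t s)
{
        printf("map_result: reply %s consumed by an earlier handler\n",
               a->c_req ? "NOT yet" : "already");
        return s;
}

int main(void)
{
        char reply[] = "raw ntioctl reply";
        struct async_info a1 = { reply };
        struct async_info a2 = { reply };
        struct async_info_map second = { 0, map_result, 0, 0, &a2 };
        struct async_info_map first  = { &second, unpack_reply, 0, 0, &a1 };

        return run_chain(&first, 0);
}

Clearing c_req between stages is what makes it safe to build the chain before the chain manager is installed: each stage can be written without caring whether an earlier stage has already received the reply.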