/*
 * Unix SMB/CIFS implementation.
 * Support for OneFS system interfaces.
 *
 * Copyright (C) Tim Prouty, 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "includes.h"
#include "onefs.h"
#include "onefs_config.h"
#include "oplock_onefs.h"

#include <ifs/ifs_syscalls.h>
#include <isi_acl/isi_acl_util.h>
#include <sys/isi_acl.h>
/**
 * Initialize the sm_lock struct before passing it to ifs_createfile.
 */
static void smlock_init(connection_struct *conn, struct sm_lock *sml,
			bool isexe, uint32_t access_mask,
			uint32_t share_access, uint32_t create_options)
{
	sml->sm_type.doc = false;
	sml->sm_type.isexe = isexe;
	sml->sm_type.statonly = is_stat_open(access_mask);
	sml->sm_type.access_mask = access_mask;
	sml->sm_type.share_access = share_access;

	/*
	 * private_options was previously used for DENY_DOS/DENY_FCB checks in
	 * the kernel, but these are now properly handled by fcb_or_dos_open.
	 * In these cases, ifs_createfile will return a sharing violation,
	 * which gives fcb_or_dos_open the chance to open a duplicate file
	 * handle.
	 */
	sml->sm_type.private_options = 0;

	/* 1 second delay is handled in onefs_open.c by deferring the open */
	sml->sm_timeout = timeval_set(0, 0);
}
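
/*
 * Note: the zero sm_timeout presumably tells ifs_createfile not to block in
 * the kernel waiting for a conflicting share mode to clear; per the comment
 * above, the CIFS one-second share-mode delay is instead implemented in
 * onefs_open.c by deferring the open.
 */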

static void smlock_dump(int debuglevel, const struct sm_lock *sml)
{
	if (sml == NULL) {
		DEBUG(debuglevel, ("sml == NULL\n"));
		return;
	}
63 ("smlock: doc=%s, isexec=%s, statonly=%s, access_mask=0x%x, "
64 "share_access=0x%x, private_options=0x%x timeout=%d/%d\n",
65 sml
->sm_type
.doc
? "True" : "False",
66 sml
->sm_type
.isexe
? "True" : "False",
67 sml
->sm_type
.statonly
? "True" : "False",
68 sml
->sm_type
.access_mask
,
69 sml
->sm_type
.share_access
,
70 sml
->sm_type
.private_options
,
71 (int)sml
->sm_timeout
.tv_sec
,
72 (int)sml
->sm_timeout
.tv_usec
));

/**
 * External interface to ifs_createfile
 */
int onefs_sys_create_file(connection_struct *conn,
			  int base_fd,
			  const char *path,
			  uint32_t access_mask,
			  uint32_t open_access_mask,
			  uint32_t share_access,
			  uint32_t create_options,
			  int flags,
			  mode_t mode,
			  int oplock_request,
			  uint64_t id,
			  struct security_descriptor *sd,
			  uint32_t dos_flags,
			  int *granted_oplock)
{
	struct sm_lock sml, *psml = NULL;
	enum oplock_type onefs_oplock;
	enum oplock_type onefs_granted_oplock = OPLOCK_NONE;
	struct ifs_security_descriptor ifs_sd = {}, *pifs_sd = NULL;
	uint32_t sec_info_effective = 0;
	int ret_fd = -1;
	uint32_t onefs_dos_attributes;
	struct ifs_createfile_flags cf_flags = CF_FLAGS_NONE;
	char *mapped_name = NULL;
	NTSTATUS result;

	START_PROFILE(syscall_createfile);

	/* Translate the name to UNIX before calling ifs_createfile */
	mapped_name = talloc_strdup(talloc_tos(), path);
	if (mapped_name == NULL) {
		errno = ENOMEM;
		goto out;
	}

	result = SMB_VFS_TRANSLATE_NAME(conn, &mapped_name,
					vfs_translate_to_unix);
	if (!NT_STATUS_IS_OK(result)) {
		goto out;
	}

	/* Setup security descriptor and get secinfo. */
	if (sd != NULL) {
		NTSTATUS status;
		uint32_t sec_info_sent = 0;

		sec_info_sent = (get_sec_info(sd) & IFS_SEC_INFO_KNOWN_MASK);

		status = onefs_samba_sd_to_sd(sec_info_sent, sd, &ifs_sd,
					      SNUM(conn), &sec_info_effective);

		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(1, ("SD initialization failure: %s\n",
				  nt_errstr(status)));
			errno = EINVAL;
			goto out;
		}

		pifs_sd = &ifs_sd;
	}

	/* Stripping off private bits will be done for us. */
	onefs_oplock = onefs_samba_oplock_to_oplock(oplock_request);

	if (!lp_oplocks(SNUM(conn))) {
		SMB_ASSERT(onefs_oplock == OPLOCK_NONE);
	}

	/* Convert samba dos flags to UF_DOS_* attributes. */
	onefs_dos_attributes = dos_attributes_to_stat_dos_flags(dos_flags);

	/*
	 * Deal with kernel creating Default ACLs. (Isilon bug 47447.)
	 *
	 * 1) "nt acl support = no", default_acl = no
	 * 2) "inherit permissions = yes", default_acl = no
	 */
	if (lp_nt_acl_support(SNUM(conn)) && !lp_inherit_perms(SNUM(conn)))
		cf_flags = cf_flags_or(cf_flags, CF_FLAGS_DEFAULT_ACL);
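
	/*
	 * For example, with the stock smb.conf defaults
	 *   nt acl support = yes
	 *   inherit permissions = no
	 * the condition above holds and CF_FLAGS_DEFAULT_ACL is passed, i.e.
	 * the kernel is allowed to create the default ACL; either of the two
	 * settings listed in the comment disables it.
	 */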

	/*
	 * Some customer workflows require the execute bit to be ignored.
	 */
	if (lp_parm_bool(SNUM(conn), PARM_ONEFS_TYPE,
			 PARM_ALLOW_EXECUTE_ALWAYS,
			 PARM_ALLOW_EXECUTE_ALWAYS_DEFAULT) &&
	    (open_access_mask & FILE_EXECUTE)) {

		DEBUG(3, ("Stripping execute bit from %s: (0x%x)\n",
			  mapped_name, open_access_mask));

		open_access_mask &= ~FILE_EXECUTE;

		/*
		 * Add READ_DATA, so we're not left with desired_access=0. An
		 * execute call should imply the client will read the data.
		 */
		open_access_mask |= FILE_READ_DATA;

		DEBUGADD(3, ("New stripped access mask: 0x%x\n",
			     open_access_mask));
	}
181 DEBUG(10,("onefs_sys_create_file: base_fd = %d, fname = %s "
182 "open_access_mask = 0x%x, flags = 0x%x, mode = 0%o, "
183 "desired_oplock = %s, id = 0x%x, secinfo = 0x%x, sd = %p, "
184 "dos_attributes = 0x%x, path = %s, "
185 "default_acl=%s\n", base_fd
, mapped_name
,
186 (unsigned int)open_access_mask
,
189 onefs_oplock_str(onefs_oplock
),
191 sec_info_effective
, sd
,
192 (unsigned int)onefs_dos_attributes
, mapped_name
,
193 cf_flags_and_bool(cf_flags
, CF_FLAGS_DEFAULT_ACL
) ?

	/* Initialize smlock struct for files/dirs but not internal opens */
	if (!(oplock_request & INTERNAL_OPEN_ONLY)) {
		smlock_init(conn, &sml, is_executable(mapped_name),
			    access_mask, share_access, create_options);
		psml = &sml;
	}

	smlock_dump(10, psml);

	ret_fd = ifs_createfile(base_fd, mapped_name,
	    (enum ifs_ace_rights)open_access_mask, flags & ~O_ACCMODE, mode,
	    onefs_oplock, id, psml, sec_info_effective, pifs_sd,
	    onefs_dos_attributes, cf_flags, &onefs_granted_oplock);

	DEBUG(10,("onefs_sys_create_file(%s): ret_fd = %d, "
		  "onefs_granted_oplock = %s\n",
		  ret_fd < 0 ? strerror(errno) : "success", ret_fd,
		  onefs_oplock_str(onefs_granted_oplock)));

	if (granted_oplock) {
		*granted_oplock =
		    onefs_oplock_to_samba_oplock(onefs_granted_oplock);
	}

 out:
	END_PROFILE(syscall_createfile);
	aclu_free_sd(pifs_sd, false);
	TALLOC_FREE(mapped_name);

	return ret_fd;
}
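
/*
 * Illustrative call shape only (not taken verbatim from onefs_open.c): the
 * open path passes both the client's access_mask (used for the share-mode
 * check) and the possibly widened open_access_mask (what the fd is actually
 * opened with), and receives the granted Samba-level oplock through the out
 * parameter:
 *
 *	int granted = NO_OPLOCK;
 *	int fd = onefs_sys_create_file(conn, -1, fname, access_mask,
 *				       open_access_mask, share_access,
 *				       create_options, flags, unx_mode,
 *				       oplock_request, id, sd, dos_flags,
 *				       &granted);
 *
 * On failure fd is -1 with errno set by ifs_createfile; the variable names
 * above are placeholders.
 */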

/**
 * FreeBSD based sendfile implementation that allows for atomic semantics.
 */
static ssize_t onefs_sys_do_sendfile(int tofd, int fromfd,
				     const DATA_BLOB *header,
				     SMB_OFF_T offset, size_t count,
				     bool atomic)
{
	size_t total = count;
	struct sf_hdtr hdr;
	struct iovec hdtrl;
	size_t hdr_len = 0;
	off_t nwritten;
	int ret;
	int flags = 0;

	if (atomic) {
		flags = SF_ATOMIC;
	}

	hdr.headers = &hdtrl;
	hdr.hdr_cnt = 1;
	hdr.trailers = NULL;
	hdr.trl_cnt = 0;

	/* Set up the header iovec. */
	if (header) {
		hdtrl.iov_base = (void *)header->data;
		hdtrl.iov_len = hdr_len = header->length;
	} else {
		hdtrl.iov_base = NULL;
		hdtrl.iov_len = 0;
	}

	while (total + hdtrl.iov_len) {

		/*
		 * FreeBSD sendfile returns 0 on success, -1 on error.
		 * Remember, the tofd and fromfd are reversed..... :-).
		 * nwritten includes the header data sent.
		 */
		do {
			ret = sendfile(fromfd, tofd, offset, total, &hdr,
				       &nwritten, flags);
		} while (ret == -1 && errno == EINTR);

		/* On error we're done. */
		if (ret == -1) {
			return -1;
		}

		/*
		 * If this was an ATOMIC sendfile, a zero nwritten doesn't
		 * necessarily indicate an error. It could mean count was
		 * larger than what sendfile can handle atomically (usually
		 * 64K) or that there was a short read due to the file being
		 * truncated.
		 */
		if (nwritten == 0) {
			return atomic ? 0 : -1;
		}

		/*
		 * An atomic sendfile should never send partial data!
		 */
		if (atomic && nwritten != total + hdtrl.iov_len) {
			DEBUG(0,("Atomic sendfile() sent partial data: "
				 "%llu of %d\n",
				 (unsigned long long)nwritten,
				 (int)(total + hdtrl.iov_len)));
			return -1;
		}

		/*
		 * If this was a short (signal interrupted) write we may need
		 * to subtract it from the header data, or null out the header
		 * data altogether if we wrote more than hdtrl.iov_len bytes.
		 * We change nwritten to be the number of file bytes written.
		 */
		if (hdtrl.iov_base && hdtrl.iov_len) {
			if (nwritten >= hdtrl.iov_len) {
				nwritten -= hdtrl.iov_len;
				hdtrl.iov_base = NULL;
				hdtrl.iov_len = 0;
			} else {
				hdtrl.iov_base =
				    (void *)((caddr_t)hdtrl.iov_base +
					     nwritten);
				hdtrl.iov_len -= nwritten;
				nwritten = 0;
			}
		}

		total -= nwritten;
		offset += nwritten;
	}

	return count + hdr_len;
}
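
/*
 * Note: on full success onefs_sys_do_sendfile returns count + hdr_len, i.e.
 * the header bytes are included in the returned byte count.  The short-read
 * check in onefs_sys_sendfile below compares against count + header->length
 * accordingly.
 */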

/**
 * Handles the subtleties of using sendfile with CIFS.
 */
ssize_t onefs_sys_sendfile(connection_struct *conn, int tofd, int fromfd,
			   const DATA_BLOB *header, SMB_OFF_T offset,
			   size_t count)
{
	bool atomic = false;
	ssize_t ret = 0;

	START_PROFILE_BYTES(syscall_sendfile, count);

	if (lp_parm_bool(SNUM(conn), PARM_ONEFS_TYPE,
			 PARM_ATOMIC_SENDFILE,
			 PARM_ATOMIC_SENDFILE_DEFAULT)) {
		atomic = true;
	}

	/* Try the sendfile */
	ret = onefs_sys_do_sendfile(tofd, fromfd, header, offset, count,
				    atomic);

	/* If the sendfile wasn't atomic, we're done. */
	if (!atomic) {
		DEBUG(10, ("non-atomic sendfile read %lu bytes\n",
			   (unsigned long)ret));
		END_PROFILE(syscall_sendfile);
		return ret;
	}

	/*
	 * Atomic sendfile takes care to not write anything to the socket
	 * until all of the requested bytes have been read from the file.
	 * There are two atomic cases that need to be handled.
	 *
	 *  1. The file was truncated, causing less data to be read than was
	 *     requested. In this case, we return back to the caller to
	 *     indicate 0 bytes were written to the socket. This should
	 *     prompt the caller to fall back to the standard read path: read
	 *     the data, create a header that indicates how many bytes were
	 *     actually read, and send the header/data back to the client.
	 *
	 *     This saves us from the standard sendfile behavior of sending a
	 *     header promising more data than will actually be sent. The
	 *     only two options there are to close the socket and kill the
	 *     client connection, or write a bunch of 0s. Closing the client
	 *     connection is bad because there could actually be multiple
	 *     sessions multiplexed from the same client that are all dropped
	 *     because of a truncate. Writing the remaining data as 0s also
	 *     isn't good, because the client will have an incorrect version
	 *     of the file. If the file is written back to the server, the 0s
	 *     will be written back. Fortunately, atomic sendfile allows us
	 *     to avoid making this choice in most cases.
	 *
	 *  2. One downside of atomic sendfile is that there is a limit on
	 *     the number of bytes that can be sent atomically. The kernel
	 *     has a limited amount of mbuf space that it can read file data
	 *     into without exhausting the system's mbufs, so a buffer of
	 *     length xfsize is used. The xfsize at the time of writing this
	 *     is 64K. xfsize bytes are read from the file, and subsequently
	 *     written to the socket. This makes it impossible to do the
	 *     sendfile atomically for a byte count > xfsize.
	 *
	 *     To cope with large requests, atomic sendfile returns -1 with
	 *     errno set to E2BIG. Since Windows maxes out at 64K writes,
	 *     this is currently only a concern with non-Windows clients.
	 *     POSIX extensions allow the full 24-bit bytecount field to be
	 *     used in ReadAndX, and clients such as smbclient and the Linux
	 *     cifs client can request up to 16MB reads! There are a few
	 *     options for handling large sendfile requests.
	 *
	 *      a. Fall back to the standard read path. This is unacceptable
	 *         because it would require prohibitively large mallocs.
	 *
	 *      b. Fall back to using samba's fake_send_file, which emulates
	 *         the kernel sendfile in userspace. This still has the same
	 *         problem of sending the header before all of the data has
	 *         been read, so it doesn't buy us anything, and has worse
	 *         performance than the kernel's zero-copy sendfile.
	 *
	 *      c. Use the non-atomic sendfile syscall to attempt a zero-copy
	 *         read, and hope that there isn't a short read due to
	 *         truncation. In the case of a short read, there are two
	 *         options:
	 *
	 *         1. Kill the client connection
	 *
	 *         2. Write zeros to the socket for the remaining bytes
	 *            promised in the header.
	 *
	 *         It is safer from a data corruption perspective to kill the
	 *         client connection, so this is our default behavior, but if
	 *         this causes problems it can be configured to write zeros
	 *         instead.
	 */

	/* Handle case 1: short read -> truncated file. */
	if (ret == 0) {
		END_PROFILE(syscall_sendfile);
		return ret;
	}

	/* Handle case 2: large read. */
	if (ret == -1 && errno == E2BIG) {

		if (!lp_parm_bool(SNUM(conn), PARM_ONEFS_TYPE,
				  PARM_SENDFILE_LARGE_READS,
				  PARM_SENDFILE_LARGE_READS_DEFAULT)) {
			DEBUG(3, ("Not attempting non-atomic large sendfile: "
				  "%lu bytes\n", count));
			END_PROFILE(syscall_sendfile);
			return 0;
		}

		if (count < 0x10000) {
			DEBUG(0, ("Count < 2^16 and E2BIG was returned! %lu\n",
				  count));
		}
442 DEBUG(10, ("attempting non-atomic large sendfile: %lu bytes\n",
445 /* Try a non-atomic sendfile. */
446 ret
= onefs_sys_do_sendfile(tofd
, fromfd
, header
, offset
,
448 /* Real error: kill the client connection. */
450 DEBUG(1, ("error on non-atomic large sendfile "
451 "(%lu bytes): %s\n", count
,
453 END_PROFILE(syscall_sendfile
);
457 /* Short read: kill the client connection. */
458 if (ret
!= count
+ header
->length
) {
459 DEBUG(1, ("short read on non-atomic large sendfile "
460 "(%lu of %lu bytes): %s\n", ret
, count
,
464 * Returning ret here would cause us to drop into the
465 * codepath that calls sendfile_short_send, which
466 * sends the client a bunch of zeros instead.
467 * Returning -1 kills the connection.
469 if (lp_parm_bool(SNUM(conn
), PARM_ONEFS_TYPE
,
471 PARM_SENDFILE_SAFE_DEFAULT
)) {
472 END_PROFILE(syscall_sendfile
);
476 END_PROFILE(syscall_sendfile
);
480 DEBUG(10, ("non-atomic large sendfile successful\n"));

	/* There was an error in the atomic sendfile. */
	if (ret == -1) {
		DEBUG(1, ("error on %s sendfile (%lu bytes): %s\n",
			  atomic ? "atomic" : "non-atomic",
			  count, strerror(errno)));
	}

	END_PROFILE(syscall_sendfile);
	return ret;
}

/**
 * Only talloc the spill buffer once (reallocing when necessary).
 */
static char *get_spill_buffer(size_t new_count)
{
	static int cur_count = 0;
	static char *spill_buffer = NULL;

	/* If a sufficiently sized buffer exists, just return. */
	if (new_count <= cur_count) {
		SMB_ASSERT(spill_buffer);
		return spill_buffer;
	}

	/* Allocate the first time. */
	if (cur_count == 0) {
		SMB_ASSERT(!spill_buffer);
		spill_buffer = talloc_array(NULL, char, new_count);
		if (spill_buffer) {
			cur_count = new_count;
		}
		return spill_buffer;
	}

	/* A buffer exists, but it's not big enough, so realloc. */
	SMB_ASSERT(spill_buffer);
	spill_buffer = talloc_realloc(NULL, spill_buffer, char, new_count);
	if (spill_buffer) {
		cur_count = new_count;
	}
	return spill_buffer;
}
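
/*
 * Design note: the spill buffer is a static, process-lifetime talloc
 * allocation that only ever grows to the largest count seen; callers get a
 * borrowed pointer and must not talloc_free() it.
 */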

/**
 * recvfile does zero-copy writes given an fd to write to, and a socket with
 * some data to write. If recvfile reads more than it is able to write, it
 * spills the data into a buffer. After first reading any additional data
 * from the socket into the buffer, the spill buffer is then written with a
 * standard pwrite.
 */
ssize_t onefs_sys_recvfile(int fromfd, int tofd, SMB_OFF_T offset,
			   size_t count)
{
	char *spill_buffer = NULL;
	bool socket_drained = false;
	int ret;
	off_t total_rbytes = 0;
	off_t total_wbytes = 0;
	off_t rbytes;
	off_t wbytes;

	START_PROFILE_BYTES(syscall_recvfile, count);

	DEBUG(10,("onefs_recvfile: from = %d, to = %d, offset=%llu, count = "
		  "%lu\n", fromfd, tofd, offset, count));

	if (count == 0) {
		END_PROFILE(syscall_recvfile);
		return 0;
	}

	/*
	 * Set up a buffer for recvfile to spill data that has been read
	 * from the socket but not written.
	 */
	spill_buffer = get_spill_buffer(count);
	if (spill_buffer == NULL) {
		ret = -1;
		goto out;
	}

	/*
	 * Keep trying recvfile until:
	 *  - There is no data left to read on the socket, or
	 *  - bytes read != bytes written, or
	 *  - An error is returned that isn't EINTR/EAGAIN
	 */
	do {
		/* Keep track of bytes read/written for recvfile */
		rbytes = 0;
		wbytes = 0;

		DEBUG(10, ("calling recvfile loop, offset + total_wbytes = "
			   "%llu, count - total_rbytes = %llu\n",
			   offset + total_wbytes, count - total_rbytes));

		ret = recvfile(tofd, fromfd, offset + total_wbytes,
			       count - total_wbytes, &rbytes, &wbytes, 0,
			       spill_buffer);

		DEBUG(10, ("recvfile ret = %d, errno = %d, rbytes = %llu, "
			   "wbytes = %llu\n", ret, ret >= 0 ? 0 : errno,
			   rbytes, wbytes));

		/* Update our progress so far */
		total_rbytes += rbytes;
		total_wbytes += wbytes;

	} while ((count - total_rbytes) && (rbytes == wbytes) &&
		 (ret == -1 && (errno == EINTR || errno == EAGAIN)));
595 DEBUG(10, ("total_rbytes = %llu, total_wbytes = %llu\n",
596 total_rbytes
, total_wbytes
));
598 /* Log if recvfile didn't write everything it read. */
599 if (total_rbytes
!= total_wbytes
) {
600 DEBUG(3, ("partial recvfile: total_rbytes=%llu but "
601 "total_wbytes=%llu, diff = %llu\n", total_rbytes
,
602 total_wbytes
, total_rbytes
- total_wbytes
));
603 SMB_ASSERT(total_rbytes
> total_wbytes
);
607 * If there is still data on the socket, read it off.
609 while (total_rbytes
< count
) {
611 DEBUG(3, ("shallow recvfile (%s), reading %llu\n",
612 strerror(errno
), count
- total_rbytes
));
615 * Read the remaining data into the spill buffer. recvfile
616 * may already have some data in the spill buffer, so start
617 * filling the buffer at total_rbytes - total_wbytes.
619 ret
= sys_read(fromfd
,
620 spill_buffer
+ (total_rbytes
- total_wbytes
),
621 count
- total_rbytes
);
625 DEBUG(0, ("shallow recvfile read: EOF\n"));
627 DEBUG(0, ("shallow recvfile read failed: %s\n",
630 /* Socket is dead, so treat as if it were drained. */
631 socket_drained
= true;
635 /* Data was read so update the rbytes */
639 if (total_rbytes
!= count
) {
640 smb_panic("Unread recvfile data still on the socket!");

	/*
	 * Now write any spilled data + the extra data read off the socket.
	 */
	while (total_wbytes < count) {

		DEBUG(3, ("partial recvfile, writing %llu\n",
			  count - total_wbytes));

		ret = sys_pwrite(tofd, spill_buffer, count - total_wbytes,
				 offset + total_wbytes);

		if (ret == -1) {
			DEBUG(0, ("partial recvfile write failed: %s\n",
				  strerror(errno)));
			goto out;
		}

		/* Data was written so update the wbytes */
		total_wbytes += ret;
	}

	/* Success. */
	ret = total_wbytes;

 out:
	END_PROFILE(syscall_recvfile);

	/* Make sure we always try to drain the socket. */
	if (!socket_drained && count - total_rbytes) {
		int saved_errno = errno;

		if (drain_socket(fromfd, count - total_rbytes) !=
		    count - total_rbytes) {
			/* Socket is dead! */
			DEBUG(0, ("drain socket failed: %d\n", errno));
		}
		errno = saved_errno;
	}

	return ret;
}
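
/*
 * Note: even when the write side fails, the drain above still consumes any
 * unread request bytes off the socket (unless the socket itself is dead), so
 * the remaining payload does not linger and desynchronize later SMB requests.
 */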

void init_stat_ex_from_onefs_stat(struct stat_ex *dst, const struct stat *src)
{
	dst->st_ex_dev = src->st_dev;
	dst->st_ex_ino = src->st_ino;
	dst->st_ex_mode = src->st_mode;
	dst->st_ex_nlink = src->st_nlink;
	dst->st_ex_uid = src->st_uid;
	dst->st_ex_gid = src->st_gid;
	dst->st_ex_rdev = src->st_rdev;
	dst->st_ex_size = src->st_size;
	dst->st_ex_atime = src->st_atimespec;
	dst->st_ex_mtime = src->st_mtimespec;
	dst->st_ex_ctime = src->st_ctimespec;
	dst->st_ex_btime = src->st_birthtimespec;
	dst->st_ex_blksize = src->st_blksize;
	dst->st_ex_blocks = src->st_blocks;

	dst->st_ex_flags = src->st_flags;

	dst->vfs_private = src->st_snapid;
}
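
/*
 * Note: OneFS's BSD-derived struct stat provides st_birthtimespec and
 * st_flags directly, so no birth time needs to be synthesized here; the
 * OneFS-specific snapshot id (st_snapid) is stashed in the stat_ex
 * vfs_private field, presumably for use elsewhere in the module.
 */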

int onefs_sys_stat(const char *fname, SMB_STRUCT_STAT *sbuf)
{
	int ret;
	struct stat onefs_sbuf;

	ret = stat(fname, &onefs_sbuf);

	if (ret == 0) {
		/* we always want directories to appear zero size */
		if (S_ISDIR(onefs_sbuf.st_mode)) {
			onefs_sbuf.st_size = 0;
		}
		init_stat_ex_from_onefs_stat(sbuf, &onefs_sbuf);
	}

	return ret;
}

int onefs_sys_fstat(int fd, SMB_STRUCT_STAT *sbuf)
{
	int ret;
	struct stat onefs_sbuf;

	ret = fstat(fd, &onefs_sbuf);

	if (ret == 0) {
		/* we always want directories to appear zero size */
		if (S_ISDIR(onefs_sbuf.st_mode)) {
			onefs_sbuf.st_size = 0;
		}
		init_stat_ex_from_onefs_stat(sbuf, &onefs_sbuf);
	}

	return ret;
}

int onefs_sys_fstat_at(int base_fd, const char *fname, SMB_STRUCT_STAT *sbuf,
		       int flags)
{
	int ret;
	struct stat onefs_sbuf;

	ret = enc_fstatat(base_fd, fname, ENC_DEFAULT, &onefs_sbuf, flags);

	if (ret == 0) {
		/* we always want directories to appear zero size */
		if (S_ISDIR(onefs_sbuf.st_mode)) {
			onefs_sbuf.st_size = 0;
		}
		init_stat_ex_from_onefs_stat(sbuf, &onefs_sbuf);
	}

	return ret;
}

int onefs_sys_lstat(const char *fname, SMB_STRUCT_STAT *sbuf)
{
	int ret;
	struct stat onefs_sbuf;

	ret = lstat(fname, &onefs_sbuf);

	if (ret == 0) {
		/* we always want directories to appear zero size */
		if (S_ISDIR(onefs_sbuf.st_mode)) {
			onefs_sbuf.st_size = 0;
		}
		init_stat_ex_from_onefs_stat(sbuf, &onefs_sbuf);
	}

	return ret;
}