/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_GetXid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cFYI(1, "warning: more than 65000 requests active");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}
void
_FreeXid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
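
/*
 * A rough sketch of the typical caller pattern (the GetXid()/FreeXid()
 * wrappers used by the vfs entry points, declared elsewhere, e.g. in
 * cifsproto.h, end up calling the two helpers above):
 *
 *	xid = GetXid();
 *	rc = <one or more SMB operations tagged with this xid>;
 *	FreeXid(xid);
 */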
struct cifsSesInfo *
sesInfoAlloc(void)
{
	struct cifsSesInfo *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifsSesInfo), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
	}
	return ret_buf;
}
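
/*
 * Release a session structure.  The cached password is zeroed before being
 * freed so that it does not linger in freed memory.
 */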
void
sesInfoFree(struct cifsSesInfo *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to sesInfoFree");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free->domainName);
	kfree(buf_to_free);
}
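
/*
 * Allocate and initialize a tree connection (share) structure.  The caller
 * holds the initial reference (tc_count).
 */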
struct cifsTconInfo *
tconInfoAlloc(void)
{
	struct cifsTconInfo *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifsTconInfo), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
#ifdef CONFIG_CIFS_STATS
		spin_lock_init(&ret_buf->stat_lock);
#endif
	}
	return ret_buf;
}
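
/*
 * Release a tree connection structure, scrubbing any cached share password
 * before freeing it.
 */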
void
tconInfoFree(struct cifsTconInfo *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to tconInfoFree");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free);
}
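
/*
 * Request buffers come from two mempools: cifs_req_poolp for full sized
 * SMB buffers (allocated here) and cifs_sm_req_poolp for small requests
 * (see cifs_small_buf_get() below).  Allocations use GFP_NOFS to avoid
 * recursing back into the filesystem during memory reclaim.
 */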
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/* We could use negotiated size instead of max_msgsize -
	   but it may be more efficient to always alloc same size
	   albeit slightly larger than necessary and maxbuffersize
	   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	if (ret_buf) {
		memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
		atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}

	return ret_buf;
}
void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cFYI(1, "Null buffer passed to cifs_buf_release");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}
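
/*
 * Same idea as cifs_buf_get() but drawn from the small-buffer mempool, for
 * requests small enough not to need a full sized buffer.
 */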
struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/* We could use negotiated size instead of max_msgsize -
	   but it may be more efficient to always alloc same size
	   albeit slightly larger than necessary and maxbuffersize
	   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	if (ret_buf) {
		/* No need to clear memory here, cleared in header assemble */
		/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
		atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}
	return ret_buf;
}
void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to cifs_small_buf_release");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}
/*
	Find a free multiplex id (SMB mid).  Otherwise there could be
	mid collisions which might cause problems, demultiplexing the
	wrong response to this request.  Multiplex ids could collide if
	one of a series of requests takes much longer than the others, or
	if a very large number of long lived requests (byte range
	locks or FindNotify requests) are pending.  No more than
	64K-1 requests can be outstanding at one time.  If no
	mids are available, return zero.  A future optimization
	could make the combination of mids and uid the key we use
	to demultiplex on (rather than mid alone).
	In addition to the above check, the cifs demultiplex
	code already uses the command code as a secondary
	check of the frame, and if signing is negotiated the
	response would be discarded if the mid were the same
	but the signature was wrong.  Since the mid is not put in the
	pending queue until later (when it is about to be dispatched)
	we do have to limit the number of outstanding requests
	to somewhat less than 64K-1, although it is hard to imagine
	so many threads being in the vfs at one time.
*/
__u16 GetNextMid(struct TCP_Server_Info *server)
{
	__u16 mid = 0;
	__u16 last_mid;
	int collision;

	if (server == NULL)
		return mid;

	spin_lock(&GlobalMid_Lock);
	last_mid = server->CurrentMid; /* we do not want to loop forever */
	server->CurrentMid++;
	/* This nested loop looks more expensive than it is.
	   In practice the list of pending requests is short,
	   fewer than 50, and the mids are likely to be unique
	   on the first pass through the loop unless some request
	   takes longer than the 64 thousand requests before it
	   (and it would also have to have been a request that
	   did not time out) */
	while (server->CurrentMid != last_mid) {
		struct list_head *tmp;
		struct mid_q_entry *mid_entry;

		collision = 0;
		if (server->CurrentMid == 0)
			server->CurrentMid++;

		list_for_each(tmp, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);

			if ((mid_entry->mid == server->CurrentMid) &&
			    (mid_entry->midState == MID_REQUEST_SUBMITTED)) {
				/* This mid is in use, try a different one */
				collision = 1;
				break;
			}
		}
		if (collision == 0) {
			mid = server->CurrentMid;
			break;
		}
		server->CurrentMid++;
	}
	spin_unlock(&GlobalMid_Lock);
	return mid;
}
/* NB: MID can not be set if treeCon not passed in; in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifsTconInfo *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	struct list_head *temp_item;
	struct cifsSesInfo *ses;
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length =
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */ ;
	/* Note that this is the only network field that has to be converted
	   to big endian and it is done just before we send it */

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = GetNextMid(treeCon->ses->server);
			if (multiuser_mount != 0) {
		/* For the multiuser case, there are a few technically
		   possible mechanisms to match the local linux user (uid)
		   to a valid remote smb user (smb_uid):
			1) Query Winbind (or other local pam/nss daemon)
			   for userid/password/logon_domain or credential
			2) Query Winbind for uid to sid to username mapping
			   and see if we have a matching password for an
			   existing session for that user, perhaps getting
			   the password by adding a new pam_cifs module that
			   stores passwords so that the cifs vfs can get at
			   them for all logged on users
			3) (Which is the mechanism we have chosen)
			   Search through sessions to the same server for a
			   match on the uid that was passed in on mount
			   with the current process's uid (or euid?) and use
			   that smb uid.  If no existing smb session for
			   that uid is found, use the default smb session, ie
			   the smb session for the volume mounted, which is
			   the same as would be used if the multiuser mount
			   flag were disabled. */

		/* BB Add support for establishing new tCon and SMB Session
		   with userid/password pairs found on the smb session
		   for other target tcp/ip addresses BB */
				if (current_fsuid() != treeCon->ses->linux_uid) {
					cFYI(1, "Multiuser mode and UID "
						 "did not match tcon uid");
					spin_lock(&cifs_tcp_ses_lock);
					list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
						ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
						if (ses->linux_uid == current_fsuid()) {
							if (ses->server == treeCon->ses->server) {
								cFYI(1, "found matching uid substitute right smb_uid");
								buffer->Uid = ses->Suid;
								break;
							} else {
								/* BB eventually call cifs_setup_session here */
								cFYI(1, "local UID found but no smb sess with this server exists");
							}
						}
					}
					spin_unlock(&cifs_tcp_ses_lock);
				}
			}
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->secMode &
			   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
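
/*
 * Basic sanity checks on a received buffer: the 0xFF 'S' 'M' 'B' signature,
 * the mid matching the request we sent, and the response flag being set
 * (the only request a server may legitimately send us is an oplock break
 * via SMB_COM_LOCKING_ANDX).  Returns 0 if the header looks valid, 1 if not.
 */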
static int
checkSMBhdr(struct smb_hdr *smb, __u16 mid)
{
	/* Make sure that this really is an SMB, that it is a response,
	   and that the message ids match */
	if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) &&
	    (mid == smb->Mid)) {
		if (smb->Flags & SMBFLG_RESPONSE)
			return 0;
		else {
			/* only one valid case where server sends us request */
			if (smb->Command == SMB_COM_LOCKING_ANDX)
				return 0;
			else
				cERROR(1, "Received Request not response");
		}
	} else { /* bad signature or mid */
		if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
			cERROR(1, "Bad protocol string signature header %x",
				*(unsigned int *) smb->Protocol);
		if (mid != smb->Mid)
			cERROR(1, "Mids do not match");
	}
	cERROR(1, "bad smb detected. The Mid=%d", smb->Mid);
	return 1;
}
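
/*
 * Validate a received SMB frame: length sanity versus the header size and
 * CIFSMaxBufSize, header checks via checkSMBhdr(), and a comparison of the
 * RFC1001 length against both the bytes actually read and the length
 * calculated from wct/bcc (with allowances for known server quirks noted
 * below).  Returns 0 if the frame is acceptable, non-zero if it is malformed.
 */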
int
checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
{
	__u32 len = smb->smb_buf_length;
	__u32 clc_len;  /* calculated length */
	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);

	if (length < 2 + sizeof(struct smb_hdr)) {
		if ((length >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((length == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cERROR(1, "rcvd invalid byte count (bcc)");
		} else {
			cERROR(1, "Length less than smb header size");
		}
		return 1;
	}
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "smb length greater than MaxBufSize, mid=%d",
				   smb->Mid);
		return 1;
	}

	if (checkSMBhdr(smb, mid))
		return 1;
	clc_len = smbCalcSize_LE(smb);

	if (4 + len != length) {
		cERROR(1, "Length read does not match RFC1001 length %d",
			   len);
		return 1;
	}

	if (4 + len != clc_len) {
		/* check if bcc wrapped around for large read responses */
		if ((len > 64 * 1024) && (len > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cFYI(1, "Calculated size %d vs length %d mismatch for mid %d",
				clc_len, 4 + len, smb->Mid);
		/* Windows XP can return a few bytes too much, presumably
		   an illegal pad, at the end of byte range lock responses
		   so we allow for that three byte pad, as long as actual
		   received length is as long or longer than calculated length */
		/* We have now had to extend this more, since there is a
		   case in which it needs to be bigger still to handle a
		   malformed response to transact2 findfirst from WinXP when
		   access denied is returned and thus bcc and wct are zero
		   but server says length is 0x21 bytes too long, as if the
		   server forgot to reset the smb rfc1001 length when it reset
		   the wct and bcc to minimum size and dropped the t2 parms
		   and data */
		if ((4+len > clc_len) && (len <= clc_len + 512))
			return 0;
		else {
			cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d",
					len, smb->Mid);
			return 1;
		}
	}
	return 0;
}
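
/*
 * Decide whether an incoming frame is an oplock break or change notify
 * response.  Returns true when the frame is recognized as such (whether or
 * not a matching tcon/open file was found), false when it is an ordinary
 * response that should be matched against a pending request instead.
 */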
bool
is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
{
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifsSesInfo *ses;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cFYI(1, "Checking for oplock break or dnotify response");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cFYI(1, "dnotify on %s Action: 0x%x",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cFYI(1, "notify err 0x%d",
				pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cFYI(1, "invalid handle on oplock break");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cFYI(1, "oplock type 0x%d level 0x%d",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifsTconInfo, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->num_oplock_brks);
			spin_lock(&cifs_file_list_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->netfid)
					continue;

				cFYI(1, "file id match, oplock break");
				pCifsInode = CIFS_I(netfile->dentry->d_inode);

				cifs_set_oplock_level(pCifsInode,
						      pSMB->OplockLevel);
				/*
				 * cifs_oplock_break_put() can't be called
				 * from here.  Get reference after queueing
				 * succeeded.  cifs_oplock_break() will
				 * synchronize using cifs_file_list_lock.
				 */
				if (queue_work(system_nrt_wq,
					       &netfile->oplock_break))
					cifs_oplock_break_get(netfile);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&cifs_file_list_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&cifs_file_list_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cFYI(1, "No matching file for oplock break");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cFYI(1, "Can not process oplock break for non-existent connection");
	return true;
}
void
dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
{
	int i, j;
	char debug_line[17];
	unsigned char *buffer;

	if (traceSMB == 0)
		return;

	buffer = (unsigned char *) smb_buf;
	for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
		if (i % 8 == 0) {
			/* have reached the beginning of line */
			printk(KERN_DEBUG "| ");
			j = 0;
		}
		printk("%0#4x ", buffer[i]);
		debug_line[2 * j] = ' ';
		if (isprint(buffer[i]))
			debug_line[1 + (2 * j)] = buffer[i];
		else
			debug_line[1 + (2 * j)] = '_';

		if (i % 8 == 7) {
			/* reached end of line, time to print ascii */
			debug_line[16] = 0;
			printk(" | %s\n", debug_line);
		}
	}
	for (; j < 8; j++) {
		printk(" ");
		debug_line[2 * j] = ' ';
		debug_line[1 + (2 * j)] = ' ';
	}
	printk(" | %s\n", debug_line);
	return;
}
/* Convert 16 bit Unicode pathname to wire format from string in current code
   page.  Conversion may involve remapping the seven characters that are
   only legal in POSIX-like OSes (if they are present in the string).  Path
   names are little endian 16 bit Unicode on the wire */
int
cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
		 const struct nls_table *cp, int mapChars)
{
	int i, j, charlen;
	int len_remaining = maxlen;
	char src_char;
	__u16 temp;

	if (!mapChars)
		return cifs_strtoUCS(target, source, PATH_MAX, cp);

	for (i = 0, j = 0; i < maxlen; j++) {
		src_char = source[i];
		switch (src_char) {
		case 0:
			target[j] = 0;
			goto ctoUCS_out;
		case ':':
			target[j] = cpu_to_le16(UNI_COLON);
			break;
		case '*':
			target[j] = cpu_to_le16(UNI_ASTERIK);
			break;
		case '?':
			target[j] = cpu_to_le16(UNI_QUESTION);
			break;
		case '<':
			target[j] = cpu_to_le16(UNI_LESSTHAN);
			break;
		case '>':
			target[j] = cpu_to_le16(UNI_GRTRTHAN);
			break;
		case '|':
			target[j] = cpu_to_le16(UNI_PIPE);
			break;
		/* BB We can not handle remapping slash until
		   all the calls to build_path_from_dentry
		   are modified, as they use slash as separator BB */
		/* case '\\':
			target[j] = cpu_to_le16(UNI_SLASH);
			break;*/
		default:
			charlen = cp->char2uni(source+i,
				len_remaining, &temp);
			/* if no match, use question mark, which
			   at least in some cases serves as a wild card */
			if (charlen < 1) {
				target[j] = cpu_to_le16(0x003f);
				charlen = 1;
			} else
				target[j] = cpu_to_le16(temp);
			len_remaining -= charlen;
			/* character may take more than one byte in the
			   source string, but will take exactly two
			   bytes in the target string */
			i += charlen;
			continue;
		}
		i++; /* move to next char in source string */
		len_remaining--;
	}

ctoUCS_out:
	return i;
}
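
/*
 * Turn off use of server-provided inode numbers for this mount and log a
 * warning the first time this happens (the flag is cleared, so subsequent
 * calls are silent).
 */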
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cERROR(1, "Autodisabling the use of server inode numbers on "
			   "%s. This server doesn't seem to support them "
			   "properly. Hardlinks will not be recognized on this "
			   "mount. Consider mounting with the \"noserverino\" "
			   "option to silence this message.",
			   cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}
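
/*
 * Map the oplock level granted by the server onto the inode's client-side
 * caching flags: an exclusive oplock allows caching of both reads and
 * writes, a level II (read) oplock allows caching reads only, anything else
 * disables client caching.
 */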
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->clientCanCacheAll = true;
		cinode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p",
		     &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->clientCanCacheAll = false;
		cinode->clientCanCacheRead = true;
		cFYI(1, "Level II Oplock granted on inode %p",
		     &cinode->vfs_inode);
	} else {
		cinode->clientCanCacheAll = false;
		cinode->clientCanCacheRead = false;
	}
}