/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */
unsigned int
_GetXid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cFYI(1, "warning: more than 65000 requests active");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}
void
_FreeXid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
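/*
 * Allocate and minimally initialize a cifsSesInfo (SMB session) structure:
 * zeroed, marked CifsNew, with one reference held (ses_count) and its list
 * heads and session mutex initialized.  Returns NULL on allocation failure.
 */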
struct cifsSesInfo *
sesInfoAlloc(void)
{
	struct cifsSesInfo *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifsSesInfo), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
	}
	return ret_buf;
}
void
sesInfoFree(struct cifsSesInfo *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to sesInfoFree");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free->domainName);
	kfree(buf_to_free);
}
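/*
 * Allocate and minimally initialize a cifsTconInfo (tree connection)
 * structure: zeroed, marked CifsNew, with one reference held (tc_count)
 * and its open-file and tcon list heads initialized.  Returns NULL on
 * allocation failure.
 */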
struct cifsTconInfo *
tconInfoAlloc(void)
{
	struct cifsTconInfo *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifsTconInfo), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
#ifdef CONFIG_CIFS_STATS
		spin_lock_init(&ret_buf->stat_lock);
#endif
	}
	return ret_buf;
}
void
tconInfoFree(struct cifsTconInfo *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to tconInfoFree");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free);
}
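/*
 * SMB request buffers are drawn from two mempools: cifs_req_poolp hands
 * out full-sized frames via cifs_buf_get(), while cifs_sm_req_poolp hands
 * out small, header-sized buffers via cifs_small_buf_get().  Each getter
 * is paired with the matching *_release() helper below.
 */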
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/* We could use negotiated size instead of max_msgsize -
	   but it may be more efficient to always alloc same size
	   albeit slightly larger than necessary and maxbuffersize
	   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	if (ret_buf) {
		memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
		atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}

	return ret_buf;
}
void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cFYI(1, "Null buffer passed to cifs_buf_release");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}
struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/* We could use negotiated size instead of max_msgsize -
	   but it may be more efficient to always alloc same size
	   albeit slightly larger than necessary and maxbuffersize
	   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	if (ret_buf) {
		/* No need to clear memory here, cleared in header assemble */
		/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
		atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}

	return ret_buf;
}
void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to cifs_small_buf_release");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}
/*
	Find a free multiplex id (SMB mid). Otherwise there could be
	mid collisions which might cause problems, demultiplexing the
	wrong response to this request. Multiplex ids could collide if
	one of a series of requests takes much longer than the others, or
	if a very large number of long lived requests (byte range
	locks or FindNotify requests) are pending. No more than
	64K-1 requests can be outstanding at one time. If no
	mids are available, return zero. A future optimization
	could make the combination of mids and uid the key we use
	to demultiplex on (rather than mid alone).
	In addition to the above check, the cifs demultiplex
	code already used the command code as a secondary
	check of the frame and if signing is negotiated the
	response would be discarded if the mid were the same
	but the signature was wrong. Since the mid is not put in the
	pending queue until later (when it is about to be dispatched)
	we do have to limit the number of outstanding requests
	to somewhat less than 64K-1 although it is hard to imagine
	so many threads being in the vfs at one time.
*/
__u16 GetNextMid(struct TCP_Server_Info *server)
{
	__u16 mid = 0;
	__u16 last_mid;
	bool collision;

	spin_lock(&GlobalMid_Lock);
	last_mid = server->CurrentMid; /* we do not want to loop forever */
	server->CurrentMid++;
	/* This nested loop looks more expensive than it is.
	   In practice the list of pending requests is short,
	   fewer than 50, and the mids are likely to be unique
	   on the first pass through the loop unless some request
	   takes longer than the 64 thousand requests before it
	   (and it would also have to have been a request that
	   did not time out) */
	while (server->CurrentMid != last_mid) {
		struct mid_q_entry *mid_entry;
		unsigned int num_mids;

		collision = false;
		if (server->CurrentMid == 0)
			server->CurrentMid++;

		num_mids = 0;
		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
			++num_mids;
			if (mid_entry->mid == server->CurrentMid &&
			    mid_entry->midState == MID_REQUEST_SUBMITTED) {
				/* This mid is in use, try a different one */
				collision = true;
				break;
			}
		}

		/*
		 * if we have more than 32k mids in the list, then something
		 * is very wrong. Possibly a local user is trying to DoS the
		 * box by issuing long-running calls and SIGKILL'ing them. If
		 * we get to 2^16 mids then we're in big trouble as this
		 * function could loop forever.
		 *
		 * Go ahead and assign out the mid in this situation, but force
		 * an eventual reconnect to clean out the pending_mid_q.
		 */
		if (num_mids > 32768)
			server->tcpStatus = CifsNeedReconnect;

		if (!collision) {
			mid = server->CurrentMid;
			break;
		}
		server->CurrentMid++;
	}
	spin_unlock(&GlobalMid_Lock);
	return mid;
}
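/* Fill in the fixed portion of an outgoing SMB header: protocol signature,
   command, flags, pid, and (when a tree connection is supplied) the tid,
   uid and a freshly allocated mid, honoring the unicode, status32, DFS,
   case-insensitivity and signing settings negotiated for the session. */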
/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifsTconInfo *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	struct list_head *temp_item;
	struct cifsSesInfo *ses;
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length =
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */ ;
	/* Note that this is the only network field that has to be converted
	   to big endian and it is done just before we send it */

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = GetNextMid(treeCon->ses->server);
			if (multiuser_mount != 0) {
			/* For the multiuser case, there are a few obvious
			   technically possible mechanisms to match the local
			   linux user (uid) to a valid remote smb user (smb_uid):
				1) Query Winbind (or other local pam/nss daemon)
				   for userid/password/logon_domain or credential
				2) Query Winbind for uid to sid to username
				   mapping and see if we have a matching password
				   for an existing session for that user, perhaps
				   getting the password by adding a new pam_cifs
				   module that stores passwords so that the cifs
				   vfs can get at it for all logged on users
				3) (Which is the mechanism we have chosen)
				   Search through sessions to the same server for
				   a match on the uid that was passed in on mount
				   with the current process's uid (or euid?) and
				   use that smb uid. If no existing smb session
				   for that uid is found, use the default smb
				   session, ie the smb session for the volume
				   mounted, which is the same as would be used if
				   the multiuser mount flag were disabled. */

			/* BB Add support for establishing new tCon and SMB
			   Session with userid/password pairs found on the smb
			   session for other target tcp/ip addresses BB */
				if (current_fsuid() != treeCon->ses->linux_uid) {
					cFYI(1, "Multiuser mode and UID "
						 "did not match tcon uid");
					spin_lock(&cifs_tcp_ses_lock);
					list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
						ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
						if (ses->linux_uid == current_fsuid()) {
							if (ses->server == treeCon->ses->server) {
								cFYI(1, "found matching uid substitute right smb_uid");
								buffer->Uid = ses->Suid;
								break;
							} else {
								/* BB eventually call cifs_setup_session here */
								cFYI(1, "local UID found but no smb sess with this server exists");
							}
						}
					}
					spin_unlock(&cifs_tcp_ses_lock);
				}
			}
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->secMode &
			    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
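/* Basic sanity checks on a received SMB header: verify the 0xFF 'S' 'M' 'B'
   signature, that the mid matches the one we are waiting for, and that the
   frame is a response (the only request a server may legitimately send us
   is an oplock break, SMB_COM_LOCKING_ANDX).  Returns 0 if OK, 1 on error. */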
static int
check_smb_hdr(struct smb_hdr *smb, __u16 mid)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cERROR(1, "Bad protocol string signature header 0x%x",
			*(unsigned int *)smb->Protocol);
		return 1;
	}

	/* Make sure that message ids match */
	if (mid != smb->Mid) {
		cERROR(1, "Mids do not match. received=%u expected=%u",
			smb->Mid, mid);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
	return 1;
}
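/*
 * Validate a received SMB frame: check the RFC1001 length against both the
 * number of bytes actually read and the length calculated from the word
 * count and byte count fields, then run the header sanity checks above.
 * Returns 0 if the frame looks usable, 1 if it should be dropped.
 */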
int
checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
{
	__u32 len = smb->smb_buf_length;
	__u32 clc_len;  /* calculated length */
	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);

	if (length < 2 + sizeof(struct smb_hdr)) {
		if ((length >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((length == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cERROR(1, "rcvd invalid byte count (bcc)");
		} else {
			cERROR(1, "Length less than smb header size");
		}
		return 1;
	}
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "smb length greater than MaxBufSize, mid=%d",
				   smb->Mid);
		return 1;
	}

	if (check_smb_hdr(smb, mid))
		return 1;
	clc_len = smbCalcSize_LE(smb);

	if (4 + len != length) {
		cERROR(1, "Length read does not match RFC1001 length %d",
			   len);
		return 1;
	}

	if (4 + len != clc_len) {
		/* check if bcc wrapped around for large read responses */
		if ((len > 64 * 1024) && (len > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
				clc_len, 4 + len, smb->Mid);

		if (4 + len < clc_len) {
			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
					len, smb->Mid);
			return 1;
		} else if (len > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
					"than SMB for mid=%u", len, smb->Mid);
			return 1;
		}
	}
	return 0;
}
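/*
 * Determine whether an incoming frame is an oplock break or a dnotify
 * response rather than an ordinary reply.  For an oplock break, look up
 * the matching tcon and open file by tid and fid, record the new oplock
 * level on the inode and queue the oplock break worker.  Returns true
 * when the frame was recognized here (even if no matching open file was
 * found), false otherwise.
 */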
bool
is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
{
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifsSesInfo *ses;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cFYI(1, "Checking for oplock break or dnotify response");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cFYI(1, "dnotify on %s Action: 0x%x",
				 pnotify->FileName, pnotify->Action);
			/* cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cFYI(1, "notify err 0x%d",
				pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cFYI(1, "invalid handle on oplock break");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cFYI(1, "oplock type 0x%d level 0x%d",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifsTconInfo, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->num_oplock_brks);
			spin_lock(&cifs_file_list_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->netfid)
					continue;

				cFYI(1, "file id match, oplock break");
				pCifsInode = CIFS_I(netfile->dentry->d_inode);

				cifs_set_oplock_level(pCifsInode,
					pSMB->OplockLevel ? OPLOCK_READ : 0);
				/*
				 * cifs_oplock_break_put() can't be called
				 * from here.  Get reference after queueing
				 * succeeded. cifs_oplock_break() will
				 * synchronize using cifs_file_list_lock.
				 */
				if (queue_work(system_nrt_wq,
					       &netfile->oplock_break))
					cifs_oplock_break_get(netfile);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&cifs_file_list_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&cifs_file_list_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cFYI(1, "No matching file for oplock break");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cFYI(1, "Can not process oplock break for non-existent connection");
	return true;
}
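/* Hex dump an SMB buffer to the kernel log, eight bytes per line with an
   ASCII column; a no-op unless traceSMB is enabled. */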
void
dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
{
	int i, j;
	char debug_line[17];
	unsigned char *buffer;

	if (traceSMB == 0)
		return;

	buffer = (unsigned char *) smb_buf;
	for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
		if (i % 8 == 0) {
			/* have reached the beginning of line */
			printk(KERN_DEBUG "| ");
			j = 0;
		}
		printk("%0#4x ", buffer[i]);
		debug_line[2 * j] = ' ';
		if (isprint(buffer[i]))
			debug_line[1 + (2 * j)] = buffer[i];
		else
			debug_line[1 + (2 * j)] = '_';

		if (i % 8 == 7) {
			/* reached end of line, time to print ascii */
			debug_line[16] = 0;
			printk(" | %s\n", debug_line);
		}
	}
	for (; j < 8; j++) {
		printk(" ");
		debug_line[2 * j] = ' ';
		debug_line[1 + (2 * j)] = ' ';
	}
	printk(" | %s\n", debug_line);
	return;
}
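/* Stop requesting server inode numbers on this superblock (the server is
   returning ones we cannot use) and warn once per mount. */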
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cERROR(1, "Autodisabling the use of server inode numbers on "
			   "%s. This server doesn't seem to support them "
			   "properly. Hardlinks will not be recognized on this "
			   "mount. Consider mounting with the \"noserverino\" "
			   "option to silence this message.",
			   cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}
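/* Translate the oplock level granted by the server into the inode's
   client-side caching flags: an exclusive oplock allows caching of both
   reads and writes, a level II (read) oplock allows read caching only,
   and anything else disables caching. */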
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->clientCanCacheAll = true;
		cinode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p",
		     &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->clientCanCacheAll = false;
		cinode->clientCanCacheRead = true;
		cFYI(1, "Level II Oplock granted on inode %p",
		     &cinode->vfs_inode);
	} else {
		cinode->clientCanCacheAll = false;
		cinode->clientCanCacheRead = false;