smbd: Remove struct blocking_lock_record
[Samba.git] / source3 / smbd / smb2_lock.c
blob 7d983cc3456de7cd25d7a85513883f1d476dac8a
/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "lib/dbwrap/dbwrap_watch.h"
#include "librpc/gen_ndr/open_files.h"
#include "messages.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_SMB2
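
/*
 * Parsed form of one wire SMB2_LOCK_ELEMENT: each element on the wire
 * is 24 bytes, carrying an 8-byte offset, an 8-byte length, 4 bytes of
 * flags and 4 reserved bytes.
 */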
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

struct smbd_smb2_lock_state {
	struct tevent_context *ev;
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct files_struct *fsp;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *in_fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
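
/*
 * Entry point for SMB2_OP_LOCK: validate and parse the request body,
 * resolve the file handle and hand the lock elements to
 * smbd_smb2_lock_send().  The request is put on the pending queue so
 * that it can go async if the locks cannot be granted immediately.
 */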
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct files_struct *in_fsp;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = SMBD_SMB2_IN_BODY_PTR(req);
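
	/*
	 * Fixed 0x30-byte body: the lock count lives at offset 0x02,
	 * the file id at 0x08/0x10, and the first 24-byte lock element
	 * is embedded at offset 0x18.  Any further elements follow in
	 * the dynamic part of the request.
	 */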
	in_lock_count		= CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent	= BVAL(inbody, 0x08);
	in_file_id_volatile	= BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

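	/*
	 * The first element is part of the fixed body, so only the
	 * remaining (in_lock_count - 1) elements of 0x18 bytes each
	 * have to fit into the dynamic buffer.
	 */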
	if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset	= BVAL(lock_buffer, 0x00);
	in_locks[l].length	= BVAL(lock_buffer, 0x08);
	in_locks[l].flags	= IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	status = req->session->status;
	if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED)) {
		/*
		 * We need to catch NT_STATUS_NETWORK_SESSION_EXPIRED
		 * for lock requests only.
		 *
		 * Unlock requests still need to be processed!
		 *
		 * This means smbd_smb2_request_check_session()
		 * can't handle the difference and always
		 * allows SMB2_OP_LOCK.
		 */
		if (in_locks[0].flags != SMB2_LOCK_FLAG_UNLOCK) {
			return smbd_smb2_request_error(req, status);
		}
	}

	lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset	= BVAL(lock_buffer, 0x00);
		in_locks[l].length	= BVAL(lock_buffer, 0x08);
		in_locks[l].flags	= IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
	if (in_fsp == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
				     req, in_fsp,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
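
	/*
	 * Queue the request so it can go async: if the lock has not
	 * completed within the short defer time given here, an interim
	 * STATUS_PENDING response is sent to the client.
	 */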
	return smbd_smb2_request_pending_queue(req, subreq, 500);
}
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = smbd_smb2_generate_outbody(smb2req, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
			return;
		}
		return;
	}
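
	/*
	 * The SMB2 LOCK response has a fixed 4-byte body:
	 * StructureSize (2 bytes, value 0x04) and 2 reserved bytes.
	 */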
	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->xconn,
						 nt_errstr(error));
		return;
	}
}
static void smbd_smb2_lock_retry(struct tevent_req *subreq);
static bool smbd_smb2_lock_cancel(struct tevent_req *req);
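
/*
 * Try to grant the requested byte-range locks.  If a blocking request
 * conflicts with an existing lock, watch the blocker's share mode
 * record via dbwrap_watched_watch_send() and retry whenever it
 * changes, rather than keeping a separate blocking lock queue.
 */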
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	bool blocking = false;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	struct share_mode_lock *lck = NULL;
	uint16_t blocker_idx;
	struct server_id blocking_pid = { 0 };
	uint64_t blocking_smblctx;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->fsp = fsp;
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	state->smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(state->smb1req, req)) {
		return tevent_req_post(req, ev);
	}

	DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
		  fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}
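
	/*
	 * Only the flags of the first element decide the mode of the
	 * whole request: SHARED/EXCLUSIVE without FAIL_IMMEDIATELY is a
	 * waiting (blocking) lock, with FAIL_IMMEDIATELY it is a plain
	 * try-lock, and UNLOCK turns the request into an unlock run.
	 */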
	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		blocking = true;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

	if (!isunlock && (in_lock_count > 1)) {

		/*
		 * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
		 * have more than one lock and one of those is blocking.
		 */

		for (i=0; i<in_lock_count; i++) {
			uint32_t flags = in_locks[i].flags;

			if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
				tevent_req_nterror(
					req, NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
		}
	}
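
	/*
	 * Translate every SMB2 lock element into a struct
	 * smbd_lock_element: the lock context is keyed on the open's
	 * persistent id, EXCLUSIVE maps to WRITE_LOCK, SHARED to
	 * READ_LOCK and unlock elements to UNLOCK_LOCK.
	 */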
	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to process
				 * all unlock elements before failing the
				 * request.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = fsp->op->global->open_persistent_id;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DBG_DEBUG("index %"PRIu16" offset=%"PRIu64", count=%"PRIu64", "
			  "smblctx = %"PRIu64" type %d\n",
			  i,
			  locks[i].offset,
			  locks[i].count,
			  locks[i].smblctx,
			  (int)locks[i].brltype);
	}

	state->locks = locks;
	state->lock_count = in_lock_count;
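
	/*
	 * Unlocks are handled synchronously: smbd_do_unlocking() walks
	 * all elements and the request completes immediately.
	 */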
	if (isunlock) {
		status = smbd_do_unlocking(
			state->smb1req, fsp, in_lock_count, locks, WINDOWS_LOCK);

		if (tevent_req_nterror(req, status)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
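
	/*
	 * Lock requests: grab the share mode record and attempt all
	 * elements in one go.  On a conflict smbd_do_locks_try()
	 * reports the index, process id and lock context of the first
	 * blocker.
	 */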
	lck = get_existing_share_mode_lock(
		talloc_tos(), state->fsp->file_id);
	if (tevent_req_nomem(lck, req)) {
		return tevent_req_post(req, ev);
	}

	status = smbd_do_locks_try(
		state->smb1req->sconn->msg_ctx,
		state->fsp,
		WINDOWS_LOCK,
		state->lock_count,
		state->locks,
		&blocker_idx,
		&blocking_pid,
		&blocking_smblctx);

	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(lck);
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	if (blocking &&
	    (NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED) ||
	     NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT))) {
		struct tevent_req *subreq;

		DBG_DEBUG("Watching share mode lock\n");

		subreq = dbwrap_watched_watch_send(
			state, state->ev, lck->data->record, blocking_pid);
		TALLOC_FREE(lck);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(subreq, smbd_smb2_lock_retry, req);
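
		/*
		 * Stay pending: defer the callback to the main event
		 * context, attach the request to the fsp so pending
		 * requests can be found (and cancelled) at close time,
		 * and install the cancel handler for SMB2 CANCEL.
		 */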
		tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
		aio_add_req_to_fsp(state->fsp, req);
		tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

		return req;
	}

	TALLOC_FREE(lck);
	tevent_req_nterror(req, status);
	return tevent_req_post(req, ev);
}
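
/*
 * Called when the watched share mode record changes: retry the lock
 * attempt and, if it is still blocked, re-arm the watch on the record.
 */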
static void smbd_smb2_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct smbd_smb2_lock_state *state = tevent_req_data(
		req, struct smbd_smb2_lock_state);
	struct share_mode_lock *lck = NULL;
	uint16_t blocker_idx;
	struct server_id blocking_pid = { 0 };
	uint64_t blocking_smblctx;
	NTSTATUS status;

	status = dbwrap_watched_watch_recv(subreq, NULL, NULL);
	TALLOC_FREE(subreq);
	if (tevent_req_nterror(req, status)) {
		return;
	}

	lck = get_existing_share_mode_lock(
		talloc_tos(), state->fsp->file_id);
	if (tevent_req_nomem(lck, req)) {
		return;
	}

	status = smbd_do_locks_try(
		state->smb1req->sconn->msg_ctx,
		state->fsp,
		WINDOWS_LOCK,
		state->lock_count,
		state->locks,
		&blocker_idx,
		&blocking_pid,
		&blocking_smblctx);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(lck);
		tevent_req_done(req);
		return;
	}

	subreq = dbwrap_watched_watch_send(
		state, state->ev, lck->data->record, blocking_pid);
	TALLOC_FREE(lck);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, smbd_smb2_lock_retry, req);
}
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;

	/*
	 * If the request is canceled because of close, logoff or tdis
	 * the status is NT_STATUS_RANGE_NOT_LOCKED instead of
	 * NT_STATUS_CANCELLED.
	 */
	if (state->fsp->closing ||
	    !NT_STATUS_IS_OK(smb2req->session->status) ||
	    !NT_STATUS_IS_OK(smb2req->tcon->status)) {
		tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
		return true;
	}

	tevent_req_nterror(req, NT_STATUS_CANCELLED);
	return true;
}