5045 use atomic_{inc,dec}_* instead of atomic_add_*
illumos-gate.git: usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd.c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2013 by Delphix. All rights reserved.
28 #include <sys/conf.h>
29 #include <sys/file.h>
30 #include <sys/ddi.h>
31 #include <sys/sunddi.h>
32 #include <sys/modctl.h>
33 #include <sys/scsi/scsi.h>
34 #include <sys/scsi/impl/scsi_reset_notify.h>
35 #include <sys/disp.h>
36 #include <sys/byteorder.h>
37 #include <sys/pathname.h>
38 #include <sys/atomic.h>
39 #include <sys/nvpair.h>
40 #include <sys/fs/zfs.h>
41 #include <sys/sdt.h>
42 #include <sys/dkio.h>
43 #include <sys/zfs_ioctl.h>
45 #include <sys/stmf.h>
46 #include <sys/lpif.h>
47 #include <sys/stmf_ioctl.h>
48 #include <sys/stmf_sbd_ioctl.h>
50 #include "stmf_sbd.h"
51 #include "sbd_impl.h"
53 #define SBD_IS_ZVOL(zvol) (strncmp("/dev/zvol", zvol, 9))
55 extern sbd_status_t sbd_pgr_meta_init(sbd_lu_t *sl);
56 extern sbd_status_t sbd_pgr_meta_load(sbd_lu_t *sl);
57 extern void sbd_pgr_reset(sbd_lu_t *sl);
59 static int sbd_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
60 void **result);
61 static int sbd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
62 static int sbd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
63 static int sbd_open(dev_t *devp, int flag, int otype, cred_t *credp);
64 static int sbd_close(dev_t dev, int flag, int otype, cred_t *credp);
65 static int stmf_sbd_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
66 cred_t *credp, int *rval);
67 void sbd_lp_cb(stmf_lu_provider_t *lp, int cmd, void *arg, uint32_t flags);
68 stmf_status_t sbd_proxy_reg_lu(uint8_t *luid, void *proxy_reg_arg,
69 uint32_t proxy_reg_arg_len);
70 stmf_status_t sbd_proxy_dereg_lu(uint8_t *luid, void *proxy_reg_arg,
71 uint32_t proxy_reg_arg_len);
72 stmf_status_t sbd_proxy_msg(uint8_t *luid, void *proxy_arg,
73 uint32_t proxy_arg_len, uint32_t type);
74 int sbd_create_register_lu(sbd_create_and_reg_lu_t *slu, int struct_sz,
75 uint32_t *err_ret);
76 int sbd_create_standby_lu(sbd_create_standby_lu_t *slu, uint32_t *err_ret);
77 int sbd_set_lu_standby(sbd_set_lu_standby_t *stlu, uint32_t *err_ret);
78 int sbd_import_lu(sbd_import_lu_t *ilu, int struct_sz, uint32_t *err_ret,
79 int no_register, sbd_lu_t **slr);
80 int sbd_import_active_lu(sbd_import_lu_t *ilu, sbd_lu_t *sl, uint32_t *err_ret);
81 int sbd_delete_lu(sbd_delete_lu_t *dlu, int struct_sz, uint32_t *err_ret);
82 int sbd_modify_lu(sbd_modify_lu_t *mlu, int struct_sz, uint32_t *err_ret);
83 int sbd_set_global_props(sbd_global_props_t *mlu, int struct_sz,
84 uint32_t *err_ret);
85 int sbd_get_global_props(sbd_global_props_t *oslp, uint32_t oslp_sz,
86 uint32_t *err_ret);
87 int sbd_get_lu_props(sbd_lu_props_t *islp, uint32_t islp_sz,
88 sbd_lu_props_t *oslp, uint32_t oslp_sz, uint32_t *err_ret);
89 static char *sbd_get_zvol_name(sbd_lu_t *);
90 static int sbd_get_unmap_props(sbd_unmap_props_t *sup, sbd_unmap_props_t *osup,
91 uint32_t *err_ret);
92 sbd_status_t sbd_create_zfs_meta_object(sbd_lu_t *sl);
93 sbd_status_t sbd_open_zfs_meta(sbd_lu_t *sl);
94 sbd_status_t sbd_read_zfs_meta(sbd_lu_t *sl, uint8_t *buf, uint64_t sz,
95 uint64_t off);
96 sbd_status_t sbd_write_zfs_meta(sbd_lu_t *sl, uint8_t *buf, uint64_t sz,
97 uint64_t off);
98 sbd_status_t sbd_update_zfs_prop(sbd_lu_t *sl);
99 int sbd_is_zvol(char *path);
100 int sbd_zvolget(char *zvol_name, char **comstarprop);
101 int sbd_zvolset(char *zvol_name, char *comstarprop);
102 char sbd_ctoi(char c);
103 void sbd_close_lu(sbd_lu_t *sl);
105 static ldi_ident_t sbd_zfs_ident;
106 static stmf_lu_provider_t *sbd_lp;
107 static sbd_lu_t *sbd_lu_list = NULL;
108 static kmutex_t sbd_lock;
109 static dev_info_t *sbd_dip;
110 static uint32_t sbd_lu_count = 0;
112 /* Global property settings for the logical unit */
113 char sbd_vendor_id[] = "SUN ";
114 char sbd_product_id[] = "COMSTAR ";
115 char sbd_revision[] = "1.0 ";
116 char *sbd_mgmt_url = NULL;
117 uint16_t sbd_mgmt_url_alloc_size = 0;
118 krwlock_t sbd_global_prop_lock;
120 static char sbd_name[] = "sbd";
122 static struct cb_ops sbd_cb_ops = {
123 sbd_open, /* open */
124 sbd_close, /* close */
125 nodev, /* strategy */
126 nodev, /* print */
127 nodev, /* dump */
128 nodev, /* read */
129 nodev, /* write */
130 stmf_sbd_ioctl, /* ioctl */
131 nodev, /* devmap */
132 nodev, /* mmap */
133 nodev, /* segmap */
134 nochpoll, /* chpoll */
135 ddi_prop_op, /* cb_prop_op */
136 0, /* streamtab */
137 D_NEW | D_MP, /* cb_flag */
138 CB_REV, /* rev */
139 nodev, /* aread */
140 nodev /* awrite */
143 static struct dev_ops sbd_ops = {
144 DEVO_REV,
146 sbd_getinfo,
147 nulldev, /* identify */
148 nulldev, /* probe */
149 sbd_attach,
150 sbd_detach,
151 nodev, /* reset */
152 &sbd_cb_ops,
153 NULL, /* bus_ops */
154 NULL /* power */
157 #define SBD_NAME "COMSTAR SBD"
159 static struct modldrv modldrv = {
160 &mod_driverops,
161 SBD_NAME,
162 &sbd_ops
165 static struct modlinkage modlinkage = {
166 MODREV_1,
167 &modldrv,
168 NULL
172 _init(void)
174 int ret;
176 ret = mod_install(&modlinkage);
177 if (ret)
178 return (ret);
179 sbd_lp = (stmf_lu_provider_t *)stmf_alloc(STMF_STRUCT_LU_PROVIDER,
180 0, 0);
181 sbd_lp->lp_lpif_rev = LPIF_REV_2;
182 sbd_lp->lp_instance = 0;
183 sbd_lp->lp_name = sbd_name;
184 sbd_lp->lp_cb = sbd_lp_cb;
185 sbd_lp->lp_alua_support = 1;
186 sbd_lp->lp_proxy_msg = sbd_proxy_msg;
187 sbd_zfs_ident = ldi_ident_from_anon();
189 if (stmf_register_lu_provider(sbd_lp) != STMF_SUCCESS) {
190 (void) mod_remove(&modlinkage);
191 stmf_free(sbd_lp);
192 return (EINVAL);
194 mutex_init(&sbd_lock, NULL, MUTEX_DRIVER, NULL);
195 rw_init(&sbd_global_prop_lock, NULL, RW_DRIVER, NULL);
196 return (0);
200 _fini(void)
202 int ret;
205  * If we have registered lus, then make sure they are all offline;
206  * if so, then deregister them. This should drop the sbd_lu_count
207 * to zero.
209 if (sbd_lu_count) {
210 sbd_lu_t *slu;
212 /* See if all of them are offline */
213 mutex_enter(&sbd_lock);
214 for (slu = sbd_lu_list; slu != NULL; slu = slu->sl_next) {
215 if ((slu->sl_state != STMF_STATE_OFFLINE) ||
216 slu->sl_state_not_acked) {
217 mutex_exit(&sbd_lock);
218 return (EBUSY);
221 mutex_exit(&sbd_lock);
223 #if 0
224 /* ok start deregistering them */
225 while (sbd_lu_list) {
226 sbd_store_t *sst = sbd_lu_list->sl_sst;
227 if (sst->sst_deregister_lu(sst) != STMF_SUCCESS)
228 return (EBUSY);
230 #endif
231 return (EBUSY);
233 if (stmf_deregister_lu_provider(sbd_lp) != STMF_SUCCESS)
234 return (EBUSY);
235 ret = mod_remove(&modlinkage);
236 if (ret != 0) {
237 (void) stmf_register_lu_provider(sbd_lp);
238 return (ret);
240 stmf_free(sbd_lp);
241 mutex_destroy(&sbd_lock);
242 rw_destroy(&sbd_global_prop_lock);
243 ldi_ident_release(sbd_zfs_ident);
244 return (0);
248 _info(struct modinfo *modinfop)
250 return (mod_info(&modlinkage, modinfop));
253 /* ARGSUSED */
254 static int
255 sbd_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
257 switch (cmd) {
258 case DDI_INFO_DEVT2DEVINFO:
259 *result = sbd_dip;
260 break;
261 case DDI_INFO_DEVT2INSTANCE:
262 *result = (void *)(uintptr_t)ddi_get_instance(sbd_dip);
263 break;
264 default:
265 return (DDI_FAILURE);
268 return (DDI_SUCCESS);
271 static int
272 sbd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
274 switch (cmd) {
275 case DDI_ATTACH:
276 sbd_dip = dip;
278 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
279 DDI_NT_STMF_LP, 0) != DDI_SUCCESS) {
280 break;
282 ddi_report_dev(dip);
283 return (DDI_SUCCESS);
286 return (DDI_FAILURE);
289 static int
290 sbd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
292 switch (cmd) {
293 case DDI_DETACH:
294 ddi_remove_minor_node(dip, 0);
295 return (DDI_SUCCESS);
298 return (DDI_FAILURE);
301 /* ARGSUSED */
302 static int
303 sbd_open(dev_t *devp, int flag, int otype, cred_t *credp)
305 if (otype != OTYP_CHR)
306 return (EINVAL);
307 return (0);
310 /* ARGSUSED */
311 static int
312 sbd_close(dev_t dev, int flag, int otype, cred_t *credp)
314 return (0);
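/*
 * Main ioctl entry point for the sbd "admin" node. After a privilege
 * check, the stmf_iocdata_t and its buffers are copied in, the
 * SBD_IOCTL_* command is dispatched, and the results are copied back
 * out to the caller.
 */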
317 /* ARGSUSED */
318 static int
319 stmf_sbd_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
320 cred_t *credp, int *rval)
322 stmf_iocdata_t *iocd;
323 void *ibuf = NULL;
324 void *obuf = NULL;
325 sbd_lu_t *nsl;
326 int i;
327 int ret;
329 if (drv_priv(credp) != 0) {
330 return (EPERM);
333 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
334 if (ret)
335 return (ret);
336 iocd->stmf_error = 0;
338 switch (cmd) {
339 case SBD_IOCTL_CREATE_AND_REGISTER_LU:
340 if (iocd->stmf_ibuf_size <
341 (sizeof (sbd_create_and_reg_lu_t) - 8)) {
342 ret = EFAULT;
343 break;
345 if ((iocd->stmf_obuf_size == 0) ||
346 (iocd->stmf_obuf_size > iocd->stmf_ibuf_size)) {
347 ret = EINVAL;
348 break;
350 ret = sbd_create_register_lu((sbd_create_and_reg_lu_t *)
351 ibuf, iocd->stmf_ibuf_size, &iocd->stmf_error);
352 bcopy(ibuf, obuf, iocd->stmf_obuf_size);
353 break;
354 case SBD_IOCTL_SET_LU_STANDBY:
355 if (iocd->stmf_ibuf_size < sizeof (sbd_set_lu_standby_t)) {
356 ret = EFAULT;
357 break;
359 if (iocd->stmf_obuf_size) {
360 ret = EINVAL;
361 break;
363 ret = sbd_set_lu_standby((sbd_set_lu_standby_t *)ibuf,
364 &iocd->stmf_error);
365 break;
366 case SBD_IOCTL_IMPORT_LU:
367 if (iocd->stmf_ibuf_size <
368 (sizeof (sbd_import_lu_t) - 8)) {
369 ret = EFAULT;
370 break;
372 if ((iocd->stmf_obuf_size == 0) ||
373 (iocd->stmf_obuf_size > iocd->stmf_ibuf_size)) {
374 ret = EINVAL;
375 break;
377 ret = sbd_import_lu((sbd_import_lu_t *)ibuf,
378 iocd->stmf_ibuf_size, &iocd->stmf_error, 0, NULL);
379 bcopy(ibuf, obuf, iocd->stmf_obuf_size);
380 break;
381 case SBD_IOCTL_DELETE_LU:
382 if (iocd->stmf_ibuf_size < (sizeof (sbd_delete_lu_t) - 8)) {
383 ret = EFAULT;
384 break;
386 if (iocd->stmf_obuf_size) {
387 ret = EINVAL;
388 break;
390 ret = sbd_delete_lu((sbd_delete_lu_t *)ibuf,
391 iocd->stmf_ibuf_size, &iocd->stmf_error);
392 break;
393 case SBD_IOCTL_MODIFY_LU:
394 if (iocd->stmf_ibuf_size < (sizeof (sbd_modify_lu_t) - 8)) {
395 ret = EFAULT;
396 break;
398 if (iocd->stmf_obuf_size) {
399 ret = EINVAL;
400 break;
402 ret = sbd_modify_lu((sbd_modify_lu_t *)ibuf,
403 iocd->stmf_ibuf_size, &iocd->stmf_error);
404 break;
405 case SBD_IOCTL_SET_GLOBAL_LU:
406 if (iocd->stmf_ibuf_size < (sizeof (sbd_global_props_t) - 8)) {
407 ret = EFAULT;
408 break;
410 if (iocd->stmf_obuf_size) {
411 ret = EINVAL;
412 break;
414 ret = sbd_set_global_props((sbd_global_props_t *)ibuf,
415 iocd->stmf_ibuf_size, &iocd->stmf_error);
416 break;
417 case SBD_IOCTL_GET_GLOBAL_LU:
418 if (iocd->stmf_ibuf_size) {
419 ret = EINVAL;
420 break;
422 if (iocd->stmf_obuf_size < sizeof (sbd_global_props_t)) {
423 ret = EINVAL;
424 break;
426 ret = sbd_get_global_props((sbd_global_props_t *)obuf,
427 iocd->stmf_obuf_size, &iocd->stmf_error);
428 break;
429 case SBD_IOCTL_GET_LU_PROPS:
430 if (iocd->stmf_ibuf_size < (sizeof (sbd_lu_props_t) - 8)) {
431 ret = EFAULT;
432 break;
434 if (iocd->stmf_obuf_size < sizeof (sbd_lu_props_t)) {
435 ret = EINVAL;
436 break;
438 ret = sbd_get_lu_props((sbd_lu_props_t *)ibuf,
439 iocd->stmf_ibuf_size, (sbd_lu_props_t *)obuf,
440 iocd->stmf_obuf_size, &iocd->stmf_error);
441 break;
442 case SBD_IOCTL_GET_LU_LIST:
443 mutex_enter(&sbd_lock);
444 iocd->stmf_obuf_max_nentries = sbd_lu_count;
445 iocd->stmf_obuf_nentries = min((iocd->stmf_obuf_size >> 4),
446 sbd_lu_count);
447 for (nsl = sbd_lu_list, i = 0; nsl &&
448 (i < iocd->stmf_obuf_nentries); i++, nsl = nsl->sl_next) {
449 bcopy(nsl->sl_device_id + 4,
450 &(((uint8_t *)obuf)[i << 4]), 16);
452 mutex_exit(&sbd_lock);
453 ret = 0;
454 iocd->stmf_error = 0;
455 break;
456 case SBD_IOCTL_GET_UNMAP_PROPS:
457 if (iocd->stmf_ibuf_size < sizeof (sbd_unmap_props_t)) {
458 ret = EFAULT;
459 break;
461 if (iocd->stmf_obuf_size < sizeof (sbd_unmap_props_t)) {
462 ret = EINVAL;
463 break;
465 ret = sbd_get_unmap_props((sbd_unmap_props_t *)ibuf,
466 (sbd_unmap_props_t *)obuf, &iocd->stmf_error);
467 break;
468 default:
469 ret = ENOTTY;
472 if (ret == 0) {
473 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
474 } else if (iocd->stmf_error) {
475 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
477 if (obuf) {
478 kmem_free(obuf, iocd->stmf_obuf_size);
479 obuf = NULL;
481 if (ibuf) {
482 kmem_free(ibuf, iocd->stmf_ibuf_size);
483 ibuf = NULL;
485 kmem_free(iocd, sizeof (stmf_iocdata_t));
486 return (ret);
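/*
 * LU provider data callback. When the framework hands back persisted
 * provider data (an nvlist of string pairs naming metadata files) during
 * STMF onlining or once pre-registration completes, import each listed
 * logical unit via sbd_import_lu().
 */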
489 /* ARGSUSED */
490 void
491 sbd_lp_cb(stmf_lu_provider_t *lp, int cmd, void *arg, uint32_t flags)
493 nvpair_t *np;
494 char *s;
495 sbd_import_lu_t *ilu;
496 uint32_t ilu_sz;
497 uint32_t struct_sz;
498 uint32_t err_ret;
499 int iret;
501 if ((cmd != STMF_PROVIDER_DATA_UPDATED) || (arg == NULL)) {
502 return;
505 if ((flags & (STMF_PCB_STMF_ONLINING | STMF_PCB_PREG_COMPLETE)) == 0) {
506 return;
509 np = NULL;
510 ilu_sz = 1024;
511 ilu = (sbd_import_lu_t *)kmem_zalloc(ilu_sz, KM_SLEEP);
512 while ((np = nvlist_next_nvpair((nvlist_t *)arg, np)) != NULL) {
513 if (nvpair_type(np) != DATA_TYPE_STRING) {
514 continue;
516 if (nvpair_value_string(np, &s) != 0) {
517 continue;
519 struct_sz = max(8, strlen(s) + 1);
520 struct_sz += sizeof (sbd_import_lu_t) - 8;
521 if (struct_sz > ilu_sz) {
522 kmem_free(ilu, ilu_sz);
523 ilu_sz = struct_sz + 32;
524 ilu = (sbd_import_lu_t *)kmem_zalloc(ilu_sz, KM_SLEEP);
526 ilu->ilu_struct_size = struct_sz;
527 (void) strcpy(ilu->ilu_meta_fname, s);
528 iret = sbd_import_lu(ilu, struct_sz, &err_ret, 0, NULL);
529 if (iret) {
530 stmf_trace(0, "sbd_lp_cb: import_lu failed, ret = %d, "
531 "err_ret = %d", iret, err_ret);
532 } else {
533 stmf_trace(0, "Imported the LU %s", nvpair_name(np));
537 if (ilu) {
538 kmem_free(ilu, ilu_sz);
539 ilu = NULL;
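/*
 * Add an lu to the global sbd_lu_list, failing with SBD_ALREADY if it is
 * already linked or another lu with the same name exists.
 */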
543 sbd_status_t
544 sbd_link_lu(sbd_lu_t *sl)
546 sbd_lu_t *nsl;
548 mutex_enter(&sbd_lock);
549 mutex_enter(&sl->sl_lock);
550 ASSERT(sl->sl_trans_op != SL_OP_NONE);
552 if (sl->sl_flags & SL_LINKED) {
553 mutex_exit(&sbd_lock);
554 mutex_exit(&sl->sl_lock);
555 return (SBD_ALREADY);
557 for (nsl = sbd_lu_list; nsl; nsl = nsl->sl_next) {
558 if (strcmp(nsl->sl_name, sl->sl_name) == 0)
559 break;
561 if (nsl) {
562 mutex_exit(&sbd_lock);
563 mutex_exit(&sl->sl_lock);
564 return (SBD_ALREADY);
566 sl->sl_next = sbd_lu_list;
567 sbd_lu_list = sl;
568 sl->sl_flags |= SL_LINKED;
569 mutex_exit(&sbd_lock);
570 mutex_exit(&sl->sl_lock);
571 return (SBD_SUCCESS);
574 void
575 sbd_unlink_lu(sbd_lu_t *sl)
577 sbd_lu_t **ppnsl;
579 mutex_enter(&sbd_lock);
580 mutex_enter(&sl->sl_lock);
581 ASSERT(sl->sl_trans_op != SL_OP_NONE);
583 ASSERT(sl->sl_flags & SL_LINKED);
584 for (ppnsl = &sbd_lu_list; *ppnsl; ppnsl = &((*ppnsl)->sl_next)) {
585 if (*ppnsl == sl)
586 break;
588 ASSERT(*ppnsl);
589 *ppnsl = (*ppnsl)->sl_next;
590 sl->sl_flags &= ~SL_LINKED;
591 mutex_exit(&sbd_lock);
592 mutex_exit(&sl->sl_lock);
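/*
 * Look up an lu by GUID (if non-NULL) or by meta name and, if no other
 * transition is in progress, claim it by setting sl_trans_op to op.
 * Returns SBD_BUSY if the lu is already in transition and SBD_NOT_FOUND
 * if no matching lu exists.
 */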
595 sbd_status_t
596 sbd_find_and_lock_lu(uint8_t *guid, uint8_t *meta_name, uint8_t op,
597 sbd_lu_t **ppsl)
599 sbd_lu_t *sl;
600 int found = 0;
601 sbd_status_t sret;
603 mutex_enter(&sbd_lock);
604 for (sl = sbd_lu_list; sl; sl = sl->sl_next) {
605 if (guid) {
606 found = bcmp(sl->sl_device_id + 4, guid, 16) == 0;
607 } else {
608 found = strcmp(sl->sl_name, (char *)meta_name) == 0;
610 if (found)
611 break;
613 if (!found) {
614 mutex_exit(&sbd_lock);
615 return (SBD_NOT_FOUND);
617 mutex_enter(&sl->sl_lock);
618 if (sl->sl_trans_op == SL_OP_NONE) {
619 sl->sl_trans_op = op;
620 *ppsl = sl;
621 sret = SBD_SUCCESS;
622 } else {
623 sret = SBD_BUSY;
625 mutex_exit(&sl->sl_lock);
626 mutex_exit(&sbd_lock);
627 return (sret);
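/*
 * Read metadata at the given byte offset. The request is expanded to the
 * metadata block alignment, read into a temporary buffer (via
 * sbd_read_zfs_meta() for zvol-backed metadata, or vn_rdwr() on the
 * vnode) and the requested bytes are copied out. Anything beyond
 * sl_meta_size_used is zero-filled.
 */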
630 sbd_status_t
631 sbd_read_meta(sbd_lu_t *sl, uint64_t offset, uint64_t size, uint8_t *buf)
633 uint64_t meta_align;
634 uint64_t starting_off;
635 uint64_t data_off;
636 uint64_t ending_off;
637 uint64_t io_size;
638 uint8_t *io_buf;
639 vnode_t *vp;
640 sbd_status_t ret;
641 ssize_t resid;
642 int vret;
644 ASSERT(sl->sl_flags & SL_META_OPENED);
645 if (sl->sl_flags & SL_SHARED_META) {
646 meta_align = (((uint64_t)1) << sl->sl_data_blocksize_shift) - 1;
647 vp = sl->sl_data_vp;
648 ASSERT(vp);
649 } else {
650 meta_align = (((uint64_t)1) << sl->sl_meta_blocksize_shift) - 1;
651 if ((sl->sl_flags & SL_ZFS_META) == 0) {
652 vp = sl->sl_meta_vp;
653 ASSERT(vp);
656 starting_off = offset & ~(meta_align);
657 data_off = offset & meta_align;
658 ending_off = (offset + size + meta_align) & (~meta_align);
659 if (ending_off > sl->sl_meta_size_used) {
660 bzero(buf, size);
661 if (starting_off >= sl->sl_meta_size_used) {
662 return (SBD_SUCCESS);
664 ending_off = (sl->sl_meta_size_used + meta_align) &
665 (~meta_align);
666 if (size > (ending_off - (starting_off + data_off))) {
667 size = ending_off - (starting_off + data_off);
670 io_size = ending_off - starting_off;
671 io_buf = (uint8_t *)kmem_zalloc(io_size, KM_SLEEP);
672 ASSERT((starting_off + io_size) <= sl->sl_total_meta_size);
675  * Don't proceed if the device has been closed.
676 * This can occur on an access state change to standby or
677 * a delete. The writer lock is acquired before closing the
678 * lu. If importing, reading the metadata is valid, hence
679 * the check on SL_OP_IMPORT_LU.
681 rw_enter(&sl->sl_access_state_lock, RW_READER);
682 if ((sl->sl_flags & SL_MEDIA_LOADED) == 0 &&
683 sl->sl_trans_op != SL_OP_IMPORT_LU) {
684 rw_exit(&sl->sl_access_state_lock);
685 ret = SBD_FILEIO_FAILURE;
686 goto sbd_read_meta_failure;
688 if (sl->sl_flags & SL_ZFS_META) {
689 if ((ret = sbd_read_zfs_meta(sl, io_buf, io_size,
690 starting_off)) != SBD_SUCCESS) {
691 rw_exit(&sl->sl_access_state_lock);
692 goto sbd_read_meta_failure;
694 } else {
695 vret = vn_rdwr(UIO_READ, vp, (caddr_t)io_buf, (ssize_t)io_size,
696 (offset_t)starting_off, UIO_SYSSPACE, FRSYNC,
697 RLIM64_INFINITY, CRED(), &resid);
699 if (vret || resid) {
700 ret = SBD_FILEIO_FAILURE | vret;
701 rw_exit(&sl->sl_access_state_lock);
702 goto sbd_read_meta_failure;
705 rw_exit(&sl->sl_access_state_lock);
707 bcopy(io_buf + data_off, buf, size);
708 ret = SBD_SUCCESS;
710 sbd_read_meta_failure:
711 kmem_free(io_buf, io_size);
712 return (ret);
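/*
 * Write metadata at the given byte offset using a read-modify-write of
 * the block-aligned region that contains it.
 */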
715 sbd_status_t
716 sbd_write_meta(sbd_lu_t *sl, uint64_t offset, uint64_t size, uint8_t *buf)
718 uint64_t meta_align;
719 uint64_t starting_off;
720 uint64_t data_off;
721 uint64_t ending_off;
722 uint64_t io_size;
723 uint8_t *io_buf;
724 vnode_t *vp;
725 sbd_status_t ret;
726 ssize_t resid;
727 int vret;
729 ASSERT(sl->sl_flags & SL_META_OPENED);
730 if (sl->sl_flags & SL_SHARED_META) {
731 meta_align = (((uint64_t)1) << sl->sl_data_blocksize_shift) - 1;
732 vp = sl->sl_data_vp;
733 ASSERT(vp);
734 } else {
735 meta_align = (((uint64_t)1) << sl->sl_meta_blocksize_shift) - 1;
736 if ((sl->sl_flags & SL_ZFS_META) == 0) {
737 vp = sl->sl_meta_vp;
738 ASSERT(vp);
741 starting_off = offset & ~(meta_align);
742 data_off = offset & meta_align;
743 ending_off = (offset + size + meta_align) & (~meta_align);
744 io_size = ending_off - starting_off;
745 io_buf = (uint8_t *)kmem_zalloc(io_size, KM_SLEEP);
746 ret = sbd_read_meta(sl, starting_off, io_size, io_buf);
747 if (ret != SBD_SUCCESS) {
748 goto sbd_write_meta_failure;
750 bcopy(buf, io_buf + data_off, size);
752  * Don't proceed if the device has been closed.
753 * This can occur on an access state change to standby or
754 * a delete. The writer lock is acquired before closing the
755 * lu. If importing, reading the metadata is valid, hence
756 * the check on SL_OP_IMPORT_LU.
758 rw_enter(&sl->sl_access_state_lock, RW_READER);
759 if ((sl->sl_flags & SL_MEDIA_LOADED) == 0 &&
760 sl->sl_trans_op != SL_OP_IMPORT_LU) {
761 rw_exit(&sl->sl_access_state_lock);
762 ret = SBD_FILEIO_FAILURE;
763 goto sbd_write_meta_failure;
765 if (sl->sl_flags & SL_ZFS_META) {
766 if ((ret = sbd_write_zfs_meta(sl, io_buf, io_size,
767 starting_off)) != SBD_SUCCESS) {
768 rw_exit(&sl->sl_access_state_lock);
769 goto sbd_write_meta_failure;
771 } else {
772 vret = vn_rdwr(UIO_WRITE, vp, (caddr_t)io_buf, (ssize_t)io_size,
773 (offset_t)starting_off, UIO_SYSSPACE, FDSYNC,
774 RLIM64_INFINITY, CRED(), &resid);
776 if (vret || resid) {
777 ret = SBD_FILEIO_FAILURE | vret;
778 rw_exit(&sl->sl_access_state_lock);
779 goto sbd_write_meta_failure;
782 rw_exit(&sl->sl_access_state_lock);
784 ret = SBD_SUCCESS;
786 sbd_write_meta_failure:
787 kmem_free(io_buf, io_size);
788 return (ret);
791 uint8_t
792 sbd_calc_sum(uint8_t *buf, int size)
794 uint8_t s = 0;
796 while (size > 0)
797 s += buf[--size];
799 return (s);
802 uint8_t
803 sbd_calc_section_sum(sm_section_hdr_t *sm, uint32_t sz)
805 uint8_t s, o;
807 o = sm->sms_chksum;
808 sm->sms_chksum = 0;
809 s = sbd_calc_sum((uint8_t *)sm, sz);
810 sm->sms_chksum = o;
812 return (s);
815 uint32_t
816 sbd_strlen(char *str, uint32_t maxlen)
818 uint32_t i;
820 for (i = 0; i < maxlen; i++) {
821 if (str[i] == 0)
822 return (i);
824 return (i);
827 void
828 sbd_swap_meta_start(sbd_meta_start_t *sm)
830 if (sm->sm_magic == SBD_MAGIC)
831 return;
832 sm->sm_magic = BSWAP_64(sm->sm_magic);
833 sm->sm_meta_size = BSWAP_64(sm->sm_meta_size);
834 sm->sm_meta_size_used = BSWAP_64(sm->sm_meta_size_used);
835 sm->sm_ver_major = BSWAP_16(sm->sm_ver_major);
836 sm->sm_ver_minor = BSWAP_16(sm->sm_ver_minor);
837 sm->sm_ver_subminor = BSWAP_16(sm->sm_ver_subminor);
840 void
841 sbd_swap_section_hdr(sm_section_hdr_t *sm)
843 if (sm->sms_data_order == SMS_DATA_ORDER)
844 return;
845 sm->sms_offset = BSWAP_64(sm->sms_offset);
846 sm->sms_size = BSWAP_32(sm->sms_size);
847 sm->sms_id = BSWAP_16(sm->sms_id);
848 sm->sms_chksum += SMS_DATA_ORDER - sm->sms_data_order;
849 sm->sms_data_order = SMS_DATA_ORDER;
852 void
853 sbd_swap_lu_info_1_0(sbd_lu_info_1_0_t *sli)
855 sbd_swap_section_hdr(&sli->sli_sms_header);
856 if (sli->sli_data_order == SMS_DATA_ORDER)
857 return;
858 sli->sli_sms_header.sms_chksum += SMS_DATA_ORDER - sli->sli_data_order;
859 sli->sli_data_order = SMS_DATA_ORDER;
860 sli->sli_total_store_size = BSWAP_64(sli->sli_total_store_size);
861 sli->sli_total_meta_size = BSWAP_64(sli->sli_total_meta_size);
862 sli->sli_lu_data_offset = BSWAP_64(sli->sli_lu_data_offset);
863 sli->sli_lu_data_size = BSWAP_64(sli->sli_lu_data_size);
864 sli->sli_flags = BSWAP_32(sli->sli_flags);
865 sli->sli_blocksize = BSWAP_16(sli->sli_blocksize);
868 void
869 sbd_swap_lu_info_1_1(sbd_lu_info_1_1_t *sli)
871 sbd_swap_section_hdr(&sli->sli_sms_header);
872 if (sli->sli_data_order == SMS_DATA_ORDER)
873 return;
874 sli->sli_sms_header.sms_chksum += SMS_DATA_ORDER - sli->sli_data_order;
875 sli->sli_data_order = SMS_DATA_ORDER;
876 sli->sli_flags = BSWAP_32(sli->sli_flags);
877 sli->sli_lu_size = BSWAP_64(sli->sli_lu_size);
878 sli->sli_meta_fname_offset = BSWAP_64(sli->sli_meta_fname_offset);
879 sli->sli_data_fname_offset = BSWAP_64(sli->sli_data_fname_offset);
880 sli->sli_serial_offset = BSWAP_64(sli->sli_serial_offset);
881 sli->sli_alias_offset = BSWAP_64(sli->sli_alias_offset);
882 sli->sli_mgmt_url_offset = BSWAP_64(sli->sli_mgmt_url_offset);
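/*
 * Walk the section headers starting just past the meta start block and
 * return the header whose sms_id matches, validating data order, offset
 * and size along the way. Returns SBD_NOT_FOUND if no such section exists.
 */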
885 sbd_status_t
886 sbd_load_section_hdr(sbd_lu_t *sl, sm_section_hdr_t *sms)
888 sm_section_hdr_t h;
889 uint64_t st;
890 sbd_status_t ret;
892 for (st = sl->sl_meta_offset + sizeof (sbd_meta_start_t);
893 st < sl->sl_meta_size_used; st += h.sms_size) {
894 if ((ret = sbd_read_meta(sl, st, sizeof (sm_section_hdr_t),
895 (uint8_t *)&h)) != SBD_SUCCESS) {
896 return (ret);
898 if (h.sms_data_order != SMS_DATA_ORDER) {
899 sbd_swap_section_hdr(&h);
901 if ((h.sms_data_order != SMS_DATA_ORDER) ||
902 (h.sms_offset != st) || (h.sms_size < sizeof (h)) ||
903 ((st + h.sms_size) > sl->sl_meta_size_used)) {
904 return (SBD_META_CORRUPTED);
906 if (h.sms_id == sms->sms_id) {
907 bcopy(&h, sms, sizeof (h));
908 return (SBD_SUCCESS);
912 return (SBD_NOT_FOUND);
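/*
 * Read and validate the sbd_meta_start_t at sl_meta_offset (magic,
 * checksum and major version) and pull the total/used metadata sizes
 * into the lu.
 */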
915 sbd_status_t
916 sbd_load_meta_start(sbd_lu_t *sl)
918 sbd_meta_start_t *sm;
919 sbd_status_t ret;
921 /* Fake meta params initially */
922 sl->sl_total_meta_size = (uint64_t)-1;
923 sl->sl_meta_size_used = sl->sl_meta_offset + sizeof (sbd_meta_start_t);
925 sm = kmem_zalloc(sizeof (*sm), KM_SLEEP);
926 ret = sbd_read_meta(sl, sl->sl_meta_offset, sizeof (*sm),
927 (uint8_t *)sm);
928 if (ret != SBD_SUCCESS) {
929 goto load_meta_start_failed;
932 if (sm->sm_magic != SBD_MAGIC) {
933 sbd_swap_meta_start(sm);
936 if ((sm->sm_magic != SBD_MAGIC) || (sbd_calc_sum((uint8_t *)sm,
937 sizeof (*sm) - 1) != sm->sm_chksum)) {
938 ret = SBD_META_CORRUPTED;
939 goto load_meta_start_failed;
942 if (sm->sm_ver_major != SBD_VER_MAJOR) {
943 ret = SBD_NOT_SUPPORTED;
944 goto load_meta_start_failed;
947 sl->sl_total_meta_size = sm->sm_meta_size;
948 sl->sl_meta_size_used = sm->sm_meta_size_used;
949 ret = SBD_SUCCESS;
951 load_meta_start_failed:
952 kmem_free(sm, sizeof (*sm));
953 return (ret);
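/*
 * Build and write a fresh sbd_meta_start_t (magic, version and the given
 * sizes) at sl_meta_offset.
 */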
956 sbd_status_t
957 sbd_write_meta_start(sbd_lu_t *sl, uint64_t meta_size, uint64_t meta_size_used)
959 sbd_meta_start_t *sm;
960 sbd_status_t ret;
962 sm = (sbd_meta_start_t *)kmem_zalloc(sizeof (sbd_meta_start_t),
963 KM_SLEEP);
965 sm->sm_magic = SBD_MAGIC;
966 sm->sm_meta_size = meta_size;
967 sm->sm_meta_size_used = meta_size_used;
968 sm->sm_ver_major = SBD_VER_MAJOR;
969 sm->sm_ver_minor = SBD_VER_MINOR;
970 sm->sm_ver_subminor = SBD_VER_SUBMINOR;
971 sm->sm_chksum = sbd_calc_sum((uint8_t *)sm, sizeof (*sm) - 1);
973 ret = sbd_write_meta(sl, sl->sl_meta_offset, sizeof (*sm),
974 (uint8_t *)sm);
975 kmem_free(sm, sizeof (*sm));
977 return (ret);
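/*
 * Locate (if necessary) and read an entire metadata section with the
 * given id, byte-swapping and checksum-verifying it. *ppsms is allocated
 * here if the caller passed NULL, and freed again on failure.
 */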
980 sbd_status_t
981 sbd_read_meta_section(sbd_lu_t *sl, sm_section_hdr_t **ppsms, uint16_t sms_id)
983 sbd_status_t ret;
984 sm_section_hdr_t sms;
985 int alloced = 0;
987 mutex_enter(&sl->sl_metadata_lock);
988 if (((*ppsms) == NULL) || ((*ppsms)->sms_offset == 0)) {
989 bzero(&sms, sizeof (sm_section_hdr_t));
990 sms.sms_id = sms_id;
991 if ((ret = sbd_load_section_hdr(sl, &sms)) != SBD_SUCCESS) {
992 mutex_exit(&sl->sl_metadata_lock);
993 return (ret);
994 } else {
995 if ((*ppsms) == NULL) {
996 *ppsms = (sm_section_hdr_t *)kmem_zalloc(
997 sms.sms_size, KM_SLEEP);
998 alloced = 1;
1000 bcopy(&sms, *ppsms, sizeof (sm_section_hdr_t));
1004 ret = sbd_read_meta(sl, (*ppsms)->sms_offset, (*ppsms)->sms_size,
1005 (uint8_t *)(*ppsms));
1006 if (ret == SBD_SUCCESS) {
1007 uint8_t s;
1008 if ((*ppsms)->sms_data_order != SMS_DATA_ORDER)
1009 sbd_swap_section_hdr(*ppsms);
1010 if ((*ppsms)->sms_id != SMS_ID_UNUSED) {
1011 s = sbd_calc_section_sum(*ppsms, (*ppsms)->sms_size);
1012 if (s != (*ppsms)->sms_chksum)
1013 ret = SBD_META_CORRUPTED;
1016 mutex_exit(&sl->sl_metadata_lock);
1018 if ((ret != SBD_SUCCESS) && alloced)
1019 kmem_free(*ppsms, sms.sms_size);
1020 return (ret);
1023 sbd_status_t
1024 sbd_load_section_hdr_unbuffered(sbd_lu_t *sl, sm_section_hdr_t *sms)
1026 sbd_status_t ret;
1029 * Bypass buffering and re-read the meta data from permanent storage.
1031 if (sl->sl_flags & SL_ZFS_META) {
1032 if ((ret = sbd_open_zfs_meta(sl)) != SBD_SUCCESS) {
1033 return (ret);
1036 /* Re-get the meta sizes into sl */
1037 if ((ret = sbd_load_meta_start(sl)) != SBD_SUCCESS) {
1038 return (ret);
1040 return (sbd_load_section_hdr(sl, sms));
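/*
 * Write (or rewrite) a metadata section. If the section already exists
 * with the same size it is overwritten in place; otherwise the old copy
 * is marked unused and free space is found (or the metadata area grown)
 * for the new copy. On failure an attempt is made to restore the old
 * section.
 */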
1043 sbd_status_t
1044 sbd_write_meta_section(sbd_lu_t *sl, sm_section_hdr_t *sms)
1046 sm_section_hdr_t t;
1047 uint64_t off, s;
1048 uint64_t unused_start;
1049 sbd_status_t ret;
1050 sbd_status_t write_meta_ret = SBD_SUCCESS;
1051 uint8_t *cb;
1052 int meta_size_changed = 0;
1053 sm_section_hdr_t sms_before_unused = {0};
1055 mutex_enter(&sl->sl_metadata_lock);
1056 write_meta_section_again:
1057 if (sms->sms_offset) {
1059 * If the section already exists and the size is the
1060 * same as this new data then overwrite in place. If
1061 * the sizes are different then mark the existing as
1062 * unused and look for free space.
1064 ret = sbd_read_meta(sl, sms->sms_offset, sizeof (t),
1065 (uint8_t *)&t);
1066 if (ret != SBD_SUCCESS) {
1067 mutex_exit(&sl->sl_metadata_lock);
1068 return (ret);
1070 if (t.sms_data_order != SMS_DATA_ORDER) {
1071 sbd_swap_section_hdr(&t);
1073 if (t.sms_id != sms->sms_id) {
1074 mutex_exit(&sl->sl_metadata_lock);
1075 return (SBD_INVALID_ARG);
1077 if (t.sms_size == sms->sms_size) {
1078 ret = sbd_write_meta(sl, sms->sms_offset,
1079 sms->sms_size, (uint8_t *)sms);
1080 mutex_exit(&sl->sl_metadata_lock);
1081 return (ret);
1083 sms_before_unused = t;
1085 t.sms_id = SMS_ID_UNUSED;
1087  * For unused sections we only use chksum of the header. For
1088 * all other sections, the chksum is for the entire section.
1090 t.sms_chksum = sbd_calc_section_sum(&t, sizeof (t));
1091 ret = sbd_write_meta(sl, t.sms_offset, sizeof (t),
1092 (uint8_t *)&t);
1093 if (ret != SBD_SUCCESS) {
1094 mutex_exit(&sl->sl_metadata_lock);
1095 return (ret);
1097 sms->sms_offset = 0;
1098 } else {
1099 /* Section location is unknown, search for it. */
1100 t.sms_id = sms->sms_id;
1101 t.sms_data_order = SMS_DATA_ORDER;
1102 ret = sbd_load_section_hdr(sl, &t);
1103 if (ret == SBD_SUCCESS) {
1104 sms->sms_offset = t.sms_offset;
1105 sms->sms_chksum =
1106 sbd_calc_section_sum(sms, sms->sms_size);
1107 goto write_meta_section_again;
1108 } else if (ret != SBD_NOT_FOUND) {
1109 mutex_exit(&sl->sl_metadata_lock);
1110 return (ret);
1115 * At this point we know that section does not already exist.
1116 * Find space large enough to hold the section or grow meta if
1117 * possible.
1119 unused_start = 0;
1120 s = 0; /* size of space found */
1123 * Search all sections for unused space of sufficient size.
1124 * The first one found is taken. Contiguous unused sections
1125 * will be combined.
1127 for (off = sl->sl_meta_offset + sizeof (sbd_meta_start_t);
1128 off < sl->sl_meta_size_used; off += t.sms_size) {
1129 ret = sbd_read_meta(sl, off, sizeof (t), (uint8_t *)&t);
1130 if (ret != SBD_SUCCESS) {
1131 mutex_exit(&sl->sl_metadata_lock);
1132 return (ret);
1134 if (t.sms_data_order != SMS_DATA_ORDER)
1135 sbd_swap_section_hdr(&t);
1136 if (t.sms_size == 0) {
1137 mutex_exit(&sl->sl_metadata_lock);
1138 return (SBD_META_CORRUPTED);
1140 if (t.sms_id == SMS_ID_UNUSED) {
1141 if (unused_start == 0)
1142 unused_start = off;
1144 * Calculate size of the unused space, break out
1145 * if it satisfies the requirement.
1147 s = t.sms_size - unused_start + off;
1148 if ((s == sms->sms_size) || (s >= (sms->sms_size +
1149 sizeof (t)))) {
1150 break;
1151 } else {
1152 s = 0;
1154 } else {
1155 unused_start = 0;
1159 off = (unused_start == 0) ? sl->sl_meta_size_used : unused_start;
1161 * If none found, how much room is at the end?
1162 * See if the data can be expanded.
1164 if (s == 0) {
1165 s = sl->sl_total_meta_size - off;
1166 if (s >= sms->sms_size || !(sl->sl_flags & SL_SHARED_META)) {
1167 s = sms->sms_size;
1168 meta_size_changed = 1;
1169 } else {
1170 s = 0;
1174 if (s == 0) {
1175 mutex_exit(&sl->sl_metadata_lock);
1176 return (SBD_ALLOC_FAILURE);
1179 sms->sms_offset = off;
1180 sms->sms_chksum = sbd_calc_section_sum(sms, sms->sms_size);
1182 * Since we may have to write more than one section (current +
1183 * any unused), use a combined buffer.
1185 cb = kmem_zalloc(s, KM_SLEEP);
1186 bcopy(sms, cb, sms->sms_size);
1187 if (s > sms->sms_size) {
1188 t.sms_offset = off + sms->sms_size;
1189 t.sms_size = s - sms->sms_size;
1190 t.sms_id = SMS_ID_UNUSED;
1191 t.sms_data_order = SMS_DATA_ORDER;
1192 t.sms_chksum = sbd_calc_section_sum(&t, sizeof (t));
1193 bcopy(&t, cb + sms->sms_size, sizeof (t));
1196 * Two write events & statuses take place. Failure writing the
1197 * meta section takes precedence, can possibly be rolled back,
1198 * & gets reported. Else return status from writing the meta start.
1200 ret = SBD_SUCCESS; /* Set a default, it's not always loaded below. */
1201 if (meta_size_changed) {
1202 uint64_t old_meta_size;
1203 uint64_t old_sz_used = sl->sl_meta_size_used; /* save a copy */
1204 old_meta_size = sl->sl_total_meta_size; /* save a copy */
1206 write_meta_ret = sbd_write_meta(sl, off, s, cb);
1207 if (write_meta_ret == SBD_SUCCESS) {
1208 sl->sl_meta_size_used = off + s;
1209 if (sl->sl_total_meta_size < sl->sl_meta_size_used) {
1210 uint64_t meta_align =
1211 (((uint64_t)1) <<
1212 sl->sl_meta_blocksize_shift) - 1;
1213 sl->sl_total_meta_size =
1214 (sl->sl_meta_size_used + meta_align) &
1215 (~meta_align);
1217 ret = sbd_write_meta_start(sl, sl->sl_total_meta_size,
1218 sl->sl_meta_size_used);
1219 if (ret != SBD_SUCCESS) {
1220 sl->sl_meta_size_used = old_sz_used;
1221 sl->sl_total_meta_size = old_meta_size;
1223 } else {
1224 sl->sl_meta_size_used = old_sz_used;
1225 sl->sl_total_meta_size = old_meta_size;
1227 } else {
1228 write_meta_ret = sbd_write_meta(sl, off, s, cb);
1230 if ((write_meta_ret != SBD_SUCCESS) &&
1231 (sms_before_unused.sms_offset != 0)) {
1232 sm_section_hdr_t new_sms;
1233 sm_section_hdr_t *unused_sms;
1235 * On failure writing the meta section attempt to undo
1236 * the change to unused.
1237 * Re-read the meta data from permanent storage.
1238 * The section id can't exist for undo to be possible.
1239 * Read what should be the entire old section data and
1240  * ensure the old data's still present by validating
1241  * against its old checksum.
1243 new_sms.sms_id = sms->sms_id;
1244 new_sms.sms_data_order = SMS_DATA_ORDER;
1245 if (sbd_load_section_hdr_unbuffered(sl, &new_sms) !=
1246 SBD_NOT_FOUND) {
1247 goto done;
1249 unused_sms = kmem_zalloc(sms_before_unused.sms_size, KM_SLEEP);
1250 if (sbd_read_meta(sl, sms_before_unused.sms_offset,
1251 sms_before_unused.sms_size,
1252 (uint8_t *)unused_sms) != SBD_SUCCESS) {
1253 goto done;
1255 if (unused_sms->sms_data_order != SMS_DATA_ORDER) {
1256 sbd_swap_section_hdr(unused_sms);
1258 if (unused_sms->sms_id != SMS_ID_UNUSED) {
1259 goto done;
1261 if (unused_sms->sms_offset != sms_before_unused.sms_offset) {
1262 goto done;
1264 if (unused_sms->sms_size != sms_before_unused.sms_size) {
1265 goto done;
1267 unused_sms->sms_id = sms_before_unused.sms_id;
1268 if (sbd_calc_section_sum(unused_sms,
1269 sizeof (sm_section_hdr_t)) !=
1270 sbd_calc_section_sum(&sms_before_unused,
1271 sizeof (sm_section_hdr_t))) {
1272 goto done;
1274 unused_sms->sms_chksum =
1275 sbd_calc_section_sum(unused_sms, unused_sms->sms_size);
1276 if (unused_sms->sms_chksum != sms_before_unused.sms_chksum) {
1277 goto done;
1279 (void) sbd_write_meta(sl, unused_sms->sms_offset,
1280 sizeof (sm_section_hdr_t), (uint8_t *)unused_sms);
1282 done:
1283 mutex_exit(&sl->sl_metadata_lock);
1284 kmem_free(cb, s);
1285 if (write_meta_ret != SBD_SUCCESS) {
1286 return (write_meta_ret);
1288 return (ret);
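/*
 * Serialize the lu configuration (file names, alias, mgmt URL, serial
 * number, vid/pid/rev and assorted flags) into an sbd_lu_info_1_1_t
 * section and write it to the metadata.
 */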
1291 sbd_status_t
1292 sbd_write_lu_info(sbd_lu_t *sl)
1294 sbd_lu_info_1_1_t *sli;
1295 int s;
1296 uint8_t *p;
1297 char *zvol_name = NULL;
1298 sbd_status_t ret;
1300 mutex_enter(&sl->sl_lock);
1302 s = sl->sl_serial_no_size;
1303 if ((sl->sl_flags & (SL_SHARED_META | SL_ZFS_META)) == 0) {
1304 if (sl->sl_data_filename) {
1305 s += strlen(sl->sl_data_filename) + 1;
1308 if (sl->sl_flags & SL_ZFS_META) {
1309 zvol_name = sbd_get_zvol_name(sl);
1310 s += strlen(zvol_name) + 1;
1312 if (sl->sl_alias) {
1313 s += strlen(sl->sl_alias) + 1;
1315 if (sl->sl_mgmt_url) {
1316 s += strlen(sl->sl_mgmt_url) + 1;
1318 sli = (sbd_lu_info_1_1_t *)kmem_zalloc(sizeof (*sli) + s, KM_SLEEP);
1319 p = sli->sli_buf;
1320 if ((sl->sl_flags & (SL_SHARED_META | SL_ZFS_META)) == 0) {
1321 sli->sli_flags |= SLI_SEPARATE_META;
1322 (void) strcpy((char *)p, sl->sl_data_filename);
1323 sli->sli_data_fname_offset =
1324 (uintptr_t)p - (uintptr_t)sli->sli_buf;
1325 sli->sli_flags |= SLI_DATA_FNAME_VALID;
1326 p += strlen(sl->sl_data_filename) + 1;
1328 if (sl->sl_flags & SL_ZFS_META) {
1329 (void) strcpy((char *)p, zvol_name);
1330 sli->sli_meta_fname_offset =
1331 (uintptr_t)p - (uintptr_t)sli->sli_buf;
1332 sli->sli_flags |= SLI_META_FNAME_VALID | SLI_ZFS_META;
1333 p += strlen(zvol_name) + 1;
1334 kmem_free(zvol_name, strlen(zvol_name) + 1);
1335 zvol_name = NULL;
1337 if (sl->sl_alias) {
1338 (void) strcpy((char *)p, sl->sl_alias);
1339 sli->sli_alias_offset =
1340 (uintptr_t)p - (uintptr_t)sli->sli_buf;
1341 sli->sli_flags |= SLI_ALIAS_VALID;
1342 p += strlen(sl->sl_alias) + 1;
1344 if (sl->sl_mgmt_url) {
1345 (void) strcpy((char *)p, sl->sl_mgmt_url);
1346 sli->sli_mgmt_url_offset =
1347 (uintptr_t)p - (uintptr_t)sli->sli_buf;
1348 sli->sli_flags |= SLI_MGMT_URL_VALID;
1349 p += strlen(sl->sl_mgmt_url) + 1;
1351 if (sl->sl_flags & SL_WRITE_PROTECTED) {
1352 sli->sli_flags |= SLI_WRITE_PROTECTED;
1354 if (sl->sl_flags & SL_SAVED_WRITE_CACHE_DISABLE) {
1355 sli->sli_flags |= SLI_WRITEBACK_CACHE_DISABLE;
1357 if (sl->sl_flags & SL_VID_VALID) {
1358 bcopy(sl->sl_vendor_id, sli->sli_vid, 8);
1359 sli->sli_flags |= SLI_VID_VALID;
1361 if (sl->sl_flags & SL_PID_VALID) {
1362 bcopy(sl->sl_product_id, sli->sli_pid, 16);
1363 sli->sli_flags |= SLI_PID_VALID;
1365 if (sl->sl_flags & SL_REV_VALID) {
1366 bcopy(sl->sl_revision, sli->sli_rev, 4);
1367 sli->sli_flags |= SLI_REV_VALID;
1369 if (sl->sl_serial_no_size) {
1370 bcopy(sl->sl_serial_no, p, sl->sl_serial_no_size);
1371 sli->sli_serial_size = sl->sl_serial_no_size;
1372 sli->sli_serial_offset =
1373 (uintptr_t)p - (uintptr_t)sli->sli_buf;
1374 sli->sli_flags |= SLI_SERIAL_VALID;
1375 p += sli->sli_serial_size;
1377 sli->sli_lu_size = sl->sl_lu_size;
1378 sli->sli_data_blocksize_shift = sl->sl_data_blocksize_shift;
1379 sli->sli_data_order = SMS_DATA_ORDER;
1380 bcopy(sl->sl_device_id, sli->sli_device_id, 20);
1382 sli->sli_sms_header.sms_size = sizeof (*sli) + s;
1383 sli->sli_sms_header.sms_id = SMS_ID_LU_INFO_1_1;
1384 sli->sli_sms_header.sms_data_order = SMS_DATA_ORDER;
1386 mutex_exit(&sl->sl_lock);
1387 ret = sbd_write_meta_section(sl, (sm_section_hdr_t *)sli);
1388 kmem_free(sli, sizeof (*sli) + s);
1389 return (ret);
1393 * Will scribble SL_UNMAP_ENABLED into sl_flags if we succeed.
1395 static void
1396 do_unmap_setup(sbd_lu_t *sl)
1398 ASSERT((sl->sl_flags & SL_UNMAP_ENABLED) == 0);
1400 if ((sl->sl_flags & SL_ZFS_META) == 0)
1401 return; /* No UNMAP for you. */
1403 sl->sl_flags |= SL_UNMAP_ENABLED;
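/*
 * Fill in the stmf_lu_t entry points and attributes for this lu and
 * register it with the STMF framework. Standby lus get their access
 * state set before registration, and the meta (or data) file name is
 * passed as the proxy registration argument.
 */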
1407 sbd_populate_and_register_lu(sbd_lu_t *sl, uint32_t *err_ret)
1409 stmf_lu_t *lu = sl->sl_lu;
1410 stmf_status_t ret;
1412 do_unmap_setup(sl);
1414 lu->lu_id = (scsi_devid_desc_t *)sl->sl_device_id;
1415 if (sl->sl_alias) {
1416 lu->lu_alias = sl->sl_alias;
1417 } else {
1418 lu->lu_alias = sl->sl_name;
1420 if (sl->sl_access_state == SBD_LU_STANDBY) {
1421 /* call set access state */
1422 ret = stmf_set_lu_access(lu, STMF_LU_STANDBY);
1423 if (ret != STMF_SUCCESS) {
1424 *err_ret = SBD_RET_ACCESS_STATE_FAILED;
1425 return (EIO);
1428 /* set proxy_reg_cb_arg to meta filename */
1429 if (sl->sl_meta_filename) {
1430 lu->lu_proxy_reg_arg = sl->sl_meta_filename;
1431 lu->lu_proxy_reg_arg_len = strlen(sl->sl_meta_filename) + 1;
1432 } else {
1433 lu->lu_proxy_reg_arg = sl->sl_data_filename;
1434 lu->lu_proxy_reg_arg_len = strlen(sl->sl_data_filename) + 1;
1436 lu->lu_lp = sbd_lp;
1437 lu->lu_task_alloc = sbd_task_alloc;
1438 lu->lu_new_task = sbd_new_task;
1439 lu->lu_dbuf_xfer_done = sbd_dbuf_xfer_done;
1440 lu->lu_send_status_done = sbd_send_status_done;
1441 lu->lu_task_free = sbd_task_free;
1442 lu->lu_abort = sbd_abort;
1443 lu->lu_dbuf_free = sbd_dbuf_free;
1444 lu->lu_ctl = sbd_ctl;
1445 lu->lu_info = sbd_info;
1446 sl->sl_state = STMF_STATE_OFFLINE;
1448 if ((ret = stmf_register_lu(lu)) != STMF_SUCCESS) {
1449 stmf_trace(0, "Failed to register with framework, ret=%llx",
1450 ret);
1451 if (ret == STMF_ALREADY) {
1452 *err_ret = SBD_RET_GUID_ALREADY_REGISTERED;
1454 return (EIO);
1457 *err_ret = 0;
1458 return (0);
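/*
 * Open the backing store for the lu's data, validate its type and size
 * against the requested lu size, and record the resulting geometry in
 * the lu. For zvol-backed lus the minor number and volume parameters are
 * also fetched to enable the direct zvol path.
 */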
1462 sbd_open_data_file(sbd_lu_t *sl, uint32_t *err_ret, int lu_size_valid,
1463 int vp_valid, int keep_open)
1465 int ret;
1466 int flag;
1467 ulong_t nbits;
1468 uint64_t supported_size;
1469 vattr_t vattr;
1470 enum vtype vt;
1471 struct dk_cinfo dki;
1472 int unused;
1474 mutex_enter(&sl->sl_lock);
1475 if (vp_valid) {
1476 goto odf_over_open;
1478 if (sl->sl_data_filename[0] != '/') {
1479 *err_ret = SBD_RET_DATA_PATH_NOT_ABSOLUTE;
1480 mutex_exit(&sl->sl_lock);
1481 return (EINVAL);
1483 if ((ret = lookupname(sl->sl_data_filename, UIO_SYSSPACE, FOLLOW,
1484 NULLVPP, &sl->sl_data_vp)) != 0) {
1485 *err_ret = SBD_RET_DATA_FILE_LOOKUP_FAILED;
1486 mutex_exit(&sl->sl_lock);
1487 return (ret);
1489 sl->sl_data_vtype = vt = sl->sl_data_vp->v_type;
1490 VN_RELE(sl->sl_data_vp);
1491 if ((vt != VREG) && (vt != VCHR) && (vt != VBLK)) {
1492 *err_ret = SBD_RET_WRONG_DATA_FILE_TYPE;
1493 mutex_exit(&sl->sl_lock);
1494 return (EINVAL);
1496 if (sl->sl_flags & SL_WRITE_PROTECTED) {
1497 flag = FREAD | FOFFMAX;
1498 } else {
1499 flag = FREAD | FWRITE | FOFFMAX | FEXCL;
1501 if ((ret = vn_open(sl->sl_data_filename, UIO_SYSSPACE, flag, 0,
1502 &sl->sl_data_vp, 0, 0)) != 0) {
1503 *err_ret = SBD_RET_DATA_FILE_OPEN_FAILED;
1504 mutex_exit(&sl->sl_lock);
1505 return (ret);
1507 odf_over_open:
1508 vattr.va_mask = AT_SIZE;
1509 if ((ret = VOP_GETATTR(sl->sl_data_vp, &vattr, 0, CRED(), NULL)) != 0) {
1510 *err_ret = SBD_RET_DATA_FILE_GETATTR_FAILED;
1511 goto odf_close_data_and_exit;
1513 if ((vt != VREG) && (vattr.va_size == 0)) {
1515  * It's a zero byte block or char device. This cannot be
1516 * a raw disk.
1518 *err_ret = SBD_RET_WRONG_DATA_FILE_TYPE;
1519 ret = EINVAL;
1520 goto odf_close_data_and_exit;
1522 /* sl_data_readable size includes any metadata. */
1523 sl->sl_data_readable_size = vattr.va_size;
1525 if (VOP_PATHCONF(sl->sl_data_vp, _PC_FILESIZEBITS, &nbits,
1526 CRED(), NULL) != 0) {
1527 nbits = 0;
1529 /* nbits cannot be greater than 64 */
1530 sl->sl_data_fs_nbits = (uint8_t)nbits;
1531 if (lu_size_valid) {
1532 sl->sl_total_data_size = sl->sl_lu_size;
1533 if (sl->sl_flags & SL_SHARED_META) {
1534 sl->sl_total_data_size += SHARED_META_DATA_SIZE;
1536 if ((nbits > 0) && (nbits < 64)) {
1538 * The expression below is correct only if nbits is
1539 * positive and less than 64.
1541 supported_size = (((uint64_t)1) << nbits) - 1;
1542 if (sl->sl_total_data_size > supported_size) {
1543 *err_ret = SBD_RET_SIZE_NOT_SUPPORTED_BY_FS;
1544 ret = EINVAL;
1545 goto odf_close_data_and_exit;
1548 } else {
1549 sl->sl_total_data_size = vattr.va_size;
1550 if (sl->sl_flags & SL_SHARED_META) {
1551 if (vattr.va_size > SHARED_META_DATA_SIZE) {
1552 sl->sl_lu_size = vattr.va_size -
1553 SHARED_META_DATA_SIZE;
1554 } else {
1555 *err_ret = SBD_RET_FILE_SIZE_ERROR;
1556 ret = EINVAL;
1557 goto odf_close_data_and_exit;
1559 } else {
1560 sl->sl_lu_size = vattr.va_size;
1563 if (sl->sl_lu_size < SBD_MIN_LU_SIZE) {
1564 *err_ret = SBD_RET_FILE_SIZE_ERROR;
1565 ret = EINVAL;
1566 goto odf_close_data_and_exit;
1568 if (sl->sl_lu_size &
1569 ((((uint64_t)1) << sl->sl_data_blocksize_shift) - 1)) {
1570 *err_ret = SBD_RET_FILE_ALIGN_ERROR;
1571 ret = EINVAL;
1572 goto odf_close_data_and_exit;
1575 * Get the minor device for direct zvol access
1577 if (sl->sl_flags & SL_ZFS_META) {
1578 if ((ret = VOP_IOCTL(sl->sl_data_vp, DKIOCINFO, (intptr_t)&dki,
1579 FKIOCTL, kcred, &unused, NULL)) != 0) {
1580 cmn_err(CE_WARN, "ioctl(DKIOCINFO) failed %d", ret);
1581 /* zvol reserves 0, so this would fail later */
1582 sl->sl_zvol_minor = 0;
1583 } else {
1584 sl->sl_zvol_minor = dki.dki_unit;
1585 if (sbd_zvol_get_volume_params(sl) == 0)
1586 sl->sl_flags |= SL_CALL_ZVOL;
1589 sl->sl_flags |= SL_MEDIA_LOADED;
1590 mutex_exit(&sl->sl_lock);
1591 return (0);
1593 odf_close_data_and_exit:
1594 if (!keep_open) {
1595 (void) VOP_CLOSE(sl->sl_data_vp, flag, 1, 0, CRED(), NULL);
1596 VN_RELE(sl->sl_data_vp);
1598 mutex_exit(&sl->sl_lock);
1599 return (ret);
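/*
 * Release the metadata backing (ZFS meta buffer or meta vnode) and close
 * the data vnode, clearing SL_META_OPENED and SL_MEDIA_LOADED.
 */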
1602 void
1603 sbd_close_lu(sbd_lu_t *sl)
1605 int flag;
1607 if (((sl->sl_flags & SL_SHARED_META) == 0) &&
1608 (sl->sl_flags & SL_META_OPENED)) {
1609 if (sl->sl_flags & SL_ZFS_META) {
1610 rw_destroy(&sl->sl_zfs_meta_lock);
1611 if (sl->sl_zfs_meta) {
1612 kmem_free(sl->sl_zfs_meta, ZAP_MAXVALUELEN / 2);
1613 sl->sl_zfs_meta = NULL;
1615 } else {
1616 flag = FREAD | FWRITE | FOFFMAX | FEXCL;
1617 (void) VOP_CLOSE(sl->sl_meta_vp, flag, 1, 0,
1618 CRED(), NULL);
1619 VN_RELE(sl->sl_meta_vp);
1621 sl->sl_flags &= ~SL_META_OPENED;
1623 if (sl->sl_flags & SL_MEDIA_LOADED) {
1624 if (sl->sl_flags & SL_WRITE_PROTECTED) {
1625 flag = FREAD | FOFFMAX;
1626 } else {
1627 flag = FREAD | FWRITE | FOFFMAX | FEXCL;
1629 (void) VOP_CLOSE(sl->sl_data_vp, flag, 1, 0, CRED(), NULL);
1630 VN_RELE(sl->sl_data_vp);
1631 sl->sl_flags &= ~SL_MEDIA_LOADED;
1632 if (sl->sl_flags & SL_SHARED_META) {
1633 sl->sl_flags &= ~SL_META_OPENED;
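/*
 * Transition an lu to the ALUA standby state: set the access state with
 * the framework, then close the backing store under the access state
 * writer lock so no I/O is left in flight against it.
 */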
1639 sbd_set_lu_standby(sbd_set_lu_standby_t *stlu, uint32_t *err_ret)
1641 sbd_lu_t *sl;
1642 sbd_status_t sret;
1643 stmf_status_t stret;
1644 uint8_t old_access_state;
1646 sret = sbd_find_and_lock_lu(stlu->stlu_guid, NULL,
1647 SL_OP_MODIFY_LU, &sl);
1648 if (sret != SBD_SUCCESS) {
1649 if (sret == SBD_BUSY) {
1650 *err_ret = SBD_RET_LU_BUSY;
1651 return (EBUSY);
1652 } else if (sret == SBD_NOT_FOUND) {
1653 *err_ret = SBD_RET_NOT_FOUND;
1654 return (ENOENT);
1656 *err_ret = SBD_RET_ACCESS_STATE_FAILED;
1657 return (EIO);
1660 old_access_state = sl->sl_access_state;
1661 sl->sl_access_state = SBD_LU_TRANSITION_TO_STANDBY;
1662 stret = stmf_set_lu_access((stmf_lu_t *)sl->sl_lu, STMF_LU_STANDBY);
1663 if (stret != STMF_SUCCESS) {
1664 sl->sl_trans_op = SL_OP_NONE;
1665 *err_ret = SBD_RET_ACCESS_STATE_FAILED;
1666 sl->sl_access_state = old_access_state;
1667 return (EIO);
1671  * Acquire the writer lock here to ensure we're not pulling
1672  * the rug out from under any vn_rdwr to the backing store.
1674 rw_enter(&sl->sl_access_state_lock, RW_WRITER);
1675 sbd_close_lu(sl);
1676 rw_exit(&sl->sl_access_state_lock);
1678 sl->sl_trans_op = SL_OP_NONE;
1679 return (0);
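/*
 * Tear down an lu: close the backing store, unlink it from the global
 * list, destroy its locks and free all allocations. Returns the
 * passed-in errno so callers can use it directly in their error paths.
 */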
1683 sbd_close_delete_lu(sbd_lu_t *sl, int ret)
1687  * Acquire the writer lock here to ensure we're not pulling
1688  * the rug out from under any vn_rdwr to the backing store.
1690 rw_enter(&sl->sl_access_state_lock, RW_WRITER);
1691 sbd_close_lu(sl);
1692 rw_exit(&sl->sl_access_state_lock);
1694 if (sl->sl_flags & SL_LINKED)
1695 sbd_unlink_lu(sl);
1696 mutex_destroy(&sl->sl_metadata_lock);
1697 mutex_destroy(&sl->sl_lock);
1698 rw_destroy(&sl->sl_pgr->pgr_lock);
1699 rw_destroy(&sl->sl_access_state_lock);
1700 if (sl->sl_serial_no_alloc_size) {
1701 kmem_free(sl->sl_serial_no, sl->sl_serial_no_alloc_size);
1703 if (sl->sl_data_fname_alloc_size) {
1704 kmem_free(sl->sl_data_filename, sl->sl_data_fname_alloc_size);
1706 if (sl->sl_alias_alloc_size) {
1707 kmem_free(sl->sl_alias, sl->sl_alias_alloc_size);
1709 if (sl->sl_mgmt_url_alloc_size) {
1710 kmem_free(sl->sl_mgmt_url, sl->sl_mgmt_url_alloc_size);
1712 stmf_free(sl->sl_lu);
1713 return (ret);
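/*
 * Handle SBD_IOCTL_CREATE_AND_REGISTER_LU: validate the request,
 * allocate and initialize the sbd_lu_t, open the backing store, lay
 * down the initial metadata and register the new lu with STMF.
 */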
1717 sbd_create_register_lu(sbd_create_and_reg_lu_t *slu, int struct_sz,
1718 uint32_t *err_ret)
1720 char *namebuf;
1721 sbd_lu_t *sl;
1722 stmf_lu_t *lu;
1723 char *p;
1724 int sz;
1725 int alloc_sz;
1726 int ret = EIO;
1727 int flag;
1728 int wcd = 0;
1729 uint32_t hid = 0;
1730 enum vtype vt;
1732 sz = struct_sz - sizeof (sbd_create_and_reg_lu_t) + 8 + 1;
1734 *err_ret = 0;
1736 	/* Let's validate various offsets */
1737 if (((slu->slu_meta_fname_valid) &&
1738 (slu->slu_meta_fname_off >= sz)) ||
1739 (slu->slu_data_fname_off >= sz) ||
1740 ((slu->slu_alias_valid) &&
1741 (slu->slu_alias_off >= sz)) ||
1742 ((slu->slu_mgmt_url_valid) &&
1743 (slu->slu_mgmt_url_off >= sz)) ||
1744 ((slu->slu_serial_valid) &&
1745 ((slu->slu_serial_off + slu->slu_serial_size) >= sz))) {
1746 return (EINVAL);
1749 namebuf = kmem_zalloc(sz, KM_SLEEP);
1750 bcopy(slu->slu_buf, namebuf, sz - 1);
1751 namebuf[sz - 1] = 0;
1753 alloc_sz = sizeof (sbd_lu_t) + sizeof (sbd_pgr_t);
1754 if (slu->slu_meta_fname_valid) {
1755 alloc_sz += strlen(namebuf + slu->slu_meta_fname_off) + 1;
1757 alloc_sz += strlen(namebuf + slu->slu_data_fname_off) + 1;
1758 if (slu->slu_alias_valid) {
1759 alloc_sz += strlen(namebuf + slu->slu_alias_off) + 1;
1761 if (slu->slu_mgmt_url_valid) {
1762 alloc_sz += strlen(namebuf + slu->slu_mgmt_url_off) + 1;
1764 if (slu->slu_serial_valid) {
1765 alloc_sz += slu->slu_serial_size;
1768 lu = (stmf_lu_t *)stmf_alloc(STMF_STRUCT_STMF_LU, alloc_sz, 0);
1769 if (lu == NULL) {
1770 kmem_free(namebuf, sz);
1771 return (ENOMEM);
1773 sl = (sbd_lu_t *)lu->lu_provider_private;
1774 bzero(sl, alloc_sz);
1775 sl->sl_lu = lu;
1776 sl->sl_alloc_size = alloc_sz;
1777 sl->sl_pgr = (sbd_pgr_t *)(sl + 1);
1778 rw_init(&sl->sl_pgr->pgr_lock, NULL, RW_DRIVER, NULL);
1779 mutex_init(&sl->sl_lock, NULL, MUTEX_DRIVER, NULL);
1780 mutex_init(&sl->sl_metadata_lock, NULL, MUTEX_DRIVER, NULL);
1781 rw_init(&sl->sl_access_state_lock, NULL, RW_DRIVER, NULL);
1782 p = ((char *)sl) + sizeof (sbd_lu_t) + sizeof (sbd_pgr_t);
1783 sl->sl_data_filename = p;
1784 (void) strcpy(sl->sl_data_filename, namebuf + slu->slu_data_fname_off);
1785 p += strlen(sl->sl_data_filename) + 1;
1786 sl->sl_meta_offset = SBD_META_OFFSET;
1787 sl->sl_access_state = SBD_LU_ACTIVE;
1788 if (slu->slu_meta_fname_valid) {
1789 sl->sl_alias = sl->sl_name = sl->sl_meta_filename = p;
1790 (void) strcpy(sl->sl_meta_filename, namebuf +
1791 slu->slu_meta_fname_off);
1792 p += strlen(sl->sl_meta_filename) + 1;
1793 } else {
1794 sl->sl_alias = sl->sl_name = sl->sl_data_filename;
1795 if (sbd_is_zvol(sl->sl_data_filename)) {
1796 sl->sl_flags |= SL_ZFS_META;
1797 sl->sl_meta_offset = 0;
1798 } else {
1799 sl->sl_flags |= SL_SHARED_META;
1800 sl->sl_data_offset = SHARED_META_DATA_SIZE;
1801 sl->sl_total_meta_size = SHARED_META_DATA_SIZE;
1802 sl->sl_meta_size_used = 0;
1805 if (slu->slu_alias_valid) {
1806 sl->sl_alias = p;
1807 (void) strcpy(p, namebuf + slu->slu_alias_off);
1808 p += strlen(sl->sl_alias) + 1;
1810 if (slu->slu_mgmt_url_valid) {
1811 sl->sl_mgmt_url = p;
1812 (void) strcpy(p, namebuf + slu->slu_mgmt_url_off);
1813 p += strlen(sl->sl_mgmt_url) + 1;
1815 if (slu->slu_serial_valid) {
1816 sl->sl_serial_no = (uint8_t *)p;
1817 bcopy(namebuf + slu->slu_serial_off, sl->sl_serial_no,
1818 slu->slu_serial_size);
1819 sl->sl_serial_no_size = slu->slu_serial_size;
1820 p += slu->slu_serial_size;
1822 kmem_free(namebuf, sz);
1823 if (slu->slu_vid_valid) {
1824 bcopy(slu->slu_vid, sl->sl_vendor_id, 8);
1825 sl->sl_flags |= SL_VID_VALID;
1827 if (slu->slu_pid_valid) {
1828 bcopy(slu->slu_pid, sl->sl_product_id, 16);
1829 sl->sl_flags |= SL_PID_VALID;
1831 if (slu->slu_rev_valid) {
1832 bcopy(slu->slu_rev, sl->sl_revision, 4);
1833 sl->sl_flags |= SL_REV_VALID;
1835 if (slu->slu_write_protected) {
1836 sl->sl_flags |= SL_WRITE_PROTECTED;
1838 if (slu->slu_blksize_valid) {
1839 if ((slu->slu_blksize & (slu->slu_blksize - 1)) ||
1840 (slu->slu_blksize > (32 * 1024)) ||
1841 (slu->slu_blksize == 0)) {
1842 *err_ret = SBD_RET_INVALID_BLKSIZE;
1843 ret = EINVAL;
1844 goto scm_err_out;
1846 while ((1 << sl->sl_data_blocksize_shift) != slu->slu_blksize) {
1847 sl->sl_data_blocksize_shift++;
1849 } else {
1850 sl->sl_data_blocksize_shift = 9; /* 512 by default */
1851 slu->slu_blksize = 512;
1854 	/* Now let's start creating the metadata */
1855 sl->sl_trans_op = SL_OP_CREATE_REGISTER_LU;
1856 if (sbd_link_lu(sl) != SBD_SUCCESS) {
1857 *err_ret = SBD_RET_FILE_ALREADY_REGISTERED;
1858 ret = EALREADY;
1859 goto scm_err_out;
1862 	/* First, focus on the data store */
1863 if (slu->slu_lu_size_valid) {
1864 sl->sl_lu_size = slu->slu_lu_size;
1866 ret = sbd_open_data_file(sl, err_ret, slu->slu_lu_size_valid, 0, 0);
1867 slu->slu_ret_filesize_nbits = sl->sl_data_fs_nbits;
1868 slu->slu_lu_size = sl->sl_lu_size;
1869 if (ret) {
1870 goto scm_err_out;
1874 * Check if we were explicitly asked to disable/enable write
1875 * cache on the device, otherwise get current device setting.
1877 if (slu->slu_writeback_cache_disable_valid) {
1878 if (slu->slu_writeback_cache_disable) {
1880 * Set write cache disable on the device. If it fails,
1881 * we'll support it using sync/flush.
1883 (void) sbd_wcd_set(1, sl);
1884 wcd = 1;
1885 } else {
1887 * Set write cache enable on the device. If it fails,
1888 * return an error.
1890 if (sbd_wcd_set(0, sl) != SBD_SUCCESS) {
1891 *err_ret = SBD_RET_WRITE_CACHE_SET_FAILED;
1892 ret = EFAULT;
1893 goto scm_err_out;
1896 } else {
1897 sbd_wcd_get(&wcd, sl);
1900 if (wcd) {
1901 sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE |
1902 SL_SAVED_WRITE_CACHE_DISABLE;
1905 if (sl->sl_flags & SL_SHARED_META) {
1906 goto over_meta_open;
1908 if (sl->sl_flags & SL_ZFS_META) {
1909 if (sbd_create_zfs_meta_object(sl) != SBD_SUCCESS) {
1910 *err_ret = SBD_RET_ZFS_META_CREATE_FAILED;
1911 ret = ENOMEM;
1912 goto scm_err_out;
1914 sl->sl_meta_blocksize_shift = 0;
1915 goto over_meta_create;
1917 if ((ret = lookupname(sl->sl_meta_filename, UIO_SYSSPACE, FOLLOW,
1918 NULLVPP, &sl->sl_meta_vp)) != 0) {
1919 *err_ret = SBD_RET_META_FILE_LOOKUP_FAILED;
1920 goto scm_err_out;
1922 sl->sl_meta_vtype = vt = sl->sl_meta_vp->v_type;
1923 VN_RELE(sl->sl_meta_vp);
1924 if ((vt != VREG) && (vt != VCHR) && (vt != VBLK)) {
1925 *err_ret = SBD_RET_WRONG_META_FILE_TYPE;
1926 ret = EINVAL;
1927 goto scm_err_out;
1929 if (vt == VREG) {
1930 sl->sl_meta_blocksize_shift = 0;
1931 } else {
1932 sl->sl_meta_blocksize_shift = 9;
1934 flag = FREAD | FWRITE | FOFFMAX | FEXCL;
1935 if ((ret = vn_open(sl->sl_meta_filename, UIO_SYSSPACE, flag, 0,
1936 &sl->sl_meta_vp, 0, 0)) != 0) {
1937 *err_ret = SBD_RET_META_FILE_OPEN_FAILED;
1938 goto scm_err_out;
1940 over_meta_create:
1941 sl->sl_total_meta_size = sl->sl_meta_offset + sizeof (sbd_meta_start_t);
1942 sl->sl_total_meta_size +=
1943 (((uint64_t)1) << sl->sl_meta_blocksize_shift) - 1;
1944 sl->sl_total_meta_size &=
1945 ~((((uint64_t)1) << sl->sl_meta_blocksize_shift) - 1);
1946 sl->sl_meta_size_used = 0;
1947 over_meta_open:
1948 sl->sl_flags |= SL_META_OPENED;
1950 sl->sl_device_id[3] = 16;
1951 if (slu->slu_guid_valid) {
1952 sl->sl_device_id[0] = 0xf1;
1953 sl->sl_device_id[1] = 3;
1954 sl->sl_device_id[2] = 0;
1955 bcopy(slu->slu_guid, sl->sl_device_id + 4, 16);
1956 } else {
1957 if (slu->slu_host_id_valid)
1958 hid = slu->slu_host_id;
1959 if (!slu->slu_company_id_valid)
1960 slu->slu_company_id = COMPANY_ID_SUN;
1961 if (stmf_scsilib_uniq_lu_id2(slu->slu_company_id, hid,
1962 (scsi_devid_desc_t *)&sl->sl_device_id[0]) !=
1963 STMF_SUCCESS) {
1964 *err_ret = SBD_RET_META_CREATION_FAILED;
1965 ret = EIO;
1966 goto scm_err_out;
1968 bcopy(sl->sl_device_id + 4, slu->slu_guid, 16);
1971 	/* Let's create the metadata now */
1972 mutex_enter(&sl->sl_metadata_lock);
1973 if (sbd_write_meta_start(sl, sl->sl_total_meta_size,
1974 sizeof (sbd_meta_start_t)) != SBD_SUCCESS) {
1975 mutex_exit(&sl->sl_metadata_lock);
1976 *err_ret = SBD_RET_META_CREATION_FAILED;
1977 ret = EIO;
1978 goto scm_err_out;
1980 mutex_exit(&sl->sl_metadata_lock);
1981 sl->sl_meta_size_used = sl->sl_meta_offset + sizeof (sbd_meta_start_t);
1983 if (sbd_write_lu_info(sl) != SBD_SUCCESS) {
1984 *err_ret = SBD_RET_META_CREATION_FAILED;
1985 ret = EIO;
1986 goto scm_err_out;
1989 if (sbd_pgr_meta_init(sl) != SBD_SUCCESS) {
1990 *err_ret = SBD_RET_META_CREATION_FAILED;
1991 ret = EIO;
1992 goto scm_err_out;
1996 * Update the zvol separately as this need only be called upon
1997 * completion of the metadata initialization.
1999 if (sl->sl_flags & SL_ZFS_META) {
2000 if (sbd_update_zfs_prop(sl) != SBD_SUCCESS) {
2001 *err_ret = SBD_RET_META_CREATION_FAILED;
2002 ret = EIO;
2003 goto scm_err_out;
2007 ret = sbd_populate_and_register_lu(sl, err_ret);
2008 if (ret) {
2009 goto scm_err_out;
2012 sl->sl_trans_op = SL_OP_NONE;
2013 atomic_inc_32(&sbd_lu_count);
2014 return (0);
2016 scm_err_out:
2017 return (sbd_close_delete_lu(sl, ret));
2020 stmf_status_t
2021 sbd_proxy_msg(uint8_t *luid, void *proxy_arg, uint32_t proxy_arg_len,
2022 uint32_t type)
2024 switch (type) {
2025 case STMF_MSG_LU_ACTIVE:
2026 return (sbd_proxy_reg_lu(luid, proxy_arg,
2027 proxy_arg_len));
2028 case STMF_MSG_LU_REGISTER:
2029 return (sbd_proxy_reg_lu(luid, proxy_arg,
2030 proxy_arg_len));
2031 case STMF_MSG_LU_DEREGISTER:
2032 return (sbd_proxy_dereg_lu(luid, proxy_arg,
2033 proxy_arg_len));
2034 default:
2035 return (STMF_INVALID_ARG);
2041 * register a standby logical unit
2042 * proxy_reg_arg contains the meta filename
2044 stmf_status_t
2045 sbd_proxy_reg_lu(uint8_t *luid, void *proxy_reg_arg, uint32_t proxy_reg_arg_len)
2047 sbd_lu_t *sl;
2048 sbd_status_t sret;
2049 sbd_create_standby_lu_t *stlu;
2050 int alloc_sz;
2051 uint32_t err_ret = 0;
2052 stmf_status_t stret = STMF_SUCCESS;
2054 if (luid == NULL) {
2055 return (STMF_INVALID_ARG);
2058 do {
2059 sret = sbd_find_and_lock_lu(luid, NULL, SL_OP_MODIFY_LU, &sl);
2060 } while (sret == SBD_BUSY);
2062 if (sret == SBD_NOT_FOUND) {
2063 alloc_sz = sizeof (*stlu) + proxy_reg_arg_len - 8;
2064 stlu = (sbd_create_standby_lu_t *)kmem_zalloc(alloc_sz,
2065 KM_SLEEP);
2066 bcopy(luid, stlu->stlu_guid, 16);
2067 if (proxy_reg_arg_len) {
2068 bcopy(proxy_reg_arg, stlu->stlu_meta_fname,
2069 proxy_reg_arg_len);
2070 stlu->stlu_meta_fname_size = proxy_reg_arg_len;
2072 if (sbd_create_standby_lu(stlu, &err_ret) != 0) {
2073 cmn_err(CE_WARN,
2074 "Unable to create standby logical unit for %s",
2075 stlu->stlu_meta_fname);
2076 stret = STMF_FAILURE;
2078 kmem_free(stlu, alloc_sz);
2079 return (stret);
2080 } else if (sret == SBD_SUCCESS) {
2081 /*
2082 * if the lu is already registered, then the lu should now
2083 * be in standby mode
2084 */
2085 sbd_it_data_t *it;
2086 if (sl->sl_access_state != SBD_LU_STANDBY) {
2087 mutex_enter(&sl->sl_lock);
2088 sl->sl_access_state = SBD_LU_STANDBY;
2089 for (it = sl->sl_it_list; it != NULL;
2090 it = it->sbd_it_next) {
2091 it->sbd_it_ua_conditions |=
2092 SBD_UA_ASYMMETRIC_ACCESS_CHANGED;
2093 it->sbd_it_flags &=
2094 ~SBD_IT_HAS_SCSI2_RESERVATION;
2095 sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
2097 mutex_exit(&sl->sl_lock);
2098 sbd_pgr_reset(sl);
2100 sl->sl_trans_op = SL_OP_NONE;
2101 } else {
2102 cmn_err(CE_WARN, "could not find and lock logical unit");
2103 stret = STMF_FAILURE;
2105 out:
2106 return (stret);
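/*
 * Deregister a proxied logical unit: build a delete request from the
 * 16-byte GUID and hand it to sbd_delete_lu(). The proxy registration
 * argument is unused here.
 */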
2109 /* ARGSUSED */
2110 stmf_status_t
2111 sbd_proxy_dereg_lu(uint8_t *luid, void *proxy_reg_arg,
2112 uint32_t proxy_reg_arg_len)
2114 sbd_delete_lu_t dlu = {0};
2115 uint32_t err_ret;
2117 if (luid == NULL) {
2118 cmn_err(CE_WARN, "de-register lu request had null luid");
2119 return (STMF_INVALID_ARG);
2122 bcopy(luid, &dlu.dlu_guid, 16);
2124 if (sbd_delete_lu(&dlu, (int)sizeof (dlu), &err_ret) != 0) {
2125 cmn_err(CE_WARN, "failed to delete de-register lu request");
2126 return (STMF_FAILURE);
2129 return (STMF_SUCCESS);
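/*
 * Create a logical unit in standby mode. Only a minimal sbd_lu_t is set
 * up (device id, meta filename, PGR state and locks); no data or metadata
 * file is opened. The LU is linked into the global list and registered
 * with sl_access_state set to SBD_LU_STANDBY, ready to be activated later
 * by an import.
 */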
2133 sbd_create_standby_lu(sbd_create_standby_lu_t *slu, uint32_t *err_ret)
2135 sbd_lu_t *sl;
2136 stmf_lu_t *lu;
2137 int ret = EIO;
2138 int alloc_sz;
2140 alloc_sz = sizeof (sbd_lu_t) + sizeof (sbd_pgr_t) +
2141 slu->stlu_meta_fname_size;
2142 lu = (stmf_lu_t *)stmf_alloc(STMF_STRUCT_STMF_LU, alloc_sz, 0);
2143 if (lu == NULL) {
2144 return (ENOMEM);
2146 sl = (sbd_lu_t *)lu->lu_provider_private;
2147 bzero(sl, alloc_sz);
2148 sl->sl_lu = lu;
2149 sl->sl_alloc_size = alloc_sz;
2151 sl->sl_pgr = (sbd_pgr_t *)(sl + 1);
2152 sl->sl_meta_filename = ((char *)sl) + sizeof (sbd_lu_t) +
2153 sizeof (sbd_pgr_t);
2155 if (slu->stlu_meta_fname_size > 0) {
2156 (void) strcpy(sl->sl_meta_filename, slu->stlu_meta_fname);
2158 sl->sl_name = sl->sl_meta_filename;
2160 sl->sl_device_id[3] = 16;
2161 sl->sl_device_id[0] = 0xf1;
2162 sl->sl_device_id[1] = 3;
2163 sl->sl_device_id[2] = 0;
2164 bcopy(slu->stlu_guid, sl->sl_device_id + 4, 16);
2165 lu->lu_id = (scsi_devid_desc_t *)sl->sl_device_id;
2166 sl->sl_access_state = SBD_LU_STANDBY;
2168 rw_init(&sl->sl_pgr->pgr_lock, NULL, RW_DRIVER, NULL);
2169 mutex_init(&sl->sl_lock, NULL, MUTEX_DRIVER, NULL);
2170 mutex_init(&sl->sl_metadata_lock, NULL, MUTEX_DRIVER, NULL);
2171 rw_init(&sl->sl_access_state_lock, NULL, RW_DRIVER, NULL);
2173 sl->sl_trans_op = SL_OP_CREATE_REGISTER_LU;
2175 if (sbd_link_lu(sl) != SBD_SUCCESS) {
2176 *err_ret = SBD_RET_FILE_ALREADY_REGISTERED;
2177 ret = EALREADY;
2178 goto scs_err_out;
2181 ret = sbd_populate_and_register_lu(sl, err_ret);
2182 if (ret) {
2183 goto scs_err_out;
2186 sl->sl_trans_op = SL_OP_NONE;
2187 atomic_inc_32(&sbd_lu_count);
2188 return (0);
2190 scs_err_out:
2191 return (sbd_close_delete_lu(sl, ret));
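/*
 * Read the version 1.0 LU info section from the metadata and convert it
 * to the in-core form: shared-metadata layout, 512-byte data blocks, data
 * starting at SHARED_META_DATA_SIZE, and the stored device id. The
 * section is byte-swapped first if it was written with the opposite
 * endianness.
 */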
2195 sbd_load_sli_1_0(sbd_lu_t *sl, uint32_t *err_ret)
2197 sbd_lu_info_1_0_t *sli = NULL;
2198 sbd_status_t sret;
2200 sret = sbd_read_meta_section(sl, (sm_section_hdr_t **)&sli,
2201 SMS_ID_LU_INFO_1_0);
2203 if (sret != SBD_SUCCESS) {
2204 *err_ret = SBD_RET_NO_META;
2205 return (EIO);
2207 if (sli->sli_data_order != SMS_DATA_ORDER) {
2208 sbd_swap_lu_info_1_0(sli);
2209 if (sli->sli_data_order != SMS_DATA_ORDER) {
2210 kmem_free(sli, sli->sli_sms_header.sms_size);
2211 *err_ret = SBD_RET_NO_META;
2212 return (EIO);
2216 sl->sl_flags |= SL_SHARED_META;
2217 sl->sl_data_blocksize_shift = 9;
2218 sl->sl_data_offset = SHARED_META_DATA_SIZE;
2219 sl->sl_lu_size = sli->sli_total_store_size - SHARED_META_DATA_SIZE;
2220 sl->sl_total_data_size = SHARED_META_DATA_SIZE + sl->sl_lu_size;
2221 bcopy(sli->sli_lu_devid, sl->sl_device_id, 20);
2223 kmem_free(sli, sli->sli_sms_header.sms_size);
2224 return (0);
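/*
 * Import a logical unit from its metadata file or zvol. If the LU is
 * already known as a standby LU, its metadata is (re)loaded and the LU is
 * transitioned to active; otherwise a new sbd_lu_t is allocated. Both the
 * 1.1 and the older 1.0 LU info formats are handled. With no_register
 * set, only the metadata is loaded and the new LU is returned through slr
 * instead of being registered.
 */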
2228 sbd_import_lu(sbd_import_lu_t *ilu, int struct_sz, uint32_t *err_ret,
2229 int no_register, sbd_lu_t **slr)
2231 stmf_lu_t *lu;
2232 sbd_lu_t *sl;
2233 sbd_lu_info_1_1_t *sli = NULL;
2234 int asz;
2235 int ret = 0;
2236 stmf_status_t stret;
2237 int flag;
2238 int wcd = 0;
2239 int data_opened;
2240 uint16_t sli_buf_sz;
2241 uint8_t *sli_buf_copy = NULL;
2242 enum vtype vt;
2243 int standby = 0;
2244 sbd_status_t sret;
2246 if (no_register && slr == NULL) {
2247 return (EINVAL);
2249 ilu->ilu_meta_fname[struct_sz - sizeof (*ilu) + 8 - 1] = 0;
2250 /*
2251 * check whether the logical unit is already registered (ALUA).
2252 * For a standby logical unit, the meta filename is set. Use
2253 * that to search for an existing logical unit.
2254 */
2255 sret = sbd_find_and_lock_lu(NULL, (uint8_t *)&(ilu->ilu_meta_fname),
2256 SL_OP_IMPORT_LU, &sl);
2258 if (sret == SBD_SUCCESS) {
2259 if (sl->sl_access_state != SBD_LU_ACTIVE) {
2260 no_register = 1;
2261 standby = 1;
2262 lu = sl->sl_lu;
2263 if (sl->sl_alias_alloc_size) {
2264 kmem_free(sl->sl_alias,
2265 sl->sl_alias_alloc_size);
2266 sl->sl_alias_alloc_size = 0;
2267 sl->sl_alias = NULL;
2268 lu->lu_alias = NULL;
2270 if (sl->sl_meta_filename == NULL) {
2271 sl->sl_meta_filename = sl->sl_data_filename;
2272 } else if (sl->sl_data_fname_alloc_size) {
2273 kmem_free(sl->sl_data_filename,
2274 sl->sl_data_fname_alloc_size);
2275 sl->sl_data_fname_alloc_size = 0;
2277 if (sl->sl_serial_no_alloc_size) {
2278 kmem_free(sl->sl_serial_no,
2279 sl->sl_serial_no_alloc_size);
2280 sl->sl_serial_no_alloc_size = 0;
2282 if (sl->sl_mgmt_url_alloc_size) {
2283 kmem_free(sl->sl_mgmt_url,
2284 sl->sl_mgmt_url_alloc_size);
2285 sl->sl_mgmt_url_alloc_size = 0;
2287 } else {
2288 *err_ret = SBD_RET_FILE_ALREADY_REGISTERED;
2289 bcopy(sl->sl_device_id + 4, ilu->ilu_ret_guid, 16);
2290 sl->sl_trans_op = SL_OP_NONE;
2291 return (EALREADY);
2293 } else if (sret == SBD_NOT_FOUND) {
2294 asz = strlen(ilu->ilu_meta_fname) + 1;
2296 lu = (stmf_lu_t *)stmf_alloc(STMF_STRUCT_STMF_LU,
2297 sizeof (sbd_lu_t) + sizeof (sbd_pgr_t) + asz, 0);
2298 if (lu == NULL) {
2299 return (ENOMEM);
2301 sl = (sbd_lu_t *)lu->lu_provider_private;
2302 bzero(sl, sizeof (*sl));
2303 sl->sl_lu = lu;
2304 sl->sl_pgr = (sbd_pgr_t *)(sl + 1);
2305 sl->sl_meta_filename = ((char *)sl) + sizeof (*sl) +
2306 sizeof (sbd_pgr_t);
2307 (void) strcpy(sl->sl_meta_filename, ilu->ilu_meta_fname);
2308 sl->sl_name = sl->sl_meta_filename;
2309 rw_init(&sl->sl_pgr->pgr_lock, NULL, RW_DRIVER, NULL);
2310 rw_init(&sl->sl_access_state_lock, NULL, RW_DRIVER, NULL);
2311 mutex_init(&sl->sl_lock, NULL, MUTEX_DRIVER, NULL);
2312 mutex_init(&sl->sl_metadata_lock, NULL, MUTEX_DRIVER, NULL);
2313 sl->sl_trans_op = SL_OP_IMPORT_LU;
2314 } else {
2315 *err_ret = SBD_RET_META_FILE_LOOKUP_FAILED;
2316 return (EIO);
2319 /* we're only loading the metadata */
2320 if (!no_register) {
2321 if (sbd_link_lu(sl) != SBD_SUCCESS) {
2322 *err_ret = SBD_RET_FILE_ALREADY_REGISTERED;
2323 bcopy(sl->sl_device_id + 4, ilu->ilu_ret_guid, 16);
2324 ret = EALREADY;
2325 goto sim_err_out;
2328 if ((ret = lookupname(sl->sl_meta_filename, UIO_SYSSPACE, FOLLOW,
2329 NULLVPP, &sl->sl_meta_vp)) != 0) {
2330 *err_ret = SBD_RET_META_FILE_LOOKUP_FAILED;
2331 goto sim_err_out;
2333 if (sbd_is_zvol(sl->sl_meta_filename)) {
2334 sl->sl_flags |= SL_ZFS_META;
2335 sl->sl_data_filename = sl->sl_meta_filename;
2337 sl->sl_meta_vtype = vt = sl->sl_meta_vp->v_type;
2338 VN_RELE(sl->sl_meta_vp);
2339 if ((vt != VREG) && (vt != VCHR) && (vt != VBLK)) {
2340 *err_ret = SBD_RET_WRONG_META_FILE_TYPE;
2341 ret = EINVAL;
2342 goto sim_err_out;
2344 if (sl->sl_flags & SL_ZFS_META) {
2345 if (sbd_open_zfs_meta(sl) != SBD_SUCCESS) {
2346 /* let's see if the metadata is in the 64k block */
2347 sl->sl_flags &= ~SL_ZFS_META;
2350 if (!(sl->sl_flags & SL_ZFS_META)) {
2351 /* metadata is always writable */
2352 flag = FREAD | FWRITE | FOFFMAX | FEXCL;
2353 if ((ret = vn_open(sl->sl_meta_filename, UIO_SYSSPACE, flag, 0,
2354 &sl->sl_meta_vp, 0, 0)) != 0) {
2355 *err_ret = SBD_RET_META_FILE_OPEN_FAILED;
2356 goto sim_err_out;
2359 if ((sl->sl_flags & SL_ZFS_META) || (vt == VREG)) {
2360 sl->sl_meta_blocksize_shift = 0;
2361 } else {
2362 sl->sl_meta_blocksize_shift = 9;
2364 sl->sl_meta_offset = (sl->sl_flags & SL_ZFS_META) ? 0 : SBD_META_OFFSET;
2365 sl->sl_flags |= SL_META_OPENED;
2367 mutex_enter(&sl->sl_metadata_lock);
2368 sret = sbd_load_meta_start(sl);
2369 mutex_exit(&sl->sl_metadata_lock);
2370 if (sret != SBD_SUCCESS) {
2371 if (sret == SBD_META_CORRUPTED) {
2372 *err_ret = SBD_RET_NO_META;
2373 } else if (sret == SBD_NOT_SUPPORTED) {
2374 *err_ret = SBD_RET_VERSION_NOT_SUPPORTED;
2375 } else {
2376 *err_ret = SBD_RET_NO_META;
2378 ret = EINVAL;
2379 goto sim_err_out;
2382 /* Now let's see if we can read the most recent LU info */
2383 sret = sbd_read_meta_section(sl, (sm_section_hdr_t **)&sli,
2384 SMS_ID_LU_INFO_1_1);
2385 if ((sret == SBD_NOT_FOUND) && ((sl->sl_flags & SL_ZFS_META) == 0)) {
2386 ret = sbd_load_sli_1_0(sl, err_ret);
2387 if (ret) {
2388 goto sim_err_out;
2390 goto sim_sli_loaded;
2392 if (sret != SBD_SUCCESS) {
2393 *err_ret = SBD_RET_NO_META;
2394 ret = EIO;
2395 goto sim_err_out;
2397 /* load sli 1.1 */
2398 if (sli->sli_data_order != SMS_DATA_ORDER) {
2399 sbd_swap_lu_info_1_1(sli);
2400 if (sli->sli_data_order != SMS_DATA_ORDER) {
2401 *err_ret = SBD_RET_NO_META;
2402 ret = EIO;
2403 goto sim_err_out;
2407 sli_buf_sz = sli->sli_sms_header.sms_size -
2408 sizeof (sbd_lu_info_1_1_t) + 8;
2409 sli_buf_copy = kmem_alloc(sli_buf_sz + 1, KM_SLEEP);
2410 bcopy(sli->sli_buf, sli_buf_copy, sli_buf_sz);
2411 sli_buf_copy[sli_buf_sz] = 0;
2413 /* Make sure all the offsets are within limits */
2414 if (((sli->sli_flags & SLI_META_FNAME_VALID) &&
2415 (sli->sli_meta_fname_offset > sli_buf_sz)) ||
2416 ((sli->sli_flags & SLI_DATA_FNAME_VALID) &&
2417 (sli->sli_data_fname_offset > sli_buf_sz)) ||
2418 ((sli->sli_flags & SLI_MGMT_URL_VALID) &&
2419 (sli->sli_mgmt_url_offset > sli_buf_sz)) ||
2420 ((sli->sli_flags & SLI_SERIAL_VALID) &&
2421 ((sli->sli_serial_offset + sli->sli_serial_size) > sli_buf_sz)) ||
2422 ((sli->sli_flags & SLI_ALIAS_VALID) &&
2423 (sli->sli_alias_offset > sli_buf_sz))) {
2424 *err_ret = SBD_RET_NO_META;
2425 ret = EIO;
2426 goto sim_err_out;
2429 sl->sl_lu_size = sli->sli_lu_size;
2430 sl->sl_data_blocksize_shift = sli->sli_data_blocksize_shift;
2431 bcopy(sli->sli_device_id, sl->sl_device_id, 20);
2432 if (sli->sli_flags & SLI_SERIAL_VALID) {
2433 sl->sl_serial_no_size = sl->sl_serial_no_alloc_size =
2434 sli->sli_serial_size;
2435 sl->sl_serial_no = kmem_zalloc(sli->sli_serial_size, KM_SLEEP);
2436 bcopy(sli_buf_copy + sli->sli_serial_offset, sl->sl_serial_no,
2437 sl->sl_serial_no_size);
2439 if (sli->sli_flags & SLI_SEPARATE_META) {
2440 sl->sl_total_data_size = sl->sl_lu_size;
2441 if (sli->sli_flags & SLI_DATA_FNAME_VALID) {
2442 sl->sl_data_fname_alloc_size = strlen((char *)
2443 sli_buf_copy + sli->sli_data_fname_offset) + 1;
2444 sl->sl_data_filename = kmem_zalloc(
2445 sl->sl_data_fname_alloc_size, KM_SLEEP);
2446 (void) strcpy(sl->sl_data_filename,
2447 (char *)sli_buf_copy + sli->sli_data_fname_offset);
2449 } else {
2450 if (sl->sl_flags & SL_ZFS_META) {
2451 sl->sl_total_data_size = sl->sl_lu_size;
2452 sl->sl_data_offset = 0;
2453 } else {
2454 sl->sl_total_data_size =
2455 sl->sl_lu_size + SHARED_META_DATA_SIZE;
2456 sl->sl_data_offset = SHARED_META_DATA_SIZE;
2457 sl->sl_flags |= SL_SHARED_META;
2460 if (sli->sli_flags & SLI_ALIAS_VALID) {
2461 sl->sl_alias_alloc_size = strlen((char *)sli_buf_copy +
2462 sli->sli_alias_offset) + 1;
2463 sl->sl_alias = kmem_alloc(sl->sl_alias_alloc_size, KM_SLEEP);
2464 (void) strcpy(sl->sl_alias, (char *)sli_buf_copy +
2465 sli->sli_alias_offset);
2467 if (sli->sli_flags & SLI_MGMT_URL_VALID) {
2468 sl->sl_mgmt_url_alloc_size = strlen((char *)sli_buf_copy +
2469 sli->sli_mgmt_url_offset) + 1;
2470 sl->sl_mgmt_url = kmem_alloc(sl->sl_mgmt_url_alloc_size,
2471 KM_SLEEP);
2472 (void) strcpy(sl->sl_mgmt_url, (char *)sli_buf_copy +
2473 sli->sli_mgmt_url_offset);
2475 if (sli->sli_flags & SLI_WRITE_PROTECTED) {
2476 sl->sl_flags |= SL_WRITE_PROTECTED;
2478 if (sli->sli_flags & SLI_VID_VALID) {
2479 sl->sl_flags |= SL_VID_VALID;
2480 bcopy(sli->sli_vid, sl->sl_vendor_id, 8);
2482 if (sli->sli_flags & SLI_PID_VALID) {
2483 sl->sl_flags |= SL_PID_VALID;
2484 bcopy(sli->sli_pid, sl->sl_product_id, 16);
2486 if (sli->sli_flags & SLI_REV_VALID) {
2487 sl->sl_flags |= SL_REV_VALID;
2488 bcopy(sli->sli_rev, sl->sl_revision, 4);
2490 if (sli->sli_flags & SLI_WRITEBACK_CACHE_DISABLE) {
2491 sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
2493 sim_sli_loaded:
2494 if ((sl->sl_flags & SL_SHARED_META) == 0) {
2495 data_opened = 0;
2496 } else {
2497 data_opened = 1;
2498 sl->sl_data_filename = sl->sl_meta_filename;
2499 sl->sl_data_vp = sl->sl_meta_vp;
2500 sl->sl_data_vtype = sl->sl_meta_vtype;
2503 sret = sbd_pgr_meta_load(sl);
2504 if (sret != SBD_SUCCESS) {
2505 *err_ret = SBD_RET_NO_META;
2506 ret = EIO;
2507 goto sim_err_out;
2510 ret = sbd_open_data_file(sl, err_ret, 1, data_opened, 0);
2511 if (ret) {
2512 goto sim_err_out;
2515 /*
2516 * set write cache disable on the device
2517 * Note: this shouldn't fail on import unless the cache capabilities
2518 * of the device changed. If that happened, modify will need to
2519 * be used to set the cache flag appropriately after import is done.
2520 */
2521 if (sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) {
2522 (void) sbd_wcd_set(1, sl);
2523 wcd = 1;
2524 /*
2525 * if not explicitly set, attempt to set it to enable, if that fails
2526 * get the current setting and use that
2527 */
2528 } else {
2529 sret = sbd_wcd_set(0, sl);
2530 if (sret != SBD_SUCCESS) {
2531 sbd_wcd_get(&wcd, sl);
2535 if (wcd) {
2536 sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE |
2537 SL_SAVED_WRITE_CACHE_DISABLE;
2540 /* we're only loading the metadata */
2541 if (!no_register) {
2542 ret = sbd_populate_and_register_lu(sl, err_ret);
2543 if (ret) {
2544 goto sim_err_out;
2546 atomic_inc_32(&sbd_lu_count);
2549 bcopy(sl->sl_device_id + 4, ilu->ilu_ret_guid, 16);
2550 sl->sl_trans_op = SL_OP_NONE;
2552 if (sli) {
2553 kmem_free(sli, sli->sli_sms_header.sms_size);
2554 sli = NULL;
2556 if (sli_buf_copy) {
2557 kmem_free(sli_buf_copy, sli_buf_sz + 1);
2558 sli_buf_copy = NULL;
2560 if (no_register && !standby) {
2561 *slr = sl;
2564 /*
2565 * if this was imported from standby, set the access state
2566 * to active.
2567 */
2568 if (standby) {
2569 sbd_it_data_t *it;
2570 mutex_enter(&sl->sl_lock);
2571 sl->sl_access_state = SBD_LU_ACTIVE;
2572 for (it = sl->sl_it_list; it != NULL;
2573 it = it->sbd_it_next) {
2574 it->sbd_it_ua_conditions |=
2575 SBD_UA_ASYMMETRIC_ACCESS_CHANGED;
2576 it->sbd_it_ua_conditions |= SBD_UA_POR;
2577 it->sbd_it_flags |= SBD_IT_PGR_CHECK_FLAG;
2579 mutex_exit(&sl->sl_lock);
2580 /* call set access state */
2581 stret = stmf_set_lu_access(lu, STMF_LU_ACTIVE);
2582 if (stret != STMF_SUCCESS) {
2583 *err_ret = SBD_RET_ACCESS_STATE_FAILED;
2584 sl->sl_access_state = SBD_LU_STANDBY;
2585 goto sim_err_out;
2587 if (sl->sl_alias) {
2588 lu->lu_alias = sl->sl_alias;
2589 } else {
2590 lu->lu_alias = sl->sl_name;
2593 sl->sl_access_state = SBD_LU_ACTIVE;
2594 return (0);
2596 sim_err_out:
2597 if (sli) {
2598 kmem_free(sli, sli->sli_sms_header.sms_size);
2599 sli = NULL;
2601 if (sli_buf_copy) {
2602 kmem_free(sli_buf_copy, sli_buf_sz + 1);
2603 sli_buf_copy = NULL;
2606 if (standby) {
2607 *err_ret = SBD_RET_ACCESS_STATE_FAILED;
2608 sl->sl_trans_op = SL_OP_NONE;
2609 return (EIO);
2610 } else {
2611 return (sbd_close_delete_lu(sl, ret));
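/*
 * Modify properties of an existing logical unit: write back cache
 * setting, alias, management URL, write protection and LU size. If the
 * LU is not currently registered it is imported temporarily (metadata
 * only), modified, and closed again. Unit attentions are queued for the
 * affected initiators when a visible property changes, and the updated
 * LU info is written back to the metadata.
 */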
2616 sbd_modify_lu(sbd_modify_lu_t *mlu, int struct_sz, uint32_t *err_ret)
2618 sbd_lu_t *sl = NULL;
2619 uint16_t alias_sz;
2620 int ret = 0;
2621 sbd_it_data_t *it;
2622 sbd_status_t sret;
2623 uint64_t old_size;
2624 int modify_unregistered = 0;
2625 int ua = 0;
2626 sbd_import_lu_t *ilu;
2627 stmf_lu_t *lu;
2628 uint32_t ilu_sz;
2629 uint32_t sz;
2631 sz = struct_sz - sizeof (*mlu) + 8 + 1;
2633 /* if there is data in the buf, null terminate it */
2634 if (struct_sz > sizeof (*mlu)) {
2635 mlu->mlu_buf[struct_sz - sizeof (*mlu) + 8 - 1] = 0;
2638 *err_ret = 0;
2640 /* Let's validate the offsets */
2641 if (((mlu->mlu_alias_valid) &&
2642 (mlu->mlu_alias_off >= sz)) ||
2643 ((mlu->mlu_mgmt_url_valid) &&
2644 (mlu->mlu_mgmt_url_off >= sz)) ||
2645 (mlu->mlu_by_fname) &&
2646 (mlu->mlu_fname_off >= sz)) {
2647 return (EINVAL);
2650 /*
2651 * We'll look for the device but if we don't find it registered,
2652 * we'll still try to modify the unregistered device.
2653 */
2654 if (mlu->mlu_by_guid) {
2655 sret = sbd_find_and_lock_lu(mlu->mlu_input_guid, NULL,
2656 SL_OP_MODIFY_LU, &sl);
2657 } else if (mlu->mlu_by_fname) {
2658 sret = sbd_find_and_lock_lu(NULL,
2659 (uint8_t *)&(mlu->mlu_buf[mlu->mlu_fname_off]),
2660 SL_OP_MODIFY_LU, &sl);
2661 } else {
2662 return (EINVAL);
2666 if (sret != SBD_SUCCESS) {
2667 if (sret == SBD_BUSY) {
2668 *err_ret = SBD_RET_LU_BUSY;
2669 return (EBUSY);
2670 } else if (sret != SBD_NOT_FOUND) {
2671 return (EIO);
2672 } else if (!mlu->mlu_by_fname) {
2673 return (EINVAL);
2675 /* Okay, try to import the device */
2676 struct_sz = max(8, strlen(&(mlu->mlu_buf[mlu->mlu_fname_off]))
2677 + 1);
2678 struct_sz += sizeof (sbd_import_lu_t) - 8;
2679 ilu_sz = struct_sz;
2680 ilu = (sbd_import_lu_t *)kmem_zalloc(ilu_sz, KM_SLEEP);
2681 ilu->ilu_struct_size = struct_sz;
2682 (void) strcpy(ilu->ilu_meta_fname,
2683 &(mlu->mlu_buf[mlu->mlu_fname_off]));
2684 ret = sbd_import_lu(ilu, struct_sz, err_ret, 1, &sl);
2685 kmem_free(ilu, ilu_sz);
2686 if (ret != SBD_SUCCESS) {
2687 return (ENOENT);
2689 modify_unregistered = 1;
2692 if (sl->sl_access_state != SBD_LU_ACTIVE) {
2693 *err_ret = SBD_RET_ACCESS_STATE_FAILED;
2694 ret = EINVAL;
2695 goto smm_err_out;
2698 /* check for write cache change */
2699 if (mlu->mlu_writeback_cache_disable_valid) {
2700 /* set wce on device */
2701 sret = sbd_wcd_set(mlu->mlu_writeback_cache_disable, sl);
2702 if (!mlu->mlu_writeback_cache_disable && sret != SBD_SUCCESS) {
2703 *err_ret = SBD_RET_WRITE_CACHE_SET_FAILED;
2704 ret = EFAULT;
2705 goto smm_err_out;
2707 mutex_enter(&sl->sl_lock);
2708 if (!mlu->mlu_writeback_cache_disable) {
2709 if (sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) {
2710 ua = 1;
2711 sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
2712 sl->sl_flags &= ~SL_SAVED_WRITE_CACHE_DISABLE;
2714 } else {
2715 if ((sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) == 0) {
2716 ua = 1;
2717 sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
2718 sl->sl_flags |= SL_SAVED_WRITE_CACHE_DISABLE;
2721 for (it = sl->sl_it_list; ua && it != NULL;
2722 it = it->sbd_it_next) {
2723 it->sbd_it_ua_conditions |=
2724 SBD_UA_MODE_PARAMETERS_CHANGED;
2726 mutex_exit(&sl->sl_lock);
2728 ua = 0;
2730 if (mlu->mlu_alias_valid) {
2731 alias_sz = strlen((char *)mlu->mlu_buf +
2732 mlu->mlu_alias_off) + 1;
2733 /*
2734 * Use the allocated buffer or alloc a new one.
2735 * Don't copy into sl_alias if sl_alias_alloc_size is 0;
2736 * otherwise you'll be writing over the data/metadata
2737 * filename.
2738 */
2739 mutex_enter(&sl->sl_lock);
2740 if (sl->sl_alias_alloc_size > 0 &&
2741 sl->sl_alias_alloc_size < alias_sz) {
2742 kmem_free(sl->sl_alias,
2743 sl->sl_alias_alloc_size);
2744 sl->sl_alias_alloc_size = 0;
2746 if (sl->sl_alias_alloc_size == 0) {
2747 sl->sl_alias = kmem_alloc(alias_sz, KM_SLEEP);
2748 sl->sl_alias_alloc_size = alias_sz;
2750 (void) strcpy(sl->sl_alias, (char *)mlu->mlu_buf +
2751 mlu->mlu_alias_off);
2752 lu = sl->sl_lu;
2753 lu->lu_alias = sl->sl_alias;
2754 mutex_exit(&sl->sl_lock);
2757 if (mlu->mlu_mgmt_url_valid) {
2758 uint16_t url_sz;
2760 url_sz = strlen((char *)mlu->mlu_buf + mlu->mlu_mgmt_url_off);
2761 if (url_sz > 0)
2762 url_sz++;
2764 mutex_enter(&sl->sl_lock);
2765 if (sl->sl_mgmt_url_alloc_size > 0 &&
2766 (url_sz == 0 || sl->sl_mgmt_url_alloc_size < url_sz)) {
2767 kmem_free(sl->sl_mgmt_url, sl->sl_mgmt_url_alloc_size);
2768 sl->sl_mgmt_url = NULL;
2769 sl->sl_mgmt_url_alloc_size = 0;
2771 if (url_sz > 0) {
2772 if (sl->sl_mgmt_url_alloc_size == 0) {
2773 sl->sl_mgmt_url = kmem_alloc(url_sz, KM_SLEEP);
2774 sl->sl_mgmt_url_alloc_size = url_sz;
2776 (void) strcpy(sl->sl_mgmt_url, (char *)mlu->mlu_buf +
2777 mlu->mlu_mgmt_url_off);
2779 for (it = sl->sl_it_list; it != NULL;
2780 it = it->sbd_it_next) {
2781 it->sbd_it_ua_conditions |=
2782 SBD_UA_MODE_PARAMETERS_CHANGED;
2784 mutex_exit(&sl->sl_lock);
2787 if (mlu->mlu_write_protected_valid) {
2788 mutex_enter(&sl->sl_lock);
2789 if (mlu->mlu_write_protected) {
2790 if ((sl->sl_flags & SL_WRITE_PROTECTED) == 0) {
2791 ua = 1;
2792 sl->sl_flags |= SL_WRITE_PROTECTED;
2794 } else {
2795 if (sl->sl_flags & SL_WRITE_PROTECTED) {
2796 ua = 1;
2797 sl->sl_flags &= ~SL_WRITE_PROTECTED;
2800 for (it = sl->sl_it_list; ua && it != NULL;
2801 it = it->sbd_it_next) {
2802 it->sbd_it_ua_conditions |=
2803 SBD_UA_MODE_PARAMETERS_CHANGED;
2805 mutex_exit(&sl->sl_lock);
2808 if (mlu->mlu_lu_size_valid) {
2809 /*
2810 * validate lu size and set
2811 * For open file only (registered lu)
2812 */
2813 mutex_enter(&sl->sl_lock);
2814 old_size = sl->sl_lu_size;
2815 sl->sl_lu_size = mlu->mlu_lu_size;
2816 mutex_exit(&sl->sl_lock);
2817 ret = sbd_open_data_file(sl, err_ret, 1, 1, 1);
2818 if (ret) {
2819 mutex_enter(&sl->sl_lock);
2820 sl->sl_lu_size = old_size;
2821 mutex_exit(&sl->sl_lock);
2822 goto smm_err_out;
2824 if (old_size != mlu->mlu_lu_size) {
2825 mutex_enter(&sl->sl_lock);
2826 for (it = sl->sl_it_list; it != NULL;
2827 it = it->sbd_it_next) {
2828 it->sbd_it_ua_conditions |=
2829 SBD_UA_CAPACITY_CHANGED;
2831 mutex_exit(&sl->sl_lock);
2835 if (sbd_write_lu_info(sl) != SBD_SUCCESS) {
2836 *err_ret = SBD_RET_META_CREATION_FAILED;
2837 ret = EIO;
2840 smm_err_out:
2841 if (modify_unregistered) {
2842 (void) sbd_close_delete_lu(sl, 0);
2843 } else {
2844 sl->sl_trans_op = SL_OP_NONE;
2846 return (ret);
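/*
 * Set driver-wide properties. Currently this covers only the global
 * management URL; when it changes, a MODE PARAMETERS CHANGED unit
 * attention is queued for every LU that does not override the URL with
 * its own per-LU setting.
 */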
2850 sbd_set_global_props(sbd_global_props_t *mlu, int struct_sz,
2851 uint32_t *err_ret)
2853 sbd_lu_t *sl = NULL;
2854 int ret = 0;
2855 sbd_it_data_t *it;
2856 uint32_t sz;
2858 sz = struct_sz - sizeof (*mlu) + 8 + 1;
2860 /* if there is data in the buf, null terminate it */
2861 if (struct_sz > sizeof (*mlu)) {
2862 mlu->mlu_buf[struct_sz - sizeof (*mlu) + 8 - 1] = 0;
2865 *err_ret = 0;
2867 /* Let's validate the offsets */
2868 if (((mlu->mlu_mgmt_url_valid) &&
2869 (mlu->mlu_mgmt_url_off >= sz))) {
2870 return (EINVAL);
2873 if (mlu->mlu_mgmt_url_valid) {
2874 uint16_t url_sz;
2876 url_sz = strlen((char *)mlu->mlu_buf + mlu->mlu_mgmt_url_off);
2877 if (url_sz > 0)
2878 url_sz++;
2880 rw_enter(&sbd_global_prop_lock, RW_WRITER);
2881 if (sbd_mgmt_url_alloc_size > 0 &&
2882 (url_sz == 0 || sbd_mgmt_url_alloc_size < url_sz)) {
2883 kmem_free(sbd_mgmt_url, sbd_mgmt_url_alloc_size);
2884 sbd_mgmt_url = NULL;
2885 sbd_mgmt_url_alloc_size = 0;
2887 if (url_sz > 0) {
2888 if (sbd_mgmt_url_alloc_size == 0) {
2889 sbd_mgmt_url = kmem_alloc(url_sz, KM_SLEEP);
2890 sbd_mgmt_url_alloc_size = url_sz;
2892 (void) strcpy(sbd_mgmt_url, (char *)mlu->mlu_buf +
2893 mlu->mlu_mgmt_url_off);
2895 /*
2896 * check each lu to determine whether a UA is needed.
2897 */
2898 mutex_enter(&sbd_lock);
2899 for (sl = sbd_lu_list; sl; sl = sl->sl_next) {
2900 if (sl->sl_mgmt_url) {
2901 continue;
2903 mutex_enter(&sl->sl_lock);
2904 for (it = sl->sl_it_list; it != NULL;
2905 it = it->sbd_it_next) {
2906 it->sbd_it_ua_conditions |=
2907 SBD_UA_MODE_PARAMETERS_CHANGED;
2909 mutex_exit(&sl->sl_lock);
2911 mutex_exit(&sbd_lock);
2912 rw_exit(&sbd_global_prop_lock);
2914 return (ret);
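/*
 * Delete a logical unit that has already been found and locked. The LU
 * is taken offline through stmf_ctl(), the routine waits up to about
 * five seconds for the offline to complete and be acknowledged, and the
 * LU is then deregistered from STMF and freed.
 */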
2917 /* ARGSUSED */
2919 sbd_delete_locked_lu(sbd_lu_t *sl, uint32_t *err_ret,
2920 stmf_state_change_info_t *ssi)
2922 int i;
2923 stmf_status_t ret;
2925 if ((sl->sl_state == STMF_STATE_OFFLINE) &&
2926 !sl->sl_state_not_acked) {
2927 goto sdl_do_dereg;
2930 if ((sl->sl_state != STMF_STATE_ONLINE) ||
2931 sl->sl_state_not_acked) {
2932 return (EBUSY);
2935 ret = stmf_ctl(STMF_CMD_LU_OFFLINE, sl->sl_lu, ssi);
2936 if ((ret != STMF_SUCCESS) && (ret != STMF_ALREADY)) {
2937 return (EBUSY);
2940 for (i = 0; i < 500; i++) {
2941 if ((sl->sl_state == STMF_STATE_OFFLINE) &&
2942 !sl->sl_state_not_acked) {
2943 goto sdl_do_dereg;
2945 delay(drv_usectohz(10000));
2947 return (EBUSY);
2949 sdl_do_dereg:;
2950 if (stmf_deregister_lu(sl->sl_lu) != STMF_SUCCESS)
2951 return (EBUSY);
2952 atomic_dec_32(&sbd_lu_count);
2954 return (sbd_close_delete_lu(sl, 0));
2958 sbd_delete_lu(sbd_delete_lu_t *dlu, int struct_sz, uint32_t *err_ret)
2960 sbd_lu_t *sl;
2961 sbd_status_t sret;
2962 stmf_state_change_info_t ssi;
2963 int ret;
2965 if (dlu->dlu_by_meta_name) {
2966 ((char *)dlu)[struct_sz - 1] = 0;
2967 sret = sbd_find_and_lock_lu(NULL, dlu->dlu_meta_name,
2968 SL_OP_DELETE_LU, &sl);
2969 } else {
2970 sret = sbd_find_and_lock_lu(dlu->dlu_guid, NULL,
2971 SL_OP_DELETE_LU, &sl);
2973 if (sret != SBD_SUCCESS) {
2974 if (sret == SBD_BUSY) {
2975 *err_ret = SBD_RET_LU_BUSY;
2976 return (EBUSY);
2977 } else if (sret == SBD_NOT_FOUND) {
2978 *err_ret = SBD_RET_NOT_FOUND;
2979 return (ENOENT);
2981 return (EIO);
2984 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
2985 ssi.st_additional_info = "sbd_delete_lu call (ioctl)";
2986 ret = sbd_delete_locked_lu(sl, err_ret, &ssi);
2988 if (ret) {
2989 /* Once it's locked, no need to grab the mutex again */
2990 sl->sl_trans_op = SL_OP_NONE;
2992 return (ret);
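/*
 * Read from the LU's backing store. Reads past the end of the LU fail
 * with SBD_IO_PAST_EOF; the portion of a read beyond the initialized
 * region (sl_data_readable_size) is simply zero-filled. The access state
 * lock is held across the vn_rdwr() so the backing vnode cannot be
 * closed (standby transition or delete) mid-I/O.
 */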
2995 sbd_status_t
2996 sbd_data_read(sbd_lu_t *sl, struct scsi_task *task,
2997 uint64_t offset, uint64_t size, uint8_t *buf)
2999 int ret;
3000 long resid;
3002 if ((offset + size) > sl->sl_lu_size) {
3003 return (SBD_IO_PAST_EOF);
3006 offset += sl->sl_data_offset;
3008 if ((offset + size) > sl->sl_data_readable_size) {
3009 uint64_t store_end;
3010 if (offset > sl->sl_data_readable_size) {
3011 bzero(buf, size);
3012 return (SBD_SUCCESS);
3014 store_end = sl->sl_data_readable_size - offset;
3015 bzero(buf + store_end, size - store_end);
3016 size = store_end;
3019 DTRACE_PROBE5(backing__store__read__start, sbd_lu_t *, sl,
3020 uint8_t *, buf, uint64_t, size, uint64_t, offset,
3021 scsi_task_t *, task);
3023 /*
3024 * Don't proceed if the device has been closed
3025 * This can occur on an access state change to standby or
3026 * a delete. The writer lock is acquired before closing the
3027 * lu.
3028 */
3029 rw_enter(&sl->sl_access_state_lock, RW_READER);
3030 if ((sl->sl_flags & SL_MEDIA_LOADED) == 0) {
3031 rw_exit(&sl->sl_access_state_lock);
3032 return (SBD_FAILURE);
3034 ret = vn_rdwr(UIO_READ, sl->sl_data_vp, (caddr_t)buf, (ssize_t)size,
3035 (offset_t)offset, UIO_SYSSPACE, 0, RLIM64_INFINITY, CRED(),
3036 &resid);
3037 rw_exit(&sl->sl_access_state_lock);
3039 DTRACE_PROBE6(backing__store__read__end, sbd_lu_t *, sl,
3040 uint8_t *, buf, uint64_t, size, uint64_t, offset,
3041 int, ret, scsi_task_t *, task);
3043 over_sl_data_read:
3044 if (ret || resid) {
3045 stmf_trace(0, "UIO_READ failed, ret = %d, resid = %d", ret,
3046 resid);
3047 return (SBD_FAILURE);
3050 return (SBD_SUCCESS);
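/*
 * Write to the LU's backing store. When the write back cache is disabled
 * and the backing store cannot honor the setting directly
 * (SL_FLUSH_ON_DISABLED_WRITECACHE), the write is issued with FSYNC and
 * followed by an explicit cache flush. On success, the initialized
 * region (sl_data_readable_size) is grown with a compare-and-swap loop so
 * concurrent writers never shrink it.
 */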
3053 sbd_status_t
3054 sbd_data_write(sbd_lu_t *sl, struct scsi_task *task,
3055 uint64_t offset, uint64_t size, uint8_t *buf)
3057 int ret;
3058 long resid;
3059 sbd_status_t sret = SBD_SUCCESS;
3060 int ioflag;
3062 if ((offset + size) > sl->sl_lu_size) {
3063 return (SBD_IO_PAST_EOF);
3066 offset += sl->sl_data_offset;
3068 if ((sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) &&
3069 (sl->sl_flags & SL_FLUSH_ON_DISABLED_WRITECACHE)) {
3070 ioflag = FSYNC;
3071 } else {
3072 ioflag = 0;
3075 DTRACE_PROBE5(backing__store__write__start, sbd_lu_t *, sl,
3076 uint8_t *, buf, uint64_t, size, uint64_t, offset,
3077 scsi_task_t *, task);
3079 /*
3080 * Don't proceed if the device has been closed
3081 * This can occur on an access state change to standby or
3082 * a delete. The writer lock is acquired before closing the
3083 * lu.
3084 */
3085 rw_enter(&sl->sl_access_state_lock, RW_READER);
3086 if ((sl->sl_flags & SL_MEDIA_LOADED) == 0) {
3087 rw_exit(&sl->sl_access_state_lock);
3088 return (SBD_FAILURE);
3090 ret = vn_rdwr(UIO_WRITE, sl->sl_data_vp, (caddr_t)buf, (ssize_t)size,
3091 (offset_t)offset, UIO_SYSSPACE, ioflag, RLIM64_INFINITY, CRED(),
3092 &resid);
3093 rw_exit(&sl->sl_access_state_lock);
3095 DTRACE_PROBE6(backing__store__write__end, sbd_lu_t *, sl,
3096 uint8_t *, buf, uint64_t, size, uint64_t, offset,
3097 int, ret, scsi_task_t *, task);
3099 if ((ret == 0) && (resid == 0) &&
3100 (sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) &&
3101 (sl->sl_flags & SL_FLUSH_ON_DISABLED_WRITECACHE)) {
3102 sret = sbd_flush_data_cache(sl, 1);
3104 over_sl_data_write:
3106 if ((ret || resid) || (sret != SBD_SUCCESS)) {
3107 return (SBD_FAILURE);
3108 } else if ((offset + size) > sl->sl_data_readable_size) {
3109 uint64_t old_size, new_size;
3111 do {
3112 old_size = sl->sl_data_readable_size;
3113 if ((offset + size) <= old_size)
3114 break;
3115 new_size = offset + size;
3116 } while (atomic_cas_64(&sl->sl_data_readable_size, old_size,
3117 new_size) != old_size);
3120 return (SBD_SUCCESS);
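/*
 * Return the global properties (currently just the management URL) to
 * the caller. The required buffer size is always reported; if the
 * supplied buffer is too small the call fails with ENOMEM and
 * SBD_RET_INSUFFICIENT_BUF_SPACE.
 */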
3124 sbd_get_global_props(sbd_global_props_t *oslp, uint32_t oslp_sz,
3125 uint32_t *err_ret)
3127 uint32_t sz = 0;
3128 uint16_t off;
3130 rw_enter(&sbd_global_prop_lock, RW_READER);
3131 if (sbd_mgmt_url) {
3132 sz += strlen(sbd_mgmt_url) + 1;
3134 bzero(oslp, sizeof (*oslp) - 8);
3135 oslp->mlu_buf_size_needed = sz;
3137 if (sz > (oslp_sz - sizeof (*oslp) + 8)) {
3138 *err_ret = SBD_RET_INSUFFICIENT_BUF_SPACE;
3139 rw_exit(&sbd_global_prop_lock);
3140 return (ENOMEM);
3143 off = 0;
3144 if (sbd_mgmt_url) {
3145 oslp->mlu_mgmt_url_valid = 1;
3146 oslp->mlu_mgmt_url_off = off;
3147 (void) strcpy((char *)&oslp->mlu_buf[off], sbd_mgmt_url);
3148 off += strlen(sbd_mgmt_url) + 1;
3151 rw_exit(&sbd_global_prop_lock);
3152 return (0);
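/*
 * Look up a logical unit by GUID or zvol path and report whether UNMAP
 * is enabled for it, along with its GUID.
 */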
3155 static int
3156 sbd_get_unmap_props(sbd_unmap_props_t *sup,
3157 sbd_unmap_props_t *osup, uint32_t *err_ret)
3159 sbd_status_t sret;
3160 sbd_lu_t *sl = NULL;
3162 if (sup->sup_guid_valid) {
3163 sret = sbd_find_and_lock_lu(sup->sup_guid,
3164 NULL, SL_OP_LU_PROPS, &sl);
3165 } else {
3166 sret = sbd_find_and_lock_lu(NULL,
3167 (uint8_t *)sup->sup_zvol_path, SL_OP_LU_PROPS,
3168 &sl);
3170 if (sret != SBD_SUCCESS) {
3171 if (sret == SBD_BUSY) {
3172 *err_ret = SBD_RET_LU_BUSY;
3173 return (EBUSY);
3174 } else if (sret == SBD_NOT_FOUND) {
3175 *err_ret = SBD_RET_NOT_FOUND;
3176 return (ENOENT);
3178 return (EIO);
3181 sup->sup_found_lu = 1;
3182 sup->sup_guid_valid = 1;
3183 bcopy(sl->sl_device_id + 4, sup->sup_guid, 16);
3184 if (sl->sl_flags & SL_UNMAP_ENABLED)
3185 sup->sup_unmap_enabled = 1;
3186 else
3187 sup->sup_unmap_enabled = 0;
3189 *osup = *sup;
3190 sl->sl_trans_op = SL_OP_NONE;
3192 return (0);
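/*
 * Return the properties of a single logical unit. Variable-length items
 * (meta/data filenames, alias, management URL, serial number) are packed
 * into the output buffer with their offsets recorded in the header;
 * fixed items such as size, block size, access state, vid/pid/rev and
 * the cache/write-protect flags are copied directly. The required buffer
 * size is reported so the caller can retry with a larger buffer.
 */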
3196 sbd_get_lu_props(sbd_lu_props_t *islp, uint32_t islp_sz,
3197 sbd_lu_props_t *oslp, uint32_t oslp_sz, uint32_t *err_ret)
3199 sbd_status_t sret;
3200 sbd_lu_t *sl = NULL;
3201 uint32_t sz;
3202 uint16_t off;
3204 if (islp->slp_input_guid) {
3205 sret = sbd_find_and_lock_lu(islp->slp_guid, NULL,
3206 SL_OP_LU_PROPS, &sl);
3207 } else {
3208 ((char *)islp)[islp_sz - 1] = 0;
3209 sret = sbd_find_and_lock_lu(NULL, islp->slp_buf,
3210 SL_OP_LU_PROPS, &sl);
3212 if (sret != SBD_SUCCESS) {
3213 if (sret == SBD_BUSY) {
3214 *err_ret = SBD_RET_LU_BUSY;
3215 return (EBUSY);
3216 } else if (sret == SBD_NOT_FOUND) {
3217 *err_ret = SBD_RET_NOT_FOUND;
3218 return (ENOENT);
3220 return (EIO);
3223 sz = strlen(sl->sl_name) + 1;
3224 if ((sl->sl_flags & (SL_ZFS_META | SL_SHARED_META)) == 0) {
3225 if (sl->sl_data_filename) {
3226 sz += strlen(sl->sl_data_filename) + 1;
3229 sz += sl->sl_serial_no_size;
3230 if (sl->sl_alias) {
3231 sz += strlen(sl->sl_alias) + 1;
3234 rw_enter(&sbd_global_prop_lock, RW_READER);
3235 if (sl->sl_mgmt_url) {
3236 sz += strlen(sl->sl_mgmt_url) + 1;
3237 } else if (sbd_mgmt_url) {
3238 sz += strlen(sbd_mgmt_url) + 1;
3240 bzero(oslp, sizeof (*oslp) - 8);
3241 oslp->slp_buf_size_needed = sz;
3243 if (sz > (oslp_sz - sizeof (*oslp) + 8)) {
3244 sl->sl_trans_op = SL_OP_NONE;
3245 *err_ret = SBD_RET_INSUFFICIENT_BUF_SPACE;
3246 rw_exit(&sbd_global_prop_lock);
3247 return (ENOMEM);
3250 off = 0;
3251 (void) strcpy((char *)oslp->slp_buf, sl->sl_name);
3252 oslp->slp_meta_fname_off = off;
3253 off += strlen(sl->sl_name) + 1;
3254 if ((sl->sl_flags & (SL_ZFS_META | SL_SHARED_META)) == 0) {
3255 oslp->slp_meta_fname_valid = 1;
3256 oslp->slp_separate_meta = 1;
3257 if (sl->sl_data_filename) {
3258 oslp->slp_data_fname_valid = 1;
3259 oslp->slp_data_fname_off = off;
3260 (void) strcpy((char *)&oslp->slp_buf[off],
3261 sl->sl_data_filename);
3262 off += strlen(sl->sl_data_filename) + 1;
3264 } else {
3265 oslp->slp_data_fname_valid = 1;
3266 oslp->slp_data_fname_off = oslp->slp_meta_fname_off;
3267 if (sl->sl_flags & SL_ZFS_META) {
3268 oslp->slp_zfs_meta = 1;
3271 if (sl->sl_alias) {
3272 oslp->slp_alias_valid = 1;
3273 oslp->slp_alias_off = off;
3274 (void) strcpy((char *)&oslp->slp_buf[off], sl->sl_alias);
3275 off += strlen(sl->sl_alias) + 1;
3277 if (sl->sl_mgmt_url) {
3278 oslp->slp_mgmt_url_valid = 1;
3279 oslp->slp_mgmt_url_off = off;
3280 (void) strcpy((char *)&oslp->slp_buf[off], sl->sl_mgmt_url);
3281 off += strlen(sl->sl_mgmt_url) + 1;
3282 } else if (sbd_mgmt_url) {
3283 oslp->slp_mgmt_url_valid = 1;
3284 oslp->slp_mgmt_url_off = off;
3285 (void) strcpy((char *)&oslp->slp_buf[off], sbd_mgmt_url);
3286 off += strlen(sbd_mgmt_url) + 1;
3288 if (sl->sl_serial_no_size) {
3289 oslp->slp_serial_off = off;
3290 bcopy(sl->sl_serial_no, &oslp->slp_buf[off],
3291 sl->sl_serial_no_size);
3292 oslp->slp_serial_size = sl->sl_serial_no_size;
3293 oslp->slp_serial_valid = 1;
3294 off += sl->sl_serial_no_size;
3297 oslp->slp_lu_size = sl->sl_lu_size;
3298 oslp->slp_blksize = ((uint16_t)1) << sl->sl_data_blocksize_shift;
3300 oslp->slp_access_state = sl->sl_access_state;
3302 if (sl->sl_flags & SL_VID_VALID) {
3303 oslp->slp_lu_vid = 1;
3304 bcopy(sl->sl_vendor_id, oslp->slp_vid, 8);
3305 } else {
3306 bcopy(sbd_vendor_id, oslp->slp_vid, 8);
3308 if (sl->sl_flags & SL_PID_VALID) {
3309 oslp->slp_lu_pid = 1;
3310 bcopy(sl->sl_product_id, oslp->slp_pid, 16);
3311 } else {
3312 bcopy(sbd_product_id, oslp->slp_pid, 16);
3314 if (sl->sl_flags & SL_REV_VALID) {
3315 oslp->slp_lu_rev = 1;
3316 bcopy(sl->sl_revision, oslp->slp_rev, 4);
3317 } else {
3318 bcopy(sbd_revision, oslp->slp_rev, 4);
3320 bcopy(sl->sl_device_id + 4, oslp->slp_guid, 16);
3322 if (sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE)
3323 oslp->slp_writeback_cache_disable_cur = 1;
3324 if (sl->sl_flags & SL_SAVED_WRITE_CACHE_DISABLE)
3325 oslp->slp_writeback_cache_disable_saved = 1;
3326 if (sl->sl_flags & SL_WRITE_PROTECTED)
3327 oslp->slp_write_protected = 1;
3329 sl->sl_trans_op = SL_OP_NONE;
3331 rw_exit(&sbd_global_prop_lock);
3332 return (0);
3335 /*
3336 * Returns an allocated string with the "<pool>/..." form of the zvol name.
3337 */
3338 static char *
3339 sbd_get_zvol_name(sbd_lu_t *sl)
3341 char *src;
3342 char *p;
3344 if (sl->sl_data_filename)
3345 src = sl->sl_data_filename;
3346 else
3347 src = sl->sl_meta_filename;
3348 /* There has to be a better way */
3349 if (SBD_IS_ZVOL(src) != 0) {
3350 ASSERT(0);
3352 src += 14; /* Past /dev/zvol/dsk/ */
3353 if (*src == '/')
3354 src++; /* or /dev/zvol/rdsk/ */
3355 p = (char *)kmem_alloc(strlen(src) + 1, KM_SLEEP);
3356 (void) strcpy(p, src);
3357 return (p);
3360 /*
3361 * this function creates a local metadata zvol property
3362 */
3363 sbd_status_t
3364 sbd_create_zfs_meta_object(sbd_lu_t *sl)
3366 /*
3367 * -allocate 1/2 the property size; the zfs property
3368 * is 8k in size and stored as an ascii hex string, so all
3369 * we need is a 4k buffer to store the binary data.
3370 * -initialize the reader/writer lock
3371 */
3372 if ((sl->sl_zfs_meta = kmem_zalloc(ZAP_MAXVALUELEN / 2, KM_SLEEP))
3373 == NULL)
3374 return (SBD_FAILURE);
3375 rw_init(&sl->sl_zfs_meta_lock, NULL, RW_DRIVER, NULL);
3376 return (SBD_SUCCESS);
3379 char
3380 sbd_ctoi(char c)
3382 if ((c >= '0') && (c <= '9'))
3383 c -= '0';
3384 else if ((c >= 'A') && (c <= 'F'))
3385 c = c - 'A' + 10;
3386 else if ((c >= 'a') && (c <= 'f'))
3387 c = c - 'a' + 10;
3388 else
3389 c = -1;
3390 return (c);
3393 /*
3394 * read zvol property and convert to binary
3395 */
3396 sbd_status_t
3397 sbd_open_zfs_meta(sbd_lu_t *sl)
3399 char *meta = NULL, cl, ch;
3400 int i;
3401 char *tmp, *ptr;
3402 uint64_t rc = SBD_SUCCESS;
3403 int len;
3404 char *file;
3406 if (sl->sl_zfs_meta == NULL) {
3407 if (sbd_create_zfs_meta_object(sl) == SBD_FAILURE)
3408 return (SBD_FAILURE);
3409 } else {
3410 bzero(sl->sl_zfs_meta, (ZAP_MAXVALUELEN / 2));
3413 rw_enter(&sl->sl_zfs_meta_lock, RW_WRITER);
3414 file = sbd_get_zvol_name(sl);
3415 if (sbd_zvolget(file, &meta)) {
3416 rc = SBD_FAILURE;
3417 goto done;
3419 tmp = meta;
3420 /* convert ascii hex to binary meta */
3421 len = strlen(meta);
3422 ptr = sl->sl_zfs_meta;
3423 for (i = 0; i < len; i += 2) {
3424 ch = sbd_ctoi(*tmp++);
3425 cl = sbd_ctoi(*tmp++);
3426 if (ch == -1 || cl == -1) {
3427 rc = SBD_FAILURE;
3428 break;
3430 *ptr++ = (ch << 4) + cl;
3432 done:
3433 rw_exit(&sl->sl_zfs_meta_lock);
3434 if (meta)
3435 kmem_free(meta, len + 1);
3436 kmem_free(file, strlen(file) + 1);
3437 return (rc);
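/*
 * For ZFS-backed LUs the metadata lives in an in-core buffer
 * (sl_zfs_meta) that mirrors the zvol's "stmf_sbd_lu" property. Reads
 * are served straight from that buffer under sl_zfs_meta_lock; writes
 * update the buffer and, except during initial LU creation, push the
 * new contents back to the zvol property.
 */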
3440 sbd_status_t
3441 sbd_read_zfs_meta(sbd_lu_t *sl, uint8_t *buf, uint64_t sz, uint64_t off)
3443 ASSERT(sl->sl_zfs_meta);
3444 rw_enter(&sl->sl_zfs_meta_lock, RW_READER);
3445 bcopy(&sl->sl_zfs_meta[off], buf, sz);
3446 rw_exit(&sl->sl_zfs_meta_lock);
3447 return (SBD_SUCCESS);
3450 sbd_status_t
3451 sbd_write_zfs_meta(sbd_lu_t *sl, uint8_t *buf, uint64_t sz, uint64_t off)
3453 ASSERT(sl->sl_zfs_meta);
3454 if ((off + sz) > (ZAP_MAXVALUELEN / 2 - 1)) {
3455 return (SBD_META_CORRUPTED);
3457 if ((off + sz) > sl->sl_meta_size_used) {
3458 sl->sl_meta_size_used = off + sz;
3459 if (sl->sl_total_meta_size < sl->sl_meta_size_used) {
3460 uint64_t meta_align =
3461 (((uint64_t)1) << sl->sl_meta_blocksize_shift) - 1;
3462 sl->sl_total_meta_size = (sl->sl_meta_size_used +
3463 meta_align) & (~meta_align);
3466 rw_enter(&sl->sl_zfs_meta_lock, RW_WRITER);
3467 bcopy(buf, &sl->sl_zfs_meta[off], sz);
3468 rw_exit(&sl->sl_zfs_meta_lock);
3469 /*
3470 * During creation of a logical unit, sbd_update_zfs_prop will be
3471 * called separately to avoid multiple calls as each meta section
3472 * create/update will result in a call to sbd_write_zfs_meta().
3473 * We only need to update the zvol once during create.
3474 */
3475 mutex_enter(&sl->sl_lock);
3476 if (sl->sl_trans_op != SL_OP_CREATE_REGISTER_LU) {
3477 mutex_exit(&sl->sl_lock);
3478 return (sbd_update_zfs_prop(sl));
3480 mutex_exit(&sl->sl_lock);
3481 return (SBD_SUCCESS);
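/*
 * Push the in-core metadata back to the zvol: the binary buffer is
 * converted to an ASCII hex string (two characters per byte) and stored
 * via sbd_zvolset().
 */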
3484 sbd_status_t
3485 sbd_update_zfs_prop(sbd_lu_t *sl)
3487 char *ptr, *ah_meta;
3488 char *dp = NULL;
3489 int i, num;
3490 char *file;
3491 sbd_status_t ret = SBD_SUCCESS;
3493 ASSERT(sl->sl_zfs_meta);
3494 ptr = ah_meta = kmem_zalloc(ZAP_MAXVALUELEN, KM_SLEEP);
3495 rw_enter(&sl->sl_zfs_meta_lock, RW_READER);
3496 /* convert local copy to ascii hex */
3497 dp = sl->sl_zfs_meta;
3498 for (i = 0; i < sl->sl_total_meta_size; i++, dp++) {
3499 num = ((*dp) >> 4) & 0xF;
3500 *ah_meta++ = (num < 10) ? (num + '0') : (num + ('a' - 10));
3501 num = (*dp) & 0xF;
3502 *ah_meta++ = (num < 10) ? (num + '0') : (num + ('a' - 10));
3504 *ah_meta = NULL;
3505 file = sbd_get_zvol_name(sl);
3506 if (sbd_zvolset(file, (char *)ptr)) {
3507 ret = SBD_META_CORRUPTED;
3509 rw_exit(&sl->sl_zfs_meta_lock);
3510 kmem_free(ptr, ZAP_MAXVALUELEN);
3511 kmem_free(file, strlen(file) + 1);
3512 return (ret);
3516 sbd_is_zvol(char *path)
3518 int is_zfs = 0;
3520 if (SBD_IS_ZVOL(path) == 0)
3521 is_zfs = 1;
3523 return (is_zfs);
3526 /*
3527 * set write cache disable
3528 * wcd - 1 = disable, 0 = enable
3529 */
3530 sbd_status_t
3531 sbd_wcd_set(int wcd, sbd_lu_t *sl)
3533 /* translate to wce bit */
3534 int wce = wcd ? 0 : 1;
3535 int ret;
3536 sbd_status_t sret = SBD_SUCCESS;
3538 mutex_enter(&sl->sl_lock);
3539 sl->sl_flags &= ~SL_WRITEBACK_CACHE_SET_UNSUPPORTED;
3541 if (sl->sl_data_vp->v_type == VREG) {
3542 sl->sl_flags |= SL_FLUSH_ON_DISABLED_WRITECACHE;
3543 goto done;
3546 ret = VOP_IOCTL(sl->sl_data_vp, DKIOCSETWCE, (intptr_t)&wce, FKIOCTL,
3547 kcred, NULL, NULL);
3548 if (ret == 0) {
3549 sl->sl_flags &= ~SL_WRITEBACK_CACHE_SET_UNSUPPORTED;
3550 sl->sl_flags &= ~SL_FLUSH_ON_DISABLED_WRITECACHE;
3551 } else {
3552 sl->sl_flags |= SL_WRITEBACK_CACHE_SET_UNSUPPORTED;
3553 sl->sl_flags |= SL_FLUSH_ON_DISABLED_WRITECACHE;
3554 sret = SBD_FAILURE;
3555 goto done;
3558 done:
3559 mutex_exit(&sl->sl_lock);
3560 return (sret);
3563 /*
3564 * get write cache disable
3565 * wcd - 1 = disable, 0 = enable
3566 */
3567 void
3568 sbd_wcd_get(int *wcd, sbd_lu_t *sl)
3570 int wce;
3571 int ret;
3573 if (sl->sl_data_vp->v_type == VREG) {
3574 *wcd = 0;
3575 return;
3578 ret = VOP_IOCTL(sl->sl_data_vp, DKIOCGETWCE, (intptr_t)&wce, FKIOCTL,
3579 kcred, NULL, NULL);
3580 /* if write cache get failed, assume disabled */
3581 if (ret) {
3582 *wcd = 1;
3583 } else {
3584 /* translate to wcd bit */
3585 *wcd = wce ? 0 : 1;
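/*
 * Fetch the "stmf_sbd_lu" property of a zvol. The routine opens /dev/zfs
 * through LDI, issues ZFS_IOC_OBJSET_STATS, and unpacks the returned
 * nvlist; if the first attempt comes back with ENOMEM the destination
 * buffer is resized to the size the kernel reported and the ioctl is
 * retried. The property value, if present, is returned as a newly
 * allocated string in *comstarprop.
 */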
3590 sbd_zvolget(char *zvol_name, char **comstarprop)
3592 ldi_handle_t zfs_lh;
3593 nvlist_t *nv = NULL, *nv2;
3594 zfs_cmd_t *zc;
3595 char *ptr;
3596 int size = 1024;
3597 int unused;
3598 int rc;
3600 if ((rc = ldi_open_by_name("/dev/zfs", FREAD | FWRITE, kcred,
3601 &zfs_lh, sbd_zfs_ident)) != 0) {
3602 cmn_err(CE_WARN, "ldi_open %d", rc);
3603 return (ENXIO);
3606 zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
3607 (void) strlcpy(zc->zc_name, zvol_name, sizeof (zc->zc_name));
3608 again:
3609 zc->zc_nvlist_dst = (uint64_t)(intptr_t)kmem_alloc(size,
3610 KM_SLEEP);
3611 zc->zc_nvlist_dst_size = size;
3612 rc = ldi_ioctl(zfs_lh, ZFS_IOC_OBJSET_STATS, (intptr_t)zc,
3613 FKIOCTL, kcred, &unused);
3614 /*
3615 * ENOMEM means the list is larger than what we've allocated
3616 * ldi_ioctl will fail with ENOMEM only once
3617 */
3618 if (rc == ENOMEM) {
3619 int newsize;
3620 newsize = zc->zc_nvlist_dst_size;
3621 kmem_free((void *)(uintptr_t)zc->zc_nvlist_dst, size);
3622 size = newsize;
3623 goto again;
3624 } else if (rc != 0) {
3625 goto out;
3627 rc = nvlist_unpack((char *)(uintptr_t)zc->zc_nvlist_dst,
3628 zc->zc_nvlist_dst_size, &nv, 0);
3629 ASSERT(rc == 0); /* nvlist_unpack should not fail */
3630 if ((rc = nvlist_lookup_nvlist(nv, "stmf_sbd_lu", &nv2)) == 0) {
3631 rc = nvlist_lookup_string(nv2, ZPROP_VALUE, &ptr);
3632 if (rc != 0) {
3633 cmn_err(CE_WARN, "couldn't get value");
3634 } else {
3635 *comstarprop = kmem_alloc(strlen(ptr) + 1,
3636 KM_SLEEP);
3637 (void) strcpy(*comstarprop, ptr);
3640 out:
3641 if (nv != NULL)
3642 nvlist_free(nv);
3643 kmem_free((void *)(uintptr_t)zc->zc_nvlist_dst, size);
3644 kmem_free(zc, sizeof (zfs_cmd_t));
3645 (void) ldi_close(zfs_lh, FREAD|FWRITE, kcred);
3647 return (rc);
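/*
 * Store a new value for the zvol's "stmf_sbd_lu" property: the string is
 * packed into an nvlist and handed to the ZFS_IOC_SET_PROP ioctl over an
 * LDI handle to /dev/zfs.
 */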
3651 sbd_zvolset(char *zvol_name, char *comstarprop)
3653 ldi_handle_t zfs_lh;
3654 nvlist_t *nv;
3655 char *packed = NULL;
3656 size_t len;
3657 zfs_cmd_t *zc;
3658 int unused;
3659 int rc;
3661 if ((rc = ldi_open_by_name("/dev/zfs", FREAD | FWRITE, kcred,
3662 &zfs_lh, sbd_zfs_ident)) != 0) {
3663 cmn_err(CE_WARN, "ldi_open %d", rc);
3664 return (ENXIO);
3666 (void) nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP);
3667 (void) nvlist_add_string(nv, "stmf_sbd_lu", comstarprop);
3668 if ((rc = nvlist_pack(nv, &packed, &len, NV_ENCODE_NATIVE, KM_SLEEP))) {
3669 goto out;
3672 zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
3673 (void) strlcpy(zc->zc_name, zvol_name, sizeof (zc->zc_name));
3674 zc->zc_nvlist_src = (uint64_t)(intptr_t)packed;
3675 zc->zc_nvlist_src_size = len;
3676 rc = ldi_ioctl(zfs_lh, ZFS_IOC_SET_PROP, (intptr_t)zc,
3677 FKIOCTL, kcred, &unused);
3678 if (rc != 0) {
3679 cmn_err(CE_NOTE, "ioctl failed %d", rc);
3681 kmem_free(zc, sizeof (zfs_cmd_t));
3682 if (packed)
3683 kmem_free(packed, len);
3684 out:
3685 nvlist_free(nv);
3686 (void) ldi_close(zfs_lh, FREAD|FWRITE, kcred);
3687 return (rc);
3690 /*
3691 * Unmap a region in a volume. Currently only supported for zvols.
3692 */
3693 int
3694 sbd_unmap(sbd_lu_t *sl, uint64_t offset, uint64_t length)
3696 vnode_t *vp;
3697 int unused;
3698 dkioc_free_t df;
3700 /* Right now, we only support UNMAP on zvols. */
3701 if (!(sl->sl_flags & SL_ZFS_META))
3702 return (EIO);
3704 df.df_flags = (sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) ?
3705 DF_WAIT_SYNC : 0;
3706 df.df_start = offset;
3707 df.df_length = length;
3709 /* Use the data vnode we have to send a fop_ioctl(). */
3710 vp = sl->sl_data_vp;
3711 if (vp == NULL) {
3712 cmn_err(CE_WARN, "Cannot unmap - no vnode pointer.");
3713 return (EIO);
3716 return (VOP_IOCTL(vp, DKIOCFREE, (intptr_t)(&df), FKIOCTL, kcred,
3717 &unused, NULL));