/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
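
/*
 * transport_lookup_cmd_lun():
 *
 * Map the fabric supplied unpacked LUN to this session's se_dev_entry,
 * enforce per-MappedLUN read-only access, and take a percpu lun_ref
 * that the command release path is expected to drop.  When no active
 * MappedLUN=0 exists, the TPG's virtual LUN 0 is substituted so that
 * REPORT LUNS and friends still work.
 */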
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
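
/*
 * transport_lookup_tmr_lun():
 *
 * TMR variant of the LUN lookup above: resolve unpacked_lun for a task
 * management request and link the se_tmr_req onto the owning device's
 * dev_tmr_list.  Unlike the command path there is no virtual LUN 0
 * fallback; a missing MappedLUN simply fails with -ENODEV.
 */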
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc_mb(&deve->pr_ref_count);
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
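
/*
 * core_free_device_list_for_node():
 *
 * Release all se_dev_entry mappings for a NodeACL at session/ACL
 * teardown.  device_list_lock is dropped around each call to
 * core_disable_device_list_for_node() below, since disabling an entry
 * takes other locks and busy-waits on outstanding PR references.
 */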
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}
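
/*
 * core_update_device_list_access():
 *
 * Flip an existing MappedLUN between READ-WRITE and READ-ONLY under
 * device_list_lock.  The caller is expected to pass a mapped_lun that
 * was previously enabled via core_enable_device_list_for_node().
 */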
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*	core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
			       " already set for demo mode -> explicit"
			       " LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
			       " not match passed struct se_lun for demo mode"
			       " -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}
/*	core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}
/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}
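
/*
 * core_alloc_port():
 *
 * Allocate a struct se_port and claim the next free 16-bit RELATIVE
 * TARGET PORT IDENTIFIER for the device, retrying on counter wrap
 * until a value unique across dev->dev_sep_list is found.
 */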
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
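
/*
 * core_export_port():
 *
 * Wire a freshly allocated se_port into the device's dev_sep_list and
 * point the fabric LUN at it.  Virtual backends (anything that is not
 * pSCSI passthrough or an internal-use HBA) are also attached to the
 * default ALUA target port group here.
 */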
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();

	spin_lock(&dev->se_port_lock);
	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
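
/*
 * core_dev_export():
 *
 * Export a backend device as a fabric LUN: allocate a port, bump the
 * device's export_count under hba->device_lock, and publish the port
 * via core_export_port().  core_dev_unexport() below is the inverse.
 */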
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
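
/*
 * Worked example for se_dev_align_max_sectors(): assuming a 4K
 * PAGE_SIZE and a 512 byte block_size, alignment = 4096 / 512 = 8,
 * so a max_sectors of 1023 is rounded down to 1016 (127 full pages).
 */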
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity);

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_write_same_len);
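
/*
 * The INQUIRY product identification (MODEL) field is fixed at 16
 * bytes, so when emulate_model_alias is enabled the configfs backstore
 * name is copied into t10_wwn.model and truncated, with a warning, if
 * it does not fit.
 */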
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_dpo);

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tas);
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpu);

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpws);

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		 dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_caw);

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		 dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_3pc);
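
/*
 * se_dev_set_pi_prot_type():
 *
 * Select the emulated T10-PI (DIF) protection type: 0 disables
 * protection, 1 and 3 select TYPE1/TYPE3, and TYPE2 is rejected as
 * unsupported.  Hardware-native DIF causes the request to be ignored
 * with a warning, while an unconfigured device, active exports, or a
 * backend without init_prot/free_prot hooks reject the change.
 */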
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
	int rc, old_prot = dev->dev_attrib.pi_prot_type;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (dev->dev_attrib.hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return 0;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return 0;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.pi_prot_type = flag;

	if (flag && !old_prot) {
		rc = dev->transport->init_prot(dev);
		if (rc) {
			dev->dev_attrib.pi_prot_type = old_prot;
			return rc;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_type);

int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
	int rc;

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d for pi_prot_format\n", flag);
		return -EINVAL;
	}
	if (!flag)
		return 0;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	rc = dev->transport->format_prot(dev);
	if (rc)
		return rc;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_format);
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);

int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_is_nonrot);

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (queue_depth > dev->dev_attrib.queue_depth) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_queue_depth);

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.hw_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_optimal_sectors);

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_device: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_block_size);
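
/*
 * core_dev_add_lun():
 *
 * Allocate and activate a fabric LUN for the device with default
 * READ-WRITE access, then walk the TPG's dynamically generated (demo
 * mode) NodeACLs so that existing initiators pick up the new mapping.
 */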
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int rc;

	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun;
}

/*	core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/*	core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc_mb(&lun->lun_acl_count);
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}
/*	core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec_mb(&lun->lun_acl_count);
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
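
/*
 * target_alloc_device():
 *
 * First stage of device creation: ask the backend to allocate its
 * se_device, then pre-initialize every list, lock and default device
 * attribute, including the embedded xcopy_lun used by EXTENDED_COPY.
 * The device is not usable until target_configure_device() runs.
 */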
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
	spin_lock_init(&xcopy_lun->lun_acl_lock);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}
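
/*
 * target_configure_device():
 *
 * Second stage of device creation, called once configfs attributes
 * have been set: invoke the backend's configure_device(), derive
 * block_size/queue_depth from their hw_* counterparts, set up ALUA
 * and the TMR workqueue, and add the device to g_device_list.
 */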
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
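
/*
 * target_free_device():
 *
 * Tear down everything target_configure_device() and
 * target_alloc_device() set up; callers must have already removed all
 * port exports, hence the WARN_ON() against a non-empty dev_sep_list.
 */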
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}
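
/*
 * core_dev_setup_virtual_lun0():
 *
 * Create the global virtual LUN 0 backed by an internal rd_mcp
 * ramdisk (rd_pages=8, nullio) on a hidden HBA.  This is the device
 * that transport_lookup_cmd_lun() falls back to, so initiators with
 * no MappedLUN=0 still get sane REPORT LUNS / INQUIRY responses.
 */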
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);

	core_delete_hba(hba);
}