/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
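/*
 * transport_lookup_cmd_lun():
 *
 * Map unpacked_lun through the session's node ACL device_list to an
 * active struct se_lun, update the per-deve and per-device I/O byte
 * counters, and attach the command to the LUN's command list.  When no
 * MappedLUN=0 exists for the initiator, fall back to the portal group's
 * virtual LUN 0 (write-protected) so REPORT_LUNS and friends still work.
 */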
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
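/*
 * transport_lookup_tmr_lun():
 *
 * TMR counterpart of transport_lookup_cmd_lun(): resolve unpacked_lun
 * for a task management request and queue the se_tmr_req on the
 * device's dev_tmr_list.  No virtual LUN 0 fallback is provided here.
 */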
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
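/*
 * core_free_device_list_for_node():
 *
 * Release every MappedLUN in a node ACL's device_list via
 * core_disable_device_list_for_node(), then free the device_list
 * array itself.  Called when the node ACL is being freed.
 */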
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}
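/*
 * core_dec_lacl_count():
 *
 * Drop the per-deve outstanding command count taken in
 * transport_lookup_cmd_lun() when a command completes.
 */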
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	unsigned long flags;

	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
}
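/*
 * core_update_device_list_access():
 *
 * Flip an existing MappedLUN entry between READ_WRITE and READ_ONLY
 * under the node ACL's device_list_lock.
 */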
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*      core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
			       " already set for demo mode -> explicit"
			       " LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does not"
			       " match passed struct se_lun for demo mode"
			       " -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}
/*      core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}
/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}
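/*
 * core_alloc_port():
 *
 * Allocate a struct se_port and assign the next free RELATIVE TARGET
 * PORT IDENTIFIER, retrying on 16-bit wrap until a value unique across
 * dev->dev_sep_list is found.
 */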
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
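/*
 * core_export_port():
 *
 * Wire a freshly allocated se_port to its portal group and se_lun,
 * add it to the device's port list, and attach it to the default
 * ALUA target port group when SPC-3 ALUA emulation is active.
 */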
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
				su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
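/*
 * core_dev_export():
 *
 * Allocate and export a new se_port for the passed se_lun, bumping
 * the device's export reference count.  core_dev_unexport() below
 * reverses the process once the LUN is being torn down.
 */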
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}
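/*
 * target_report_luns():
 *
 * Emulate the SCSI REPORT LUNS command from the node ACL's device_list.
 * The LUN LIST LENGTH in bytes 0..3 of the parameter data is the number
 * of reported LUNs times 8, big-endian: e.g. three visible LUNs yield
 * 24 (0x18), encoded as 00 00 00 18, followed by one 8-byte LUN entry
 * per LUN starting at offset 8.
 */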
int target_report_luns(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	if (se_cmd->data_length < 16) {
		pr_warn("REPORT LUNS allocation length %u too small\n",
			se_cmd->data_length);
		se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	buf = transport_kmap_data_sg(se_cmd);
	if (!buf)
		return -ENOMEM;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!se_sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 */
		lun_count++;
		if ((offset + 8) > se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_data_sg(se_cmd);

	target_complete_cmd(se_cmd, GOOD);
	return 0;
}
/*	se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		destroy_workqueue(dev->tmr_wq);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}
void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}
/*	se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}
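/*
 * se_dev_start() / se_dev_stop():
 *
 * Track the first export / last unexport of a device via
 * dev_obj.obj_access_count and flip dev_status between the
 * (OFFLINE_)DEACTIVATED and (OFFLINE_)ACTIVATED states accordingly.
 */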
static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}
static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}
int se_dev_check_online(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}
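/*
 * se_dev_align_max_sectors():
 *
 * Round max_sectors down so that max_sectors * block_size is a
 * multiple of PAGE_SIZE.  For example, with 512-byte blocks and 4K
 * pages, max_sectors = 1023 (523776 bytes) is rounded down to 1016,
 * since 1016 * 512 = 520192 is exactly 127 pages.
 */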
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 tmp, aligned_max_sectors;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
	aligned_max_sectors = (tmp / block_size);
	if (max_sectors != aligned_max_sectors) {
		printk(KERN_INFO "Rounding down aligned max_sectors from %u"
				" to %u\n", max_sectors, aligned_max_sectors);
		return aligned_max_sectors;
	}

	return max_sectors;
}
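/*
 * se_dev_set_default_attribs():
 *
 * Seed a new device's se_dev_attrib fields with the DA_* defaults and
 * with the block-layer queue_limits reported by the backend at
 * *_create_virtdevice() time.
 */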
void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
						limits->logical_block_size);
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	/*
	 * Set fabric_max_sectors, which is reported in block limits
	 * VPD page (B0h).
	 */
	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	/*
	 * Set optimal_sectors from fabric_max_sectors, which can be
	 * lowered via configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
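/*
 * The se_dev_set_*() helpers below back the per-device configfs
 * attributes: each validates its input (and, where a live change
 * would be unsafe, refuses it while the device is exported) before
 * updating se_dev_attrib and returning 0 or a negative errno.
 */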
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
	return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	pr_err("dpo_emulated not supported\n");
	return -EINVAL;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	pr_err("fua read emulated not supported\n");
	return -EINVAL;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
	return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
			dev, flag);
	return 0;
}
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
					dev->se_sub_dev->se_dev_attrib.block_size);

	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}
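/*
 * core_dev_add_lun():
 *
 * Export a device as a new LUN within a portal group, defaulting to
 * READ_WRITE access, and update the LUN maps of any dynamically
 * generated node ACLs when demo mode is enabled.
 */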
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return ERR_PTR(-EACCES);
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}
/*      core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}
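/*
 * core_get_lun_from_tpg():
 *
 * Look up an se_lun that is still in TRANSPORT_LUN_STATUS_FREE, i.e.
 * not yet activated; core_dev_get_lun() below is its counterpart for
 * LUNs that are already TRANSPORT_LUN_STATUS_ACTIVE.
 */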
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
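/*
 * core_dev_init_initiator_node_lun_acl():
 *
 * Allocate and initialize a struct se_lun_acl for an explicit
 * MappedLUN, resolving initiatorname to an existing node ACL first.
 * On failure NULL is returned and *ret carries the errno.
 */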
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}
/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
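/*
 * core_dev_setup_virtual_lun0():
 *
 * Create the global virtual LUN 0 device at module load: an internal
 * rd_mcp HBA with a small ramdisk backend (rd_pages=8) whose
 * g_lun0_dev backs se_portal_group->tpg_virt_lun0 in tpg.c when no
 * MappedLUN=0 is configured.
 */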
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!se_dev) {
		pr_err("Unable to allocate memory for"
				" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_pr.registration_lock);
	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!se_dev->se_dev_su_ptr) {
		pr_err("Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	g_lun0_dev = dev;

	return 0;
out:
	lun0_su_dev = NULL;
	kfree(se_dev);
	if (lun0_hba) {
		core_delete_hba(lun0_hba);
		lun0_hba = NULL;
	}
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;
	struct se_subsystem_dev *su_dev = lun0_su_dev;

	if (!hba)
		return;

	if (g_lun0_dev)
		se_free_virtual_device(g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}