/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the iSCSI Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);
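
/*
 * transport_get_lun_for_cmd():
 *
 * Look up unpacked_lun in the session's NodeACL device_list and attach the
 * matching struct se_lun to *se_cmd, updating the per-deve and per-device
 * byte counters.  Falls back to se_portal_group->tpg_virt_lun0 (with forced
 * WRITE PROTECT) when no active MappedLUN=0 exists for this Initiator Port.
 */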
int transport_get_lun_for_cmd(
        struct se_cmd *se_cmd,
        unsigned char *cdb,
        u32 unpacked_lun)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = SE_SESS(se_cmd);
        unsigned long flags;
        int read_only = 0;

        spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
        deve = se_cmd->se_deve =
                        &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                deve->total_cmds++;
                deve->total_bytes += se_cmd->data_length;

                if (se_cmd->data_direction == DMA_TO_DEVICE) {
                        if (deve->lun_flags &
                                        TRANSPORT_LUNFLAGS_READ_ONLY) {
                                read_only = 1;
                                goto out;
                        }
                        deve->write_bytes += se_cmd->data_length;
                } else if (se_cmd->data_direction ==
                           DMA_FROM_DEVICE) {
                        deve->read_bytes += se_cmd->data_length;
                }
                deve->deve_cmds++;

                se_lun = se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
out:
        spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

        if (!se_lun) {
                if (read_only) {
                        se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
                        se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08x\n",
                                CMD_TFO(se_cmd)->get_fabric_name(),
                                unpacked_lun);
                        return -1;
                } else {
                        /*
                         * Use the se_portal_group->tpg_virt_lun0 to allow for
                         * REPORT_LUNS, et al to be returned when no active
                         * MappedLUN=0 exists for this Initiator Port.
                         */
                        if (unpacked_lun != 0) {
                                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                                printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                        " Access for 0x%08x\n",
                                        CMD_TFO(se_cmd)->get_fabric_name(),
                                        unpacked_lun);
                                return -1;
                        }
                        /*
                         * Force WRITE PROTECT for virtual LUN 0
                         */
                        if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                            (se_cmd->data_direction != DMA_NONE)) {
                                se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
                                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                                return -1;
                        }
#if 0
                        printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
                                CMD_TFO(se_cmd)->get_fabric_name());
#endif
                        se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                        se_cmd->orig_fe_lun = 0;
                        se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
                        se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                }
        }
        /*
         * Determine if the struct se_lun is online.
         */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
        if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
                se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }

        {
        struct se_device *dev = se_lun->lun_se_dev;
        spin_lock(&dev->stats_lock);
        dev->num_cmds++;
        if (se_cmd->data_direction == DMA_TO_DEVICE)
                dev->write_bytes += se_cmd->data_length;
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                dev->read_bytes += se_cmd->data_length;
        spin_unlock(&dev->stats_lock);
        }

        /*
         * Add the struct se_cmd to the struct se_lun's cmd list.  This list is
         * used for tracking state of struct se_cmds during LUN shutdown events.
         */
        spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
        list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
        atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
#if 0
        printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
                CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
#endif
        spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_cmd);
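
/*
 * transport_get_lun_for_tmr():
 *
 * Same NodeACL device_list lookup as transport_get_lun_for_cmd(), but for
 * task management requests: the located LUN and backing device are also
 * attached to the struct se_tmr_req, which is then added to the device's
 * dev_tmr_list.
 */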
int transport_get_lun_for_tmr(
        struct se_cmd *se_cmd,
        u32 unpacked_lun)
{
        struct se_device *dev = NULL;
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = SE_SESS(se_cmd);
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

        spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
        deve = se_cmd->se_deve =
                        &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
                dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
/*              se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
        }
        spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

        if (!se_lun) {
                printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08x\n",
                        CMD_TFO(se_cmd)->get_fabric_name(),
                        unpacked_lun);
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }
        /*
         * Determine if the struct se_lun is online.
         */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
        if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }

        spin_lock(&dev->se_tmr_lock);
        list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
        spin_unlock(&dev->se_tmr_lock);

        return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_tmr);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_port *port;
        struct se_portal_group *tpg = nacl->se_tpg;
        u32 i;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                lun = deve->se_lun;
                if (!(lun)) {
                        printk(KERN_ERR "%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                TPG_TFO(tpg)->get_fabric_name());
                        continue;
                }
                port = lun->lun_sep;
                if (!(port)) {
                        printk(KERN_ERR "%s device entries port pointer is"
                                " NULL, but Initiator has access.\n",
                                TPG_TFO(tpg)->get_fabric_name());
                        continue;
                }
                if (port->sep_rtpi != rtpi)
                        continue;

                atomic_inc(&deve->pr_ref_count);
                smp_mb__after_atomic_inc();
                spin_unlock_irq(&nacl->device_list_lock);

                return deve;
        }
        spin_unlock_irq(&nacl->device_list_lock);

        return NULL;
}
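
/*
 * Release all MappedLUN entries for a NodeACL at shutdown, dropping each
 * active struct se_dev_entry via core_update_device_list_for_node() before
 * freeing the device_list array itself.
 */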
int core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        u32 i;

        if (!(nacl->device_list))
                return 0;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        printk(KERN_ERR "%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                TPG_TFO(tpg)->get_fabric_name());
                        continue;
                }
                lun = deve->se_lun;

                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);

        kfree(nacl->device_list);
        nacl->device_list = NULL;

        return 0;
}
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
        struct se_dev_entry *deve;

        spin_lock_irq(&se_nacl->device_list_lock);
        deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
        deve->deve_cmds--;
        spin_unlock_irq(&se_nacl->device_list_lock);

        return;
}
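
/*
 * Flip an existing MappedLUN entry between READ_WRITE and READ_ONLY
 * under nacl->device_list_lock.
 */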
void core_update_device_list_access(
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        spin_lock_irq(&nacl->device_list_lock);
        deve = &nacl->device_list[mapped_lun];
        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        } else {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
        }
        spin_unlock_irq(&nacl->device_list_lock);

        return;
}
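
/*      core_update_device_list_for_node():
 *
 *      Enable (enable=1) or disable (enable=0) a struct se_dev_entry for a
 *      NodeACL MappedLUN, handling the demo mode -> explicit LUN ACL
 *      transition and the matching port->sep_alua_list membership.
 */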
int core_update_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg,
        int enable)
{
        struct se_port *port = lun->lun_sep;
        struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
        int trans = 0;
        /*
         * If the MappedLUN entry is being disabled, the entry in
         * port->sep_alua_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         */
        if (!(enable)) {
                /*
                 * deve->se_lun_acl will be NULL for demo-mode created LUNs
                 * that have not been explicitly converted to MappedLUNs ->
                 * struct se_lun_acl, but we remove deve->alua_port_list from
                 * port->sep_alua_list.  This also means that active UAs and
                 * NodeACL context specific PR metadata for demo-mode
                 * MappedLUN *deve will be released below..
                 */
                spin_lock_bh(&port->sep_alua_lock);
                list_del(&deve->alua_port_list);
                spin_unlock_bh(&port->sep_alua_lock);
        }

        spin_lock_irq(&nacl->device_list_lock);
        if (enable) {
                /*
                 * Check if the call is handling demo mode -> explicit LUN ACL
                 * transition.  This transition must be for the same struct
                 * se_lun + mapped_lun that was setup in demo mode..
                 */
                if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                        if (deve->se_lun_acl != NULL) {
                                printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
                                        " already set for demo mode -> explicit"
                                        " LUN ACL transition\n");
                                spin_unlock_irq(&nacl->device_list_lock);
                                return -1;
                        }
                        if (deve->se_lun != lun) {
                                printk(KERN_ERR "struct se_dev_entry->se_lun does"
                                        " not match passed struct se_lun for demo"
                                        " mode -> explicit LUN ACL transition\n");
                                spin_unlock_irq(&nacl->device_list_lock);
                                return -1;
                        }
                        deve->se_lun_acl = lun_acl;
                        trans = 1;
                } else {
                        deve->se_lun = lun;
                        deve->se_lun_acl = lun_acl;
                        deve->mapped_lun = mapped_lun;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
                }

                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
                }

                if (trans) {
                        spin_unlock_irq(&nacl->device_list_lock);
                        return 0;
                }
                deve->creation_time = get_jiffies_64();
                deve->attach_count++;
                spin_unlock_irq(&nacl->device_list_lock);

                spin_lock_bh(&port->sep_alua_lock);
                list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
                spin_unlock_bh(&port->sep_alua_lock);

                return 0;
        }
        /*
         * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
         * PR operation to complete.
         */
        spin_unlock_irq(&nacl->device_list_lock);
        while (atomic_read(&deve->pr_ref_count) != 0)
                cpu_relax();
        spin_lock_irq(&nacl->device_list_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(deve);
        deve->se_lun = NULL;
        deve->se_lun_acl = NULL;
        deve->lun_flags = 0;
        deve->creation_time = 0;
        deve->attach_count--;
        spin_unlock_irq(&nacl->device_list_lock);

        core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
        return 0;
}
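
/*      core_clear_lun_from_tpg():
 *
 *      Walk every NodeACL in the TPG and disable any struct se_dev_entry
 *      still referencing the struct se_lun being removed.
 */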
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;
        u32 i;

        spin_lock_bh(&tpg->acl_node_lock);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
                spin_unlock_bh(&tpg->acl_node_lock);

                spin_lock_irq(&nacl->device_list_lock);
                for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                        deve = &nacl->device_list[i];
                        if (lun != deve->se_lun)
                                continue;
                        spin_unlock_irq(&nacl->device_list_lock);

                        core_update_device_list_for_node(lun, NULL,
                                deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
                                nacl, tpg, 0);

                        spin_lock_irq(&nacl->device_list_lock);
                }
                spin_unlock_irq(&nacl->device_list_lock);

                spin_lock_bh(&tpg->acl_node_lock);
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        return;
}
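
/*
 * Allocate a struct se_port and assign the next free RELATIVE TARGET PORT
 * IDENTIFIER, skipping zero and any value already present in
 * dev->dev_sep_list.
 */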
static struct se_port *core_alloc_port(struct se_device *dev)
{
        struct se_port *port, *port_tmp;

        port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
        if (!(port)) {
                printk(KERN_ERR "Unable to allocate struct se_port\n");
                return NULL;
        }
        INIT_LIST_HEAD(&port->sep_alua_list);
        INIT_LIST_HEAD(&port->sep_list);
        atomic_set(&port->sep_tg_pt_secondary_offline, 0);
        spin_lock_init(&port->sep_alua_lock);
        mutex_init(&port->sep_tg_pt_md_mutex);

        spin_lock(&dev->se_port_lock);
        if (dev->dev_port_count == 0x0000ffff) {
                printk(KERN_WARNING "Reached dev->dev_port_count =="
                                " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
                return NULL;
        }
again:
        /*
         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
         * struct se_device.  Here is the table from spc4r17 section 7.7.3.8.
         *
         *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
         *
         * Code      Description
         * 0h        Reserved
         * 1h        Relative port 1, historically known as port A
         * 2h        Relative port 2, historically known as port B
         * 3h to FFFFh    Relative port 3 through 65 535
         */
        port->sep_rtpi = dev->dev_rpti_counter++;
        if (!(port->sep_rtpi))
                goto again;

        list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap..
                 */
                if (port->sep_rtpi == port_tmp->sep_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);

        return port;
}
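
/*
 * Link an allocated struct se_port to its TPG and LUN, and attach it to
 * the default ALUA Target Port Group when SPC-3 ALUA emulation is active.
 */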
static void core_export_port(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_port *port,
        struct se_lun *lun)
{
        struct se_subsystem_dev *su_dev = SU_DEV(dev);
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

        spin_lock(&dev->se_port_lock);
        spin_lock(&lun->lun_sep_lock);
        port->sep_tpg = tpg;
        port->sep_lun = lun;
        lun->lun_sep = port;
        spin_unlock(&lun->lun_sep_lock);

        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
                        printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
                                        "_gp_member_t\n");
                        return;
                }
                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
                        T10_ALUA(su_dev)->default_tg_pt_gp);
                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
                        " Group: alua/default_tg_pt_gp\n",
                        TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
        }

        dev->dev_port_count++;
        port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *      Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
{
        /*
         * Wait for any port reference for PR ALL_TG_PT=1 operation
         * to complete in __core_scsi3_alloc_registration()
         */
        spin_unlock(&dev->se_port_lock);
        if (atomic_read(&port->sep_tg_pt_ref_cnt))
                cpu_relax();
        spin_lock(&dev->se_port_lock);

        core_alua_free_tg_pt_gp_mem(port);

        list_del(&port->sep_list);
        dev->dev_port_count--;
        kfree(port);

        return;
}
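
/*
 * Export a struct se_device as a LUN within a fabric TPG: allocates the
 * struct se_port, starts the device, and bumps dev_export_obj.
 */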
int core_dev_export(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_port *port;

        port = core_alloc_port(dev);
        if (!(port))
                return -1;

        lun->lun_se_dev = dev;
        se_dev_start(dev);

        atomic_inc(&dev->dev_export_obj.obj_access_count);
        core_export_port(dev, tpg, port, lun);
        return 0;
}
void core_dev_unexport(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_port *port = lun->lun_sep;

        spin_lock(&lun->lun_sep_lock);
        if (lun->lun_se_dev == NULL) {
                spin_unlock(&lun->lun_sep_lock);
                return;
        }
        spin_unlock(&lun->lun_sep_lock);

        spin_lock(&dev->se_port_lock);
        atomic_dec(&dev->dev_export_obj.obj_access_count);
        core_release_port(dev, port);
        spin_unlock(&dev->se_port_lock);

        se_dev_stop(dev);
        lun->lun_se_dev = NULL;
}
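
/*
 * Emulate the SPC REPORT LUNS response payload from the session's NodeACL
 * device_list, computing the LUN LIST LENGTH header even past the initial
 * allocation length.
 */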
int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun;
        struct se_session *se_sess = SE_SESS(se_cmd);
        struct se_task *se_task;
        unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
        u32 cdb_offset = 0, lun_count = 0, offset = 8;
        u64 i, lun;

        list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
                break;

        if (!(se_task)) {
                printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
                return PYX_TRANSPORT_LU_COMM_FAILURE;
        }

        /*
         * If no struct se_session pointer is present, this struct se_cmd is
         * coming via a target_core_mod PASSTHROUGH op, and not through
         * a $FABRIC_MOD.  In that case, report LUN=0 only.
         */
        if (!(se_sess)) {
                lun = 0;
                buf[offset++] = ((lun >> 56) & 0xff);
                buf[offset++] = ((lun >> 48) & 0xff);
                buf[offset++] = ((lun >> 40) & 0xff);
                buf[offset++] = ((lun >> 32) & 0xff);
                buf[offset++] = ((lun >> 24) & 0xff);
                buf[offset++] = ((lun >> 16) & 0xff);
                buf[offset++] = ((lun >> 8) & 0xff);
                buf[offset++] = (lun & 0xff);
                lun_count = 1;
                goto done;
        }

        spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &SE_NODE_ACL(se_sess)->device_list[i];
                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;
                se_lun = deve->se_lun;
                /*
                 * We determine the correct LUN LIST LENGTH even once we
                 * have reached the initial allocation length.
                 * See SPC2-R20 7.19.
                 */
                lun_count++;
                if ((cdb_offset + 8) >= se_cmd->data_length)
                        continue;

                lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
                buf[offset++] = ((lun >> 56) & 0xff);
                buf[offset++] = ((lun >> 48) & 0xff);
                buf[offset++] = ((lun >> 40) & 0xff);
                buf[offset++] = ((lun >> 32) & 0xff);
                buf[offset++] = ((lun >> 24) & 0xff);
                buf[offset++] = ((lun >> 16) & 0xff);
                buf[offset++] = ((lun >> 8) & 0xff);
                buf[offset++] = (lun & 0xff);
                cdb_offset += 8;
        }
        spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

        /*
         * See SPC3 r07, page 159.
         */
done:
        lun_count *= 8;
        buf[0] = ((lun_count >> 24) & 0xff);
        buf[1] = ((lun_count >> 16) & 0xff);
        buf[2] = ((lun_count >> 8) & 0xff);
        buf[3] = (lun_count & 0xff);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*      se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
            (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
            (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
                se_dev_stop(dev);

        if (dev->dev_ptr) {
                kthread_stop(dev->process_thread);
                if (dev->transport->free_device)
                        dev->transport->free_device(dev->dev_ptr);
        }

        spin_lock(&hba->device_lock);
        list_del(&dev->dev_list);
        hba->dev_count--;
        spin_unlock(&hba->device_lock);

        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        kfree(dev->dev_status_queue_obj);
        kfree(dev->dev_queue_obj);
        kfree(dev);

        return;
}
void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);

        return;
}
/*
 *      Called with struct se_hba->device_lock held.
 */
void se_clear_dev_ports(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;
        struct se_lun *lun;
        struct se_portal_group *tpg;
        struct se_port *sep, *sep_tmp;

        spin_lock(&dev->se_port_lock);
        list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
                spin_unlock(&dev->se_port_lock);
                spin_unlock(&hba->device_lock);

                lun = sep->sep_lun;
                tpg = sep->sep_tpg;
                spin_lock(&lun->lun_sep_lock);
                if (lun->lun_se_dev == NULL) {
                        spin_unlock(&lun->lun_sep_lock);
                        continue;
                }
                spin_unlock(&lun->lun_sep_lock);

                core_dev_del_lun(tpg, lun->unpacked_lun);

                spin_lock(&hba->device_lock);
                spin_lock(&dev->se_port_lock);
        }
        spin_unlock(&dev->se_port_lock);

        return;
}
/*      se_free_virtual_device():
 *
 *      Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
        spin_lock(&hba->device_lock);
        se_clear_dev_ports(dev);
        spin_unlock(&hba->device_lock);

        core_alua_free_lu_gp_mem(dev);
        se_release_device_for_hba(dev);

        return 0;
}
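
/*
 * se_dev_start()/se_dev_stop() track dev_obj.obj_access_count and move the
 * device between the (OFFLINE_)DEACTIVATED and (OFFLINE_)ACTIVATED states
 * on the first reference and the last release respectively.
 */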
static void se_dev_start(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        spin_lock(&hba->device_lock);
        atomic_inc(&dev->dev_obj.obj_access_count);
        if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
                if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
                } else if (dev->dev_status &
                           TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
                        dev->dev_status &=
                                ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
                }
        }
        spin_unlock(&hba->device_lock);
}
static void se_dev_stop(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        spin_lock(&hba->device_lock);
        atomic_dec(&dev->dev_obj.obj_access_count);
        if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
                if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
                } else if (dev->dev_status &
                           TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
                        dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
                        dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
                }
        }
        spin_unlock(&hba->device_lock);
}
int se_dev_check_online(struct se_device *dev)
{
        int ret;

        spin_lock_irq(&dev->dev_status_lock);
        ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
               (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
        spin_unlock_irq(&dev->dev_status_lock);

        return ret;
}
int se_dev_check_shutdown(struct se_device *dev)
{
        int ret;

        spin_lock_irq(&dev->dev_status_lock);
        ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
        spin_unlock_irq(&dev->dev_status_lock);

        return ret;
}
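
/*
 * Seed a new struct se_device's attributes from the DA_* defaults and the
 * subsystem plugin's struct se_dev_limits / struct queue_limits values.
 */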
void se_dev_set_default_attribs(
        struct se_device *dev,
        struct se_dev_limits *dev_limits)
{
        struct queue_limits *limits = &dev_limits->limits;

        DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
        DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
        DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
        DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
        DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
        DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
        DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
        DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
        DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        /*
         * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
         * iblock_create_virtdevice() from struct queue_limits values
         * if blk_queue_discard()==1
         */
        DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        DEV_ATTRIB(dev)->max_unmap_block_desc_count =
                                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        DEV_ATTRIB(dev)->unmap_granularity_alignment =
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        /*
         * block_size is based on subsystem plugin dependent requirements.
         */
        DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
        DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
        /*
         * max_sectors is based on subsystem plugin dependent requirements.
         */
        DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
        DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
        /*
         * Set optimal_sectors from max_sectors, which can be lowered via
         * configfs.
         */
        DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
        /*
         * queue_depth is based on subsystem plugin dependent requirements.
         */
        DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
        DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
}
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
        if (task_timeout > DA_TASK_TIMEOUT_MAX) {
                printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
                        " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
                return -1;
        } else {
                DEV_ATTRIB(dev)->task_timeout = task_timeout;
                printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
                        dev, task_timeout);
        }

        return 0;
}
int se_dev_set_max_unmap_lba_count(
        struct se_device *dev,
        u32 max_unmap_lba_count)
{
        DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
        printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
                        dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
        return 0;
}
int se_dev_set_max_unmap_block_desc_count(
        struct se_device *dev,
        u32 max_unmap_block_desc_count)
{
        DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
        printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
                        dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
        return 0;
}
int se_dev_set_unmap_granularity(
        struct se_device *dev,
        u32 unmap_granularity)
{
        DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
        printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
                        dev, DEV_ATTRIB(dev)->unmap_granularity);
        return 0;
}
int se_dev_set_unmap_granularity_alignment(
        struct se_device *dev,
        u32 unmap_granularity_alignment)
{
        DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
        printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
                        dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
        return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (TRANSPORT(dev)->dpo_emulated == NULL) {
                printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
                return -1;
        }
        if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
                printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
                return -1;
        }
        DEV_ATTRIB(dev)->emulate_dpo = flag;
        printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
                " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
        return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (TRANSPORT(dev)->fua_write_emulated == NULL) {
                printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
                return -1;
        }
        if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
                printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
                return -1;
        }
        DEV_ATTRIB(dev)->emulate_fua_write = flag;
        printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
                        dev, DEV_ATTRIB(dev)->emulate_fua_write);
        return 0;
}
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (TRANSPORT(dev)->fua_read_emulated == NULL) {
                printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
                return -1;
        }
        if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
                printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
                return -1;
        }
        DEV_ATTRIB(dev)->emulate_fua_read = flag;
        printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
                        dev, DEV_ATTRIB(dev)->emulate_fua_read);
        return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        if (TRANSPORT(dev)->write_cache_emulated == NULL) {
                printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
                return -1;
        }
        if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
                printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
                return -1;
        }
        DEV_ATTRIB(dev)->emulate_write_cache = flag;
        printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
                        dev, DEV_ATTRIB(dev)->emulate_write_cache);
        return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1) && (flag != 2)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device"
                        " UA_INTRLCK_CTRL while dev_export_obj: %d count"
                        " exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }
        DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
        printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
                dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);

        return 0;
}
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
                        " dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }
        DEV_ATTRIB(dev)->emulate_tas = flag;
        printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
                dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");

        return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
        if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
                printk(KERN_ERR "Generic Block Discard not supported\n");
                return -1;
        }

        DEV_ATTRIB(dev)->emulate_tpu = flag;
        printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
                                dev, flag);
        return 0;
}
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
        if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
                printk(KERN_ERR "Generic Block Discard not supported\n");
                return -1;
        }

        DEV_ATTRIB(dev)->emulate_tpws = flag;
        printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
                                dev, flag);
        return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -1;
        }
        DEV_ATTRIB(dev)->enforce_pr_isids = flag;
        printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
                (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");

        return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
        u32 orig_queue_depth = dev->queue_depth;

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
                        " dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }
        if (!(queue_depth)) {
                printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
                        "_depth\n", dev);
                return -1;
        }

        if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
                        printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
                                " exceeds TCM/SE_Device TCQ: %u\n",
                                dev, queue_depth,
                                DEV_ATTRIB(dev)->hw_queue_depth);
                        return -1;
                }
        } else {
                if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
                        if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
                                printk(KERN_ERR "dev[%p]: Passed queue_depth:"
                                        " %u exceeds TCM/SE_Device MAX"
                                        " TCQ: %u\n", dev, queue_depth,
                                        DEV_ATTRIB(dev)->hw_queue_depth);
                                return -1;
                        }
                }
        }

        DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
        if (queue_depth > orig_queue_depth)
                atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
        else if (queue_depth < orig_queue_depth)
                atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

        printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
                        dev, queue_depth);
        return 0;
}
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
        int force = 0; /* Force setting for VDEVS */

        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device"
                        " max_sectors while dev_export_obj: %d count exists\n",
                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }
        if (!(max_sectors)) {
                printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
                        " max_sectors\n", dev);
                return -1;
        }
        if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
                printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
                        " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
                                DA_STATUS_MAX_SECTORS_MIN);
                return -1;
        }
        if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
                        printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                " greater than TCM/SE_Device max_sectors:"
                                " %u\n", dev, max_sectors,
                                DEV_ATTRIB(dev)->hw_max_sectors);
                        return -1;
                }
        } else {
                if (!(force) && (max_sectors >
                                 DEV_ATTRIB(dev)->hw_max_sectors)) {
                        printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                " greater than TCM/SE_Device max_sectors"
                                ": %u, use force=1 to override.\n", dev,
                                max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
                        return -1;
                }
                if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
                        printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
                                " greater than DA_STATUS_MAX_SECTORS_MAX:"
                                " %u\n", dev, max_sectors,
                                DA_STATUS_MAX_SECTORS_MAX);
                        return -1;
                }
        }

        DEV_ATTRIB(dev)->max_sectors = max_sectors;
        printk("dev[%p]: SE Device max_sectors changed to %u\n",
                        dev, max_sectors);
        return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device"
                        " optimal_sectors while dev_export_obj: %d count exists\n",
                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
                return -EINVAL;
        }
        if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
                                " changed for TCM/pSCSI\n", dev);
                return -EINVAL;
        }
        if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
                printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
                        " greater than max_sectors: %u\n", dev,
                        optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
                return -EINVAL;
        }

        DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
        printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
                        dev, optimal_sectors);

        return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
                        " while dev_export_obj: %d count exists\n", dev,
                        atomic_read(&dev->dev_export_obj.obj_access_count));
                return -1;
        }

        if ((block_size != 512) &&
            (block_size != 1024) &&
            (block_size != 2048) &&
            (block_size != 4096)) {
                printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
                        " for SE device, must be 512, 1024, 2048 or 4096\n",
                        dev, block_size);
                return -1;
        }

        if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
                        " Physical Device, use for Linux/SCSI to change"
                        " block_size for underlying hardware\n", dev);
                return -1;
        }

        DEV_ATTRIB(dev)->block_size = block_size;
        printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
                        dev, block_size);
        return 0;
}
struct se_lun *core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_hba *hba,
        struct se_device *dev,
        u32 lun)
{
        struct se_lun *lun_p;
        u32 lun_access = 0;

        if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
                printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
                        atomic_read(&dev->dev_access_obj.obj_access_count));
                return NULL;
        }

        lun_p = core_tpg_pre_addlun(tpg, lun);
        if ((IS_ERR(lun_p)) || !(lun_p))
                return NULL;

        if (dev->dev_flags & DF_READ_ONLY)
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        else
                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

        if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
                return NULL;

        printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
                TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
         * generate_node_acl is enabled.
         */
        if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;
                spin_lock_bh(&tpg->acl_node_lock);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl) {
                                spin_unlock_bh(&tpg->acl_node_lock);
                                core_tpg_add_node_to_devs(acl, tpg);
                                spin_lock_bh(&tpg->acl_node_lock);
                        }
                }
                spin_unlock_bh(&tpg->acl_node_lock);
        }

        return lun_p;
}
/*      core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;
        int ret = 0;

        lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
        if (!(lun))
                return ret;

        core_tpg_post_dellun(tpg, lun);

        printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
                " device object\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
                TPG_TFO(tpg)->get_fabric_name());

        return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
                        "_PER_TPG-1: %u for Target Portal Group: %hu\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = &tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}
/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
                        "_TPG-1: %u for Target Portal Group: %hu\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = &tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}
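
/*
 * Allocate and initialize a struct se_lun_acl for an explicit NodeACL
 * MappedLUN; the caller wires it up via
 * core_dev_add_initiator_node_lun_acl().
 */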
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        u32 mapped_lun,
        char *initiatorname,
        int *ret)
{
        struct se_lun_acl *lacl;
        struct se_node_acl *nacl;

        if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
                printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
                        TPG_TFO(tpg)->get_fabric_name());
                *ret = -EOVERFLOW;
                return NULL;
        }
        nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!(nacl)) {
                *ret = -EINVAL;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!(lacl)) {
                printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        INIT_LIST_HEAD(&lacl->lacl_list);
        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;
        snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

        return lacl;
}
int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        u32 unpacked_lun,
        u32 lun_access)
{
        struct se_lun *lun;
        struct se_node_acl *nacl;

        lun = core_dev_get_lun(tpg, unpacked_lun);
        if (!(lun)) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return -EINVAL;
        }

        nacl = lacl->se_lun_nacl;
        if (!(nacl))
                return -EINVAL;

        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

        lacl->se_lun = lun;

        if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access, nacl, tpg, 1) < 0)
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
        atomic_inc(&lun->lun_acl_count);
        smp_mb__after_atomic_inc();
        spin_unlock(&lun->lun_acl_lock);

        printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
                " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
                lacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL..
         */
        core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);

        return 0;
}
/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_node_acl *nacl;

        nacl = lacl->se_lun_nacl;
        if (!(nacl))
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_del(&lacl->lacl_list);
        atomic_dec(&lun->lun_acl_count);
        smp_mb__after_atomic_dec();
        spin_unlock(&lun->lun_acl_lock);

        core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
                TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

        lacl->se_lun = NULL;

        printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %u\n",
                TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
                lacl->initiatorname, lacl->mapped_lun);

        return 0;
}
void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg),
                TPG_TFO(tpg)->get_fabric_name(),
                lacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}
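
/*
 * Create the global virtual LUN 0 backed by an internal rd_dr (ramdisk)
 * HBA, used to answer REPORT_LUNS et al when an initiator has no active
 * MappedLUN=0.
 */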
int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        struct se_subsystem_dev *se_dev = NULL;
        struct se_subsystem_api *t;
        char buf[16];
        int ret;

        hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        se_global->g_lun0_hba = hba;
        t = hba->transport;

        se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
        if (!(se_dev)) {
                printk(KERN_ERR "Unable to allocate memory for"
                                " struct se_subsystem_dev\n");
                ret = -ENOMEM;
                goto out;
        }
        INIT_LIST_HEAD(&se_dev->g_se_dev_list);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
        INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
        spin_lock_init(&se_dev->t10_reservation.registration_lock);
        spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
        INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
        spin_lock_init(&se_dev->se_dev_lock);
        se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
        se_dev->t10_wwn.t10_sub_dev = se_dev;
        se_dev->t10_alua.t10_sub_dev = se_dev;
        se_dev->se_dev_attrib.da_sub_dev = se_dev;
        se_dev->se_dev_hba = hba;

        se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
        if (!(se_dev->se_dev_su_ptr)) {
                printk(KERN_ERR "Unable to locate subsystem dependent pointer"
                        " from allocate_virtdevice()\n");
                ret = -ENOMEM;
                goto out;
        }
        se_global->g_lun0_su_dev = se_dev;

        memset(buf, 0, 16);
        sprintf(buf, "rd_pages=8");
        t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

        dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
        if (!(dev) || IS_ERR(dev)) {
                ret = -ENOMEM;
                goto out;
        }
        se_dev->se_dev_ptr = dev;
        se_global->g_lun0_dev = dev;

        return 0;
out:
        se_global->g_lun0_su_dev = NULL;
        kfree(se_dev);
        if (se_global->g_lun0_hba) {
                core_delete_hba(se_global->g_lun0_hba);
                se_global->g_lun0_hba = NULL;
        }
        return ret;
}
void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = se_global->g_lun0_hba;
        struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;

        if (!(hba))
                return;

        if (se_global->g_lun0_dev)
                se_free_virtual_device(se_global->g_lun0_dev, hba);

        kfree(su_dev);
        core_delete_hba(hba);
}