/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"

/*	core_clear_initiator_node_from_tpg():
 *
 *	Revoke all mapped LUN access for an initiator node ACL across a TPG.
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_lun_acl *acl, *acl_tmp;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        printk(KERN_ERR "%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                TPG_TFO(tpg)->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

                spin_lock(&lun->lun_acl_lock);
                list_for_each_entry_safe(acl, acl_tmp,
                                &lun->lun_acl_list, lacl_list) {
                        if (!(strcmp(acl->initiatorname,
                                        nacl->initiatorname)) &&
                            (acl->mapped_lun == deve->mapped_lun))
                                break;
                }

                if (!acl) {
                        printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
                                " mapped_lun: %u\n", nacl->initiatorname,
                                deve->mapped_lun);
                        spin_unlock(&lun->lun_acl_lock);
                        spin_lock_irq(&nacl->device_list_lock);
                        continue;
                }

                list_del(&acl->lacl_list);
                spin_unlock(&lun->lun_acl_lock);

                spin_lock_irq(&nacl->device_list_lock);
                kfree(acl);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!(strcmp(acl->initiatorname, initiatorname)))
                        return acl;
        }

        return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Locking version of the above that also skips dynamic demo-mode ACLs.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_bh(&tpg->acl_node_lock);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!(strcmp(acl->initiatorname, initiatorname)) &&
                    (!(acl->dynamic_node_acl))) {
                        spin_unlock_bh(&tpg->acl_node_lock);
                        return acl;
                }
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *	Give a new demo-mode node ACL access to the TPG's active LUNs.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
                        if (dev->dev_flags & DF_READ_ONLY)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * Allow only optical drives to issue R/W in default RO
                         * demo mode.
                         */
                        if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        TPG_TFO(tpg)->get_fabric_name(),
                        TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg, 1);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}

/*	core_set_queue_depth_for_node():
 *
 *	Ensure a node ACL never ends up with a zero queue depth.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

/*	core_create_device_list_for_node():
 *
 *	Allocate and initialize the per-node device list, one entry per
 *	possible TPG LUN.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
                                TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
        if (!(nacl->device_list)) {
                printk(KERN_ERR "Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -ENOMEM;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}

/*	core_tpg_check_initiator_node_acl()
 *
 *	Return an existing ACL for @initiatorname, or generate a dynamic
 *	demo-mode ACL when the fabric allows it.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if ((acl))
                return acl;

        if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
                return NULL;

        acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
        if (!(acl))
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;

        TPG_TFO(tpg)->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        core_tpg_add_node_to_devs(acl, tpg);

        spin_lock_bh(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_bh(&tpg->acl_node_lock);

        printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
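
/*
 * Example (illustrative sketch only, not part of this file's API surface):
 * a $FABRIC_MOD would typically call core_tpg_check_initiator_node_acl()
 * from its login path, before transport_register_session(), along the
 * lines of:
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!(se_nacl))
 *		return -EACCES;	(no explicit ACL, and demo mode is disabled)
 */
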
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i, ret;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                ret = core_dev_del_lun(tpg, lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *	Add an explicit (configfs driven) initiator node ACL to a TPG,
 *	replacing an existing dynamic demo-mode ACL when one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *se_nacl,
        const char *initiatorname,
        u32 queue_depth)
{
        struct se_node_acl *acl = NULL;

        spin_lock_bh(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if ((acl)) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
                                TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_bh(&tpg->acl_node_lock);
                        /*
                         * Release the locally allocated struct se_node_acl
                         * because core_tpg_check_initiator_node_acl() already
                         * created a demo mode node ACL that is reused here.
                         */
                        if (se_nacl)
                                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
                                                se_nacl);
                        goto done;
                }

                printk(KERN_ERR "ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", TPG_TFO(tpg)->get_fabric_name(),
                        initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock_bh(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        if (!(se_nacl)) {
                printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
                return ERR_PTR(-EINVAL);
        }
        /*
         * For v4.x logic the se_node_acl_s is hanging off a fabric
         * dependent structure allocated via
         * struct target_core_fabric_ops->fabric_make_nodeacl()
         */
        acl = se_nacl;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = queue_depth;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);

        TPG_TFO(tpg)->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-ENOMEM);
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_bh(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_bh(&tpg->acl_node_lock);

done:
        printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
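
/*
 * Example (illustrative): core_tpg_add_initiator_node_acl() is normally
 * reached from a fabric module's ->fabric_make_nodeacl() configfs callback,
 * e.g. in response to an admin running something like:
 *
 *	mkdir /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/acls/$INITIATOR
 *
 * where the fabric first allocates its nodeacl wrapper and then calls
 * (hypothetical error label):
 *
 *	acl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl,
 *			name, queue_depth);
 *	if (IS_ERR(acl))
 *		goto out_free_nacl;
 */
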
/*	core_tpg_del_initiator_node_acl():
 *
 *	Remove an initiator node ACL from a TPG, shutting down any active
 *	sessions for that initiator as required.
 */
int core_tpg_del_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        int force)
{
        struct se_session *sess, *sess_tmp;
        int dynamic_acl = 0;

        spin_lock_bh(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_bh(&tpg->acl_node_lock);

        spin_lock_bh(&tpg->session_lock);
        list_for_each_entry_safe(sess, sess_tmp,
                        &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!(TPG_TFO(tpg)->shutdown_session(sess)))
                        continue;

                spin_unlock_bh(&tpg->session_lock);
                /*
                 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
                 * forcefully shutdown the $FABRIC_MOD session/nexus.
                 */
                TPG_TFO(tpg)->close_session(sess);

                spin_lock_bh(&tpg->session_lock);
        }
        spin_unlock_bh(&tpg->session_lock);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

        return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
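
/*
 * Example (illustrative): explicit ACL removal normally arrives via the
 * fabric's ->fabric_drop_nodeacl() configfs callback, e.g. triggered by:
 *
 *	rmdir /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/acls/$INITIATOR
 *
 * which would call core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1)
 * before releasing the fabric's nodeacl wrapper.
 */
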
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth for an initiator node ACL, optionally forcing
 *	session reinstatement when an active session exists.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        int dynamic_acl = 0;

        spin_lock_bh(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!(acl)) {
                printk(KERN_ERR "Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", TPG_TFO(tpg)->get_fabric_name(),
                        initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock_bh(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        spin_lock_bh(&tpg->session_lock);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        printk(KERN_ERR "Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational.  To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                TPG_TFO(tpg)->get_fabric_name(), initiatorname);
                        spin_unlock_bh(&tpg->session_lock);

                        spin_lock_bh(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_bh(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!(TPG_TFO(tpg)->shutdown_session(sess)))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to set the requested queue depth.
         *
         * Finally call TPG_TFO(tpg)->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_bh(&tpg->session_lock);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from TPG_TFO(tpg)->shutdown_session() called above.
                 */
                if (init_sess)
                        TPG_TFO(tpg)->close_session(init_sess);

                spin_lock_bh(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_bh(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_bh(&tpg->session_lock);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                TPG_TFO(tpg)->close_session(init_sess);

        printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg));

        spin_lock_bh(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_bh(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
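
/*
 * Example (illustrative): a fabric module typically exposes this through a
 * per-ACL configfs attribute, so that something like:
 *
 *	echo 64 > .../acls/$INITIATOR/cmdsn_depth
 *
 * ends up in an attribute store handler calling:
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			acl->initiatorname, new_depth, force);
 */
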
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = se_global->g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        INIT_LIST_HEAD(&lun->lun_cmd_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_cmd_lock);
        spin_lock_init(&lun->lun_sep_lock);

        ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return ret;

        return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;

        core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
                                TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
        if (!(se_tpg->tpg_lun_list)) {
                printk(KERN_ERR "Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                INIT_LIST_HEAD(&lun->lun_cmd_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_cmd_lock);
                spin_lock_init(&lun->lun_sep_lock);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_list);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        /* release the tpg_lun_list allocated above */
                        kfree(se_tpg->tpg_lun_list);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&se_global->se_tpg_lock);
        list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
        spin_unlock_bh(&se_global->se_tpg_lock);

        printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);
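
/*
 * Example (illustrative sketch, hypothetical demo_fabric names): a fabric
 * module registers its new TPG from a ->fabric_make_tpg() callback:
 *
 *	ret = core_tpg_register(&demo_fabric_ops, wwn, &tpg->se_tpg,
 *			(void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */
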
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        struct se_node_acl *nacl, *nacl_tmp;

        printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
                TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
                TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));

        spin_lock_bh(&se_global->se_tpg_lock);
        list_del(&se_tpg->se_tpg_list);
        spin_unlock_bh(&se_global->se_tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        spin_lock_bh(&se_tpg->acl_node_lock);
        list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
                        acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
                spin_unlock_bh(&se_tpg->acl_node_lock);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);

                spin_lock_bh(&se_tpg->acl_node_lock);
        }
        spin_unlock_bh(&se_tpg->acl_node_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);

        se_tpg->se_tpg_fabric_ptr = NULL;
        kfree(se_tpg->tpg_lun_list);
        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        TPG_TFO(tpg)->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = &tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_addlun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        void *lun_ptr)
{
        if (core_dev_export(lun_ptr, tpg, lun) < 0)
                return -1;

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
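
/*
 * Example (illustrative): LUN creation is a two-step sequence, normally
 * driven from core_dev_add_lun() in target_core_device.c:
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *
 *	ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 */
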
static void core_tpg_shutdown_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
        struct se_portal_group *tpg,
        u32 unpacked_lun,
        int *ret)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = &tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %u, ignoring request.\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-ENODEV);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_dellun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_tpg_shutdown_lun(tpg, lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}