/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"

/*	core_clear_initiator_node_from_tpg():
 *
 *	Drop all active LUN mappings and struct se_lun_acl entries for a
 *	node ACL that is being removed from its portal group.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	bool found;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		/*
		 * Use an explicit found flag; the list cursor is not NULL
		 * when list_for_each_entry_safe() completes without a match.
		 */
		found = false;
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun)) {
				found = true;
				break;
			}
		}

		if (!found) {
			printk(KERN_ERR "Unable to locate struct se_lun_acl"
				" for %s, mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&acl->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(acl);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_bh(&tpg->acl_node_lock) must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!(strcmp(acl->initiatorname, initiatorname)))
			return acl;
	}

	return NULL;
}

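/*
 * Example (illustrative only): because __core_tpg_get_initiator_node_acl()
 * walks tpg->acl_node_list without taking any lock of its own, a caller is
 * expected to bracket the lookup exactly as the locked wrapper below does:
 *
 *	spin_lock_bh(&tpg->acl_node_lock);
 *	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	...
 *	spin_unlock_bh(&tpg->acl_node_lock);
 */
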
/*	core_tpg_get_initiator_node_acl():
 *
 *	Locked lookup that skips dynamically generated (demo mode) ACLs.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!(strcmp(acl->initiatorname, initiatorname)) &&
		    (!(acl->dynamic_node_acl))) {
			spin_unlock_bh(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *	Map every active LUN in the TPG into a (dynamic) node ACL, honoring
 *	the fabric's demo mode write protection setting.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i;
	u32 lun_access;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/*	core_set_queue_depth_for_node():
 *
 *	Guard against a zero queue depth by defaulting to 1.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/*	core_create_device_list_for_node():
 *
 *	Allocate and initialize the per-node array of struct se_dev_entry.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!(nacl->device_list)) {
		printk(KERN_ERR "Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/*	core_tpg_check_initiator_node_acl()
 *
 *	Return an existing explicit ACL, or generate a dynamic one when the
 *	TPG is running in demo mode.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
		return NULL;

	acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	atomic_set(&acl->mib_ref_count, 0);
	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

	printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d"
		" for %s Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

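/*
 * Example (hypothetical $FABRIC_MOD sketch): a fabric module would normally
 * invoke core_tpg_check_initiator_node_acl() from its session login path to
 * obtain either an explicit configfs-backed ACL or, when
 * tpg_check_demo_mode() returns true, a dynamically generated one.  The
 * surrounding variable names are illustrative:
 *
 *	se_sess->se_node_acl =
 *		core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!(se_sess->se_node_acl)) {
 *		transport_free_session(se_sess);
 *		return -EPERM;	(no ACL match and demo mode is disabled)
 *	}
 */
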
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->mib_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

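/*
 * Example (illustrative only): core_tpg_clear_object_luns() is intended for
 * fabric TPG teardown, removing every active LUN mapping in one pass via
 * core_dev_del_lun() before the TPG itself is deregistered:
 *
 *	core_tpg_clear_object_luns(se_tpg);
 *	(release node ACLs, then core_tpg_deregister(se_tpg))
 */
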
/*	core_tpg_add_initiator_node_acl():
 *
 *	Register an explicit initiator NodeACL, converting an existing
 *	dynamic (demo mode) ACL when one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
			spin_unlock_bh(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() is
			 * returning a pointer to an existing demo mode node
			 * ACL instead.
			 */
			if (se_nacl)
				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
						se_nacl);
			goto done;
		}

		printk(KERN_ERR "ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	if (!se_nacl) {
		printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

done:
	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

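/*
 * Example (hypothetical sketch): a fabric module's configfs
 * ->fabric_make_nodeacl() callback would allocate its fabric-dependent ACL
 * container and then call core_tpg_add_initiator_node_acl() to either claim
 * an existing dynamic ACL or register the new explicit one.  The names
 * fabric_nacl, name and queue_depth are illustrative:
 *
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *			&fabric_nacl->se_node_acl, name, queue_depth);
 *	if (IS_ERR(se_nacl)) {
 *		kfree(fabric_nacl);
 *		return ERR_CAST(se_nacl);
 *	}
 */
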
/*	core_tpg_del_initiator_node_acl():
 *
 *	Shutdown any active sessions and release an initiator NodeACL.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;

	spin_lock_bh(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		TPG_TFO(tpg)->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_tpg_wait_for_mib_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		printk(KERN_ERR "Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			printk(KERN_ERR "Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_bh(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_bh(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to apply the requested queue depth.
	 *
	 * Finally call TPG_TFO(tpg)->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
		 */
		if (init_sess)
			TPG_TFO(tpg)->close_session(init_sess);

		spin_lock_bh(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_bh(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		TPG_TFO(tpg)->close_session(init_sess);

	printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg));

	spin_lock_bh(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_bh(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

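/*
 * Example (illustrative only): changing the depth for an initiator with an
 * active session requires force=1, which triggers session reinstatement via
 * the shutdown_session()/close_session() fabric callbacks used above:
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			initiatorname, 64, 1);
 *	(-EEXIST means an active session blocked a non-forced change)
 */
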
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = se_global->g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!(se_tpg->tpg_lun_list)) {
		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&se_global->se_tpg_lock);
	list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);

	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

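/*
 * Example (hypothetical fabric module sketch): a fabric's configfs
 * ->fabric_make_tpg() callback would embed a struct se_portal_group inside
 * its own TPG structure and register it as TRANSPORT_TPG_TYPE_NORMAL.  The
 * fabric_ops and fabric_tpg names are illustrative:
 *
 *	ret = core_tpg_register(&fabric_ops, wwn, &fabric_tpg->se_tpg,
 *			(void *)fabric_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */
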
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
		TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
		TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));

	spin_lock_bh(&se_global->se_tpg_lock);
	list_del(&se_tpg->se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

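/*
 * Example (illustrative only): core_tpg_pre_addlun() and
 * core_tpg_post_addlun() form a pair; the caller validates and claims the
 * LUN slot first, then exports the backing device and activates it:
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 */
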
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
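
/*
 * Example (illustrative only): LUN removal mirrors addition, with
 * core_tpg_pre_dellun() validating that the LUN is active and
 * core_tpg_post_dellun() shutting it down and returning the slot to
 * TRANSPORT_LUN_STATUS_FREE:
 *
 *	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	core_tpg_post_dellun(tpg, lun);
 */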