/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"

/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_lun_acl *acl, *acl_tmp;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        printk(KERN_ERR "%s device entry's device pointer is"
                                " NULL, but Initiator has access.\n",
                                TPG_TFO(tpg)->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

                spin_lock(&lun->lun_acl_lock);
                list_for_each_entry_safe(acl, acl_tmp,
                                &lun->lun_acl_list, lacl_list) {
                        if (!(strcmp(acl->initiatorname,
                                        nacl->initiatorname)) &&
                            (acl->mapped_lun == deve->mapped_lun))
                                break;
                }

                if (!acl) {
                        printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
                                " mapped_lun: %u\n", nacl->initiatorname,
                                deve->mapped_lun);
                        spin_unlock(&lun->lun_acl_lock);
                        spin_lock_irq(&nacl->device_list_lock);
                        continue;
                }

                list_del(&acl->lacl_list);
                spin_unlock(&lun->lun_acl_lock);

                spin_lock_irq(&nacl->device_list_lock);
                kfree(acl);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!(strcmp(acl->initiatorname, initiatorname)))
                        return acl;
        }

        return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_bh(&tpg->acl_node_lock);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!(strcmp(acl->initiatorname, initiatorname)) &&
                    (!(acl->dynamic_node_acl))) {
                        spin_unlock_bh(&tpg->acl_node_lock);
                        return acl;
                }
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
                        if (dev->dev_flags & DF_READ_ONLY)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * Allow only optical drives to issue R/W in default RO
                         * demo mode.
                         */
                        if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        TPG_TFO(tpg)->get_fabric_name(),
                        TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg, 1);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}

/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
                                TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
        if (!(nacl->device_list)) {
                printk(KERN_ERR "Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -1;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}

/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if ((acl))
                return acl;

        if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
                return NULL;

        acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
        if (!(acl))
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;

        TPG_TFO(tpg)->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        core_tpg_add_node_to_devs(acl, tpg);

        spin_lock_bh(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_bh(&tpg->acl_node_lock);

        printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
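
/*
 * Illustrative sketch (not part of the original file): a $FABRIC_MOD
 * session login path resolves the initiator's ACL through the call above.
 * se_tpg and initiatorname are hypothetical names for fabric-provided
 * values:
 *
 *	struct se_node_acl *se_nacl;
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!(se_nacl))
 *		return -EACCES;	(no explicit ACL, and demo mode is disabled)
 *
 * When TFO->tpg_check_demo_mode() allows it, the ACL returned here is
 * generated on the fly with dynamic_node_acl = 1, and may later be
 * converted to an explicit ACL by core_tpg_add_initiator_node_acl().
 */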

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i, ret;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                ret = core_dev_del_lun(tpg, lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *se_nacl,
        const char *initiatorname,
        u32 queue_depth)
{
        struct se_node_acl *acl = NULL;

        spin_lock_bh(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if ((acl)) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
                                TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_bh(&tpg->acl_node_lock);
                        /*
                         * Release the locally allocated struct se_node_acl
                         * because core_tpg_add_initiator_node_acl() returned
                         * a pointer to an existing demo mode node ACL.
                         */
                        if (se_nacl)
                                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
                                                se_nacl);
                        goto done;
                }

                printk(KERN_ERR "ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", TPG_TFO(tpg)->get_fabric_name(),
                        initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock_bh(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        if (!(se_nacl)) {
                printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
                return ERR_PTR(-EINVAL);
        }
        /*
         * For v4.x logic the se_node_acl_s is hanging off a fabric
         * dependent structure allocated via
         * struct target_core_fabric_ops->fabric_make_nodeacl()
         */
        acl = se_nacl;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = queue_depth;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);

        TPG_TFO(tpg)->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-ENOMEM);
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_bh(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_bh(&tpg->acl_node_lock);

done:
        printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
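
/*
 * Illustrative sketch (not part of the original file): a fabric module
 * creates explicit ACLs from its target_core_fabric_ops->fabric_make_nodeacl()
 * configfs callback, passing in a struct se_node_acl embedded in a
 * fabric-dependent container.  my_nacl and MY_DEFAULT_DEPTH are
 * hypothetical names:
 *
 *	struct se_node_acl *se_nacl;
 *
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *			&my_nacl->se_node_acl, name, MY_DEFAULT_DEPTH);
 *	if (IS_ERR(se_nacl))
 *		return se_nacl;
 *
 * If a dynamic (demo mode) ACL already exists for the same InitiatorName,
 * it is converted to an explicit ACL in place and the caller's se_nacl is
 * released via TFO->tpg_release_fabric_acl() before that existing ACL is
 * returned.
 */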

/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        int force)
{
        struct se_session *sess, *sess_tmp;
        int dynamic_acl = 0;

        spin_lock_bh(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_bh(&tpg->acl_node_lock);

        spin_lock_bh(&tpg->session_lock);
        list_for_each_entry_safe(sess, sess_tmp,
                        &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!(TPG_TFO(tpg)->shutdown_session(sess)))
                        continue;

                spin_unlock_bh(&tpg->session_lock);
                /*
                 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
                 * forcefully shutdown the $FABRIC_MOD session/nexus.
                 */
                TPG_TFO(tpg)->close_session(sess);

                spin_lock_bh(&tpg->session_lock);
        }
        spin_unlock_bh(&tpg->session_lock);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

        return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        int dynamic_acl = 0;

        spin_lock_bh(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!(acl)) {
                printk(KERN_ERR "Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", TPG_TFO(tpg)->get_fabric_name(),
                        initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock_bh(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        spin_lock_bh(&tpg->session_lock);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        printk(KERN_ERR "Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational. To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                TPG_TFO(tpg)->get_fabric_name(), initiatorname);
                        spin_unlock_bh(&tpg->session_lock);

                        spin_lock_bh(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_bh(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!(TPG_TFO(tpg)->shutdown_session(sess)))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to add the requested queue depth.
         *
         * Finally call TPG_TFO(tpg)->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_bh(&tpg->session_lock);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from TPG_TFO(tpg)->shutdown_session() called above.
                 */
                if (init_sess)
                        TPG_TFO(tpg)->close_session(init_sess);

                spin_lock_bh(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_bh(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_bh(&tpg->session_lock);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                TPG_TFO(tpg)->close_session(init_sess);

        printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg));

        spin_lock_bh(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_bh(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
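
/*
 * Illustrative sketch (not part of the original file): this is typically
 * driven from a fabric module's configfs attribute store handler.  The
 * handler body below is a hypothetical example, not an API from this file:
 *
 *	unsigned long depth;
 *	int ret;
 *
 *	if (strict_strtoul(page, 0, &depth) < 0)
 *		return -EINVAL;
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			acl->initiatorname, (u32)depth, 1);
 *	if (ret < 0)
 *		return ret;
 *
 * Passing force = 1 permits the change while a session is active, at the
 * cost of session reinstatement via TFO->close_session(); force = 0
 * returns -EEXIST instead when a session exists.
 */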

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = se_global->g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        INIT_LIST_HEAD(&lun->lun_cmd_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_cmd_lock);
        spin_lock_init(&lun->lun_sep_lock);

        ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return -1;

        return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;

        core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
                                TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
        if (!(se_tpg->tpg_lun_list)) {
                printk(KERN_ERR "Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                INIT_LIST_HEAD(&lun->lun_cmd_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_cmd_lock);
                spin_lock_init(&lun->lun_sep_lock);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_list);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        kfree(se_tpg);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&se_global->se_tpg_lock);
        list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
        spin_unlock_bh(&se_global->se_tpg_lock);

        printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);
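
/*
 * Illustrative sketch (not part of the original file): a fabric module's
 * configfs fabric_make_tpg() callback embeds a struct se_portal_group in
 * its own TPG container and registers it here.  my_tpg and my_fabric_ops
 * are hypothetical names:
 *
 *	int ret;
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *			(void *)my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *
 * A discovery TPG passes TRANSPORT_TPG_TYPE_DISCOVERY instead, which
 * skips the virtual LUN 0 setup performed above for normal TPGs.
 */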

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        struct se_node_acl *nacl, *nacl_tmp;

        printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
                TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
                TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));

        spin_lock_bh(&se_global->se_tpg_lock);
        list_del(&se_tpg->se_tpg_list);
        spin_unlock_bh(&se_global->se_tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        spin_lock_bh(&se_tpg->acl_node_lock);
        list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
                        acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
                spin_unlock_bh(&se_tpg->acl_node_lock);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);

                spin_lock_bh(&se_tpg->acl_node_lock);
        }
        spin_unlock_bh(&se_tpg->acl_node_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);

        se_tpg->se_tpg_fabric_ptr = NULL;
        kfree(se_tpg->tpg_lun_list);
        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_pre_addlun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        TPG_TFO(tpg)->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = &tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_addlun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        void *lun_ptr)
{
        if (core_dev_export(lun_ptr, tpg, lun) < 0)
                return -1;

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
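
/*
 * Illustrative sketch (not part of the original file): add-LUN is a
 * two-phase sequence.  A caller such as core_dev_add_lun() claims the LUN
 * slot first and only marks it ACTIVE once the backing device export has
 * succeeded:
 *
 *	struct se_lun *lun;
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *
 *	if (core_tpg_post_addlun(tpg, lun, lun_access, dev) < 0)
 *		return ERR_PTR(-EINVAL);
 */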

static void core_tpg_shutdown_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
        struct se_portal_group *tpg,
        u32 unpacked_lun,
        int *ret)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = &tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %u, ignoring request.\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-ENODEV);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_post_dellun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_tpg_shutdown_lun(tpg, lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}