/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun))
				break;
		}

		if (!acl) {
			pr_err("Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&acl->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(acl);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	__core_tpg_get_initiator_node_acl():
 *
 *	tpg->acl_node_lock must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}

/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

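/*
 * Helpers to allocate and free an array of 'n' individually kzalloc'd
 * elements.  Used below for nacl->device_list and se_tpg->tpg_lun_list.
 */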
void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}

/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

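/*
 * Busy-wait until all SPC-3 persistent reservation references to this
 * node ACL have been dropped.
 */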
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

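/*
 * Remove every active LUN that still has a backing se_device from the TPG.
 */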
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
					se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

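/*
 * Set up se_tpg->tpg_virt_lun0 backed by the global g_lun0_dev as a
 * read-only LUN 0 for this TPG.
 */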
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

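/*
 * Allocate and initialize the LUN table, lists and locks for a new
 * struct se_portal_group, then add it to the global tpg_list.
 */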
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			kfree(se_tpg);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

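/*
 * Undo core_tpg_register(): remove the TPG from tpg_list, release any
 * remaining demo-mode node ACLs and virtual LUN 0, and free the LUN table.
 */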
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

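/*
 * Validate unpacked_lun and return the corresponding inactive struct se_lun
 * from the TPG LUN table.
 */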
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

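/*
 * Export the backing device for the new LUN and mark the LUN ACTIVE.
 */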
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

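/*
 * Quiesce a LUN by clearing it from node ACL mappings and from any
 * active sessions.
 */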
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

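/*
 * Validate unpacked_lun and return the corresponding active struct se_lun
 * from the TPG LUN table.
 */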
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

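/*
 * Shut the LUN down, unexport its backing device and mark the LUN FREE.
 */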
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}