/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>

#include "sbp_target.h"
static const struct target_core_fabric_ops sbp_ops;
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};
static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};
#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);
static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);
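
/*
 * Read the peer's EUI-64 from the two Bus_Info_Block quadlets at
 * offsets 3 and 4 of its Config ROM.
 */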
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}
static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}
static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}
static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}
static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}
static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}
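
/*
 * Create a new session for an initiator: the GUID is formatted as a
 * 16-digit hex string and matched against the configured node ACLs.
 */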
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	sess->guid = guid;
	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}
static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}
static void sbp_target_agent_unregister(struct sbp_target_agent *);
static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}
static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);
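
/*
 * Handle a LOGIN management ORB: validate the requested LUN and the
 * exclusive-login rules, create (or reuse) the session, register a
 * command block agent for the login, and write the login response
 * block back to the initiator.
 */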
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}
static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
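
/*
 * Check whether a bus reset has invalidated this session's node ID;
 * if so, mark node_id invalid and start the reconnect_hold timer.
 */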
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}
static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}
static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}
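
/*
 * Command block agent registers, relative to the base of the address
 * handler: 0x00 AGENT_STATE, 0x04 AGENT_RESET, 0x08 ORB_POINTER,
 * 0x10 DOORBELL, 0x14 UNSOLICITED_STATUS_ENABLE. Requests are
 * dispatched in tgt_agent_rw() below.
 */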
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
	int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
	int tcode, int destination, int source, int generation,
	unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);
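
/* Process one fetched ORB according to its request format field. */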
static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		break;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		break;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		break;
	default:
		BUG();
	}
}
/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}
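
/*
 * Walk the initiator's linked list of ORBs: fetch each ORB, queue it
 * for processing unless this pass was triggered by a doorbell, and
 * follow next_ORB until the list ends or the agent leaves ACTIVE.
 */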
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}
static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an increasing backoff.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}
/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}
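
/*
 * Copy the CDB out of the fetched ORB; CDBs longer than the ORB's
 * command block are completed with an extra read from the initiator.
 */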
static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}
static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}
/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}
static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}
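
/*
 * Repack fixed-format SCSI sense data into the SBP-2 status block
 * sense layout.
 */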
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) { 		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}
static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}
static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}
*se_cmd
)
1766 static void sbp_set_default_node_attrs(struct se_node_acl
*nacl
)
1771 static int sbp_get_cmd_state(struct se_cmd
*se_cmd
)
static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}
/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}
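
/*
 * Rebuild and republish the SBP unit directory in the local Config
 * ROM to reflect the current target state (attributes and LUNs).
 */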
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
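
/* Parse a WWN of exactly 16 hex digits (an EUI-64) into a u64. */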
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}
static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}
static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}
static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}
static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}
static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}
static ssize_t sbp_wwn_show_attr_version(
		struct target_fabric_configfs *tf,
		char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

TF_WWN_ATTR_RO(sbp, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_version.attr,
	NULL,
};
static ssize_t sbp_tpg_show_directory_id(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_store_directory_id(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}
static ssize_t sbp_tpg_show_enable(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_store_enable(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return -ENOMEM;
	}

	return count;
}
TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_directory_id.attr,
	&sbp_tpg_enable.attr,
	NULL,
};
static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}
static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}
static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}
TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_mgt_orb_timeout.attr,
	&sbp_tpg_attrib_max_reconnect_timeout.attr,
	&sbp_tpg_attrib_max_logins_per_lun.attr,
	NULL,
};
static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.name				= "sbp",
	.get_fabric_name		= sbp_get_fabric_name,
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.shutdown_session		= sbp_shutdown_session,
	.close_session			= sbp_close_session,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.write_pending_status		= sbp_write_pending_status,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_init_nodeacl		= sbp_init_nodeacl,

	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};
static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);