/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};
static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};
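/*
 * Background note on the encoding above (general IEEE 1212 knowledge, not
 * taken from this driver): each quadlet is an immediate config ROM directory
 * entry, with the key in the most significant byte and a 24-bit value below
 * it. For example, 0x1200609e is key 0x12 (unit_spec_id) with value 0x00609e,
 * and 0x13010483 is key 0x13 (unit_sw_version) with value 0x010483.
 */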
#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}
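/*
 * For reference: in the IEEE 1212 config ROM layout the bus information
 * block's quadlets 3 and 4 (offsets 3*4 and 4*4 from CSR_CONFIG_ROM, as
 * read above) hold the node's EUI-64, which is why the two quadlet reads
 * are combined into a single 64-bit GUID.
 */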
static struct sbp_session *sbp_session_find_by_guid(
	struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
	struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
	sess->guid = guid;

	sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
					     sizeof(struct sbp_target_request),
					     TARGET_PROT_NORMAL, guid_str,
					     sess, NULL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");
		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}
static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);

static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
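/*
 * Summary of the login flow above, as implemented: validate the requested
 * LUN, read the initiator's GUID, enforce the exclusive-login rules,
 * (re)create the session if needed, register a per-login command block
 * agent, then write the login response block (length/login_id,
 * reconnect_hold, command block agent address) to the address in the ORB's
 * ptr2 field and post an OK status.
 */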
static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}
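/*
 * The maintenance timer above implements a small per-session state machine:
 * while the initiator is connected, each tick checks for a bus reset (which
 * invalidates node_id); after a reset it keeps rescheduling itself until
 * reconnect_expires passes, and only then tears down the logins and the
 * session.
 */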
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
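/*
 * Command block agent register map decoded by tgt_agent_rw() above
 * (offsets relative to the start of the handler's address block):
 *
 *	0x00  AGENT_STATE			(quadlet)
 *	0x04  AGENT_RESET			(quadlet)
 *	0x08  ORB_POINTER			(octlet)
 *	0x10  DOORBELL				(quadlet)
 *	0x14  UNSOLICITED_STATUS_ENABLE		(quadlet)
 */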
static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
	struct fw_card *card, u64 next_orb)
{
	struct se_session *se_sess = sess->se_sess;
	struct sbp_target_request *req;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return ERR_PTR(-ENOMEM);

	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.tag = next_orb;

	return req;
}
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = sbp_mgt_get_req(sess, sess->card, next_orb);
		if (IS_ERR(req)) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}
static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an exponential backoff.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}
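/*
 * Worked out from the code above: with delay = 5 * attempt * attempt the
 * sleeps between the five attempts are roughly 5, 20, 45 and 80 us (and
 * usleep_range() may stretch each up to 2x), so a persistently failing
 * transaction costs at most ~300 us of retry backoff before giving up.
 */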
/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}
static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}
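/*
 * As used later in sbp_rw_data(), each sbp_page_table_entry is an SBP-2
 * unrestricted page table element: a 16-bit segment_length followed by a
 * 48-bit segment base address split across segment_base_hi (16 bits) and
 * segment_base_lo (32 bits), all big-endian on the wire.
 */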
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
}
/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}
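/*
 * Note on max_payload above: per SBP-2 the ORB encodes the payload limit as
 * a power of two, 2^(max_payload + 2) bytes, hence the "4 <<" when deriving
 * the largest block transfer used for each scatter-gather chunk.
 */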
static int sbp_send_status(struct sbp_target_request *req)
{
	int rc, ret = 0, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (rc != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
		ret = -EIO;
		goto put_ref;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);
	/*
	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
	 * final se_cmd->cmd_kref put.
	 */
put_ref:
	target_put_sess_cmd(&req->se_cmd);
	return ret;
}
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
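/*
 * In short, sbp_sense_mangle() repacks fixed-format SCSI sense data into
 * the SBP-2 status block: byte 0 carries sfmt and the SCSI status code,
 * byte 1 the valid/mark/eom/ili bits plus the sense key, then ASC/ASCQ,
 * the information and CDB-dependent fields, FRU and the sense-key-dependent
 * bytes, yielding the 5-quadlet (STATUS_BLOCK_LEN(5)) status payload.
 */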
static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	struct se_session *se_sess = se_cmd->se_sess;

	kfree(req->pg_tbl);
	kfree(req->cmd_buf);

	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}
static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return transport_generic_free_cmd(&req->se_cmd, 0);
}
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}

static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}
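/*
 * Usage note: sbp_parse_wwn() accepts exactly 16 hex digits (an EUI-64),
 * so e.g. a configfs name of "0001020304050607" parses to the u64
 * 0x0001020304050607; sbp_format_wwn() is its inverse via "%016llx".
 */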
static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}

static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}
2076 static struct se_wwn *sbp_make_tport(
2077 struct target_fabric_configfs *tf,
2078 struct config_group *group,
2079 const char *name)
2081 struct sbp_tport *tport;
2082 u64 guid = 0;
2084 if (sbp_parse_wwn(name, &guid) < 0)
2085 return ERR_PTR(-EINVAL);
2087 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2088 if (!tport)
2089 return ERR_PTR(-ENOMEM);
2091 tport->guid = guid;
2092 sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2094 return &tport->tport_wwn;

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}
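
/*
 * configfs attribute handlers follow; each show/store pair backs one file
 * in the fabric's configfs tree. "version" is read-only and reports the
 * fabric module version string.
 */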
static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

CONFIGFS_ATTR_RO(sbp_wwn_, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_attr_version,
	NULL,
};
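
/*
 * directory_id: the 24-bit Config ROM directory ID. "implicit" (stored as
 * -1) means no explicit Directory_ID entry is published in the unit
 * directory; otherwise a hex value up to 0xffffff may be set, but only
 * while the target is disabled.
 */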
static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}
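
/*
 * enable: publish (1) or withdraw (0) the SBP-2 unit directory. Enabling
 * requires at least one LUN; disabling is refused while sessions exist.
 * Illustrative usage, assuming the standard TCM configfs mount point:
 *
 *	echo 1 > /sys/kernel/config/target/sbp/<guid>/tpgt_1/enable
 */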
static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_attr_directory_id,
	&sbp_tpg_attr_enable,
	NULL,
};
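
/*
 * The attrib group exposes SBP-2 tunables: the management ORB timeout and
 * maximum reconnect hold advertised to initiators, and the number of
 * concurrent logins allowed per LUN. Timeout changes are pushed into the
 * Config ROM immediately via sbp_update_unit_directory().
 */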
static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}

CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_attr_mgt_orb_timeout,
	&sbp_tpg_attrib_attr_max_reconnect_timeout,
	&sbp_tpg_attrib_attr_max_logins_per_lun,
	NULL,
};
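
/*
 * Fabric template handed to the target core. The .fabric_* hooks implement
 * the configfs hierarchy built above; the remaining callbacks service I/O
 * and session state on behalf of TCM.
 */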
static const struct target_core_fabric_ops sbp_ops = {
	.module = THIS_MODULE,
	.name = "sbp",
	.get_fabric_name = sbp_get_fabric_name,
	.tpg_get_wwn = sbp_get_fabric_wwn,
	.tpg_get_tag = sbp_get_tag,
	.tpg_check_demo_mode = sbp_check_true,
	.tpg_check_demo_mode_cache = sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index = sbp_tpg_get_inst_index,
	.release_cmd = sbp_release_cmd,
	.sess_get_index = sbp_sess_get_index,
	.write_pending = sbp_write_pending,
	.write_pending_status = sbp_write_pending_status,
	.set_default_node_attributes = sbp_set_default_node_attrs,
	.get_cmd_state = sbp_get_cmd_state,
	.queue_data_in = sbp_queue_data_in,
	.queue_status = sbp_queue_status,
	.queue_tm_rsp = sbp_queue_tm_rsp,
	.aborted_task = sbp_aborted_task,
	.check_stop_free = sbp_check_stop_free,

	.fabric_make_wwn = sbp_make_tport,
	.fabric_drop_wwn = sbp_drop_tport,
	.fabric_make_tpg = sbp_make_tpg,
	.fabric_drop_tpg = sbp_drop_tpg,
	.fabric_post_link = sbp_post_link_lun,
	.fabric_pre_unlink = sbp_pre_unlink_lun,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_init_nodeacl = sbp_init_nodeacl,

	.tfc_wwn_attrs = sbp_wwn_attrs,
	.tfc_tpg_base_attrs = sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
};
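
/*
 * Module entry points: sbp_init() registers the fabric template with the
 * target core, sbp_exit() removes it again. Illustrative bring-up, assuming
 * the module is built as sbp_target (as KMSG_COMPONENT suggests):
 *
 *	modprobe sbp_target
 *	mkdir -p /sys/kernel/config/target/sbp/<guid>/tpgt_1
 */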
static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);