/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
28 #include <linux/types.h>
29 #include <linux/workqueue.h>
30 #include <linux/completion.h>
31 #include <linux/sched.h>
32 #include <linux/bitops.h>
33 #include <linux/skbuff.h>
36 #include <net/nfc/nci.h>
37 #include <net/nfc/nci_core.h>
38 #include <linux/nfc.h>
/* Worker functions, defined later in this file. */
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);
44 /* ---- NCI requests ---- */
46 void nci_req_complete(struct nci_dev
*ndev
, int result
)
48 if (ndev
->req_status
== NCI_REQ_PEND
) {
49 ndev
->req_result
= result
;
50 ndev
->req_status
= NCI_REQ_DONE
;
51 complete(&ndev
->req_completion
);
55 static void nci_req_cancel(struct nci_dev
*ndev
, int err
)
57 if (ndev
->req_status
== NCI_REQ_PEND
) {
58 ndev
->req_result
= err
;
59 ndev
->req_status
= NCI_REQ_CANCELED
;
60 complete(&ndev
->req_completion
);
64 /* Execute request and wait for completion. */
65 static int __nci_request(struct nci_dev
*ndev
,
66 void (*req
)(struct nci_dev
*ndev
, unsigned long opt
),
71 unsigned long completion_rc
;
73 ndev
->req_status
= NCI_REQ_PEND
;
75 init_completion(&ndev
->req_completion
);
77 completion_rc
= wait_for_completion_interruptible_timeout(
78 &ndev
->req_completion
,
81 nfc_dbg("wait_for_completion return %ld", completion_rc
);
83 if (completion_rc
> 0) {
84 switch (ndev
->req_status
) {
86 rc
= nci_to_errno(ndev
->req_result
);
89 case NCI_REQ_CANCELED
:
90 rc
= -ndev
->req_result
;
98 nfc_err("wait_for_completion_interruptible_timeout failed %ld",
101 rc
= ((completion_rc
== 0) ? (-ETIMEDOUT
) : (completion_rc
));
104 ndev
->req_status
= ndev
->req_result
= 0;
109 static inline int nci_request(struct nci_dev
*ndev
,
110 void (*req
)(struct nci_dev
*ndev
, unsigned long opt
),
111 unsigned long opt
, __u32 timeout
)
115 if (!test_bit(NCI_UP
, &ndev
->flags
))
118 /* Serialize all requests */
119 mutex_lock(&ndev
->req_lock
);
120 rc
= __nci_request(ndev
, req
, opt
, timeout
);
121 mutex_unlock(&ndev
->req_lock
);
126 static void nci_reset_req(struct nci_dev
*ndev
, unsigned long opt
)
128 nci_send_cmd(ndev
, NCI_OP_CORE_RESET_CMD
, 0, NULL
);
131 static void nci_init_req(struct nci_dev
*ndev
, unsigned long opt
)
133 nci_send_cmd(ndev
, NCI_OP_CORE_INIT_CMD
, 0, NULL
);
136 static void nci_init_complete_req(struct nci_dev
*ndev
, unsigned long opt
)
138 struct nci_rf_disc_map_cmd cmd
;
139 struct nci_core_conn_create_cmd conn_cmd
;
142 /* create static rf connection */
143 conn_cmd
.target_handle
= 0;
144 conn_cmd
.num_target_specific_params
= 0;
145 nci_send_cmd(ndev
, NCI_OP_CORE_CONN_CREATE_CMD
, 2, &conn_cmd
);
147 /* set rf mapping configurations */
148 cmd
.num_mapping_configs
= 0;
150 /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
151 for (i
= 0; i
< ndev
->num_supported_rf_interfaces
; i
++) {
152 if (ndev
->supported_rf_interfaces
[i
] ==
153 NCI_RF_INTERFACE_ISO_DEP
) {
154 cmd
.mapping_configs
[cmd
.num_mapping_configs
]
155 .rf_protocol
= NCI_RF_PROTOCOL_ISO_DEP
;
156 cmd
.mapping_configs
[cmd
.num_mapping_configs
]
157 .mode
= NCI_DISC_MAP_MODE_BOTH
;
158 cmd
.mapping_configs
[cmd
.num_mapping_configs
]
159 .rf_interface_type
= NCI_RF_INTERFACE_ISO_DEP
;
160 cmd
.num_mapping_configs
++;
161 } else if (ndev
->supported_rf_interfaces
[i
] ==
162 NCI_RF_INTERFACE_NFC_DEP
) {
163 cmd
.mapping_configs
[cmd
.num_mapping_configs
]
164 .rf_protocol
= NCI_RF_PROTOCOL_NFC_DEP
;
165 cmd
.mapping_configs
[cmd
.num_mapping_configs
]
166 .mode
= NCI_DISC_MAP_MODE_BOTH
;
167 cmd
.mapping_configs
[cmd
.num_mapping_configs
]
168 .rf_interface_type
= NCI_RF_INTERFACE_NFC_DEP
;
169 cmd
.num_mapping_configs
++;
172 if (cmd
.num_mapping_configs
== NCI_MAX_NUM_MAPPING_CONFIGS
)
176 nci_send_cmd(ndev
, NCI_OP_RF_DISCOVER_MAP_CMD
,
177 (1 + (cmd
.num_mapping_configs
*sizeof(struct disc_map_config
))),
181 static void nci_rf_discover_req(struct nci_dev
*ndev
, unsigned long opt
)
183 struct nci_rf_disc_cmd cmd
;
184 __u32 protocols
= opt
;
186 cmd
.num_disc_configs
= 0;
188 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
189 (protocols
& NFC_PROTO_JEWEL_MASK
190 || protocols
& NFC_PROTO_MIFARE_MASK
191 || protocols
& NFC_PROTO_ISO14443_MASK
192 || protocols
& NFC_PROTO_NFC_DEP_MASK
)) {
193 cmd
.disc_configs
[cmd
.num_disc_configs
].type
=
194 NCI_DISCOVERY_TYPE_POLL_A_PASSIVE
;
195 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
196 cmd
.num_disc_configs
++;
199 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
200 (protocols
& NFC_PROTO_ISO14443_MASK
)) {
201 cmd
.disc_configs
[cmd
.num_disc_configs
].type
=
202 NCI_DISCOVERY_TYPE_POLL_B_PASSIVE
;
203 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
204 cmd
.num_disc_configs
++;
207 if ((cmd
.num_disc_configs
< NCI_MAX_NUM_RF_CONFIGS
) &&
208 (protocols
& NFC_PROTO_FELICA_MASK
209 || protocols
& NFC_PROTO_NFC_DEP_MASK
)) {
210 cmd
.disc_configs
[cmd
.num_disc_configs
].type
=
211 NCI_DISCOVERY_TYPE_POLL_F_PASSIVE
;
212 cmd
.disc_configs
[cmd
.num_disc_configs
].frequency
= 1;
213 cmd
.num_disc_configs
++;
216 nci_send_cmd(ndev
, NCI_OP_RF_DISCOVER_CMD
,
217 (1 + (cmd
.num_disc_configs
*sizeof(struct disc_config
))),
221 static void nci_rf_deactivate_req(struct nci_dev
*ndev
, unsigned long opt
)
223 struct nci_rf_deactivate_cmd cmd
;
225 cmd
.type
= NCI_DEACTIVATE_TYPE_IDLE_MODE
;
227 nci_send_cmd(ndev
, NCI_OP_RF_DEACTIVATE_CMD
,
228 sizeof(struct nci_rf_deactivate_cmd
),
232 static int nci_open_device(struct nci_dev
*ndev
)
236 mutex_lock(&ndev
->req_lock
);
238 if (test_bit(NCI_UP
, &ndev
->flags
)) {
243 if (ndev
->ops
->open(ndev
)) {
248 atomic_set(&ndev
->cmd_cnt
, 1);
250 set_bit(NCI_INIT
, &ndev
->flags
);
252 rc
= __nci_request(ndev
, nci_reset_req
, 0,
253 msecs_to_jiffies(NCI_RESET_TIMEOUT
));
256 rc
= __nci_request(ndev
, nci_init_req
, 0,
257 msecs_to_jiffies(NCI_INIT_TIMEOUT
));
261 rc
= __nci_request(ndev
, nci_init_complete_req
, 0,
262 msecs_to_jiffies(NCI_INIT_TIMEOUT
));
265 clear_bit(NCI_INIT
, &ndev
->flags
);
268 set_bit(NCI_UP
, &ndev
->flags
);
270 /* Init failed, cleanup */
271 skb_queue_purge(&ndev
->cmd_q
);
272 skb_queue_purge(&ndev
->rx_q
);
273 skb_queue_purge(&ndev
->tx_q
);
275 ndev
->ops
->close(ndev
);
280 mutex_unlock(&ndev
->req_lock
);
284 static int nci_close_device(struct nci_dev
*ndev
)
286 nci_req_cancel(ndev
, ENODEV
);
287 mutex_lock(&ndev
->req_lock
);
289 if (!test_and_clear_bit(NCI_UP
, &ndev
->flags
)) {
290 del_timer_sync(&ndev
->cmd_timer
);
291 mutex_unlock(&ndev
->req_lock
);
295 /* Drop RX and TX queues */
296 skb_queue_purge(&ndev
->rx_q
);
297 skb_queue_purge(&ndev
->tx_q
);
299 /* Flush RX and TX wq */
300 flush_workqueue(ndev
->rx_wq
);
301 flush_workqueue(ndev
->tx_wq
);
304 skb_queue_purge(&ndev
->cmd_q
);
305 atomic_set(&ndev
->cmd_cnt
, 1);
307 set_bit(NCI_INIT
, &ndev
->flags
);
308 __nci_request(ndev
, nci_reset_req
, 0,
309 msecs_to_jiffies(NCI_RESET_TIMEOUT
));
310 clear_bit(NCI_INIT
, &ndev
->flags
);
313 flush_workqueue(ndev
->cmd_wq
);
315 /* After this point our queues are empty
316 * and no works are scheduled. */
317 ndev
->ops
->close(ndev
);
322 mutex_unlock(&ndev
->req_lock
);
327 /* NCI command timer function */
328 static void nci_cmd_timer(unsigned long arg
)
330 struct nci_dev
*ndev
= (void *) arg
;
334 atomic_set(&ndev
->cmd_cnt
, 1);
335 queue_work(ndev
->cmd_wq
, &ndev
->cmd_work
);
/* nfc_ops callback: bring the NCI device up. */
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_open_device(ndev);
}
/* nfc_ops callback: bring the NCI device down. */
static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_close_device(ndev);
}
356 static int nci_start_poll(struct nfc_dev
*nfc_dev
, __u32 protocols
)
358 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
363 if (test_bit(NCI_DISCOVERY
, &ndev
->flags
)) {
364 nfc_err("unable to start poll, since poll is already active");
368 if (test_bit(NCI_POLL_ACTIVE
, &ndev
->flags
)) {
369 nfc_dbg("target already active, first deactivate...");
371 rc
= nci_request(ndev
, nci_rf_deactivate_req
, 0,
372 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
377 rc
= nci_request(ndev
, nci_rf_discover_req
, protocols
,
378 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT
));
381 ndev
->poll_prots
= protocols
;
386 static void nci_stop_poll(struct nfc_dev
*nfc_dev
)
388 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
392 if (!test_bit(NCI_DISCOVERY
, &ndev
->flags
)) {
393 nfc_err("unable to stop poll, since poll is not active");
397 nci_request(ndev
, nci_rf_deactivate_req
, 0,
398 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
401 static int nci_activate_target(struct nfc_dev
*nfc_dev
, __u32 target_idx
,
404 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
406 nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx
, protocol
);
408 if (!test_bit(NCI_POLL_ACTIVE
, &ndev
->flags
)) {
409 nfc_err("there is no available target to activate");
413 if (ndev
->target_active_prot
) {
414 nfc_err("there is already an active target");
418 if (!(ndev
->target_available_prots
& (1 << protocol
))) {
419 nfc_err("target does not support the requested protocol 0x%x",
424 ndev
->target_active_prot
= protocol
;
425 ndev
->target_available_prots
= 0;
430 static void nci_deactivate_target(struct nfc_dev
*nfc_dev
, __u32 target_idx
)
432 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
434 nfc_dbg("entry, target_idx %d", target_idx
);
436 if (!ndev
->target_active_prot
) {
437 nfc_err("unable to deactivate target, no active target");
441 ndev
->target_active_prot
= 0;
443 if (test_bit(NCI_POLL_ACTIVE
, &ndev
->flags
)) {
444 nci_request(ndev
, nci_rf_deactivate_req
, 0,
445 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT
));
449 static int nci_data_exchange(struct nfc_dev
*nfc_dev
, __u32 target_idx
,
451 data_exchange_cb_t cb
,
454 struct nci_dev
*ndev
= nfc_get_drvdata(nfc_dev
);
456 nfc_dbg("entry, target_idx %d, len %d", target_idx
, skb
->len
);
458 if (!ndev
->target_active_prot
) {
459 nfc_err("unable to exchange data, no active target");
463 /* store cb and context to be used on receiving data */
464 ndev
->data_exchange_cb
= cb
;
465 ndev
->data_exchange_cb_context
= cb_context
;
467 return nci_send_data(ndev
, ndev
->conn_id
, skb
);
470 static struct nfc_ops nci_nfc_ops
= {
471 .dev_up
= nci_dev_up
,
472 .dev_down
= nci_dev_down
,
473 .start_poll
= nci_start_poll
,
474 .stop_poll
= nci_stop_poll
,
475 .activate_target
= nci_activate_target
,
476 .deactivate_target
= nci_deactivate_target
,
477 .data_exchange
= nci_data_exchange
,
480 /* ---- Interface to NCI drivers ---- */
483 * nci_allocate_device - allocate a new nci device
485 * @ops: device operations
486 * @supported_protocols: NFC protocols supported by the device
488 struct nci_dev
*nci_allocate_device(struct nci_ops
*ops
,
489 __u32 supported_protocols
,
493 struct nci_dev
*ndev
= NULL
;
495 nfc_dbg("entry, supported_protocols 0x%x", supported_protocols
);
497 if (!ops
->open
|| !ops
->close
|| !ops
->send
)
500 if (!supported_protocols
)
503 ndev
= kzalloc(sizeof(struct nci_dev
), GFP_KERNEL
);
508 ndev
->tx_headroom
= tx_headroom
;
509 ndev
->tx_tailroom
= tx_tailroom
;
511 ndev
->nfc_dev
= nfc_allocate_device(&nci_nfc_ops
,
513 tx_headroom
+ NCI_DATA_HDR_SIZE
,
518 nfc_set_drvdata(ndev
->nfc_dev
, ndev
);
528 EXPORT_SYMBOL(nci_allocate_device
);
531 * nci_free_device - deallocate nci device
533 * @ndev: The nci device to deallocate
535 void nci_free_device(struct nci_dev
*ndev
)
539 nfc_free_device(ndev
->nfc_dev
);
542 EXPORT_SYMBOL(nci_free_device
);
545 * nci_register_device - register a nci device in the nfc subsystem
547 * @dev: The nci device to register
549 int nci_register_device(struct nci_dev
*ndev
)
552 struct device
*dev
= &ndev
->nfc_dev
->dev
;
557 rc
= nfc_register_device(ndev
->nfc_dev
);
563 INIT_WORK(&ndev
->cmd_work
, nci_cmd_work
);
564 snprintf(name
, sizeof(name
), "%s_nci_cmd_wq", dev_name(dev
));
565 ndev
->cmd_wq
= create_singlethread_workqueue(name
);
571 INIT_WORK(&ndev
->rx_work
, nci_rx_work
);
572 snprintf(name
, sizeof(name
), "%s_nci_rx_wq", dev_name(dev
));
573 ndev
->rx_wq
= create_singlethread_workqueue(name
);
576 goto destroy_cmd_wq_exit
;
579 INIT_WORK(&ndev
->tx_work
, nci_tx_work
);
580 snprintf(name
, sizeof(name
), "%s_nci_tx_wq", dev_name(dev
));
581 ndev
->tx_wq
= create_singlethread_workqueue(name
);
584 goto destroy_rx_wq_exit
;
587 skb_queue_head_init(&ndev
->cmd_q
);
588 skb_queue_head_init(&ndev
->rx_q
);
589 skb_queue_head_init(&ndev
->tx_q
);
591 setup_timer(&ndev
->cmd_timer
, nci_cmd_timer
,
592 (unsigned long) ndev
);
594 mutex_init(&ndev
->req_lock
);
599 destroy_workqueue(ndev
->rx_wq
);
602 destroy_workqueue(ndev
->cmd_wq
);
605 nfc_unregister_device(ndev
->nfc_dev
);
610 EXPORT_SYMBOL(nci_register_device
);
613 * nci_unregister_device - unregister a nci device in the nfc subsystem
615 * @dev: The nci device to unregister
617 void nci_unregister_device(struct nci_dev
*ndev
)
621 nci_close_device(ndev
);
623 destroy_workqueue(ndev
->cmd_wq
);
624 destroy_workqueue(ndev
->rx_wq
);
625 destroy_workqueue(ndev
->tx_wq
);
627 nfc_unregister_device(ndev
->nfc_dev
);
629 EXPORT_SYMBOL(nci_unregister_device
);
632 * nci_recv_frame - receive frame from NCI drivers
634 * @skb: The sk_buff to receive
636 int nci_recv_frame(struct sk_buff
*skb
)
638 struct nci_dev
*ndev
= (struct nci_dev
*) skb
->dev
;
640 nfc_dbg("entry, len %d", skb
->len
);
642 if (!ndev
|| (!test_bit(NCI_UP
, &ndev
->flags
)
643 && !test_bit(NCI_INIT
, &ndev
->flags
))) {
648 /* Queue frame for rx worker thread */
649 skb_queue_tail(&ndev
->rx_q
, skb
);
650 queue_work(ndev
->rx_wq
, &ndev
->rx_work
);
654 EXPORT_SYMBOL(nci_recv_frame
);
656 static int nci_send_frame(struct sk_buff
*skb
)
658 struct nci_dev
*ndev
= (struct nci_dev
*) skb
->dev
;
660 nfc_dbg("entry, len %d", skb
->len
);
667 /* Get rid of skb owner, prior to sending to the driver. */
670 return ndev
->ops
->send(skb
);
673 /* Send NCI command */
674 int nci_send_cmd(struct nci_dev
*ndev
, __u16 opcode
, __u8 plen
, void *payload
)
676 struct nci_ctrl_hdr
*hdr
;
679 nfc_dbg("entry, opcode 0x%x, plen %d", opcode
, plen
);
681 skb
= nci_skb_alloc(ndev
, (NCI_CTRL_HDR_SIZE
+ plen
), GFP_KERNEL
);
683 nfc_err("no memory for command");
687 hdr
= (struct nci_ctrl_hdr
*) skb_put(skb
, NCI_CTRL_HDR_SIZE
);
688 hdr
->gid
= nci_opcode_gid(opcode
);
689 hdr
->oid
= nci_opcode_oid(opcode
);
692 nci_mt_set((__u8
*)hdr
, NCI_MT_CMD_PKT
);
693 nci_pbf_set((__u8
*)hdr
, NCI_PBF_LAST
);
696 memcpy(skb_put(skb
, plen
), payload
, plen
);
698 skb
->dev
= (void *) ndev
;
700 skb_queue_tail(&ndev
->cmd_q
, skb
);
701 queue_work(ndev
->cmd_wq
, &ndev
->cmd_work
);
706 /* ---- NCI TX Data worker thread ---- */
708 static void nci_tx_work(struct work_struct
*work
)
710 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, tx_work
);
713 nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev
->credits_cnt
));
715 /* Send queued tx data */
716 while (atomic_read(&ndev
->credits_cnt
)) {
717 skb
= skb_dequeue(&ndev
->tx_q
);
721 atomic_dec(&ndev
->credits_cnt
);
723 nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
725 nci_conn_id(skb
->data
),
726 nci_plen(skb
->data
));
732 /* ----- NCI RX worker thread (data & control) ----- */
734 static void nci_rx_work(struct work_struct
*work
)
736 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, rx_work
);
739 while ((skb
= skb_dequeue(&ndev
->rx_q
))) {
741 switch (nci_mt(skb
->data
)) {
743 nci_rsp_packet(ndev
, skb
);
747 nci_ntf_packet(ndev
, skb
);
750 case NCI_MT_DATA_PKT
:
751 nci_rx_data_packet(ndev
, skb
);
755 nfc_err("unknown MT 0x%x", nci_mt(skb
->data
));
762 /* ----- NCI TX CMD worker thread ----- */
764 static void nci_cmd_work(struct work_struct
*work
)
766 struct nci_dev
*ndev
= container_of(work
, struct nci_dev
, cmd_work
);
769 nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev
->cmd_cnt
));
771 /* Send queued command */
772 if (atomic_read(&ndev
->cmd_cnt
)) {
773 skb
= skb_dequeue(&ndev
->cmd_q
);
777 atomic_dec(&ndev
->cmd_cnt
);
779 nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
781 nci_opcode_gid(nci_opcode(skb
->data
)),
782 nci_opcode_oid(nci_opcode(skb
->data
)),
783 nci_plen(skb
->data
));
787 mod_timer(&ndev
->cmd_timer
,
788 jiffies
+ msecs_to_jiffies(NCI_CMD_TIMEOUT
));