NFC: basic NCI protocol implementation
net/nfc/nci/core.c
/*
 *  The NFC Controller Interface is the communication protocol between an
 *  NFC Controller (NFCC) and a Device Host (DH).
 *
 *  Copyright (C) 2011 Texas Instruments, Inc.
 *
 *  Written by Ilan Elias <ilane@ti.com>
 *
 *  Acknowledgements:
 *  This file is based on hci_core.c, which was written
 *  by Maxim Krasnyansky.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);
/* ---- NCI requests ---- */
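/*
 * NCI requests are synchronous: the request callback queues one or more
 * commands to the NFCC, and the caller then blocks on req_completion until
 * the response handler calls nci_req_complete(), the request is canceled,
 * or the timeout expires.
 */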
void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}
static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}
/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
	void (*req)(struct nci_dev *ndev, unsigned long opt),
	unsigned long opt,
	__u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc = wait_for_completion_interruptible_timeout(
							&ndev->req_completion,
							timeout);

	nfc_dbg("wait_for_completion return %ld", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		nfc_err("wait_for_completion_interruptible_timeout failed %ld",
			completion_rc);

		/* 0 means timeout, negative means interrupted */
		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}
static inline int nci_request(struct nci_dev *ndev,
		void (*req)(struct nci_dev *ndev, unsigned long opt),
		unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
}
static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct nci_core_conn_create_cmd conn_cmd;
	int i;

	/* create static rf connection */
	conn_cmd.target_handle = 0;
	conn_cmd.num_target_specific_params = 0;
	nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);

	/* set rf mapping configurations */
	cmd.num_mapping_configs = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_ISO_DEP) {
			cmd.mapping_configs[cmd.num_mapping_configs]
				.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cmd.mapping_configs[cmd.num_mapping_configs]
				.mode = NCI_DISC_MAP_MODE_BOTH;
			cmd.mapping_configs[cmd.num_mapping_configs]
				.rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
			cmd.num_mapping_configs++;
		} else if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_NFC_DEP) {
			cmd.mapping_configs[cmd.num_mapping_configs]
				.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cmd.mapping_configs[cmd.num_mapping_configs]
				.mode = NCI_DISC_MAP_MODE_BOTH;
			cmd.mapping_configs[cmd.num_mapping_configs]
				.rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
			cmd.num_mapping_configs++;
		}

		if (cmd.num_mapping_configs == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		(1 + (cmd.num_mapping_configs*sizeof(struct disc_map_config))),
		&cmd);
}
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_JEWEL_MASK
		|| protocols & NFC_PROTO_MIFARE_MASK
		|| protocols & NFC_PROTO_ISO14443_MASK
		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_FELICA_MASK
		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		(1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
		&cmd);
}
static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
			sizeof(struct nci_rf_deactivate_cmd),
			&cmd);
}
static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
			msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}
static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
			msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no works are scheduled. */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}
/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	nfc_dbg("entry");

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_open_device(ndev);
}
static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	return nci_close_device(ndev);
}
static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	nfc_dbg("entry");

	if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
		nfc_err("unable to start poll, since poll is already active");
		return -EBUSY;
	}

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nfc_dbg("target already active, first deactivate...");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, protocols,
		msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = protocols;

	return rc;
}
static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry");

	if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
		nfc_err("unable to stop poll, since poll is not active");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
				__u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);

	if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nfc_err("there is no available target to activate");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		nfc_err("there is already an active target");
		return -EBUSY;
	}

	if (!(ndev->target_available_prots & (1 << protocol))) {
		nfc_err("target does not support the requested protocol 0x%x",
			protocol);
		return -EINVAL;
	}

	ndev->target_active_prot = protocol;
	ndev->target_available_prots = 0;

	return 0;
}
static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d", target_idx);

	if (!ndev->target_active_prot) {
		nfc_err("unable to deactivate target, no active target");
		return;
	}

	ndev->target_active_prot = 0;

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}
static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
						struct sk_buff *skb,
						data_exchange_cb_t cb,
						void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);

	if (!ndev->target_active_prot) {
		nfc_err("unable to exchange data, no active target");
		return -EINVAL;
	}

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	return nci_send_data(ndev, ndev->conn_id, skb);
}
static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.data_exchange = nci_data_exchange,
};
/* ---- Interface to NCI drivers ---- */
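/*
 * Expected driver flow (see the usage sketch after nci_recv_frame() below):
 * nci_allocate_device() -> nci_register_device() -> feed incoming frames
 * via nci_recv_frame() -> nci_unregister_device() -> nci_free_device().
 */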
/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: reserved driver headroom for outgoing frames
 * @tx_tailroom: reserved driver tailroom for outgoing frames
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
					__u32 supported_protocols,
					int tx_headroom,
					int tx_tailroom)
{
	struct nci_dev *ndev = NULL;

	nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		goto exit;

	if (!supported_protocols)
		goto exit;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		goto exit;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
						supported_protocols,
						tx_headroom + NCI_DATA_HDR_SIZE,
						tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	goto exit;

free_exit:
	kfree(ndev);

exit:
	return ndev;
}
EXPORT_SYMBOL(nci_allocate_device);
/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_dbg("entry");

	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	nfc_dbg("entry");

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
			(unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);
/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nfc_dbg("entry");

	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);
/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	nfc_dbg("entry, len %d", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
		&& !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);
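/*
 * Usage sketch (illustration only, not part of this file): how a
 * hypothetical transport driver might use the interface above.  All
 * my_* names and the 0 headroom/tailroom values are made up.
 *
 *	static int my_open(struct nci_dev *ndev)  { return 0; }
 *	static int my_close(struct nci_dev *ndev) { return 0; }
 *	static int my_send(struct sk_buff *skb)   { return my_hw_write(skb); }
 *
 *	static struct nci_ops my_ops = {
 *		.open  = my_open,
 *		.close = my_close,
 *		.send  = my_send,
 *	};
 *
 * Probe:
 *	ndev = nci_allocate_device(&my_ops, NFC_PROTO_MIFARE_MASK |
 *				   NFC_PROTO_ISO14443_MASK, 0, 0);
 *	rc = nci_register_device(ndev);
 *
 * RX path: point skb->dev at the nci_dev and hand the frame to the core:
 *	skb->dev = (void *) ndev;
 *	nci_recv_frame(skb);
 *
 * Remove:
 *	nci_unregister_device(ndev);
 *	nci_free_device(ndev);
 */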
static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	nfc_dbg("entry, len %d", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}
/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		nfc_err("no memory for command");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}
/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		atomic_dec(&ndev->credits_cnt);

		nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
				nci_pbf(skb->data),
				nci_conn_id(skb->data),
				nci_plen(skb->data));

		nci_send_frame(skb);
	}
}
/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			nfc_err("unknown MT 0x%x", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}
}
/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));

	/* Send queued command; cmd_cnt allows only one command in flight,
	 * and nci_cmd_timer restores the credit if no response arrives
	 * within NCI_CMD_TIMEOUT. */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
				nci_pbf(skb->data),
				nci_opcode_gid(nci_opcode(skb->data)),
				nci_opcode_oid(nci_opcode(skb->data)),
				nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}