/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients = 2;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;

	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
};

struct ntb_rx_info {
	unsigned int entry;
};
struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;

	void (*event_handler) (void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
};
struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw mw[NTB_NUM_MW];
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
	struct dentry *debugfs_dir;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};
#define QP_TO_MW(qp)		((qp) % NTB_NUM_MW)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}
static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	if (drv && drv->probe)
		rc = drv->probe(pdev);

	return rc;
}
static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	return 0;
}
static struct bus_type ntb_bus_type = {
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};
static LIST_HEAD(ntb_transport_list);
static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}
static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}
static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}
/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);
/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);
/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);
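
/*
 * Usage sketch (illustrative only, compiled out; not part of this driver):
 * a minimal client registering with the NTB bus defined above.  The
 * "ntb_example" name and the example_* callbacks are assumptions;
 * ntb_match_bus() matches client devices against drv.name by prefix, so a
 * client device registered as "ntb_example" binds to this driver.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	/* called when a matching client device is found on the NTB bus */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	/* undo whatever example_probe() set up */
}

static struct ntb_client example_client = {
	.driver	= {
		.name	= "ntb_example",
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	int rc;

	rc = ntb_register_client_dev("ntb_example");
	if (rc)
		return rc;

	rc = ntb_register_client(&example_client);
	if (rc)
		ntb_unregister_client_dev("ntb_example");

	return rc;
}
#endif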
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 600;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}
static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}
static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}
static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num = QP_TO_MW(qp_num);
	unsigned int i;

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->remote_rx_info = nt->mw[mw_num].virt_addr +
			     (qp_num / NTB_NUM_MW * rx_size);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->rx_buff = qp->remote_rx_info + 1;
	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}
}
static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}
static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}
static void ntb_qp_link_cleanup(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}
static void ntb_transport_link_cleanup(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}
static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < NTB_NUM_MW; i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_NUM_MW, NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != NTB_NUM_MW)
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < NTB_NUM_MW; i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < NTB_NUM_MW; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static void ntb_transport_init_queue(struct ntb_transport *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num = QP_TO_MW(qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
		      (qp_num / NTB_NUM_MW * tx_size);
	tx_size -= sizeof(struct ntb_rx_info);

	qp->tx_mw = qp->rx_info + 1;
	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);
}
int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	if (debugfs_initialized())
		nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
	else
		nt->debugfs_dir = NULL;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err1;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++)
		ntb_transport_init_queue(nt, i);

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err3:
	ntb_unregister_event_callback(nt->ndev);
err2:
	kfree(nt->qps);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	debugfs_remove_recursive(nt->debugfs_dir);
	kfree(nt);
	return rc;
}
void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct pci_dev *pdev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	debugfs_remove_recursive(nt->debugfs_dir);

	ntb_unregister_event_callback(nt->ndev);

	pdev = ntb_query_pdev(nt->ndev);

	for (i = 0; i < NTB_NUM_MW; i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	ntb_unregister_transport(nt->ndev);
	kfree(nt);
}
static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
			     struct ntb_queue_entry *entry, void *offset)
{
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;

	memcpy(entry->buf, offset, entry->len);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		goto out;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	if (hdr->len <= entry->len) {
		entry->len = hdr->len;
		ntb_rx_copy_task(qp, entry, offset);
	} else {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);

		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
	}

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

out:
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}
static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc, i;

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}
}
static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}
static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
			     struct ntb_queue_entry *entry,
			     void __iomem *offset)
{
	struct ntb_payload_header __iomem *hdr;

	memcpy_toio(offset, entry->buf, entry->len);

	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	/* Ensure that the data is fully copied out before setting the flag */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_sdb(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}
static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	void __iomem *offset;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
		entry->buf);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_tx_copy_task(qp, entry, offset);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;

		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}
/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err2;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	tasklet_disable(&qp->rx_work);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
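
/*
 * Usage sketch (illustrative only, compiled out; not part of this driver):
 * creating a queue from a client's probe path.  The example_* names are
 * assumptions; the handler prototypes follow the callback pointers in
 * struct ntb_transport_qp above.
 */
#if 0
static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* "data" is the per-buffer cb cookie that was passed to
	 * ntb_transport_rx_enqueue(); "len" is the received payload size.
	 */
}

static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* "data" is the cb cookie from ntb_transport_tx_enqueue(); the
	 * transmit buffer may now be reused or freed.
	 */
}

static void example_event(void *data, int status)
{
	/* status is NTB_LINK_UP or NTB_LINK_DOWN */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler	= example_rx,
	.tx_handler	= example_tx,
	.event_handler	= example_event,
};

static struct ntb_transport_qp *example_open(void *priv, struct pci_dev *pdev)
{
	return ntb_transport_create_queue(priv, pdev, &example_handlers);
}
#endif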
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	cancel_delayed_work_sync(&qp->link_work);

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
	tasklet_disable(&qp->rx_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue from which the entry is dequeued
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL error value on error, or void* for success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
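
/*
 * Usage sketch (illustrative only, compiled out; not part of this driver):
 * draining buffers that were posted with ntb_transport_rx_enqueue() but
 * never filled, e.g. in a client's shutdown path once it is no longer
 * marked ready.  example_free_buf() is an assumed client helper.
 */
#if 0
static void example_drain_rx(struct ntb_transport_qp *qp)
{
	unsigned int len;
	void *buf;

	while ((buf = ntb_transport_rx_remove(qp, &len)))
		example_free_buf(buf, len);
}
#endif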
/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
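
/*
 * Usage sketch (illustrative only, compiled out; not part of this driver):
 * pre-posting a ring of receive buffers sized to the queue's maximum
 * payload.  The buffer pointer doubles as the per-buffer cb cookie, which
 * is how the rx_handler gets it back.  NUM_EXAMPLE_BUFS is an assumed
 * client constant.
 */
#if 0
static int example_post_rx_bufs(struct ntb_transport_qp *qp)
{
	unsigned int len = ntb_transport_max_size(qp);
	void *buf;
	int i, rc;

	for (i = 0; i < NUM_EXAMPLE_BUFS; i++) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		rc = ntb_transport_rx_enqueue(qp, buf, buf, len);
		if (rc) {
			kfree(buf);
			return rc;
		}
	}

	return 0;
}
#endif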
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
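
/*
 * Usage sketch (illustrative only, compiled out; not part of this driver):
 * transmitting one buffer.  The caller serializes access to the qp as noted
 * in the kernel-doc above, and the buffer stays owned by the transport
 * until the tx_handler callback fires for its cb cookie.
 */
#if 0
static int example_send(struct ntb_transport_qp *qp, void *buf,
			unsigned int len)
{
	if (!ntb_transport_link_query(qp))
		return -ENOTCONN;

	if (len > ntb_transport_max_size(qp))
		return -EMSGSIZE;

	/* use the buffer pointer as its own completion cookie */
	return ntb_transport_tx_enqueue(qp, buf, buf, len);
}
#endif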
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);
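
/*
 * Usage sketch (illustrative only, compiled out; not part of this driver):
 * typical bring-up order in a client's probe path - create the queue, post
 * receive buffers, then declare the client ready so qp link negotiation can
 * start.  example_handlers and example_post_rx_bufs() are the assumed
 * helpers from the earlier sketches.
 */
#if 0
static int example_start(void *priv, struct pci_dev *pdev,
			 struct ntb_transport_qp **qpp)
{
	struct ntb_transport_qp *qp;
	int rc;

	qp = ntb_transport_create_queue(priv, pdev, &example_handlers);
	if (!qp)
		return -ENODEV;

	rc = example_post_rx_bufs(qp);
	if (rc) {
		ntb_transport_free_queue(qp);
		return rc;
	}

	ntb_transport_link_up(qp);
	*qpp = qp;

	return 0;
}
#endif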
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);
/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
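
/*
 * Usage sketch (illustrative only, compiled out; not part of this driver):
 * a network-style client would derive its MTU from the per-frame payload
 * limit reported here, for example by reserving room for an Ethernet
 * header (ETH_HLEN comes from <linux/if_ether.h>).
 */
#if 0
static unsigned int example_mtu(struct ntb_transport_qp *qp)
{
	/* each NTB payload carries one Ethernet frame in this sketch */
	return ntb_transport_max_size(qp) - ETH_HLEN;
}
#endif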