/*
 * RapidIO interconnect services
 * (RapidIO Interconnect Specification, http://www.rapidio.org)
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write/Error Management initialization and handling
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/rio_regs.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
static LIST_HEAD(rio_mports);
static unsigned char next_portid;
static DEFINE_SPINLOCK(rio_mmap_lock);
/**
 * rio_local_get_device_id - Get the base/extended device id for a port
 * @port: RIO master port from which to get the deviceid
 *
 * Reads the base/extended device id from the local device
 * implementing the master port. Returns the 8/16-bit device
 * id.
 */
u16 rio_local_get_device_id(struct rio_mport *port)
{
	u32 result;

	rio_local_read_config_32(port, RIO_DID_CSR, &result);

	return (RIO_GET_DID(port->sys_size, result));
}
/**
 * rio_request_inb_mbox - request inbound mailbox service
 * @mport: RIO master port from which to allocate the mailbox resource
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox number to claim
 * @entries: Number of entries in inbound mailbox queue
 * @minb: Callback to execute when inbound message is received
 *
 * Requests ownership of an inbound mailbox resource and binds
 * a callback function to the resource. Returns %0 on success.
 */
int rio_request_inb_mbox(struct rio_mport *mport,
			 void *dev_id,
			 int mbox,
			 int entries,
			 void (*minb) (struct rio_mport *mport, void *dev_id,
				       int mbox, int slot))
{
	int rc;
	struct resource *res;

	if (mport->ops->open_inb_mbox == NULL)
		return -ENOSYS;

	res = kmalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rio_init_mbox_res(res, mbox, mbox);

	/* Make sure this mailbox isn't in use */
	rc = request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE], res);
	if (rc < 0) {
		kfree(res);
		return rc;
	}

	mport->inb_msg[mbox].res = res;

	/* Hook the inbound message callback */
	mport->inb_msg[mbox].mcback = minb;

	rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries);

	return rc;
}
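/*
 * A minimal usage sketch (illustrative only, not a driver in the tree):
 * a hypothetical client claims inbound mailbox 0 with 32 queue entries and
 * binds a receive callback.  The example_* names are assumptions made for
 * illustration.
 */
static void __maybe_unused example_inb_cb(struct rio_mport *mport,
					  void *dev_id, int mbox, int slot)
{
	/* A real driver would dequeue and process the message here */
	pr_info("RIO: inbound message, mbox %d slot %d\n", mbox, slot);
}

static int __maybe_unused example_open_mbox(struct rio_mport *mport,
					    void *dev_id)
{
	/* Claim mailbox 0, 32 entries deep, and hook the callback */
	return rio_request_inb_mbox(mport, dev_id, 0, 32, example_inb_cb);
}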
/**
 * rio_release_inb_mbox - release inbound mailbox message service
 * @mport: RIO master port from which to release the mailbox resource
 * @mbox: Mailbox number to release
 *
 * Releases ownership of an inbound mailbox resource. Returns 0
 * if the request has been satisfied.
 */
int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
{
	if (mport->ops->close_inb_mbox) {
		mport->ops->close_inb_mbox(mport, mbox);

		/* Release the mailbox resource */
		return release_resource(mport->inb_msg[mbox].res);
	} else
		return -ENOSYS;
}
/**
 * rio_request_outb_mbox - request outbound mailbox service
 * @mport: RIO master port from which to allocate the mailbox resource
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox number to claim
 * @entries: Number of entries in outbound mailbox queue
 * @moutb: Callback to execute when outbound message is sent
 *
 * Requests ownership of an outbound mailbox resource and binds
 * a callback function to the resource. Returns 0 on success.
 */
int rio_request_outb_mbox(struct rio_mport *mport,
			  void *dev_id,
			  int mbox,
			  int entries,
			  void (*moutb) (struct rio_mport *mport, void *dev_id,
					 int mbox, int slot))
{
	int rc;
	struct resource *res;

	if (mport->ops->open_outb_mbox == NULL)
		return -ENOSYS;

	res = kmalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rio_init_mbox_res(res, mbox, mbox);

	/* Make sure this outbound mailbox isn't in use */
	rc = request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE], res);
	if (rc < 0) {
		kfree(res);
		return rc;
	}

	mport->outb_msg[mbox].res = res;

	/* Hook the outbound message callback */
	mport->outb_msg[mbox].mcback = moutb;

	rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);

	return rc;
}
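/*
 * A minimal outbound sketch, assuming the rio_add_outb_message() helper
 * from rio_drv.h: a hypothetical client opens outbound mailbox 0 and
 * queues one buffer toward @rdev.  The example_* names are illustrative
 * assumptions only.
 */
static void __maybe_unused example_outb_cb(struct rio_mport *mport,
					   void *dev_id, int mbox, int slot)
{
	/* Called once the message queued in @slot has gone out on the link */
	pr_info("RIO: outbound message done, mbox %d slot %d\n", mbox, slot);
}

static int __maybe_unused example_send(struct rio_mport *mport,
				       struct rio_dev *rdev, void *dev_id,
				       void *buf, size_t len)
{
	int rc = rio_request_outb_mbox(mport, dev_id, 0, 32, example_outb_cb);

	if (rc)
		return rc;

	return rio_add_outb_message(mport, rdev, 0, buf, len);
}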
/**
 * rio_release_outb_mbox - release outbound mailbox message service
 * @mport: RIO master port from which to release the mailbox resource
 * @mbox: Mailbox number to release
 *
 * Releases ownership of an outbound mailbox resource. Returns 0
 * if the request has been satisfied.
 */
int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
{
	if (mport->ops->close_outb_mbox) {
		mport->ops->close_outb_mbox(mport, mbox);

		/* Release the mailbox resource */
		return release_resource(mport->outb_msg[mbox].res);
	} else
		return -ENOSYS;
}
/**
 * rio_setup_inb_dbell - bind inbound doorbell callback
 * @mport: RIO master port to bind the doorbell callback
 * @dev_id: Device specific pointer to pass on event
 * @res: Doorbell message resource
 * @dinb: Callback to execute when doorbell is received
 *
 * Adds a doorbell resource/callback pair into a port's
 * doorbell event list. Returns 0 if the request has been
 * satisfied.
 */
static int
rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
		    void (*dinb) (struct rio_mport *mport, void *dev_id,
				  u16 src, u16 dst, u16 info))
{
	struct rio_dbell *dbell;

	if (!(dbell = kmalloc(sizeof(struct rio_dbell), GFP_KERNEL)))
		return -ENOMEM;

	dbell->res = res;
	dbell->dinb = dinb;
	dbell->dev_id = dev_id;

	list_add_tail(&dbell->node, &mport->dbells);

	return 0;
}
/**
 * rio_request_inb_dbell - request inbound doorbell message service
 * @mport: RIO master port from which to allocate the doorbell resource
 * @dev_id: Device specific pointer to pass on event
 * @start: Doorbell info range start
 * @end: Doorbell info range end
 * @dinb: Callback to execute when doorbell is received
 *
 * Requests ownership of an inbound doorbell resource and binds
 * a callback function to the resource. Returns 0 if the request
 * has been satisfied.
 */
int rio_request_inb_dbell(struct rio_mport *mport,
			  void *dev_id,
			  u16 start,
			  u16 end,
			  void (*dinb) (struct rio_mport *mport, void *dev_id,
					u16 src, u16 dst, u16 info))
{
	int rc;
	struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);

	if (!res)
		return -ENOMEM;

	rio_init_dbell_res(res, start, end);

	/* Make sure these doorbells aren't in use */
	rc = request_resource(&mport->riores[RIO_DOORBELL_RESOURCE], res);
	if (rc < 0) {
		kfree(res);
		return rc;
	}

	/* Hook the doorbell callback */
	rc = rio_setup_inb_dbell(mport, dev_id, res, dinb);

	return rc;
}
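/*
 * A minimal sketch of a hypothetical client listening for doorbell info
 * values 0x0000-0x000f on @mport.  The example_* names are illustrative
 * assumptions only.
 */
static void __maybe_unused example_dbell_cb(struct rio_mport *mport,
					    void *dev_id, u16 src, u16 dst,
					    u16 info)
{
	/* src/dst are the destIDs of the sending and receiving endpoints */
	pr_info("RIO: doorbell 0x%04x from destid 0x%x\n", info, src);
}

static int __maybe_unused example_request_dbells(struct rio_mport *mport,
						 void *dev_id)
{
	return rio_request_inb_dbell(mport, dev_id, 0x0000, 0x000f,
				     example_dbell_cb);
}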
/**
 * rio_release_inb_dbell - release inbound doorbell message service
 * @mport: RIO master port from which to release the doorbell resource
 * @start: Doorbell info range start
 * @end: Doorbell info range end
 *
 * Releases ownership of an inbound doorbell resource and removes
 * callback from the doorbell event list. Returns 0 if the request
 * has been satisfied.
 */
int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
{
	int rc = 0, found = 0;
	struct rio_dbell *dbell;

	list_for_each_entry(dbell, &mport->dbells, node) {
		if ((dbell->res->start == start) && (dbell->res->end == end)) {
			found = 1;
			break;
		}
	}

	/* If we can't find an exact match, fail */
	if (!found)
		return -EINVAL;

	/* Delete from list */
	list_del(&dbell->node);

	/* Release the doorbell resource */
	rc = release_resource(dbell->res);

	/* Free the doorbell event */
	kfree(dbell);

	return rc;
}
/**
 * rio_request_outb_dbell - request outbound doorbell message range
 * @rdev: RIO device from which to allocate the doorbell resource
 * @start: Doorbell message range start
 * @end: Doorbell message range end
 *
 * Requests ownership of a doorbell message range. Returns a resource
 * if the request has been satisfied or %NULL on failure.
 */
struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
					u16 end)
{
	struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);

	if (res) {
		rio_init_dbell_res(res, start, end);

		/* Make sure these doorbells aren't in use */
		if (request_resource(&rdev->riores[RIO_DOORBELL_RESOURCE], res)
		    < 0) {
			kfree(res);
			res = NULL;
		}
	}

	return res;
}
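/*
 * A minimal sketch, assuming the rio_send_doorbell() helper from rio_drv.h:
 * reserve doorbell info values 0x10-0x1f on a remote device, then ring one
 * of them.  example_ring is an illustrative name only.
 */
static int __maybe_unused example_ring(struct rio_dev *rdev)
{
	struct resource *dres = rio_request_outb_dbell(rdev, 0x10, 0x1f);

	if (!dres)
		return -EBUSY;

	/* Send doorbell info value 0x10 to the remote endpoint */
	return rio_send_doorbell(rdev, 0x10);
}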
/**
 * rio_release_outb_dbell - release outbound doorbell message range
 * @rdev: RIO device from which to release the doorbell resource
 * @res: Doorbell resource to be freed
 *
 * Releases ownership of a doorbell message range. Returns 0 if the
 * request has been satisfied.
 */
int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
{
	int rc = release_resource(res);

	kfree(res);

	return rc;
}
/**
 * rio_request_inb_pwrite - request inbound port-write message service
 * @rdev: RIO device to which to register the inbound port-write callback routine
 * @pwcback: Callback routine to execute when port-write is received
 *
 * Binds a port-write callback function to the RapidIO device.
 * Returns 0 if the request has been satisfied.
 */
int rio_request_inb_pwrite(struct rio_dev *rdev,
	int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
{
	int rc = 0;

	spin_lock(&rio_global_list_lock);
	if (rdev->pwcback != NULL)
		rc = -ENOMEM;
	else
		rdev->pwcback = pwcback;

	spin_unlock(&rio_global_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
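/*
 * A minimal sketch of registering a port-write handler for a device; a
 * switch driver would typically do this from its probe path.  The
 * example_* names are illustrative assumptions only.
 */
static int __maybe_unused example_pw_handler(struct rio_dev *rdev,
					     union rio_pw_msg *msg, int step)
{
	pr_info("RIO: port-write from %s, comptag 0x%08x\n",
		rio_name(rdev), msg->em.comptag);

	/* Returning 0 tells the core no further standard processing is needed */
	return 0;
}

static int __maybe_unused example_register_pw(struct rio_dev *rdev)
{
	return rio_request_inb_pwrite(rdev, example_pw_handler);
}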
/**
 * rio_release_inb_pwrite - release inbound port-write message service
 * @rdev: RIO device which registered for inbound port-write callback
 *
 * Removes callback from the rio_dev structure. Returns 0 if the request
 * has been satisfied.
 */
int rio_release_inb_pwrite(struct rio_dev *rdev)
{
	int rc = -ENOMEM;

	spin_lock(&rio_global_list_lock);
	if (rdev->pwcback) {
		rdev->pwcback = NULL;
		rc = 0;
	}

	spin_unlock(&rio_global_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
/**
 * rio_map_inb_region -- Map inbound memory region.
 * @mport: Master port.
 * @local: physical address of memory region to be mapped
 * @rbase: RIO base address assigned to this window
 * @size: Size of the memory region
 * @rflags: Flags for mapping.
 *
 * Return: 0 -- Success.
 *
 * This function will create the mapping from RIO space to local memory.
 */
int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
			u64 rbase, u32 size, u32 rflags)
{
	int rc = 0;
	unsigned long flags;

	if (!mport->ops->map_inb)
		return -1;
	spin_lock_irqsave(&rio_mmap_lock, flags);
	rc = mport->ops->map_inb(mport, local, rbase, size, rflags);
	spin_unlock_irqrestore(&rio_mmap_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(rio_map_inb_region);
/**
 * rio_unmap_inb_region -- Unmap the inbound memory region
 * @mport: Master port
 * @lstart: physical address of memory region to be unmapped
 */
void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart)
{
	unsigned long flags;

	if (!mport->ops->unmap_inb)
		return;
	spin_lock_irqsave(&rio_mmap_lock, flags);
	mport->ops->unmap_inb(mport, lstart);
	spin_unlock_irqrestore(&rio_mmap_lock, flags);
}
EXPORT_SYMBOL_GPL(rio_unmap_inb_region);
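/*
 * A minimal sketch of exposing a local buffer to the RapidIO fabric as an
 * inbound window.  The RIO base address 0x10000000 and the mapping flags
 * left at 0 are illustrative assumptions; the caller is assumed to have
 * already obtained a DMA-able physical address for the buffer.
 */
static int __maybe_unused example_map_window(struct rio_mport *mport,
					     dma_addr_t buf_phys, u32 size)
{
	/* Remote endpoints can then read/write RIO address 0x10000000 */
	return rio_map_inb_region(mport, buf_phys, 0x10000000, size, 0);
}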
/**
 * rio_mport_get_physefb - Helper function that returns register offset
 *                         for Physical Layer Extended Features Block.
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 */
u32
rio_mport_get_physefb(struct rio_mport *port, int local,
		      u16 destid, u8 hopcount)
{
	u32 ext_ftr_ptr;
	u32 ftr_header;

	ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);

	while (ext_ftr_ptr)  {
		if (local)
			rio_local_read_config_32(port, ext_ftr_ptr,
						 &ftr_header);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 ext_ftr_ptr, &ftr_header);

		ftr_header = RIO_GET_BLOCK_ID(ftr_header);
		switch (ftr_header) {

		case RIO_EFB_SER_EP_ID_V13P:
		case RIO_EFB_SER_EP_REC_ID_V13P:
		case RIO_EFB_SER_EP_FREE_ID_V13P:
		case RIO_EFB_SER_EP_ID:
		case RIO_EFB_SER_EP_REC_ID:
		case RIO_EFB_SER_EP_FREE_ID:
		case RIO_EFB_SER_EP_FREC_ID:
			return ext_ftr_ptr;

		default:
			break;
		}

		ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
						hopcount, ext_ftr_ptr);
	}

	return ext_ftr_ptr;
}
/**
 * rio_get_comptag - Begin or continue searching for a RIO device by component tag
 * @comp_tag: RIO component tag to match
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @comp_tag, a pointer to its device
 * structure is returned. Otherwise, %NULL is returned. A new search
 * is initiated by passing %NULL to the @from argument. Otherwise, if
 * @from is not %NULL, searches continue from next device on the global
 * list.
 */
struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
{
	struct list_head *n;
	struct rio_dev *rdev;

	spin_lock(&rio_global_list_lock);
	n = from ? from->global_list.next : rio_devices.next;

	while (n && (n != &rio_devices)) {
		rdev = rio_dev_g(n);
		if (rdev->comp_tag == comp_tag)
			goto exit;
		n = n->next;
	}
	rdev = NULL;
exit:
	spin_unlock(&rio_global_list_lock);
	return rdev;
}
/**
 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
 * @rdev: Pointer to RIO device control structure
 * @pnum: Switch port number to set LOCKOUT bit
 * @lock: Operation : set (=1) or clear (=0)
 */
int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
{
	u32 regval;

	rio_read_config_32(rdev,
			   rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
			   &regval);
	if (lock)
		regval |= RIO_PORT_N_CTL_LOCKOUT;
	else
		regval &= ~RIO_PORT_N_CTL_LOCKOUT;

	rio_write_config_32(rdev,
			    rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
			    regval);
	return 0;
}
/**
 * rio_chk_dev_route - Validate route to the specified device.
 * @rdev: RIO device failed to respond
 * @nrdev: Last active device on the route to rdev
 * @npnum: nrdev's port number on the route to rdev
 *
 * Follows a route to the specified RIO device to determine the last available
 * device (and corresponding RIO port) on the route.
 */
static int
rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
{
	u32 result;
	int p_port, rc = -EIO;
	struct rio_dev *prev = NULL;

	/* Find switch with failed RIO link */
	while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) {
		if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) {
			prev = rdev->prev;
			break;
		}
		rdev = rdev->prev;
	}

	if (prev == NULL)
		goto err_out;

	p_port = prev->rswitch->route_table[rdev->destid];

	if (p_port != RIO_INVALID_ROUTE) {
		pr_debug("RIO: link failed on [%s]-P%d\n",
			 rio_name(prev), p_port);
		*nrdev = prev;
		*npnum = p_port;
		rc = 0;
	} else
		pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev));
err_out:
	return rc;
}
/**
 * rio_mport_chk_dev_access - Validate access to the specified device.
 * @mport: Master port to send transactions
 * @destid: Device destination ID in network
 * @hopcount: Number of hops into the network
 */
int
rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
{
	int i = 0;
	u32 tmp;

	while (rio_mport_read_config_32(mport, destid, hopcount,
					RIO_DEV_ID_CAR, &tmp)) {
		i++;
		if (i == RIO_MAX_CHK_RETRY)
			return -EIO;
		mdelay(1);
	}

	return 0;
}
/**
 * rio_chk_dev_access - Validate access to the specified device.
 * @rdev: Pointer to RIO device control structure
 */
static int rio_chk_dev_access(struct rio_dev *rdev)
{
	return rio_mport_chk_dev_access(rdev->net->hport,
					rdev->destid, rdev->hopcount);
}
/**
 * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
 *                        returns link-response (if requested).
 * @rdev: RIO device to issue Input-status command
 * @pnum: Device port number to issue the command
 * @lnkresp: Response from a link partner
 */
static int
rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
{
	u32 regval;
	int checkcount;

	if (lnkresp) {
		/* Read from link maintenance response register
		 * to clear valid bit */
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
			&regval);
		udelay(50);
	}

	/* Issue Input-status command */
	rio_write_config_32(rdev,
		rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
		RIO_MNT_REQ_CMD_IS);

	/* Exit if the response is not expected */
	if (lnkresp == NULL)
		return 0;

	checkcount = 3;
	while (checkcount--) {
		udelay(50);
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
			&regval);
		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
			*lnkresp = regval;
			return 0;
		}
	}

	return -EIO;
}
/**
 * rio_clr_err_stopped - Clears port Error-stopped states.
 * @rdev: Pointer to RIO device control structure
 * @pnum: Switch port number to clear errors
 * @err_status: port error status (if 0 reads register from device)
 */
static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
{
	struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
	u32 regval;
	u32 far_ackid, far_linkstat, near_ackid;

	if (err_status == 0)
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);

	if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
		pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
		/*
		 * Send a Link-Request/Input-Status control symbol
		 */
		if (rio_get_input_status(rdev, pnum, &regval)) {
			pr_debug("RIO_EM: Input-status response timeout\n");
			goto rd_err;
		}

		pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
			 pnum, regval);
		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
			&regval);
		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
		near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
		pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
			 " near_ackID=0x%02x\n",
			 pnum, far_ackid, far_linkstat, near_ackid);

		/*
		 * If required, synchronize ackIDs of near and
		 * far sides.
		 */
		if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
		    (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
			/* Align near outstanding/outbound ackIDs with
			 * far inbound ackID.
			 */
			rio_write_config_32(rdev,
				rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
				(near_ackid << 24) |
					(far_ackid << 8) | far_ackid);
			/* Align far outstanding/outbound ackIDs with
			 * near inbound ackID.
			 */
			if (nextdev)
				rio_write_config_32(nextdev,
					nextdev->phys_efptr +
					RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
					(far_ackid << 24) |
					(near_ackid << 8) | near_ackid);
			else
				pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
		}
rd_err:
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);
		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
	}

	if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
		pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
		rio_get_input_status(nextdev,
				     RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);

		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);
		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
	}

	return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
			      RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
}
/**
 * rio_inb_pwrite_handler - process inbound port-write message
 * @pw_msg: pointer to inbound port-write message
 *
 * Processes an inbound port-write message. Returns 0 if the request
 * has been satisfied.
 */
int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
{
	struct rio_dev *rdev;
	u32 err_status, em_perrdet, em_ltlerrdet;
	int rc, portnum;
	u32 i;

	rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
	if (rdev == NULL) {
		/* Device removed or enumeration error */
		pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
			 __func__, pw_msg->em.comptag);
		return -EIO;
	}

	pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));

	for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
		pr_debug("0x%02x: %08x %08x %08x %08x\n",
			 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
			 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
		i += 4;
	}

	/* Call an external service function (if such is registered
	 * for this device). This may be the service for endpoints that send
	 * device-specific port-write messages. End-point messages expected
	 * to be handled completely by EP specific device driver.
	 * For switches rc==0 signals that no standard processing required.
	 */
	if (rdev->pwcback != NULL) {
		rc = rdev->pwcback(rdev, pw_msg, 0);
		if (rc == 0)
			return 0;
	}

	portnum = pw_msg->em.is_port & 0xFF;

	/* Check if device and route to it are functional:
	 * Sometimes devices may send PW message(s) just before being
	 * powered down (or link being lost).
	 */
	if (rio_chk_dev_access(rdev)) {
		pr_debug("RIO: device access failed - get link partner\n");
		/* Scan route to the device and identify failed link.
		 * This will replace device and port reported in PW message.
		 * PW message should not be used after this point.
		 */
		if (rio_chk_dev_route(rdev, &rdev, &portnum)) {
			pr_err("RIO: Route trace for %s failed\n",
			       rio_name(rdev));
			return -EIO;
		}
	}

	/* For End-point devices processing stops here */
	if (!(rdev->pef & RIO_PEF_SWITCH))
		return 0;

	if (rdev->phys_efptr == 0) {
		pr_err("RIO_PW: Bad switch initialization for %s\n",
		       rio_name(rdev));
		return 0;
	}

	/*
	 * Process the port-write notification from switch
	 */
	if (rdev->rswitch->em_handle)
		rdev->rswitch->em_handle(rdev, portnum);

	rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
			&err_status);
	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);

	if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {

		if (!(rdev->rswitch->port_ok & (1 << portnum))) {
			rdev->rswitch->port_ok |= (1 << portnum);
			rio_set_port_lockout(rdev, portnum, 0);
			/* Schedule Insertion Service */
			pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
				 rio_name(rdev), portnum);
		}

		/* Clear error-stopped states (if reported).
		 * Depending on the link partner state, two attempts
		 * may be needed for successful recovery.
		 */
		if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
					RIO_PORT_N_ERR_STS_PW_INP_ES)) {
			if (rio_clr_err_stopped(rdev, portnum, err_status))
				rio_clr_err_stopped(rdev, portnum, 0);
		}
	} else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */

		if (rdev->rswitch->port_ok & (1 << portnum)) {
			rdev->rswitch->port_ok &= ~(1 << portnum);
			rio_set_port_lockout(rdev, portnum, 1);

			rio_write_config_32(rdev,
					rdev->phys_efptr +
					RIO_PORT_N_ACK_STS_CSR(portnum),
					RIO_PORT_N_ACK_CLEAR);

			/* Schedule Extraction Service */
			pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
				 rio_name(rdev), portnum);
		}
	}

	rio_read_config_32(rdev,
		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
	if (em_perrdet) {
		pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
			 portnum, em_perrdet);
		/* Clear EM Port N Error Detect CSR */
		rio_write_config_32(rdev,
			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
	}

	rio_read_config_32(rdev,
		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
	if (em_ltlerrdet) {
		pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
			 em_ltlerrdet);
		/* Clear EM L/T Layer Error Detect CSR */
		rio_write_config_32(rdev,
			rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
	}

	/* Clear remaining error bits and Port-Write Pending bit */
	rio_write_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
			err_status);

	return 0;
}
EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
/**
 * rio_mport_get_efb - get pointer to next extended features block
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @from: Offset of current Extended Feature block header (if 0 starts
 * from	ExtFeaturePtr)
 */
u32
rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
		  u8 hopcount, u32 from)
{
	u32 reg_val;

	if (from == 0) {
		if (local)
			rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
						 &reg_val);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 RIO_ASM_INFO_CAR, &reg_val);
		return reg_val & RIO_EXT_FTR_PTR_MASK;
	} else {
		if (local)
			rio_local_read_config_32(port, from, &reg_val);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 from, &reg_val);
		return RIO_GET_BLOCK_ID(reg_val);
	}
}
/**
 * rio_mport_get_feature - query for devices' extended features
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @ftr: Extended feature code
 *
 * Tell if a device supports a given RapidIO capability.
 * Returns the offset of the requested extended feature
 * block within the device's RIO configuration space or
 * 0 in case the device does not support it. Possible
 * values are:
 *
 * %RIO_EFB_PAR_EP_ID		LP/LVDS EP Devices
 *
 * %RIO_EFB_PAR_EP_REC_ID	LP/LVDS EP Recovery Devices
 *
 * %RIO_EFB_PAR_EP_FREE_ID	LP/LVDS EP Free Devices
 *
 * %RIO_EFB_SER_EP_ID		LP/Serial EP Devices
 *
 * %RIO_EFB_SER_EP_REC_ID	LP/Serial EP Recovery Devices
 *
 * %RIO_EFB_SER_EP_FREE_ID	LP/Serial EP Free Devices
 */
u32
rio_mport_get_feature(struct rio_mport *port, int local, u16 destid,
		      u8 hopcount, int ftr)
{
	u32 asm_info, ext_ftr_ptr, ftr_header;

	if (local)
		rio_local_read_config_32(port, RIO_ASM_INFO_CAR, &asm_info);
	else
		rio_mport_read_config_32(port, destid, hopcount,
					 RIO_ASM_INFO_CAR, &asm_info);

	ext_ftr_ptr = asm_info & RIO_EXT_FTR_PTR_MASK;

	while (ext_ftr_ptr) {
		if (local)
			rio_local_read_config_32(port, ext_ftr_ptr,
						 &ftr_header);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 ext_ftr_ptr, &ftr_header);
		if (RIO_GET_BLOCK_ID(ftr_header) == ftr)
			return ext_ftr_ptr;
		if (!(ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header)))
			break;
	}

	return 0;
}
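/*
 * A minimal sketch: probe the local master port for the LP/Serial endpoint
 * extended-features block, a lookup similar to what enumeration performs
 * when filling in a device's phys_efptr.  example_local_phys_efb is an
 * illustrative name only.
 */
static u32 __maybe_unused example_local_phys_efb(struct rio_mport *port)
{
	/* Local access: destid and hopcount are ignored when @local != 0 */
	return rio_mport_get_feature(port, 1, 0, 0, RIO_EFB_SER_EP_ID);
}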
/**
 * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
 * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
 * @did: RIO did to match or %RIO_ANY_ID to match all dids
 * @asm_vid: RIO asm_vid to match or %RIO_ANY_ID to match all asm_vids
 * @asm_did: RIO asm_did to match or %RIO_ANY_ID to match all asm_dids
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @vid, @did, @asm_vid, @asm_did, the reference
 * count to the device is incremented and a pointer to its device
 * structure is returned. Otherwise, %NULL is returned. A new search
 * is initiated by passing %NULL to the @from argument. Otherwise, if
 * @from is not %NULL, searches continue from next device on the global
 * list. The reference count for @from is always decremented if it is
 * not %NULL.
 */
struct rio_dev *rio_get_asm(u16 vid, u16 did,
			    u16 asm_vid, u16 asm_did, struct rio_dev *from)
{
	struct list_head *n;
	struct rio_dev *rdev;

	WARN_ON(in_interrupt());
	spin_lock(&rio_global_list_lock);
	n = from ? from->global_list.next : rio_devices.next;

	while (n && (n != &rio_devices)) {
		rdev = rio_dev_g(n);
		if ((vid == RIO_ANY_ID || rdev->vid == vid) &&
		    (did == RIO_ANY_ID || rdev->did == did) &&
		    (asm_vid == RIO_ANY_ID || rdev->asm_vid == asm_vid) &&
		    (asm_did == RIO_ANY_ID || rdev->asm_did == asm_did))
			goto exit;
		n = n->next;
	}
	rdev = NULL;
exit:
	rio_dev_put(from);
	rdev = rio_dev_get(rdev);
	spin_unlock(&rio_global_list_lock);
	return rdev;
}
/**
 * rio_get_device - Begin or continue searching for a RIO device by vid/did
 * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
 * @did: RIO did to match or %RIO_ANY_ID to match all dids
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @vid and @did, the reference count to the
 * device is incremented and a pointer to its device structure is returned.
 * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
 * to the @from argument. Otherwise, if @from is not %NULL, searches
 * continue from next device on the global list. The reference count for
 * @from is always decremented if it is not %NULL.
 */
struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
{
	return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
}
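/*
 * A minimal sketch of walking every known RIO device with rio_get_device();
 * because the helper drops the reference on the device passed in as @from,
 * the loop itself needs no explicit put.  example_list_devices is an
 * illustrative name only.
 */
static void __maybe_unused example_list_devices(void)
{
	struct rio_dev *rdev = NULL;

	while ((rdev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, rdev)) != NULL)
		pr_info("RIO: found %s (vid 0x%04x did 0x%04x)\n",
			rio_name(rdev), rdev->vid, rdev->did);
}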
/**
 * rio_std_route_add_entry - Add switch route table entry using standard
 *   registers defined in RIO specification rev.1.3
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 * @route_destid: destID entry in the RT
 * @route_port: destination port for specified destID
 */
int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
			    u16 table, u16 route_destid, u8 route_port)
{
	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_DESTID_SEL_CSR,
				(u32)route_destid);
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_PORT_SEL_CSR,
				(u32)route_port);
	}

	return 0;
}
/**
 * rio_std_route_get_entry - Read switch route table entry (port number)
 *   associated with specified destID using standard registers defined in RIO
 *   specification rev.1.3
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 * @route_destid: destID entry in the RT
 * @route_port: returned destination port for specified destID
 */
int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
			    u16 table, u16 route_destid, u8 *route_port)
{
	u32 result;

	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
		rio_mport_read_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);

		*route_port = (u8)result;
	}

	return 0;
}
/**
 * rio_std_route_clr_table - Clear switch route table using standard registers
 *   defined in RIO specification rev.1.3.
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 */
int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
			    u16 table)
{
	u32 max_destid = 0xff;
	u32 i, pef, id_inc = 1, ext_cfg = 0;
	u32 port_sel = RIO_INVALID_ROUTE;

	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_PEF_CAR, &pef);

		if (mport->sys_size) {
			rio_mport_read_config_32(mport, destid, hopcount,
						 RIO_SWITCH_RT_LIMIT,
						 &max_destid);
			max_destid &= RIO_RT_MAX_DESTID;
		}

		if (pef & RIO_PEF_EXT_RT) {
			ext_cfg = 0x80000000;
			id_inc = 4;
			port_sel = (RIO_INVALID_ROUTE << 24) |
				   (RIO_INVALID_ROUTE << 16) |
				   (RIO_INVALID_ROUTE << 8) |
				   RIO_INVALID_ROUTE;
		}

		for (i = 0; i <= max_destid;) {
			rio_mport_write_config_32(mport, destid, hopcount,
					RIO_STD_RTE_CONF_DESTID_SEL_CSR,
					ext_cfg | i);
			rio_mport_write_config_32(mport, destid, hopcount,
					RIO_STD_RTE_CONF_PORT_SEL_CSR,
					port_sel);
			i += id_inc;
		}
	}

	return 0;
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE

static bool rio_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rio_dev *rdev = arg;

	/* Check that DMA device belongs to the right MPORT */
	return (rdev->net->hport ==
		container_of(chan->device, struct rio_mport, dma));
}
/**
 * rio_request_dma - request RapidIO capable DMA channel that supports
 *   specified target RapidIO device.
 * @rdev: RIO device control structure
 *
 * Returns pointer to allocated DMA channel or NULL if failed.
 */
struct dma_chan *rio_request_dma(struct rio_dev *rdev)
{
	dma_cap_mask_t mask;
	struct dma_chan *dchan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dchan = dma_request_channel(mask, rio_chan_filter, rdev);

	return dchan;
}
EXPORT_SYMBOL_GPL(rio_request_dma);
/**
 * rio_release_dma - release specified DMA channel
 * @dchan: DMA channel to release
 */
void rio_release_dma(struct dma_chan *dchan)
{
	dma_release_channel(dchan);
}
EXPORT_SYMBOL_GPL(rio_release_dma);
/**
 * rio_dma_prep_slave_sg - RapidIO specific wrapper
 *   for device_prep_slave_sg callback defined by DMAENGINE.
 * @rdev: RIO device control structure
 * @dchan: DMA channel to configure
 * @data: RIO specific data descriptor
 * @direction: DMA data transfer direction (TO or FROM the device)
 * @flags: dmaengine defined flags
 *
 * Initializes RapidIO capable DMA channel for the specified data transfer.
 * Uses DMA channel private extension to pass information related to remote
 * target RIO device.
 * Returns pointer to DMA transaction descriptor or NULL if failed.
 */
struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
	struct dma_chan *dchan, struct rio_dma_data *data,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct dma_async_tx_descriptor *txd = NULL;
	struct rio_dma_ext rio_ext;

	if (dchan->device->device_prep_slave_sg == NULL) {
		pr_err("%s: prep_rio_sg == NULL\n", __func__);
		return NULL;
	}

	rio_ext.destid = rdev->destid;
	rio_ext.rio_addr_u = data->rio_addr_u;
	rio_ext.rio_addr = data->rio_addr;
	rio_ext.wr_type = data->wr_type;

	txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
				    direction, flags, &rio_ext);

	return txd;
}
EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
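/*
 * A minimal sketch of a DMA write to a remote RIO target, assuming the
 * caller has already filled a struct rio_dma_data with a mapped scatterlist
 * and the RIO destination address.  Completion handling is reduced to the
 * bare minimum; example_dma_write is an illustrative name only.
 */
static int __maybe_unused example_dma_write(struct rio_dev *rdev,
					    struct rio_dma_data *data)
{
	struct dma_chan *dchan;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	dchan = rio_request_dma(rdev);
	if (!dchan)
		return -ENODEV;

	txd = rio_dma_prep_slave_sg(rdev, dchan, data, DMA_MEM_TO_DEV, 0);
	if (!txd) {
		rio_release_dma(dchan);
		return -EIO;
	}

	cookie = dmaengine_submit(txd);		/* queue the transfer */
	if (dma_submit_error(cookie)) {
		rio_release_dma(dchan);
		return -EIO;
	}

	dma_async_issue_pending(dchan);		/* and kick the channel */
	return 0;
}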
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
static void rio_fixup_device(struct rio_dev *dev)
{
}

static int rio_init(void)
{
	struct rio_dev *dev = NULL;

	while ((dev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, dev)) != NULL) {
		rio_fixup_device(dev);
	}

	return 0;
}
static struct workqueue_struct *rio_wq;

struct rio_disc_work {
	struct work_struct	work;
	struct rio_mport	*mport;
};
static void disc_work_handler(struct work_struct *_work)
{
	struct rio_disc_work *work;

	work = container_of(_work, struct rio_disc_work, work);
	pr_debug("RIO: discovery work for mport %d %s\n",
		 work->mport->id, work->mport->name);
	rio_disc_mport(work->mport);
}
int rio_init_mports(void)
{
	struct rio_mport *port;
	struct rio_disc_work *work;
	int n = 0;

	/*
	 * First, run enumerations and check if we need to perform discovery
	 * on any of the registered mports.
	 */
	list_for_each_entry(port, &rio_mports, node) {
		if (port->host_deviceid >= 0)
			rio_enum_mport(port);
		else
			n++;
	}

	if (!n)
		goto no_disc;

	/*
	 * If we have mports that require discovery schedule a discovery work
	 * for each of them. If the code below fails to allocate needed
	 * resources, exit without error to keep results of enumeration
	 * process (if any).
	 * TODO: Implement restart of discovery process for all or
	 * individual discovering mports.
	 */
	rio_wq = alloc_workqueue("riodisc", 0, 0);
	if (!rio_wq) {
		pr_err("RIO: unable to allocate rio_wq\n");
		goto no_disc;
	}

	work = kcalloc(n, sizeof *work, GFP_KERNEL);
	if (!work) {
		pr_err("RIO: no memory for work struct\n");
		destroy_workqueue(rio_wq);
		goto no_disc;
	}

	n = 0;
	list_for_each_entry(port, &rio_mports, node) {
		if (port->host_deviceid < 0) {
			work[n].mport = port;
			INIT_WORK(&work[n].work, disc_work_handler);
			queue_work(rio_wq, &work[n].work);
			n++;
		}
	}

	flush_workqueue(rio_wq);
	pr_debug("RIO: destroy discovery workqueue\n");
	destroy_workqueue(rio_wq);
	kfree(work);

no_disc:
	rio_init();

	return 0;
}
device_initcall_sync(rio_init_mports);
static int hdids[RIO_MAX_MPORTS + 1];

static int rio_get_hdid(int index)
{
	if (!hdids[0] || hdids[0] <= index || index >= RIO_MAX_MPORTS)
		return -1;

	return hdids[index + 1];
}
static int rio_hdid_setup(char *str)
{
	(void)get_options(str, ARRAY_SIZE(hdids), hdids);
	return 1;
}

__setup("riohdid=", rio_hdid_setup);
int rio_register_mport(struct rio_mport *port)
{
	if (next_portid >= RIO_MAX_MPORTS) {
		pr_err("RIO: reached specified max number of mports\n");
		return 1;
	}

	port->id = next_portid++;
	port->host_deviceid = rio_get_hdid(port->id);
	list_add_tail(&port->node, &rio_mports);
	return 0;
}
EXPORT_SYMBOL_GPL(rio_local_get_device_id);
EXPORT_SYMBOL_GPL(rio_get_device);
EXPORT_SYMBOL_GPL(rio_get_asm);
EXPORT_SYMBOL_GPL(rio_request_inb_dbell);
EXPORT_SYMBOL_GPL(rio_release_inb_dbell);
EXPORT_SYMBOL_GPL(rio_request_outb_dbell);
EXPORT_SYMBOL_GPL(rio_release_outb_dbell);
EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
EXPORT_SYMBOL_GPL(rio_release_outb_mbox);