// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx.channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state, it
	 * can return to the established state once it has cleared its
	 * counters.
	 */
	TEGRA_IVC_STATE_ACK
};

/*
 * This structure is divided into two cache-aligned parts, the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;

		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};

#define tegra_ivc_header_read_field(hdr, field) \
	iosys_map_rd_field(hdr, 0, struct tegra_ivc_header, field)

#define tegra_ivc_header_write_field(hdr, field, value) \
	iosys_map_wr_field(hdr, 0, struct tegra_ivc_header, field, value)

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, struct iosys_map *map)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = tegra_ivc_header_read_field(map, tx.count);
	u32 rx = tegra_ivc_header_read_field(map, rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}
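
/*
 * Worked example: the counters are free-running u32 values, so the unsigned
 * difference stays correct across wraparound. With tx == 0x00000002 and
 * rx == 0xfffffffe, tx - rx == 4, i.e. four frames are pending even though
 * tx is numerically smaller than rx.
 */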

static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct iosys_map *map)
{
	u32 tx = tegra_ivc_header_read_field(map, tx.count);
	u32 rx = tegra_ivc_header_read_field(map, rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct iosys_map *map)
{
	u32 tx = tegra_ivc_header_read_field(map, tx.count);
	u32 rx = tegra_ivc_header_read_field(map, rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	unsigned int count = tegra_ivc_header_read_field(&ivc->tx.map, tx.count);

	tegra_ivc_header_write_field(&ivc->tx.map, tx.count, count + 1);

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	unsigned int count = tegra_ivc_header_read_field(&ivc->rx.map, rx.count);

	tegra_ivc_header_write_field(&ivc->rx.map, rx.count, count + 1);

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int state;

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
	if (state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, &ivc->rx.map))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, &ivc->rx.map))
		return -ENOSPC;

	return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int state;

	state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
	if (state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, &ivc->tx.map))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, &ivc->tx.map))
		return -ENOSPC;

	return 0;
}

static int tegra_ivc_frame_virt(struct tegra_ivc *ivc, const struct iosys_map *header,
				unsigned int frame, struct iosys_map *map)
{
	size_t offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	if (WARN_ON(frame >= ivc->num_frames))
		return -EINVAL;

	*map = IOSYS_MAP_INIT_OFFSET(header, offset);

	return 0;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return -EINVAL;

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, &ivc->rx.map, ivc->rx.position, map);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, &ivc->rx.map) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
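
/*
 * Illustrative sketch (hypothetical client code, not used by this driver):
 * a consumer could drain its receive ring as below. The name
 * example_ivc_drain() and the fixed copy length are assumptions made for the
 * sketch; len must not exceed the channel's frame_size.
 *
 *	static void example_ivc_drain(struct tegra_ivc *ivc, void *buf, size_t len)
 *	{
 *		struct iosys_map frame;
 *
 *		// Peek at the oldest unread frame; an error (such as -ENOSPC
 *		// when the ring is empty) ends the loop.
 *		while (tegra_ivc_read_get_next_frame(ivc, &frame) == 0) {
 *			// Copy the payload out before releasing the frame.
 *			iosys_map_memcpy_from(buf, &frame, 0, len);
 *
 *			// Hand the frame back to the peer; this may notify it.
 *			tegra_ivc_read_advance(ivc);
 *		}
 *	}
 */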

/* directly poke at the next frame to be tx'ed */
int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	return tegra_ivc_frame_virt(ivc, &ivc->tx.map, ivc->tx.position, map);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, &ivc->tx.map) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
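
/*
 * Illustrative sketch (hypothetical client code, not used by this driver):
 * a producer could queue one message as below. The name example_ivc_send()
 * is an assumption; len must not exceed the channel's frame_size.
 *
 *	static int example_ivc_send(struct tegra_ivc *ivc, const void *buf, size_t len)
 *	{
 *		struct iosys_map frame;
 *		int err;
 *
 *		// Reserve the next free TX frame; this fails with -ENOSPC when
 *		// the ring is full or -ECONNRESET before the channel is
 *		// established.
 *		err = tegra_ivc_write_get_next_frame(ivc, &frame);
 *		if (err < 0)
 *			return err;
 *
 *		// Fill the frame, then publish it; the peer is notified on the
 *		// empty to non-empty transition.
 *		iosys_map_memcpy_to(&frame, 0, buf, len);
 *
 *		return tegra_ivc_write_advance(ivc);
 *	}
 */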

void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_SYNC);
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state rx_state, tx_state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	rx_state = tegra_ivc_header_read_field(&ivc->rx.map, tx.state);
	tx_state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);

	if (rx_state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
		tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ACK);
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (tx_state == TEGRA_IVC_STATE_SYNC &&
		   rx_state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
		tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (tx_state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (tx_state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
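
/*
 * Illustrative sketch (hypothetical client code, not used by this driver):
 * tegra_ivc_notified() is typically driven from whatever doorbell or mailbox
 * interrupt the client uses; frames may only be exchanged once it returns 0.
 * The handler name below is an assumption made for the sketch.
 *
 *	static void example_ivc_doorbell(struct tegra_ivc *ivc)
 *	{
 *		// Run the connection state machine; -EAGAIN means the reset
 *		// handshake is still in progress.
 *		if (tegra_ivc_notified(ivc))
 *			return;
 *
 *		// The channel is established: read/write frames here.
 *	}
 */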

size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
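
/*
 * Worked example: a channel with 16 frames of 128 bytes has a 16 * 128 =
 * 2048-byte frame area per direction, and each queue is preceded by one
 * struct tegra_ivc_header (two 64-byte halves), so
 * tegra_ivc_total_queue_size(2048) returns 2048 + 128 = 2176 bytes. The
 * numbers are arbitrary and only chosen to satisfy the TEGRA_IVC_ALIGN
 * constraints checked in tegra_ivc_check_params().
 */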

static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}

static inline void iosys_map_copy(struct iosys_map *dst, const struct iosys_map *src)
{
	*dst = *src;
}

static inline unsigned long iosys_map_get_address(const struct iosys_map *map)
{
	if (map->is_iomem)
		return (unsigned long)map->vaddr_iomem;

	return (unsigned long)map->vaddr;
}

static inline void *iosys_map_get_vaddr(const struct iosys_map *map)
{
	if (WARN_ON(map->is_iomem))
		return NULL;

	return map->vaddr;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, const struct iosys_map *rx,
		   dma_addr_t rx_phys, const struct iosys_map *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params(iosys_map_get_address(rx), iosys_map_get_address(tx),
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, iosys_map_get_vaddr(rx), queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, iosys_map_get_vaddr(tx), queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	iosys_map_copy(&ivc->rx.map, rx);
	iosys_map_copy(&ivc->tx.map, tx);
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
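
/*
 * Illustrative sketch (hypothetical client code, not used by this driver):
 * a client with two cache-coherent, CPU-mapped regions could bring a channel
 * up as below. All identifiers are assumptions made for the sketch; each
 * region must be at least tegra_ivc_total_queue_size(16 * 128) bytes and
 * TEGRA_IVC_ALIGN-aligned.
 *
 *	static int example_ivc_setup(struct tegra_ivc *ivc,
 *				     void *rx_base, dma_addr_t rx_phys,
 *				     void *tx_base, dma_addr_t tx_phys,
 *				     void (*notify)(struct tegra_ivc *ivc, void *data))
 *	{
 *		struct iosys_map rx, tx;
 *		int err;
 *
 *		iosys_map_set_vaddr(&rx, rx_base);
 *		iosys_map_set_vaddr(&tx, tx_base);
 *
 *		// No DMA-capable peer device: the physical addresses are used
 *		// as passed in and no dma_map_single() is performed.
 *		err = tegra_ivc_init(ivc, NULL, &rx, rx_phys, &tx, tx_phys,
 *				     16, 128, notify, NULL);
 *		if (err < 0)
 *			return err;
 *
 *		// Start the reset handshake; it completes via tegra_ivc_notified().
 *		tegra_ivc_reset(ivc);
 *
 *		return 0;
 *	}
 */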

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);