// SPDX-License-Identifier: GPL-2.0
/*
 * Generic Counter character device interface
 * Copyright (C) 2020 William Breathitt Gray
 */
#include <linux/cdev.h>
#include <linux/counter.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nospec.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include "counter-chrdev.h"
struct counter_comp_node {
	struct list_head l;
	struct counter_component component;
	struct counter_comp comp;
	void *parent;
};
#define counter_comp_read_is_equal(a, b) \
	(a.action_read == b.action_read || \
	 a.device_u8_read == b.device_u8_read || \
	 a.count_u8_read == b.count_u8_read || \
	 a.signal_u8_read == b.signal_u8_read || \
	 a.device_u32_read == b.device_u32_read || \
	 a.count_u32_read == b.count_u32_read || \
	 a.signal_u32_read == b.signal_u32_read || \
	 a.device_u64_read == b.device_u64_read || \
	 a.count_u64_read == b.count_u64_read || \
	 a.signal_u64_read == b.signal_u64_read || \
	 a.signal_array_u32_read == b.signal_array_u32_read || \
	 a.device_array_u64_read == b.device_array_u64_read || \
	 a.count_array_u64_read == b.count_array_u64_read || \
	 a.signal_array_u64_read == b.signal_array_u64_read)
#define counter_comp_read_is_set(comp) \
	(comp.action_read || \
	 comp.device_u8_read || \
	 comp.count_u8_read || \
	 comp.signal_u8_read || \
	 comp.device_u32_read || \
	 comp.count_u32_read || \
	 comp.signal_u32_read || \
	 comp.device_u64_read || \
	 comp.count_u64_read || \
	 comp.signal_u64_read || \
	 comp.signal_array_u32_read || \
	 comp.device_array_u64_read || \
	 comp.count_array_u64_read || \
	 comp.signal_array_u64_read)
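
/*
 * The read callbacks of struct counter_comp live in a single union, so
 * the member-by-member checks above all inspect the same pointer
 * storage; listing every member keeps these macros valid should the
 * union ever be split into distinct fields.
 */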
static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
				   size_t len, loff_t *f_ps)
{
	struct counter_device *const counter = filp->private_data;
	int err;
	unsigned int copied;

	if (!counter->ops)
		return -ENODEV;

	if (len < sizeof(struct counter_event))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&counter->events)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			err = wait_event_interruptible(counter->events_wait,
					!kfifo_is_empty(&counter->events) ||
					!counter->ops);
			if (err < 0)
				return err;
			if (!counter->ops)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&counter->events_out_lock))
			return -ERESTARTSYS;
		err = kfifo_to_user(&counter->events, buf, len, &copied);
		mutex_unlock(&counter->events_out_lock);
		if (err < 0)
			return err;
	} while (!copied);

	return copied;
}
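
/*
 * Sketch of the userspace side of this read() interface (illustrative
 * only; the device node name is an assumption):
 *
 *	struct counter_event event;
 *	int fd = open("/dev/counter0", O_RDWR);
 *
 *	while (read(fd, &event, sizeof(event)) == sizeof(event))
 *		printf("event %u, channel %u: value %llu (status %u)\n",
 *		       event.watch.event, event.watch.channel,
 *		       (unsigned long long)event.value, event.status);
 */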
static __poll_t counter_chrdev_poll(struct file *filp,
				    struct poll_table_struct *pollt)
{
	struct counter_device *const counter = filp->private_data;
	__poll_t events = 0;

	if (!counter->ops)
		return events;

	poll_wait(filp, &counter->events_wait, pollt);

	if (!kfifo_is_empty(&counter->events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}
static void counter_events_list_free(struct list_head *const events_list)
{
	struct counter_event_node *p, *n;
	struct counter_comp_node *q, *o;

	list_for_each_entry_safe(p, n, events_list, l) {
		/* Free associated component nodes */
		list_for_each_entry_safe(q, o, &p->comp_list, l) {
			list_del(&q->l);
			kfree(q);
		}

		/* Free event node */
		list_del(&p->l);
		kfree(p);
	}
}
static int counter_set_event_node(struct counter_device *const counter,
				  struct counter_watch *const watch,
				  const struct counter_comp_node *const cfg)
{
	struct counter_event_node *event_node;
	int err = 0;
	struct counter_comp_node *comp_node;

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->next_events_list, l)
		if (event_node->event == watch->event &&
		    event_node->channel == watch->channel)
			break;

	/* If event is not already in the list */
	if (&event_node->l == &counter->next_events_list) {
		/* Allocate new event node */
		event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
		if (!event_node)
			return -ENOMEM;

		/* Configure event node and add to the list */
		event_node->event = watch->event;
		event_node->channel = watch->channel;
		INIT_LIST_HEAD(&event_node->comp_list);
		list_add(&event_node->l, &counter->next_events_list);
	}

	/* Check if component watch has already been set before */
	list_for_each_entry(comp_node, &event_node->comp_list, l)
		if (comp_node->parent == cfg->parent &&
		    counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
			err = -EINVAL;
			goto exit_free_event_node;
		}

	/* Allocate component node */
	comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
	if (!comp_node) {
		err = -ENOMEM;
		goto exit_free_event_node;
	}
	*comp_node = *cfg;

	/* Add component node to event node */
	list_add_tail(&comp_node->l, &event_node->comp_list);

exit_free_event_node:
	/* Free event node if no one else is watching */
	if (list_empty(&event_node->comp_list)) {
		list_del(&event_node->l);
		kfree(event_node);
	}

	return err;
}
static int counter_enable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	mutex_lock(&counter->n_events_list_lock);
	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);
	list_replace_init(&counter->next_events_list,
			  &counter->events_list);

	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
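
/*
 * Watches are staged on next_events_list under n_events_list_lock and
 * only become live when counter_enable_events() moves them onto
 * events_list under events_list_lock, so counter_push_event() never
 * observes a half-configured watch set.
 */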
static int counter_disable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);

	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	mutex_lock(&counter->n_events_list_lock);

	counter_events_list_free(&counter->next_events_list);

	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
static int counter_get_ext(const struct counter_comp *const ext,
			   const size_t num_ext, const size_t component_id,
			   size_t *const ext_idx, size_t *const id)
{
	struct counter_array *element;

	*id = 0;
	for (*ext_idx = 0; *ext_idx < num_ext; (*ext_idx)++) {
		if (*id == component_id)
			return 0;

		if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
			element = ext[*ext_idx].priv;

			if (component_id - *id < element->length)
				return 0;

			*id += element->length;
		} else
			(*id)++;
	}

	return -EINVAL;
}
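
/*
 * Example of the id flattening handled above: for ext[] = { scalar,
 * array of length 4, scalar }, the component ids map to 0, 1..4, and 5
 * respectively. Looking up component_id 3 yields *ext_idx = 1 and
 * *id = 1, so the caller derives the array element index as
 * component_id - *id = 2.
 */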
static int counter_add_watch(struct counter_device *const counter,
			     const unsigned long arg)
{
	void __user *const uwatch = (void __user *)arg;
	struct counter_watch watch;
	struct counter_comp_node comp_node = {};
	size_t parent, id;
	struct counter_comp *ext;
	size_t num_ext;
	size_t ext_idx, ext_id;
	int err = 0;

	if (copy_from_user(&watch, uwatch, sizeof(watch)))
		return -EFAULT;

	if (watch.component.type == COUNTER_COMPONENT_NONE)
		goto no_component;

	parent = watch.component.parent;

	/* Configure parent component info for comp node */
	switch (watch.component.scope) {
	case COUNTER_SCOPE_DEVICE:
		ext = counter->ext;
		num_ext = counter->num_ext;
		break;
	case COUNTER_SCOPE_SIGNAL:
		if (parent >= counter->num_signals)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_signals);

		comp_node.parent = counter->signals + parent;

		ext = counter->signals[parent].ext;
		num_ext = counter->signals[parent].num_ext;
		break;
	case COUNTER_SCOPE_COUNT:
		if (parent >= counter->num_counts)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_counts);

		comp_node.parent = counter->counts + parent;

		ext = counter->counts[parent].ext;
		num_ext = counter->counts[parent].num_ext;
		break;
	default:
		return -EINVAL;
	}

	id = watch.component.id;

	/* Configure component info for comp node */
	switch (watch.component.type) {
	case COUNTER_COMPONENT_SIGNAL:
		if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
		comp_node.comp.signal_u32_read = counter->ops->signal_read;
		break;
	case COUNTER_COMPONENT_COUNT:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_U64;
		comp_node.comp.count_u64_read = counter->ops->count_read;
		break;
	case COUNTER_COMPONENT_FUNCTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_FUNCTION;
		comp_node.comp.count_u32_read = counter->ops->function_read;
		break;
	case COUNTER_COMPONENT_SYNAPSE_ACTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;
		if (id >= counter->counts[parent].num_synapses)
			return -EINVAL;
		id = array_index_nospec(id, counter->counts[parent].num_synapses);

		comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
		comp_node.comp.action_read = counter->ops->action_read;
		comp_node.comp.priv = counter->counts[parent].synapses + id;
		break;
	case COUNTER_COMPONENT_EXTENSION:
		err = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
		if (err < 0)
			return err;

		comp_node.comp = ext[ext_idx];
		break;
	default:
		return -EINVAL;
	}
	if (!counter_comp_read_is_set(comp_node.comp))
		return -EOPNOTSUPP;

no_component:
	mutex_lock(&counter->n_events_list_lock);

	if (counter->ops->watch_validate) {
		err = counter->ops->watch_validate(counter, &watch);
		if (err < 0)
			goto err_exit;
	}

	comp_node.component = watch.component;

	err = counter_set_event_node(counter, &watch, &comp_node);

err_exit:
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct counter_device *const counter = filp->private_data;
	int ret = -ENODEV;

	mutex_lock(&counter->ops_exist_lock);

	if (!counter->ops)
		goto out_unlock;

	switch (cmd) {
	case COUNTER_ADD_WATCH_IOCTL:
		ret = counter_add_watch(counter, arg);
		break;
	case COUNTER_ENABLE_EVENTS_IOCTL:
		ret = counter_enable_events(counter);
		break;
	case COUNTER_DISABLE_EVENTS_IOCTL:
		ret = counter_disable_events(counter);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

out_unlock:
	mutex_unlock(&counter->ops_exist_lock);

	return ret;
}
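
/*
 * Sketch of a userspace caller of these ioctls, watching a count for
 * overflow events (illustrative only; error handling omitted and the
 * device node name is an assumption):
 *
 *	struct counter_watch watch = {
 *		.component.type = COUNTER_COMPONENT_COUNT,
 *		.component.scope = COUNTER_SCOPE_COUNT,
 *		.component.parent = 0,
 *		.event = COUNTER_EVENT_OVERFLOW,
 *		.channel = 0,
 *	};
 *	int fd = open("/dev/counter0", O_RDWR);
 *
 *	ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch);
 *	ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL);
 */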
static int counter_chrdev_open(struct inode *inode, struct file *filp)
{
	struct counter_device *const counter = container_of(inode->i_cdev,
							    typeof(*counter),
							    chrdev);

	get_device(&counter->dev);
	filp->private_data = counter;

	return nonseekable_open(inode, filp);
}
static int counter_chrdev_release(struct inode *inode, struct file *filp)
{
	struct counter_device *const counter = filp->private_data;
	int ret = 0;

	mutex_lock(&counter->ops_exist_lock);

	if (!counter->ops) {
		/* Free any lingering held memory */
		counter_events_list_free(&counter->events_list);
		counter_events_list_free(&counter->next_events_list);
		ret = -ENODEV;
		goto exit_early;
	}

	ret = counter_disable_events(counter);
	if (ret < 0) {
		mutex_unlock(&counter->ops_exist_lock);
		return ret;
	}

exit_early:
	mutex_unlock(&counter->ops_exist_lock);

	put_device(&counter->dev);

	return ret;
}
static const struct file_operations counter_fops = {
	.owner = THIS_MODULE,
	.read = counter_chrdev_read,
	.poll = counter_chrdev_poll,
	.unlocked_ioctl = counter_chrdev_ioctl,
	.open = counter_chrdev_open,
	.release = counter_chrdev_release,
};
int counter_chrdev_add(struct counter_device *const counter)
{
	/* Initialize Counter events lists */
	INIT_LIST_HEAD(&counter->events_list);
	INIT_LIST_HEAD(&counter->next_events_list);
	spin_lock_init(&counter->events_list_lock);
	mutex_init(&counter->n_events_list_lock);
	init_waitqueue_head(&counter->events_wait);
	spin_lock_init(&counter->events_in_lock);
	mutex_init(&counter->events_out_lock);

	/* Initialize character device */
	cdev_init(&counter->chrdev, &counter_fops);

	/* Allocate Counter events queue */
	return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
}
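
/*
 * Note the ordering above: the locks, lists, and wait queue are all
 * initialized before the character device is set up, so the file
 * operations may rely on them unconditionally.
 */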
void counter_chrdev_remove(struct counter_device *const counter)
{
	kfifo_free(&counter->events);
}
static int counter_get_array_data(struct counter_device *const counter,
				  const enum counter_scope scope,
				  void *const parent,
				  const struct counter_comp *const comp,
				  const size_t idx, u64 *const value)
{
	const struct counter_array *const element = comp->priv;
	u32 value_u32 = 0;
	int ret;

	switch (element->type) {
	case COUNTER_COMP_SIGNAL_POLARITY:
		if (scope != COUNTER_SCOPE_SIGNAL)
			return -EINVAL;
		ret = comp->signal_array_u32_read(counter, parent, idx,
						  &value_u32);
		*value = value_u32;
		return ret;
	case COUNTER_COMP_U64:
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			return comp->device_array_u64_read(counter, idx,
							   value);
		case COUNTER_SCOPE_SIGNAL:
			return comp->signal_array_u64_read(counter, parent,
							   idx, value);
		case COUNTER_SCOPE_COUNT:
			return comp->count_array_u64_read(counter, parent,
							  idx, value);
		default:
			return -EINVAL;
		}
	default:
		return -EINVAL;
	}
}
static int counter_get_data(struct counter_device *const counter,
			    const struct counter_comp_node *const comp_node,
			    u64 *const value)
{
	const struct counter_comp *const comp = &comp_node->comp;
	const enum counter_scope scope = comp_node->component.scope;
	const size_t id = comp_node->component.id;
	struct counter_signal *const signal = comp_node->parent;
	struct counter_count *const count = comp_node->parent;
	u8 value_u8 = 0;
	u32 value_u32 = 0;
	const struct counter_comp *ext;
	size_t num_ext;
	size_t ext_idx, ext_id;
	int ret;

	if (comp_node->component.type == COUNTER_COMPONENT_NONE)
		return 0;

	switch (comp->type) {
	case COUNTER_COMP_U8:
	case COUNTER_COMP_BOOL:
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u8_read(counter, &value_u8);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u8_read(counter, signal, &value_u8);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u8_read(counter, count, &value_u8);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u8;
		return ret;
	case COUNTER_COMP_SIGNAL_LEVEL:
	case COUNTER_COMP_FUNCTION:
	case COUNTER_COMP_ENUM:
	case COUNTER_COMP_COUNT_DIRECTION:
	case COUNTER_COMP_COUNT_MODE:
	case COUNTER_COMP_SIGNAL_POLARITY:
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u32_read(counter, &value_u32);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u32_read(counter, signal,
						    &value_u32);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u32_read(counter, count, &value_u32);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u32;
		return ret;
	case COUNTER_COMP_U64:
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			return comp->device_u64_read(counter, value);
		case COUNTER_SCOPE_SIGNAL:
			return comp->signal_u64_read(counter, signal, value);
		case COUNTER_SCOPE_COUNT:
			return comp->count_u64_read(counter, count, value);
		default:
			return -EINVAL;
		}
	case COUNTER_COMP_SYNAPSE_ACTION:
		ret = comp->action_read(counter, count, comp->priv, &value_u32);
		*value = value_u32;
		return ret;
	case COUNTER_COMP_ARRAY:
		switch (scope) {
		case COUNTER_SCOPE_DEVICE:
			ext = counter->ext;
			num_ext = counter->num_ext;
			break;
		case COUNTER_SCOPE_SIGNAL:
			ext = signal->ext;
			num_ext = signal->num_ext;
			break;
		case COUNTER_SCOPE_COUNT:
			ext = count->ext;
			num_ext = count->num_ext;
			break;
		default:
			return -EINVAL;
		}
		ret = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
		if (ret < 0)
			return ret;

		return counter_get_array_data(counter, scope,
					      comp_node->parent, comp,
					      id - ext_id, value);
	default:
		return -EINVAL;
	}
}
/**
 * counter_push_event - queue event for userspace reading
 * @counter: pointer to Counter structure
 * @event: triggered event
 * @channel: event channel
 *
 * Note: If no one is watching for the respective event, it is silently
 * discarded.
 */
void counter_push_event(struct counter_device *const counter, const u8 event,
			const u8 channel)
{
	struct counter_event ev;
	unsigned int copied = 0;
	unsigned long flags;
	struct counter_event_node *event_node;
	struct counter_comp_node *comp_node;

	ev.timestamp = ktime_get_ns();
	ev.watch.event = event;
	ev.watch.channel = channel;

	/* Could be in an interrupt context, so use a spin lock */
	spin_lock_irqsave(&counter->events_list_lock, flags);

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->events_list, l)
		if (event_node->event == event &&
		    event_node->channel == channel)
			break;

	/* If event is not in the list */
	if (&event_node->l == &counter->events_list)
		goto exit_early;

	/* Read and queue relevant comp for userspace */
	list_for_each_entry(comp_node, &event_node->comp_list, l) {
		ev.watch.component = comp_node->component;
		ev.status = -counter_get_data(counter, comp_node, &ev.value);

		copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
							1, &counter->events_in_lock);
	}

exit_early:
	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	if (copied)
		wake_up_poll(&counter->events_wait, EPOLLIN);
}
EXPORT_SYMBOL_NS_GPL(counter_push_event, COUNTER);
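
/*
 * A minimal sketch of the driver side, assuming a hypothetical driver
 * whose interrupt handler detects a count overflow on channel 0:
 *
 *	static irqreturn_t foo_isr(int irq, void *private)
 *	{
 *		struct counter_device *const counter = private;
 *
 *		counter_push_event(counter, COUNTER_EVENT_OVERFLOW, 0);
 *
 *		return IRQ_HANDLED;
 *	}
 */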