/*
 * RT-Mutex-tester: scriptable tester for rt mutexes
 *
 * started by Thomas Gleixner:
 *
 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 */
9 #include <linux/kthread.h>
10 #include <linux/module.h>
11 #include <linux/sched.h>
12 #include <linux/spinlock.h>
13 #include <linux/sysdev.h>
14 #include <linux/timer.h>
15 #include <linux/freezer.h>
19 #define MAX_RT_TEST_THREADS 8
20 #define MAX_RT_TEST_MUTEXES 8
22 static spinlock_t rttest_lock
;
23 static atomic_t rttest_event
;
25 struct test_thread_data
{
28 int mutexes
[MAX_RT_TEST_MUTEXES
];
30 struct sys_device sysdev
;
33 static struct test_thread_data thread_data
[MAX_RT_TEST_THREADS
];
34 static struct task_struct
*threads
[MAX_RT_TEST_THREADS
];
35 static struct rt_mutex mutexes
[MAX_RT_TEST_MUTEXES
];
/* Commands accepted via the sysfs "command" attribute ("opcode:data"). */
enum test_opcodes {
	RTTEST_NOP = 0,
	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
	/* 9, 10 - reserved for BKL commemoration */
	RTTEST_SIGNAL = 11,	/* 11 Signal other test thread, data = thread id */
	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
};
53 static int handle_op(struct test_thread_data
*td
, int lockwakeup
)
55 int i
, id
, ret
= -EINVAL
;
63 td
->mutexes
[td
->opdata
] = 1;
64 td
->event
= atomic_add_return(1, &rttest_event
);
68 for (i
= 0; i
< MAX_RT_TEST_MUTEXES
; i
++) {
69 if (td
->mutexes
[i
] == 4) {
70 rt_mutex_unlock(&mutexes
[i
]);
76 case RTTEST_RESETEVENT
:
77 atomic_set(&rttest_event
, 0);
88 case RTTEST_LOCKNOWAIT
:
90 if (id
< 0 || id
>= MAX_RT_TEST_MUTEXES
)
94 td
->event
= atomic_add_return(1, &rttest_event
);
95 rt_mutex_lock(&mutexes
[id
]);
96 td
->event
= atomic_add_return(1, &rttest_event
);
101 case RTTEST_LOCKINTNOWAIT
:
103 if (id
< 0 || id
>= MAX_RT_TEST_MUTEXES
)
107 td
->event
= atomic_add_return(1, &rttest_event
);
108 ret
= rt_mutex_lock_interruptible(&mutexes
[id
], 0);
109 td
->event
= atomic_add_return(1, &rttest_event
);
110 td
->mutexes
[id
] = ret
? 0 : 4;
111 return ret
? -EINTR
: 0;
115 if (id
< 0 || id
>= MAX_RT_TEST_MUTEXES
|| td
->mutexes
[id
] != 4)
118 td
->event
= atomic_add_return(1, &rttest_event
);
119 rt_mutex_unlock(&mutexes
[id
]);
120 td
->event
= atomic_add_return(1, &rttest_event
);
131 * Schedule replacement for rtsem_down(). Only called for threads with
132 * PF_MUTEX_TESTER set.
134 * This allows us to have finegrained control over the event flow.
137 void schedule_rt_mutex_test(struct rt_mutex
*mutex
)
140 struct test_thread_data
*td
;
142 /* We have to lookup the task */
143 for (tid
= 0; tid
< MAX_RT_TEST_THREADS
; tid
++) {
144 if (threads
[tid
] == current
)
148 BUG_ON(tid
== MAX_RT_TEST_THREADS
);
150 td
= &thread_data
[tid
];
158 case RTTEST_LOCKNOWAIT
:
159 case RTTEST_LOCKINTNOWAIT
:
160 if (mutex
!= &mutexes
[dat
])
163 if (td
->mutexes
[dat
] != 1)
166 td
->mutexes
[dat
] = 2;
167 td
->event
= atomic_add_return(1, &rttest_event
);
180 if (mutex
!= &mutexes
[dat
])
183 if (td
->mutexes
[dat
] != 2)
186 td
->mutexes
[dat
] = 3;
187 td
->event
= atomic_add_return(1, &rttest_event
);
190 case RTTEST_LOCKNOWAIT
:
191 case RTTEST_LOCKINTNOWAIT
:
192 if (mutex
!= &mutexes
[dat
])
195 if (td
->mutexes
[dat
] != 2)
198 td
->mutexes
[dat
] = 1;
199 td
->event
= atomic_add_return(1, &rttest_event
);
209 set_current_state(TASK_INTERRUPTIBLE
);
211 if (td
->opcode
> 0) {
214 set_current_state(TASK_RUNNING
);
215 ret
= handle_op(td
, 1);
216 set_current_state(TASK_INTERRUPTIBLE
);
217 if (td
->opcode
== RTTEST_LOCKCONT
)
222 /* Wait for the next command to be executed */
226 /* Restore previous command and data */
231 static int test_func(void *data
)
233 struct test_thread_data
*td
= data
;
236 current
->flags
|= PF_MUTEX_TESTER
;
238 allow_signal(SIGHUP
);
242 set_current_state(TASK_INTERRUPTIBLE
);
244 if (td
->opcode
> 0) {
245 set_current_state(TASK_RUNNING
);
246 ret
= handle_op(td
, 0);
247 set_current_state(TASK_INTERRUPTIBLE
);
251 /* Wait for the next command to be executed */
255 if (signal_pending(current
))
256 flush_signals(current
);
258 if(kthread_should_stop())
265 * sysfs_test_command - interface for test commands
266 * @dev: thread reference
267 * @buf: command for actual step
268 * @count: length of buffer
274 static ssize_t
sysfs_test_command(struct sys_device
*dev
, struct sysdev_attribute
*attr
,
275 const char *buf
, size_t count
)
277 struct sched_param schedpar
;
278 struct test_thread_data
*td
;
280 int op
, dat
, tid
, ret
;
282 td
= container_of(dev
, struct test_thread_data
, sysdev
);
285 /* strings from sysfs write are not 0 terminated! */
286 if (count
>= sizeof(cmdbuf
))
290 if (buf
[count
-1] == '\n')
295 memcpy(cmdbuf
, buf
, count
);
298 if (sscanf(cmdbuf
, "%d:%d", &op
, &dat
) != 2)
303 schedpar
.sched_priority
= 0;
304 ret
= sched_setscheduler(threads
[tid
], SCHED_NORMAL
, &schedpar
);
307 set_user_nice(current
, 0);
311 schedpar
.sched_priority
= dat
;
312 ret
= sched_setscheduler(threads
[tid
], SCHED_FIFO
, &schedpar
);
318 send_sig(SIGHUP
, threads
[tid
], 0);
326 wake_up_process(threads
[tid
]);
333 * sysfs_test_status - sysfs interface for rt tester
334 * @dev: thread to query
335 * @buf: char buffer to be filled with thread status info
337 static ssize_t
sysfs_test_status(struct sys_device
*dev
, struct sysdev_attribute
*attr
,
340 struct test_thread_data
*td
;
341 struct task_struct
*tsk
;
345 td
= container_of(dev
, struct test_thread_data
, sysdev
);
346 tsk
= threads
[td
->sysdev
.id
];
348 spin_lock(&rttest_lock
);
350 curr
+= sprintf(curr
,
351 "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
352 td
->opcode
, td
->event
, tsk
->state
,
353 (MAX_RT_PRIO
- 1) - tsk
->prio
,
354 (MAX_RT_PRIO
- 1) - tsk
->normal_prio
,
357 for (i
= MAX_RT_TEST_MUTEXES
- 1; i
>=0 ; i
--)
358 curr
+= sprintf(curr
, "%d", td
->mutexes
[i
]);
360 spin_unlock(&rttest_lock
);
362 curr
+= sprintf(curr
, ", T: %p, R: %p\n", tsk
,
363 mutexes
[td
->sysdev
.id
].owner
);
368 static SYSDEV_ATTR(status
, 0600, sysfs_test_status
, NULL
);
369 static SYSDEV_ATTR(command
, 0600, NULL
, sysfs_test_command
);
371 static struct sysdev_class rttest_sysclass
= {
375 static int init_test_thread(int id
)
377 thread_data
[id
].sysdev
.cls
= &rttest_sysclass
;
378 thread_data
[id
].sysdev
.id
= id
;
380 threads
[id
] = kthread_run(test_func
, &thread_data
[id
], "rt-test-%d", id
);
381 if (IS_ERR(threads
[id
]))
382 return PTR_ERR(threads
[id
]);
384 return sysdev_register(&thread_data
[id
].sysdev
);
387 static int init_rttest(void)
391 spin_lock_init(&rttest_lock
);
393 for (i
= 0; i
< MAX_RT_TEST_MUTEXES
; i
++)
394 rt_mutex_init(&mutexes
[i
]);
396 ret
= sysdev_class_register(&rttest_sysclass
);
400 for (i
= 0; i
< MAX_RT_TEST_THREADS
; i
++) {
401 ret
= init_test_thread(i
);
404 ret
= sysdev_create_file(&thread_data
[i
].sysdev
, &attr_status
);
407 ret
= sysdev_create_file(&thread_data
[i
].sysdev
, &attr_command
);
412 printk("Initializing RT-Tester: %s\n", ret
? "Failed" : "OK" );
device_initcall(init_rttest);