/*
 * RTC subsystem, dev interface
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/
14 #include <linux/module.h>
15 #include <linux/rtc.h>
16 #include <linux/sched.h>
19 static dev_t rtc_devt
;
21 #define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
23 static int rtc_dev_open(struct inode
*inode
, struct file
*file
)
26 struct rtc_device
*rtc
= container_of(inode
->i_cdev
,
27 struct rtc_device
, char_dev
);
28 const struct rtc_class_ops
*ops
= rtc
->ops
;
30 if (test_and_set_bit_lock(RTC_DEV_BUSY
, &rtc
->flags
))
33 file
->private_data
= rtc
;
35 err
= ops
->open
? ops
->open(rtc
->dev
.parent
) : 0;
37 spin_lock_irq(&rtc
->irq_lock
);
39 spin_unlock_irq(&rtc
->irq_lock
);
44 /* something has gone wrong */
45 clear_bit_unlock(RTC_DEV_BUSY
, &rtc
->flags
);
49 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
51 * Routine to poll RTC seconds field for change as often as possible,
52 * after first RTC_UIE use timer to reduce polling
54 static void rtc_uie_task(struct work_struct
*work
)
56 struct rtc_device
*rtc
=
57 container_of(work
, struct rtc_device
, uie_task
);
62 err
= rtc_read_time(rtc
, &tm
);
64 spin_lock_irq(&rtc
->irq_lock
);
65 if (rtc
->stop_uie_polling
|| err
) {
66 rtc
->uie_task_active
= 0;
67 } else if (rtc
->oldsecs
!= tm
.tm_sec
) {
68 num
= (tm
.tm_sec
+ 60 - rtc
->oldsecs
) % 60;
69 rtc
->oldsecs
= tm
.tm_sec
;
70 rtc
->uie_timer
.expires
= jiffies
+ HZ
- (HZ
/10);
71 rtc
->uie_timer_active
= 1;
72 rtc
->uie_task_active
= 0;
73 add_timer(&rtc
->uie_timer
);
74 } else if (schedule_work(&rtc
->uie_task
) == 0) {
75 rtc
->uie_task_active
= 0;
77 spin_unlock_irq(&rtc
->irq_lock
);
79 rtc_update_irq(rtc
, num
, RTC_UF
| RTC_IRQF
);
81 static void rtc_uie_timer(unsigned long data
)
83 struct rtc_device
*rtc
= (struct rtc_device
*)data
;
86 spin_lock_irqsave(&rtc
->irq_lock
, flags
);
87 rtc
->uie_timer_active
= 0;
88 rtc
->uie_task_active
= 1;
89 if ((schedule_work(&rtc
->uie_task
) == 0))
90 rtc
->uie_task_active
= 0;
91 spin_unlock_irqrestore(&rtc
->irq_lock
, flags
);
94 static int clear_uie(struct rtc_device
*rtc
)
96 spin_lock_irq(&rtc
->irq_lock
);
97 if (rtc
->uie_irq_active
) {
98 rtc
->stop_uie_polling
= 1;
99 if (rtc
->uie_timer_active
) {
100 spin_unlock_irq(&rtc
->irq_lock
);
101 del_timer_sync(&rtc
->uie_timer
);
102 spin_lock_irq(&rtc
->irq_lock
);
103 rtc
->uie_timer_active
= 0;
105 if (rtc
->uie_task_active
) {
106 spin_unlock_irq(&rtc
->irq_lock
);
107 flush_scheduled_work();
108 spin_lock_irq(&rtc
->irq_lock
);
110 rtc
->uie_irq_active
= 0;
112 spin_unlock_irq(&rtc
->irq_lock
);
116 static int set_uie(struct rtc_device
*rtc
)
121 err
= rtc_read_time(rtc
, &tm
);
124 spin_lock_irq(&rtc
->irq_lock
);
125 if (!rtc
->uie_irq_active
) {
126 rtc
->uie_irq_active
= 1;
127 rtc
->stop_uie_polling
= 0;
128 rtc
->oldsecs
= tm
.tm_sec
;
129 rtc
->uie_task_active
= 1;
130 if (schedule_work(&rtc
->uie_task
) == 0)
131 rtc
->uie_task_active
= 0;
134 spin_unlock_irq(&rtc
->irq_lock
);
138 int rtc_dev_update_irq_enable_emul(struct rtc_device
*rtc
, unsigned int enabled
)
143 return clear_uie(rtc
);
145 EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul
);
147 #endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
150 rtc_dev_read(struct file
*file
, char __user
*buf
, size_t count
, loff_t
*ppos
)
152 struct rtc_device
*rtc
= file
->private_data
;
154 DECLARE_WAITQUEUE(wait
, current
);
158 if (count
!= sizeof(unsigned int) && count
< sizeof(unsigned long))
161 add_wait_queue(&rtc
->irq_queue
, &wait
);
163 __set_current_state(TASK_INTERRUPTIBLE
);
165 spin_lock_irq(&rtc
->irq_lock
);
166 data
= rtc
->irq_data
;
168 spin_unlock_irq(&rtc
->irq_lock
);
174 if (file
->f_flags
& O_NONBLOCK
) {
178 if (signal_pending(current
)) {
184 set_current_state(TASK_RUNNING
);
185 remove_wait_queue(&rtc
->irq_queue
, &wait
);
188 /* Check for any data updates */
189 if (rtc
->ops
->read_callback
)
190 data
= rtc
->ops
->read_callback(rtc
->dev
.parent
,
193 if (sizeof(int) != sizeof(long) &&
194 count
== sizeof(unsigned int))
195 ret
= put_user(data
, (unsigned int __user
*)buf
) ?:
196 sizeof(unsigned int);
198 ret
= put_user(data
, (unsigned long __user
*)buf
) ?:
199 sizeof(unsigned long);
204 static unsigned int rtc_dev_poll(struct file
*file
, poll_table
*wait
)
206 struct rtc_device
*rtc
= file
->private_data
;
209 poll_wait(file
, &rtc
->irq_queue
, wait
);
211 data
= rtc
->irq_data
;
213 return (data
!= 0) ? (POLLIN
| POLLRDNORM
) : 0;
216 static long rtc_dev_ioctl(struct file
*file
,
217 unsigned int cmd
, unsigned long arg
)
220 struct rtc_device
*rtc
= file
->private_data
;
221 const struct rtc_class_ops
*ops
= rtc
->ops
;
223 struct rtc_wkalrm alarm
;
224 void __user
*uarg
= (void __user
*) arg
;
226 err
= mutex_lock_interruptible(&rtc
->ops_lock
);
230 /* check that the calling task has appropriate permissions
231 * for certain ioctls. doing this check here is useful
232 * to avoid duplicate code in each driver.
237 if (!capable(CAP_SYS_TIME
))
242 if (arg
> rtc
->max_user_freq
&& !capable(CAP_SYS_RESOURCE
))
247 if (rtc
->irq_freq
> rtc
->max_user_freq
&&
248 !capable(CAP_SYS_RESOURCE
))
256 /* try the driver's ioctl interface */
258 err
= ops
->ioctl(rtc
->dev
.parent
, cmd
, arg
);
259 if (err
!= -ENOIOCTLCMD
) {
260 mutex_unlock(&rtc
->ops_lock
);
265 /* if the driver does not provide the ioctl interface
266 * or if that particular ioctl was not implemented
267 * (-ENOIOCTLCMD), we will try to emulate here.
269 * Drivers *SHOULD NOT* provide ioctl implementations
270 * for these requests. Instead, provide methods to
271 * support the following code, so that the RTC's main
272 * features are accessible without using ioctls.
274 * RTC and alarm times will be in UTC, by preference,
275 * but dual-booting with MS-Windows implies RTCs must
276 * use the local wall clock time.
281 mutex_unlock(&rtc
->ops_lock
);
283 err
= rtc_read_alarm(rtc
, &alarm
);
287 if (copy_to_user(uarg
, &alarm
.time
, sizeof(tm
)))
292 mutex_unlock(&rtc
->ops_lock
);
294 if (copy_from_user(&alarm
.time
, uarg
, sizeof(tm
)))
299 alarm
.time
.tm_wday
= -1;
300 alarm
.time
.tm_yday
= -1;
301 alarm
.time
.tm_isdst
= -1;
303 /* RTC_ALM_SET alarms may be up to 24 hours in the future.
304 * Rather than expecting every RTC to implement "don't care"
305 * for day/month/year fields, just force the alarm to have
306 * the right values for those fields.
308 * RTC_WKALM_SET should be used instead. Not only does it
309 * eliminate the need for a separate RTC_AIE_ON call, it
310 * doesn't have the "alarm 23:59:59 in the future" race.
312 * NOTE: some legacy code may have used invalid fields as
313 * wildcards, exposing hardware "periodic alarm" capabilities.
314 * Not supported here.
317 unsigned long now
, then
;
319 err
= rtc_read_time(rtc
, &tm
);
322 rtc_tm_to_time(&tm
, &now
);
324 alarm
.time
.tm_mday
= tm
.tm_mday
;
325 alarm
.time
.tm_mon
= tm
.tm_mon
;
326 alarm
.time
.tm_year
= tm
.tm_year
;
327 err
= rtc_valid_tm(&alarm
.time
);
330 rtc_tm_to_time(&alarm
.time
, &then
);
332 /* alarm may need to wrap into tomorrow */
334 rtc_time_to_tm(now
+ 24 * 60 * 60, &tm
);
335 alarm
.time
.tm_mday
= tm
.tm_mday
;
336 alarm
.time
.tm_mon
= tm
.tm_mon
;
337 alarm
.time
.tm_year
= tm
.tm_year
;
341 return rtc_set_alarm(rtc
, &alarm
);
344 mutex_unlock(&rtc
->ops_lock
);
346 err
= rtc_read_time(rtc
, &tm
);
350 if (copy_to_user(uarg
, &tm
, sizeof(tm
)))
355 mutex_unlock(&rtc
->ops_lock
);
357 if (copy_from_user(&tm
, uarg
, sizeof(tm
)))
360 return rtc_set_time(rtc
, &tm
);
363 err
= rtc_irq_set_state(rtc
, NULL
, 1);
367 err
= rtc_irq_set_state(rtc
, NULL
, 0);
371 mutex_unlock(&rtc
->ops_lock
);
372 return rtc_alarm_irq_enable(rtc
, 1);
375 mutex_unlock(&rtc
->ops_lock
);
376 return rtc_alarm_irq_enable(rtc
, 0);
379 mutex_unlock(&rtc
->ops_lock
);
380 return rtc_update_irq_enable(rtc
, 1);
383 mutex_unlock(&rtc
->ops_lock
);
384 return rtc_update_irq_enable(rtc
, 0);
387 err
= rtc_irq_set_freq(rtc
, NULL
, arg
);
391 err
= put_user(rtc
->irq_freq
, (unsigned long __user
*)uarg
);
398 * There were no RTC clocks before 1900.
410 err
= put_user(rtc_epoch
, (unsigned long __user
*)uarg
);
414 mutex_unlock(&rtc
->ops_lock
);
415 if (copy_from_user(&alarm
, uarg
, sizeof(alarm
)))
418 return rtc_set_alarm(rtc
, &alarm
);
421 mutex_unlock(&rtc
->ops_lock
);
422 err
= rtc_read_alarm(rtc
, &alarm
);
426 if (copy_to_user(uarg
, &alarm
, sizeof(alarm
)))
436 mutex_unlock(&rtc
->ops_lock
);
440 static int rtc_dev_fasync(int fd
, struct file
*file
, int on
)
442 struct rtc_device
*rtc
= file
->private_data
;
443 return fasync_helper(fd
, file
, on
, &rtc
->async_queue
);
446 static int rtc_dev_release(struct inode
*inode
, struct file
*file
)
448 struct rtc_device
*rtc
= file
->private_data
;
450 /* We shut down the repeating IRQs that userspace enabled,
451 * since nothing is listening to them.
452 * - Update (UIE) ... currently only managed through ioctls
453 * - Periodic (PIE) ... also used through rtc_*() interface calls
455 * Leave the alarm alone; it may be set to trigger a system wakeup
456 * later, or be used by kernel code, and is a one-shot event anyway.
459 /* Keep ioctl until all drivers are converted */
460 rtc_dev_ioctl(file
, RTC_UIE_OFF
, 0);
461 rtc_update_irq_enable(rtc
, 0);
462 rtc_irq_set_state(rtc
, NULL
, 0);
464 if (rtc
->ops
->release
)
465 rtc
->ops
->release(rtc
->dev
.parent
);
467 clear_bit_unlock(RTC_DEV_BUSY
, &rtc
->flags
);
471 static const struct file_operations rtc_dev_fops
= {
472 .owner
= THIS_MODULE
,
474 .read
= rtc_dev_read
,
475 .poll
= rtc_dev_poll
,
476 .unlocked_ioctl
= rtc_dev_ioctl
,
477 .open
= rtc_dev_open
,
478 .release
= rtc_dev_release
,
479 .fasync
= rtc_dev_fasync
,
482 /* insertion/removal hooks */
484 void rtc_dev_prepare(struct rtc_device
*rtc
)
489 if (rtc
->id
>= RTC_DEV_MAX
) {
490 pr_debug("%s: too many RTC devices\n", rtc
->name
);
494 rtc
->dev
.devt
= MKDEV(MAJOR(rtc_devt
), rtc
->id
);
496 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
497 INIT_WORK(&rtc
->uie_task
, rtc_uie_task
);
498 setup_timer(&rtc
->uie_timer
, rtc_uie_timer
, (unsigned long)rtc
);
501 cdev_init(&rtc
->char_dev
, &rtc_dev_fops
);
502 rtc
->char_dev
.owner
= rtc
->owner
;
505 void rtc_dev_add_device(struct rtc_device
*rtc
)
507 if (cdev_add(&rtc
->char_dev
, rtc
->dev
.devt
, 1))
508 printk(KERN_WARNING
"%s: failed to add char device %d:%d\n",
509 rtc
->name
, MAJOR(rtc_devt
), rtc
->id
);
511 pr_debug("%s: dev (%d:%d)\n", rtc
->name
,
512 MAJOR(rtc_devt
), rtc
->id
);
515 void rtc_dev_del_device(struct rtc_device
*rtc
)
518 cdev_del(&rtc
->char_dev
);
521 void __init
rtc_dev_init(void)
525 err
= alloc_chrdev_region(&rtc_devt
, 0, RTC_DEV_MAX
, "rtc");
527 printk(KERN_ERR
"%s: failed to allocate char dev region\n",
531 void __exit
rtc_dev_exit(void)
534 unregister_chrdev_region(rtc_devt
, RTC_DEV_MAX
);