/*
 * RTC subsystem, dev interface
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/module.h>
#include <linux/rtc.h>

#include "rtc-core.h"

static dev_t rtc_devt;

#define RTC_DEV_MAX 16	/* 16 RTCs should be enough for everyone... */
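
/*
 * Each registered rtc_device is exposed to userspace as a character
 * device (conventionally /dev/rtcN), with minor N == rtc->id under the
 * major allocated in rtc_dev_init().  Only one opener is allowed at a
 * time: rtc_dev_open() takes the RTC_DEV_BUSY bit and rtc_dev_release()
 * drops it.
 */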

static int rtc_dev_open(struct inode *inode, struct file *file)
{
	int err;
	struct rtc_device *rtc = container_of(inode->i_cdev,
					struct rtc_device, char_dev);
	const struct rtc_class_ops *ops = rtc->ops;

	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
		return -EBUSY;

	file->private_data = rtc;

	err = ops->open ? ops->open(rtc->dev.parent) : 0;
	if (err == 0) {
		spin_lock_irq(&rtc->irq_lock);
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		return 0;
	}

	/* something has gone wrong */
	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
	return err;
}

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
 * Routine to poll RTC seconds field for change as often as possible,
 * after first RTC_UIE use timer to reduce polling
 */
static void rtc_uie_task(struct work_struct *work)
{
	struct rtc_device *rtc =
		container_of(work, struct rtc_device, uie_task);
	struct rtc_time tm;
	int num = 0;
	int err;

	err = rtc_read_time(rtc, &tm);

	spin_lock_irq(&rtc->irq_lock);
	if (rtc->stop_uie_polling || err) {
		rtc->uie_task_active = 0;
	} else if (rtc->oldsecs != tm.tm_sec) {
		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
		rtc->uie_timer_active = 1;
		rtc->uie_task_active = 0;
		add_timer(&rtc->uie_timer);
	} else if (schedule_work(&rtc->uie_task) == 0) {
		rtc->uie_task_active = 0;
	}
	spin_unlock_irq(&rtc->irq_lock);

	if (num)
		rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
}
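
/*
 * Timer callback for the emulated update IRQ: once the seconds field has
 * been seen to change, polling backs off to roughly once per second.  The
 * timer only reschedules uie_task; the work item does the actual read.
 */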

static void rtc_uie_timer(unsigned long data)
{
	struct rtc_device *rtc = (struct rtc_device *)data;
	unsigned long flags;

	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->uie_timer_active = 0;
	rtc->uie_task_active = 1;
	if (schedule_work(&rtc->uie_task) == 0)
		rtc->uie_task_active = 0;
	spin_unlock_irqrestore(&rtc->irq_lock, flags);
}
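
/*
 * Stop the emulated update IRQ: ask the poll loop to stop, then wait for
 * any pending uie_timer and uie_task to finish, dropping irq_lock around
 * the synchronous waits so they cannot deadlock against the callbacks.
 */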

static int clear_uie(struct rtc_device *rtc)
{
	spin_lock_irq(&rtc->irq_lock);
	if (rtc->uie_irq_active) {
		rtc->stop_uie_polling = 1;
		if (rtc->uie_timer_active) {
			spin_unlock_irq(&rtc->irq_lock);
			del_timer_sync(&rtc->uie_timer);
			spin_lock_irq(&rtc->irq_lock);
			rtc->uie_timer_active = 0;
		}
		if (rtc->uie_task_active) {
			spin_unlock_irq(&rtc->irq_lock);
			flush_scheduled_work();
			spin_lock_irq(&rtc->irq_lock);
		}
		rtc->uie_irq_active = 0;
	}
	spin_unlock_irq(&rtc->irq_lock);
	return 0;
}

static int set_uie(struct rtc_device *rtc)
{
	struct rtc_time tm;
	int err;

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;

	spin_lock_irq(&rtc->irq_lock);
	if (!rtc->uie_irq_active) {
		rtc->uie_irq_active = 1;
		rtc->stop_uie_polling = 0;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_task_active = 1;
		if (schedule_work(&rtc->uie_task) == 0)
			rtc->uie_task_active = 0;
	}
	rtc->irq_data = 0;
	spin_unlock_irq(&rtc->irq_lock);
	return 0;
}
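
/*
 * Emulated RTC_UIE_ON/RTC_UIE_OFF entry point for drivers that cannot
 * generate a real update IRQ: simply switch the polling machinery above
 * on or off.
 */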

int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
{
	if (enabled)
		return set_uie(rtc);
	else
		return clear_uie(rtc);
}
EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);

#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
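
/*
 * Read semantics follow the legacy /dev/rtc ABI: the unsigned long handed
 * back to userspace carries the event flags (RTC_UF, RTC_AF or RTC_PF,
 * ORed with RTC_IRQF) in the low byte and the number of interrupts since
 * the last read in the remaining bytes.  A hypothetical userspace sketch
 * (not part of this file):
 *
 *	unsigned long data;
 *	int fd = open("/dev/rtc0", O_RDONLY);
 *
 *	ioctl(fd, RTC_UIE_ON, 0);
 *	read(fd, &data, sizeof(data));	 (blocks until the next update IRQ)
 */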

static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct rtc_device *rtc = file->private_data;

	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t ret;

	if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&rtc->irq_queue, &wait);
	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&rtc->irq_lock);
		data = rtc->irq_data;
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		if (data != 0) {
			ret = 0;
			break;
		}
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rtc->irq_queue, &wait);

	if (ret == 0) {
		/* Check for any data updates */
		if (rtc->ops->read_callback)
			data = rtc->ops->read_callback(rtc->dev.parent,
						       data);

		if (sizeof(int) != sizeof(long) &&
		    count == sizeof(unsigned int))
			ret = put_user(data, (unsigned int __user *)buf) ?:
				sizeof(unsigned int);
		else
			ret = put_user(data, (unsigned long __user *)buf) ?:
				sizeof(unsigned long);
	}
	return ret;
}
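
/*
 * poll()/select() support: report the fd readable as soon as irq_data is
 * non-zero, i.e. as soon as at least one alarm/update/periodic event has
 * been queued since the last read.
 */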

static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
{
	struct rtc_device *rtc = file->private_data;
	unsigned long data;

	poll_wait(file, &rtc->irq_queue, wait);

	data = rtc->irq_data;

	return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
}

static long rtc_dev_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct rtc_device *rtc = file->private_data;
	const struct rtc_class_ops *ops = rtc->ops;
	struct rtc_time tm;
	struct rtc_wkalrm alarm;
	void __user *uarg = (void __user *) arg;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	/* check that the calling task has appropriate permissions
	 * for certain ioctls. doing this check here is useful
	 * to avoid duplicate code in each driver.
	 */
	switch (cmd) {
	case RTC_EPOCH_SET:
	case RTC_SET_TIME:
		if (!capable(CAP_SYS_TIME))
			err = -EACCES;
		break;

	case RTC_IRQP_SET:
		if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
			err = -EACCES;
		break;

	case RTC_PIE_ON:
		if (rtc->irq_freq > rtc->max_user_freq &&
				!capable(CAP_SYS_RESOURCE))
			err = -EACCES;
		break;
	}

	if (err)
		goto done;

	/* try the driver's ioctl interface */
	if (ops->ioctl) {
		err = ops->ioctl(rtc->dev.parent, cmd, arg);
		if (err != -ENOIOCTLCMD) {
			mutex_unlock(&rtc->ops_lock);
			return err;
		}
	}

	/* if the driver does not provide the ioctl interface
	 * or if that particular ioctl was not implemented
	 * (-ENOIOCTLCMD), we will try to emulate here.
	 *
	 * Drivers *SHOULD NOT* provide ioctl implementations
	 * for these requests.  Instead, provide methods to
	 * support the following code, so that the RTC's main
	 * features are accessible without using ioctls.
	 *
	 * RTC and alarm times will be in UTC, by preference,
	 * but dual-booting with MS-Windows implies RTCs must
	 * use the local wall clock time.
	 */

	switch (cmd) {
	case RTC_ALM_READ:
		mutex_unlock(&rtc->ops_lock);

		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
			err = -EFAULT;
		return err;

	case RTC_ALM_SET:
		mutex_unlock(&rtc->ops_lock);

		if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
			return -EFAULT;

		alarm.enabled = 0;
		alarm.pending = 0;
		alarm.time.tm_wday = -1;
		alarm.time.tm_yday = -1;
		alarm.time.tm_isdst = -1;

		/* RTC_ALM_SET alarms may be up to 24 hours in the future.
		 * Rather than expecting every RTC to implement "don't care"
		 * for day/month/year fields, just force the alarm to have
		 * the right values for those fields.
		 *
		 * RTC_WKALM_SET should be used instead.  Not only does it
		 * eliminate the need for a separate RTC_AIE_ON call, it
		 * doesn't have the "alarm 23:59:59 in the future" race.
		 *
		 * NOTE:  some legacy code may have used invalid fields as
		 * wildcards, exposing hardware "periodic alarm" capabilities.
		 * Not supported here.
		 */
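		/* For example: with the current time at 23:30:00, a requested
		 * alarm of 01:00:00 converts to an earlier absolute time than
		 * "now" once today's date is filled in, so the block below
		 * bumps the day/month/year fields to tomorrow's date.
		 */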
		{
			unsigned long now, then;

			err = rtc_read_time(rtc, &tm);
			if (err < 0)
				return err;
			rtc_tm_to_time(&tm, &now);

			alarm.time.tm_mday = tm.tm_mday;
			alarm.time.tm_mon = tm.tm_mon;
			alarm.time.tm_year = tm.tm_year;
			err = rtc_valid_tm(&alarm.time);
			if (err < 0)
				return err;
			rtc_tm_to_time(&alarm.time, &then);

			/* alarm may need to wrap into tomorrow */
			if (then < now) {
				rtc_time_to_tm(now + 24 * 60 * 60, &tm);
				alarm.time.tm_mday = tm.tm_mday;
				alarm.time.tm_mon = tm.tm_mon;
				alarm.time.tm_year = tm.tm_year;
			}
		}

		return rtc_set_alarm(rtc, &alarm);

	case RTC_RD_TIME:
		mutex_unlock(&rtc->ops_lock);

		err = rtc_read_time(rtc, &tm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &tm, sizeof(tm)))
			err = -EFAULT;
		return err;

	case RTC_SET_TIME:
		mutex_unlock(&rtc->ops_lock);

		if (copy_from_user(&tm, uarg, sizeof(tm)))
			return -EFAULT;

		return rtc_set_time(rtc, &tm);

	case RTC_PIE_ON:
		err = rtc_irq_set_state(rtc, NULL, 1);
		break;

	case RTC_PIE_OFF:
		err = rtc_irq_set_state(rtc, NULL, 0);
		break;

	case RTC_AIE_ON:
		mutex_unlock(&rtc->ops_lock);
		return rtc_alarm_irq_enable(rtc, 1);

	case RTC_AIE_OFF:
		mutex_unlock(&rtc->ops_lock);
		return rtc_alarm_irq_enable(rtc, 0);

	case RTC_UIE_ON:
		mutex_unlock(&rtc->ops_lock);
		return rtc_update_irq_enable(rtc, 1);

	case RTC_UIE_OFF:
		mutex_unlock(&rtc->ops_lock);
		return rtc_update_irq_enable(rtc, 0);

	case RTC_IRQP_SET:
		err = rtc_irq_set_freq(rtc, NULL, arg);
		break;

	case RTC_IRQP_READ:
		err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
		break;

#ifdef rtc_epoch
	case RTC_EPOCH_SET:
		/*
		 * There were no RTC clocks before 1900.
		 */
		if (arg < 1900) {
			err = -EINVAL;
			break;
		}
		rtc_epoch = arg;
		err = 0;
		break;

	case RTC_EPOCH_READ:
		err = put_user(rtc_epoch, (unsigned long __user *)uarg);
		break;
#endif

	case RTC_WKALM_SET:
		mutex_unlock(&rtc->ops_lock);
		if (copy_from_user(&alarm, uarg, sizeof(alarm)))
			return -EFAULT;

		return rtc_set_alarm(rtc, &alarm);

	case RTC_WKALM_RD:
		mutex_unlock(&rtc->ops_lock);
		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm, sizeof(alarm)))
			err = -EFAULT;
		return err;

	default:
		err = -ENOTTY;
		break;
	}

done:
	mutex_unlock(&rtc->ops_lock);
	return err;
}
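
/*
 * fasync() support: processes that request SIGIO-style notification are
 * queued on rtc->async_queue, which the RTC core kicks from
 * rtc_update_irq() when an event arrives.
 */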

static int rtc_dev_fasync(int fd, struct file *file, int on)
{
	struct rtc_device *rtc = file->private_data;
	return fasync_helper(fd, file, on, &rtc->async_queue);
}

static int rtc_dev_release(struct inode *inode, struct file *file)
{
	struct rtc_device *rtc = file->private_data;

	/* We shut down the repeating IRQs that userspace enabled,
	 * since nothing is listening to them.
	 *  - Update (UIE) ... currently only managed through ioctls
	 *  - Periodic (PIE) ... also used through rtc_*() interface calls
	 *
	 * Leave the alarm alone; it may be set to trigger a system wakeup
	 * later, or be used by kernel code, and is a one-shot event anyway.
	 */

	/* Keep ioctl until all drivers are converted */
	rtc_dev_ioctl(file, RTC_UIE_OFF, 0);
	rtc_update_irq_enable(rtc, 0);
	rtc_irq_set_state(rtc, NULL, 0);

	if (rtc->ops->release)
		rtc->ops->release(rtc->dev.parent);

	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
	return 0;
}

static const struct file_operations rtc_dev_fops = {
	.owner		= THIS_MODULE,
	.read		= rtc_dev_read,
	.poll		= rtc_dev_poll,
	.unlocked_ioctl	= rtc_dev_ioctl,
	.open		= rtc_dev_open,
	.release	= rtc_dev_release,
	.fasync		= rtc_dev_fasync,
};

/* insertion/removal hooks */

void rtc_dev_prepare(struct rtc_device *rtc)
{
	if (!rtc_devt)
		return;

	if (rtc->id >= RTC_DEV_MAX) {
		pr_debug("%s: too many RTC devices\n", rtc->name);
		return;
	}

	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	INIT_WORK(&rtc->uie_task, rtc_uie_task);
	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
#endif

	cdev_init(&rtc->char_dev, &rtc_dev_fops);
	rtc->char_dev.owner = rtc->owner;
}

void rtc_dev_add_device(struct rtc_device *rtc)
{
	if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1))
		printk(KERN_WARNING "%s: failed to add char device %d:%d\n",
			rtc->name, MAJOR(rtc_devt), rtc->id);
	else
		pr_debug("%s: dev (%d:%d)\n", rtc->name,
			MAJOR(rtc_devt), rtc->id);
}

void rtc_dev_del_device(struct rtc_device *rtc)
{
	if (rtc->dev.devt)
		cdev_del(&rtc->char_dev);
}

void __init rtc_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
			__FILE__);
}

void __exit rtc_dev_exit(void)
{
	if (rtc_devt)
		unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
}