/*
 * RTC subsystem, dev interface
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/rtc.h>

#include "rtc-core.h"
static dev_t rtc_devt;
#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
static int rtc_dev_open(struct inode *inode, struct file *file)
{
        int err;
        struct rtc_device *rtc = container_of(inode->i_cdev,
                                        struct rtc_device, char_dev);
        const struct rtc_class_ops *ops = rtc->ops;

        /* We keep the lock as long as the device is in use
         * and return immediately if busy
         */
        if (!(mutex_trylock(&rtc->char_lock)))
                return -EBUSY;

        file->private_data = rtc;

        err = ops->open ? ops->open(rtc->dev.parent) : 0;
        if (err == 0) {
                spin_lock_irq(&rtc->irq_lock);
                rtc->irq_data = 0;
                spin_unlock_irq(&rtc->irq_lock);

                return 0;
        }

        /* something has gone wrong, release the lock */
        mutex_unlock(&rtc->char_lock);
        return err;
}

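/*
 * Example (userspace, illustrative sketch only): because rtc_dev_open()
 * keeps char_lock held for as long as the file stays open, a second open
 * of the same node is expected to fail with EBUSY.  The /dev/rtc0 path
 * assumes a typical udev naming scheme.
 *
 *      #include <errno.h>
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              int first = open("/dev/rtc0", O_RDONLY);
 *              int second = open("/dev/rtc0", O_RDONLY);
 *
 *              if (first >= 0 && second < 0)
 *                      printf("second open: %s\n", strerror(errno));
 *              if (second >= 0)
 *                      close(second);
 *              if (first >= 0)
 *                      close(first);
 *              return 0;
 *      }
 */
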
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
 * Routine to poll RTC seconds field for change as often as possible,
 * after first RTC_UIE use timer to reduce polling
 */
static void rtc_uie_task(struct work_struct *work)
{
        struct rtc_device *rtc =
                container_of(work, struct rtc_device, uie_task);
        struct rtc_time tm;
        int num = 0;
        int err;

        err = rtc_read_time(rtc, &tm);

        local_irq_disable();
        spin_lock(&rtc->irq_lock);
        if (rtc->stop_uie_polling || err) {
                rtc->uie_task_active = 0;
        } else if (rtc->oldsecs != tm.tm_sec) {
                num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
                rtc->oldsecs = tm.tm_sec;
                rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
                rtc->uie_timer_active = 1;
                rtc->uie_task_active = 0;
                add_timer(&rtc->uie_timer);
        } else if (schedule_work(&rtc->uie_task) == 0) {
                rtc->uie_task_active = 0;
        }
        spin_unlock(&rtc->irq_lock);
        if (num)
                rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
        local_irq_enable();
}
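/*
 * Note on the flags used above: uie_task_active and uie_timer_active record
 * which of the two emulation mechanisms (work item or timer) is currently
 * armed, so clear_uie() below knows exactly what it has to cancel, while
 * stop_uie_polling asks an already queued task to stop rescheduling itself.
 */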
static void rtc_uie_timer(unsigned long data)
{
        struct rtc_device *rtc = (struct rtc_device *)data;
        unsigned long flags;

        spin_lock_irqsave(&rtc->irq_lock, flags);
        rtc->uie_timer_active = 0;
        rtc->uie_task_active = 1;
        if ((schedule_work(&rtc->uie_task) == 0))
                rtc->uie_task_active = 0;
        spin_unlock_irqrestore(&rtc->irq_lock, flags);
}

static void clear_uie(struct rtc_device *rtc)
{
        spin_lock_irq(&rtc->irq_lock);
        if (rtc->irq_active) {
                rtc->stop_uie_polling = 1;
                if (rtc->uie_timer_active) {
                        spin_unlock_irq(&rtc->irq_lock);
                        del_timer_sync(&rtc->uie_timer);
                        spin_lock_irq(&rtc->irq_lock);
                        rtc->uie_timer_active = 0;
                }
                if (rtc->uie_task_active) {
                        spin_unlock_irq(&rtc->irq_lock);
                        flush_scheduled_work();
                        spin_lock_irq(&rtc->irq_lock);
                }
                rtc->irq_active = 0;
        }
        spin_unlock_irq(&rtc->irq_lock);
}

static int set_uie(struct rtc_device *rtc)
{
        struct rtc_time tm;
        int err;

        err = rtc_read_time(rtc, &tm);
        if (err)
                return err;
        spin_lock_irq(&rtc->irq_lock);
        if (!rtc->irq_active) {
                rtc->irq_active = 1;
                rtc->stop_uie_polling = 0;
                rtc->oldsecs = tm.tm_sec;
                rtc->uie_task_active = 1;
                if (schedule_work(&rtc->uie_task) == 0)
                        rtc->uie_task_active = 0;
        }
        rtc->irq_data = 0;
        spin_unlock_irq(&rtc->irq_lock);
        return 0;
}
#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
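/*
 * Example (userspace, illustrative sketch only): the emulation above backs
 * the standard update-interrupt interface, so a program can sleep until the
 * next RTC second tick roughly like this (assuming a /dev/rtc0 node):
 *
 *      #include <fcntl.h>
 *      #include <linux/rtc.h>
 *      #include <stdio.h>
 *      #include <sys/ioctl.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              unsigned long data;
 *              int i, fd = open("/dev/rtc0", O_RDONLY);
 *
 *              if (fd < 0 || ioctl(fd, RTC_UIE_ON, 0) < 0)
 *                      return 1;
 *              for (i = 0; i < 5; i++) {
 *                      if (read(fd, &data, sizeof(data)) > 0)
 *                              printf("tick %lu\n", data);
 *              }
 *              ioctl(fd, RTC_UIE_OFF, 0);
 *              close(fd);
 *              return 0;
 *      }
 */
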
static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct rtc_device *rtc = to_rtc_device(file->private_data);

        DECLARE_WAITQUEUE(wait, current);
        unsigned long data;
        ssize_t ret;

        if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
                return -EINVAL;

        add_wait_queue(&rtc->irq_queue, &wait);
        do {
                __set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irq(&rtc->irq_lock);
                data = rtc->irq_data;
                rtc->irq_data = 0;
                spin_unlock_irq(&rtc->irq_lock);

                if (data != 0) {
                        ret = 0;
                        break;
                }
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                schedule();
        } while (1);
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&rtc->irq_queue, &wait);

        if (ret == 0) {
                /* Check for any data updates */
                if (rtc->ops->read_callback)
                        data = rtc->ops->read_callback(rtc->dev.parent,
                                                       data);

                if (sizeof(int) != sizeof(long) &&
                    count == sizeof(unsigned int))
                        ret = put_user(data, (unsigned int __user *)buf) ?:
                                sizeof(unsigned int);
                else
                        ret = put_user(data, (unsigned long __user *)buf) ?:
                                sizeof(unsigned long);
        }
        return ret;
}

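/*
 * The value returned above is the irq_data word as packed by
 * rtc_update_irq() in the RTC core: the low byte carries the event flags
 * (RTC_IRQF plus RTC_UF/RTC_AF/RTC_PF) and the upper bits accumulate the
 * number of reported interrupts.  A caller might decode it like this
 * (userspace, illustrative sketch only):
 *
 *      unsigned long data;
 *
 *      if (read(fd, &data, sizeof(data)) == sizeof(data)) {
 *              unsigned long count = data >> 8;
 *              unsigned long events = data & 0xff;
 *
 *              if (events & RTC_UF)
 *                      printf("%lu update interrupt(s)\n", count);
 *              if (events & RTC_AF)
 *                      printf("alarm fired\n");
 *      }
 */
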
static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
{
        struct rtc_device *rtc = to_rtc_device(file->private_data);
        unsigned long data;

        poll_wait(file, &rtc->irq_queue, wait);

        data = rtc->irq_data;

        return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
}

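/*
 * Example (userspace, illustrative sketch only): since rtc_dev_poll()
 * reports POLLIN once irq_data is non-zero, the descriptor can also be
 * multiplexed with poll()/select() instead of blocking in read()
 * (assumes <poll.h>):
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *      if (poll(&pfd, 1, 2000) > 0 && (pfd.revents & POLLIN))
 *              read(fd, &data, sizeof(data));
 */
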
static int rtc_dev_ioctl(struct inode *inode, struct file *file,
                unsigned int cmd, unsigned long arg)
{
        int err = 0;
        struct rtc_device *rtc = file->private_data;
        const struct rtc_class_ops *ops = rtc->ops;
        struct rtc_time tm;
        struct rtc_wkalrm alarm;
        void __user *uarg = (void __user *) arg;

        /* check that the calling task has appropriate permissions
         * for certain ioctls. doing this check here is useful
         * to avoid duplicate code in each driver.
         */
        switch (cmd) {
        case RTC_EPOCH_SET:
        case RTC_SET_TIME:
                if (!capable(CAP_SYS_TIME))
                        return -EACCES;
                break;

        case RTC_IRQP_SET:
                if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
                        return -EACCES;
                break;

        case RTC_PIE_ON:
                if (!capable(CAP_SYS_RESOURCE))
                        return -EACCES;
                break;
        }
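
        /* For example, an unprivileged caller issuing RTC_SET_TIME is
         * rejected right here with -EACCES, before the driver's own ioctl
         * hook is ever consulted, while read-only requests such as
         * RTC_RD_TIME need no special capability.
         */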

        /* avoid conflicting IRQ users */
        if (cmd == RTC_PIE_ON || cmd == RTC_PIE_OFF || cmd == RTC_IRQP_SET) {
                spin_lock_irq(&rtc->irq_task_lock);
                if (rtc->irq_task)
                        err = -EBUSY;
                spin_unlock_irq(&rtc->irq_task_lock);

                if (err < 0)
                        return err;
        }

        /* try the driver's ioctl interface */
        if (ops->ioctl) {
                err = ops->ioctl(rtc->dev.parent, cmd, arg);
                if (err != -ENOIOCTLCMD)
                        return err;
        }

        /* if the driver does not provide the ioctl interface
         * or if that particular ioctl was not implemented
         * (-ENOIOCTLCMD), we will try to emulate here.
         */

        switch (cmd) {
        case RTC_ALM_READ:
                err = rtc_read_alarm(rtc, &alarm);
                if (err < 0)
                        return err;

                if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
                        return -EFAULT;
                break;

        case RTC_ALM_SET:
                if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
                        return -EFAULT;

                alarm.enabled = 0;
                alarm.pending = 0;
                alarm.time.tm_wday = -1;
                alarm.time.tm_yday = -1;
                alarm.time.tm_isdst = -1;

                /* RTC_ALM_SET alarms may be up to 24 hours in the future.
                 * Rather than expecting every RTC to implement "don't care"
                 * for day/month/year fields, just force the alarm to have
                 * the right values for those fields.
                 *
                 * RTC_WKALM_SET should be used instead.  Not only does it
                 * eliminate the need for a separate RTC_AIE_ON call, it
                 * doesn't have the "alarm 23:59:59 in the future" race.
                 *
                 * NOTE: some legacy code may have used invalid fields as
                 * wildcards, exposing hardware "periodic alarm" capabilities.
                 * Not supported here.
                 */
                {
                        unsigned long now, then;

                        err = rtc_read_time(rtc, &tm);
                        if (err < 0)
                                return err;
                        rtc_tm_to_time(&tm, &now);

                        alarm.time.tm_mday = tm.tm_mday;
                        alarm.time.tm_mon = tm.tm_mon;
                        alarm.time.tm_year = tm.tm_year;
                        err = rtc_valid_tm(&alarm.time);
                        if (err < 0)
                                return err;
                        rtc_tm_to_time(&alarm.time, &then);

                        /* alarm may need to wrap into tomorrow */
                        if (then < now) {
                                rtc_time_to_tm(now + 24 * 60 * 60, &tm);
                                alarm.time.tm_mday = tm.tm_mday;
                                alarm.time.tm_mon = tm.tm_mon;
                                alarm.time.tm_year = tm.tm_year;
                        }
                }
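
                /* Worked example of the wrap above: at 23:50 a request for
                 * an 00:10 alarm yields "then" earlier than "now" when
                 * paired with today's date, so the day/month/year fields
                 * are advanced to tomorrow and the alarm fires ten minutes
                 * after midnight.
                 */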

                err = rtc_set_alarm(rtc, &alarm);
                break;

        case RTC_RD_TIME:
                err = rtc_read_time(rtc, &tm);
                if (err < 0)
                        return err;

                if (copy_to_user(uarg, &tm, sizeof(tm)))
                        return -EFAULT;
                break;

        case RTC_SET_TIME:
                if (copy_from_user(&tm, uarg, sizeof(tm)))
                        return -EFAULT;

                err = rtc_set_time(rtc, &tm);
                break;

        case RTC_IRQP_READ:
                if (ops->irq_set_freq)
                        err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
                else
                        err = -ENOTTY;
                break;

        case RTC_IRQP_SET:
                if (ops->irq_set_freq)
                        err = rtc_irq_set_freq(rtc, rtc->irq_task, arg);
                else
                        err = -ENOTTY;
                break;

        case RTC_EPOCH_SET:
                /*
                 * There were no RTC clocks before 1900.
                 */
                if (arg < 1900) {
                        err = -EINVAL;
                        break;
                }
                rtc_epoch = arg;
                err = 0;
                break;

        case RTC_EPOCH_READ:
                err = put_user(rtc_epoch, (unsigned long __user *)uarg);
                break;

        case RTC_WKALM_SET:
                if (copy_from_user(&alarm, uarg, sizeof(alarm)))
                        return -EFAULT;

                err = rtc_set_alarm(rtc, &alarm);
                break;

        case RTC_WKALM_RD:
                err = rtc_read_alarm(rtc, &alarm);
                if (err < 0)
                        return err;

                if (copy_to_user(uarg, &alarm, sizeof(alarm)))
                        return -EFAULT;
                break;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        case RTC_UIE_OFF:
                clear_uie(rtc);
                break;

        case RTC_UIE_ON:
                err = set_uie(rtc);
                break;
#endif

        default:
                err = -ENOTTY;
                break;
        }

        return err;
}

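/*
 * Example (userspace, illustrative sketch only): reading the time and
 * programming a wakeup alarm through the ioctls handled above.
 * RTC_WKALM_SET is used rather than RTC_ALM_SET for the reasons given in
 * the RTC_ALM_SET case; setting the clock (RTC_SET_TIME) would additionally
 * require CAP_SYS_TIME, as enforced at the top of rtc_dev_ioctl().
 *
 *      #include <fcntl.h>
 *      #include <linux/rtc.h>
 *      #include <stdio.h>
 *      #include <sys/ioctl.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              struct rtc_time tm;
 *              struct rtc_wkalrm alm;
 *              int fd = open("/dev/rtc0", O_RDONLY);
 *
 *              if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0)
 *                      return 1;
 *              printf("%04d-%02d-%02d %02d:%02d:%02d\n",
 *                      tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 *                      tm.tm_hour, tm.tm_min, tm.tm_sec);
 *
 *              alm.time = tm;
 *              alm.time.tm_min += 1;   (may need normalizing past :59)
 *              alm.enabled = 1;
 *              alm.pending = 0;
 *              if (ioctl(fd, RTC_WKALM_SET, &alm) < 0)
 *                      perror("RTC_WKALM_SET");
 *              close(fd);
 *              return 0;
 *      }
 */
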
static int rtc_dev_release(struct inode *inode, struct file *file)
{
        struct rtc_device *rtc = to_rtc_device(file->private_data);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        clear_uie(rtc);
#endif
        if (rtc->ops->release)
                rtc->ops->release(rtc->dev.parent);

        mutex_unlock(&rtc->char_lock);
        return 0;
}

static int rtc_dev_fasync(int fd, struct file *file, int on)
{
        struct rtc_device *rtc = to_rtc_device(file->private_data);
        return fasync_helper(fd, file, on, &rtc->async_queue);
}
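
/*
 * Example (userspace, illustrative sketch only): the fasync hook lets a
 * process ask for SIGIO when the RTC core queues new interrupt data,
 * instead of blocking in read() or poll():
 *
 *      fcntl(fd, F_SETOWN, getpid());
 *      fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * After this, SIGIO is delivered whenever an RTC event is reported.
 */
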
static const struct file_operations rtc_dev_fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = rtc_dev_read,
        .poll           = rtc_dev_poll,
        .ioctl          = rtc_dev_ioctl,
        .open           = rtc_dev_open,
        .release        = rtc_dev_release,
        .fasync         = rtc_dev_fasync,
};

/* insertion/removal hooks */
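
/*
 * These hooks are intended to be driven by the RTC class core:
 * rtc_dev_prepare() before the struct device is registered (so dev.devt is
 * set up in time), rtc_dev_add_device() once registration has succeeded,
 * and rtc_dev_del_device() on removal.
 */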

void rtc_dev_prepare(struct rtc_device *rtc)
{
        if (!rtc_devt)
                return;

        if (rtc->id >= RTC_DEV_MAX) {
                pr_debug("%s: too many RTC devices\n", rtc->name);
                return;
        }

        rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);

        mutex_init(&rtc->char_lock);
        spin_lock_init(&rtc->irq_lock);
        init_waitqueue_head(&rtc->irq_queue);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        INIT_WORK(&rtc->uie_task, rtc_uie_task);
        setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
#endif

        cdev_init(&rtc->char_dev, &rtc_dev_fops);
        rtc->char_dev.owner = rtc->owner;
}

void rtc_dev_add_device(struct rtc_device *rtc)
{
        if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1))
                printk(KERN_WARNING "%s: failed to add char device %d:%d\n",
                        rtc->name, MAJOR(rtc_devt), rtc->id);
        else
                pr_debug("%s: dev (%d:%d)\n", rtc->name,
                        MAJOR(rtc_devt), rtc->id);
}

void rtc_dev_del_device(struct rtc_device *rtc)
{
        if (rtc->dev.devt)
                cdev_del(&rtc->char_dev);
}

void __init rtc_dev_init(void)
{
        int err;

        err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
        if (err < 0)
                printk(KERN_ERR "%s: failed to allocate char dev region\n",
                        __FILE__);
}

void __exit rtc_dev_exit(void)
{
        if (rtc_devt)
                unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
}